diff --git a/.gitattributes b/.gitattributes index a13116aaf641e..ff96567ca54db 100644 --- a/.gitattributes +++ b/.gitattributes @@ -33,3 +33,4 @@ src/backend/regex/re_syntax.n -whitespace src/backend/snowball/libstemmer/*.c -whitespace src/backend/utils/mb/Unicode/*-std.txt -whitespace src/include/snowball/libstemmer/* -whitespace +src/timezone/data/* -whitespace diff --git a/COPYRIGHT b/COPYRIGHT index 9660827d7f762..01b6e3601e9fa 100644 --- a/COPYRIGHT +++ b/COPYRIGHT @@ -1,7 +1,7 @@ PostgreSQL Database Management System (formerly known as Postgres, then as Postgres95) -Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group +Portions Copyright (c) 1996-2016, PostgreSQL Global Development Group Portions Copyright (c) 1994, The Regents of the University of California diff --git a/config/Makefile b/config/Makefile index da1283868eaf1..67e7998f55648 100644 --- a/config/Makefile +++ b/config/Makefile @@ -7,9 +7,11 @@ include $(top_builddir)/src/Makefile.global install: all installdirs $(INSTALL_SCRIPT) $(srcdir)/install-sh '$(DESTDIR)$(pgxsdir)/config/install-sh' + $(INSTALL_SCRIPT) $(srcdir)/missing '$(DESTDIR)$(pgxsdir)/config/missing' installdirs: $(MKDIR_P) '$(DESTDIR)$(pgxsdir)/config' uninstall: rm -f '$(DESTDIR)$(pgxsdir)/config/install-sh' + rm -f '$(DESTDIR)$(pgxsdir)/config/missing' diff --git a/config/programs.m4 b/config/programs.m4 index 76c0158973a89..8444a0f9c7daf 100644 --- a/config/programs.m4 +++ b/config/programs.m4 @@ -77,7 +77,7 @@ else echo '%%' > conftest.l if $pgac_candidate -t conftest.l 2>/dev/null | grep FLEX_SCANNER >/dev/null 2>&1; then pgac_flex_version=`$pgac_candidate --version 2>/dev/null` - if echo "$pgac_flex_version" | sed ['s/[.a-z]/ /g'] | $AWK '{ if ([$]1 = 2 && [$]2 = 5 && [$]3 >= 31) exit 0; else exit 1;}' + if echo "$pgac_flex_version" | sed ['s/[.a-z]/ /g'] | $AWK '{ if ([$]1 = 2 && ([$]2 > 5 || ([$]2 = 5 && [$]3 >= 31))) exit 0; else exit 1;}' then pgac_cv_path_flex=$pgac_candidate break 2 diff --git a/config/python.m4 b/config/python.m4 index 7012c536d796e..c4209d0c7590b 100644 --- a/config/python.m4 +++ b/config/python.m4 @@ -95,18 +95,4 @@ AC_SUBST(python_libspec)[]dnl AC_SUBST(python_additional_libs)[]dnl AC_SUBST(python_enable_shared)[]dnl -# threaded python is not supported on OpenBSD -AC_MSG_CHECKING(whether Python is compiled with thread support) -pythreads=`${PYTHON} -c "import sys; print(int('thread' in sys.builtin_module_names))"` -if test "$pythreads" = "1"; then - AC_MSG_RESULT(yes) - case $host_os in - openbsd*) - AC_MSG_ERROR([threaded Python not supported on this platform]) - ;; - esac -else - AC_MSG_RESULT(no) -fi - ])# PGAC_CHECK_PYTHON_EMBED_SETUP diff --git a/config/test_quiet_include.h b/config/test_quiet_include.h index f4fa4d30dd774..732b23149e946 100644 --- a/config/test_quiet_include.h +++ b/config/test_quiet_include.h @@ -7,3 +7,12 @@ fun() { return 0; } + +/* + * "IBM XL C/C++ for AIX, V12.1" miscompiles, for 32-bit, some inline + * expansions of ginCompareItemPointers() "long long" arithmetic. To take + * advantage of inlining, build a 64-bit PostgreSQL. + */ +#if defined(__ILP32__) && defined(__IBMC__) +#error "known inlining bug" +#endif diff --git a/configure b/configure index ed1ff0acb98f7..52047189e8b3a 100755 --- a/configure +++ b/configure @@ -1,6 +1,6 @@ #! /bin/sh # Guess values for system-dependent variables and create Makefiles. -# Generated by GNU Autoconf 2.69 for PostgreSQL 9.4beta1. +# Generated by GNU Autoconf 2.69 for PostgreSQL 9.4.6. # # Report bugs to . 
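The version bump above (9.4beta1 to 9.4.6), together with the PG_VERSION_NUM substitution added further down, is where the build- and run-time version reporting comes from. As a quick cross-check from SQL, the same numbers are visible in a running server; a minimal sketch, assuming a server actually built from this 9.4.6 tree:

    -- Reports '9.4.6' and '90406' respectively on such a build.
    SELECT current_setting('server_version');
    SELECT current_setting('server_version_num');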
# @@ -582,8 +582,8 @@ MAKEFLAGS= # Identity of this package. PACKAGE_NAME='PostgreSQL' PACKAGE_TARNAME='postgresql' -PACKAGE_VERSION='9.4beta1' -PACKAGE_STRING='PostgreSQL 9.4beta1' +PACKAGE_VERSION='9.4.6' +PACKAGE_STRING='PostgreSQL 9.4.6' PACKAGE_BUGREPORT='pgsql-bugs@postgresql.org' PACKAGE_URL='' @@ -627,6 +627,7 @@ ac_includes_default="\ ac_subst_vars='LTLIBOBJS vpath_build +PG_VERSION_NUM PROVE OSX XSLTPROC @@ -728,6 +729,7 @@ CPPFLAGS LDFLAGS CFLAGS CC +enable_tap_tests enable_dtrace DTRACEFLAGS DTRACE @@ -806,6 +808,7 @@ enable_debug enable_profiling enable_coverage enable_dtrace +enable_tap_tests with_blocksize with_segsize with_wal_blocksize @@ -1390,7 +1393,7 @@ if test "$ac_init_help" = "long"; then # Omit some internal or obsolete options to make the list less imposing. # This message is too long to be a string in the A/UX 3.1 sh. cat <<_ACEOF -\`configure' configures PostgreSQL 9.4beta1 to adapt to many kinds of systems. +\`configure' configures PostgreSQL 9.4.6 to adapt to many kinds of systems. Usage: $0 [OPTION]... [VAR=VALUE]... @@ -1455,7 +1458,7 @@ fi if test -n "$ac_init_help"; then case $ac_init_help in - short | recursive ) echo "Configuration of PostgreSQL 9.4beta1:";; + short | recursive ) echo "Configuration of PostgreSQL 9.4.6:";; esac cat <<\_ACEOF @@ -1474,6 +1477,7 @@ Optional Features: --enable-profiling build with profiling enabled --enable-coverage build with coverage testing instrumentation --enable-dtrace build with DTrace support + --enable-tap-tests enable TAP tests (requires Perl and IPC::Run) --enable-depend turn on automatic dependency tracking --enable-cassert enable assertion checks (for debugging) --disable-thread-safety disable thread-safety in client libraries @@ -1603,7 +1607,7 @@ fi test -n "$ac_init_help" && exit $ac_status if $ac_init_version; then cat <<\_ACEOF -PostgreSQL configure 9.4beta1 +PostgreSQL configure 9.4.6 generated by GNU Autoconf 2.69 Copyright (C) 2012 Free Software Foundation, Inc. @@ -2314,7 +2318,7 @@ cat >config.log <<_ACEOF This file contains any messages produced by compilers while running configure, to aid debugging if configure makes a mistake. -It was created by PostgreSQL $as_me 9.4beta1, which was +It was created by PostgreSQL $as_me 9.4.6, which was generated by GNU Autoconf 2.69. Invocation command line was $ $0 $@ @@ -3436,6 +3440,34 @@ fi +# +# TAP tests +# + + +# Check whether --enable-tap-tests was given. +if test "${enable_tap_tests+set}" = set; then : + enableval=$enable_tap_tests; + case $enableval in + yes) + : + ;; + no) + : + ;; + *) + as_fn_error $? "no argument expected for --enable-tap-tests option" "$LINENO" 5 + ;; + esac + +else + enable_tap_tests=no + +fi + + + + # # Block size # @@ -4326,6 +4358,10 @@ else fi fi +# CFLAGS we determined above will be added back at the end +user_CFLAGS=$CFLAGS +CFLAGS="" + # set CFLAGS_VECTOR from the environment, if available if test "$ac_env_CFLAGS_VECTOR_set" = set; then CFLAGS_VECTOR=$ac_env_CFLAGS_VECTOR_value @@ -4337,7 +4373,7 @@ fi # but has its own. Also check other compiler-specific flags here. if test "$GCC" = yes -a "$ICC" = no; then - CFLAGS="$CFLAGS -Wall -Wmissing-prototypes -Wpointer-arith" + CFLAGS="-Wall -Wmissing-prototypes -Wpointer-arith" # These work in some but not all gcc versions { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wdeclaration-after-statement" >&5 $as_echo_n "checking whether $CC supports -Wdeclaration-after-statement... 
" >&6; } @@ -4659,6 +4695,47 @@ if test x"$pgac_cv_prog_cc_cflags__ftree_vectorize" = x"yes"; then CFLAGS_VECTOR="${CFLAGS_VECTOR} -ftree-vectorize" fi + # We want to suppress clang's unhelpful unused-command-line-argument warnings + # but gcc won't complain about unrecognized -Wno-foo switches, so we have to + # test for the positive form and if that works, add the negative form + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -Wunused-command-line-argument" >&5 +$as_echo_n "checking whether $CC supports -Wunused-command-line-argument... " >&6; } +if ${pgac_cv_prog_cc_cflags__Wunused_command_line_argument+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +CFLAGS="$pgac_save_CFLAGS -Wunused-command-line-argument" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_cc_cflags__Wunused_command_line_argument=yes +else + pgac_cv_prog_cc_cflags__Wunused_command_line_argument=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__Wunused_command_line_argument" >&5 +$as_echo "$pgac_cv_prog_cc_cflags__Wunused_command_line_argument" >&6; } +if test x"$pgac_cv_prog_cc_cflags__Wunused_command_line_argument" = x"yes"; then + NOT_THE_CFLAGS="${NOT_THE_CFLAGS} -Wunused-command-line-argument" +fi + + if test -n "$NOT_THE_CFLAGS"; then + CFLAGS="$CFLAGS -Wno-unused-command-line-argument" + fi elif test "$ICC" = yes; then # Intel's compiler has a bug/misoptimization in checking for # division by NAN (NaN == 0), -mp1 fixes it, so add it to the CFLAGS. @@ -4770,6 +4847,41 @@ if test x"$pgac_cv_prog_cc_cflags__qnoansialias" = x"yes"; then CFLAGS="$CFLAGS -qnoansialias" fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -qlonglong" >&5 +$as_echo_n "checking whether $CC supports -qlonglong... " >&6; } +if ${pgac_cv_prog_cc_cflags__qlonglong+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +CFLAGS="$pgac_save_CFLAGS -qlonglong" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_cc_cflags__qlonglong=yes +else + pgac_cv_prog_cc_cflags__qlonglong=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__qlonglong" >&5 +$as_echo "$pgac_cv_prog_cc_cflags__qlonglong" >&6; } +if test x"$pgac_cv_prog_cc_cflags__qlonglong" = x"yes"; then + CFLAGS="$CFLAGS -qlonglong" +fi + elif test "$PORTNAME" = "hpux"; then # On some versions of HP-UX, libm functions do not set errno by default. # Fix that by using +Olibmerrno if the compiler recognizes it. @@ -4844,7 +4956,12 @@ if test "$PORTNAME" = "win32"; then CPPFLAGS="$CPPFLAGS -I$srcdir/src/include/port/win32 -DEXEC_BACKEND" fi -# Check if the compiler still works with the template settings +# Now that we're done automatically adding stuff to CFLAGS, put back the +# user-specified flags (if any) at the end. This lets users override +# the automatic additions. 
+CFLAGS="$CFLAGS $user_CFLAGS" + +# Check if the compiler still works with the final flag settings { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler still works" >&5 $as_echo_n "checking whether the C compiler still works... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext @@ -7126,7 +7243,7 @@ else echo '%%' > conftest.l if $pgac_candidate -t conftest.l 2>/dev/null | grep FLEX_SCANNER >/dev/null 2>&1; then pgac_flex_version=`$pgac_candidate --version 2>/dev/null` - if echo "$pgac_flex_version" | sed 's/[.a-z]/ /g' | $AWK '{ if ($1 = 2 && $2 = 5 && $3 >= 31) exit 0; else exit 1;}' + if echo "$pgac_flex_version" | sed 's/[.a-z]/ /g' | $AWK '{ if ($1 = 2 && ($2 > 5 || ($2 = 5 && $3 >= 31))) exit 0; else exit 1;}' then pgac_cv_path_flex=$pgac_candidate break 2 @@ -7407,23 +7524,6 @@ python_additional_libs=`${PYTHON} -c "import distutils.sysconfig; print(' '.join $as_echo "${python_libspec} ${python_additional_libs}" >&6; } -# threaded python is not supported on OpenBSD -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether Python is compiled with thread support" >&5 -$as_echo_n "checking whether Python is compiled with thread support... " >&6; } -pythreads=`${PYTHON} -c "import sys; print(int('thread' in sys.builtin_module_names))"` -if test "$pythreads" = "1"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } - case $host_os in - openbsd*) - as_fn_error $? "threaded Python not supported on this platform" "$LINENO" 5 - ;; - esac -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - fi @@ -8091,6 +8191,63 @@ if test "$ac_res" != no; then : fi +# Required for thread_test.c on Solaris +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing sched_yield" >&5 +$as_echo_n "checking for library containing sched_yield... " >&6; } +if ${ac_cv_search_sched_yield+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char sched_yield (); +int +main () +{ +return sched_yield (); + ; + return 0; +} +_ACEOF +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_sched_yield=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_sched_yield+:} false; then : + break +fi +done +if ${ac_cv_search_sched_yield+:} false; then : + +else + ac_cv_search_sched_yield=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_sched_yield" >&5 +$as_echo "$ac_cv_search_sched_yield" >&6; } +ac_res=$ac_cv_search_sched_yield +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +fi + # Required for thread_test.c on Solaris 2.5: # Other ports use it too (HP-UX) so test unconditionally { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gethostbyname_r" >&5 @@ -8629,6 +8786,17 @@ else fi fi + for ac_func in SSL_get_current_compression +do : + ac_fn_c_check_func "$LINENO" "SSL_get_current_compression" "ac_cv_func_SSL_get_current_compression" +if test "x$ac_cv_func_SSL_get_current_compression" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SSL_GET_CURRENT_COMPRESSION 1 +_ACEOF + +fi +done + fi if test "$with_pam" = yes ; then @@ -9466,6 +9634,17 @@ fi fi +# PGAC_LDAP_SAFE +# -------------- +# PostgreSQL sometimes loads libldap_r and plain libldap into the same +# process. Check for OpenLDAP versions known not to tolerate doing so; assume +# non-OpenLDAP implementations are safe. The dblink test suite exercises the +# hazardous interaction directly. + + + + + if test "$with_ldap" = yes ; then if test "$PORTNAME" != "win32"; then for ac_header in ldap.h @@ -9482,6 +9661,47 @@ fi done + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for compatible LDAP implementation" >&5 +$as_echo_n "checking for compatible LDAP implementation... " >&6; } +if ${pgac_cv_ldap_safe+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#if !defined(LDAP_VENDOR_VERSION) || \ + (defined(LDAP_API_FEATURE_X_OPENLDAP) && \ + LDAP_VENDOR_VERSION >= 20424 && LDAP_VENDOR_VERSION <= 20431) +choke me +#endif +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_ldap_safe=yes +else + pgac_cv_ldap_safe=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_ldap_safe" >&5 +$as_echo "$pgac_cv_ldap_safe" >&6; } + +if test "$pgac_cv_ldap_safe" != yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: +*** With OpenLDAP versions 2.4.24 through 2.4.31, inclusive, each backend +*** process that loads libpq (via WAL receiver, dblink, or postgres_fdw) and +*** also uses LDAP will crash on exit." >&5 +$as_echo "$as_me: WARNING: +*** With OpenLDAP versions 2.4.24 through 2.4.31, inclusive, each backend +*** process that loads libpq (via WAL receiver, dblink, or postgres_fdw) and +*** also uses LDAP will crash on exit." 
>&2;} +fi else for ac_header in winldap.h do : @@ -11207,7 +11427,7 @@ fi LIBS_including_readline="$LIBS" LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'` -for ac_func in cbrt dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll pstat readlink setproctitle setsid shm_open sigprocmask symlink sync_file_range towlower utime utimes wcstombs wcstombs_l +for ac_func in cbrt dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll pstat pthread_is_threaded_np readlink setproctitle setsid shm_open sigprocmask symlink sync_file_range towlower utime utimes wcstombs wcstombs_l do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" @@ -11650,6 +11870,19 @@ esac fi +ac_fn_c_check_func "$LINENO" "mkdtemp" "ac_cv_func_mkdtemp" +if test "x$ac_cv_func_mkdtemp" = xyes; then : + $as_echo "#define HAVE_MKDTEMP 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" mkdtemp.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS mkdtemp.$ac_objext" + ;; +esac + +fi + ac_fn_c_check_func "$LINENO" "random" "ac_cv_func_random" if test "x$ac_cv_func_random" = xyes; then : $as_echo "#define HAVE_RANDOM 1" >>confdefs.h @@ -11857,7 +12090,7 @@ esac fi -# Win32 support +# Win32 (really MinGW) support if test "$PORTNAME" = "win32"; then ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday" if test "x$ac_cv_func_gettimeofday" = xyes; then : @@ -11873,6 +12106,12 @@ esac fi + case " $LIBOBJS " in + *" dirmod.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" + ;; +esac + case " $LIBOBJS " in *" kill.$ac_objext "* ) ;; *) LIBOBJS="$LIBOBJS kill.$ac_objext" @@ -11938,6 +12177,16 @@ else fi +# Cygwin needs only a bit of that +if test "$PORTNAME" = "cygwin"; then + case " $LIBOBJS " in + *" dirmod.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" + ;; +esac + +fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for sigsetjmp" >&5 $as_echo_n "checking for sigsetjmp... " >&6; } if ${pgac_cv_func_sigsetjmp+:} false; then : @@ -12166,7 +12415,7 @@ if test x"$pgac_cv_var_rl_completion_append_character" = x"yes"; then $as_echo "#define HAVE_RL_COMPLETION_APPEND_CHARACTER 1" >>confdefs.h fi - for ac_func in rl_completion_matches rl_filename_completion_function + for ac_func in rl_completion_matches rl_filename_completion_function rl_reset_screen_size do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" @@ -14579,7 +14828,8 @@ done # # Check for test tools # -for ac_prog in prove +if test "$enable_tap_tests" = yes; then + for ac_prog in prove do # Extract the first word of "$ac_prog", so it can be a program name with args. set dummy $ac_prog; ac_word=$2 @@ -14621,6 +14871,13 @@ fi test -n "$PROVE" && break done + if test -z "$PROVE"; then + as_fn_error $? "prove not found" "$LINENO" 5 + fi + if test -z "$PERL"; then + as_fn_error $? "Perl not found" "$LINENO" 5 + fi +fi # Thread testing @@ -14835,6 +15092,7 @@ _ACEOF + # Begin output steps { $as_echo "$as_me:${as_lineno-$LINENO}: using compiler=$cc_string" >&5 @@ -15393,7 +15651,7 @@ cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 # report actual input values of CONFIG_FILES etc. instead of their # values after options handling. ac_log=" -This file was extended by PostgreSQL $as_me 9.4beta1, which was +This file was extended by PostgreSQL $as_me 9.4.6, which was generated by GNU Autoconf 2.69. 
Invocation command line was CONFIG_FILES = $CONFIG_FILES @@ -15463,7 +15721,7 @@ _ACEOF cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" ac_cs_version="\\ -PostgreSQL config.status 9.4beta1 +PostgreSQL config.status 9.4.6 configured by $0, generated by GNU Autoconf 2.69, with options \\"\$ac_cs_config\\" diff --git a/configure.in b/configure.in index 80df1d76510c7..245c873a1771d 100644 --- a/configure.in +++ b/configure.in @@ -17,7 +17,7 @@ dnl Read the Autoconf manual for details. dnl m4_pattern_forbid(^PGAC_)dnl to catch undefined macros -AC_INIT([PostgreSQL], [9.4beta1], [pgsql-bugs@postgresql.org]) +AC_INIT([PostgreSQL], [9.4.6], [pgsql-bugs@postgresql.org]) m4_if(m4_defn([m4_PACKAGE_VERSION]), [2.69], [], [m4_fatal([Autoconf version 2.69 is required. Untested combinations of 'autoconf' and PostgreSQL versions are not @@ -223,6 +223,13 @@ fi AC_SUBST(DTRACEFLAGS)]) AC_SUBST(enable_dtrace) +# +# TAP tests +# +PGAC_ARG_BOOL(enable, tap-tests, no, + [enable TAP tests (requires Perl and IPC::Run)]) +AC_SUBST(enable_tap_tests) + # # Block size # @@ -402,6 +409,10 @@ else fi fi +# CFLAGS we determined above will be added back at the end +user_CFLAGS=$CFLAGS +CFLAGS="" + # set CFLAGS_VECTOR from the environment, if available if test "$ac_env_CFLAGS_VECTOR_set" = set; then CFLAGS_VECTOR=$ac_env_CFLAGS_VECTOR_value @@ -413,7 +424,7 @@ fi # but has its own. Also check other compiler-specific flags here. if test "$GCC" = yes -a "$ICC" = no; then - CFLAGS="$CFLAGS -Wall -Wmissing-prototypes -Wpointer-arith" + CFLAGS="-Wall -Wmissing-prototypes -Wpointer-arith" # These work in some but not all gcc versions PGAC_PROG_CC_CFLAGS_OPT([-Wdeclaration-after-statement]) PGAC_PROG_CC_CFLAGS_OPT([-Wendif-labels]) @@ -429,6 +440,13 @@ if test "$GCC" = yes -a "$ICC" = no; then # Optimization flags for specific files that benefit from vectorization PGAC_PROG_CC_VAR_OPT(CFLAGS_VECTOR, [-funroll-loops]) PGAC_PROG_CC_VAR_OPT(CFLAGS_VECTOR, [-ftree-vectorize]) + # We want to suppress clang's unhelpful unused-command-line-argument warnings + # but gcc won't complain about unrecognized -Wno-foo switches, so we have to + # test for the positive form and if that works, add the negative form + PGAC_PROG_CC_VAR_OPT(NOT_THE_CFLAGS, [-Wunused-command-line-argument]) + if test -n "$NOT_THE_CFLAGS"; then + CFLAGS="$CFLAGS -Wno-unused-command-line-argument" + fi elif test "$ICC" = yes; then # Intel's compiler has a bug/misoptimization in checking for # division by NAN (NaN == 0), -mp1 fixes it, so add it to the CFLAGS. @@ -438,6 +456,7 @@ elif test "$ICC" = yes; then elif test "$PORTNAME" = "aix"; then # AIX's xlc has to have strict aliasing turned off too PGAC_PROG_CC_CFLAGS_OPT([-qnoansialias]) + PGAC_PROG_CC_CFLAGS_OPT([-qlonglong]) elif test "$PORTNAME" = "hpux"; then # On some versions of HP-UX, libm functions do not set errno by default. # Fix that by using +Olibmerrno if the compiler recognizes it. @@ -476,7 +495,12 @@ if test "$PORTNAME" = "win32"; then CPPFLAGS="$CPPFLAGS -I$srcdir/src/include/port/win32 -DEXEC_BACKEND" fi -# Check if the compiler still works with the template settings +# Now that we're done automatically adding stuff to CFLAGS, put back the +# user-specified flags (if any) at the end. This lets users override +# the automatic additions. 
+CFLAGS="$CFLAGS $user_CFLAGS" + +# Check if the compiler still works with the final flag settings AC_MSG_CHECKING([whether the C compiler still works]) AC_TRY_LINK([], [return 0;], [AC_MSG_RESULT(yes)], @@ -906,6 +930,8 @@ AC_SEARCH_LIBS(shm_open, rt) AC_SEARCH_LIBS(shm_unlink, rt) # Solaris: AC_SEARCH_LIBS(fdatasync, [rt posix4]) +# Required for thread_test.c on Solaris +AC_SEARCH_LIBS(sched_yield, rt) # Required for thread_test.c on Solaris 2.5: # Other ports use it too (HP-UX) so test unconditionally AC_SEARCH_LIBS(gethostbyname_r, nsl) @@ -955,6 +981,7 @@ if test "$with_openssl" = yes ; then AC_SEARCH_LIBS(CRYPTO_new_ex_data, eay32 crypto, [], [AC_MSG_ERROR([library 'eay32' or 'crypto' is required for OpenSSL])]) AC_SEARCH_LIBS(SSL_library_init, ssleay32 ssl, [], [AC_MSG_ERROR([library 'ssleay32' or 'ssl' is required for OpenSSL])]) fi + AC_CHECK_FUNCS([SSL_get_current_compression]) fi if test "$with_pam" = yes ; then @@ -1097,10 +1124,39 @@ if test "$with_libxslt" = yes ; then AC_CHECK_HEADER(libxslt/xslt.h, [], [AC_MSG_ERROR([header file is required for XSLT support])]) fi +# PGAC_LDAP_SAFE +# -------------- +# PostgreSQL sometimes loads libldap_r and plain libldap into the same +# process. Check for OpenLDAP versions known not to tolerate doing so; assume +# non-OpenLDAP implementations are safe. The dblink test suite exercises the +# hazardous interaction directly. + +AC_DEFUN([PGAC_LDAP_SAFE], +[AC_CACHE_CHECK([for compatible LDAP implementation], [pgac_cv_ldap_safe], +[AC_COMPILE_IFELSE([AC_LANG_PROGRAM( +[#include +#if !defined(LDAP_VENDOR_VERSION) || \ + (defined(LDAP_API_FEATURE_X_OPENLDAP) && \ + LDAP_VENDOR_VERSION >= 20424 && LDAP_VENDOR_VERSION <= 20431) +choke me +#endif], [])], +[pgac_cv_ldap_safe=yes], +[pgac_cv_ldap_safe=no])]) + +if test "$pgac_cv_ldap_safe" != yes; then + AC_MSG_WARN([ +*** With OpenLDAP versions 2.4.24 through 2.4.31, inclusive, each backend +*** process that loads libpq (via WAL receiver, dblink, or postgres_fdw) and +*** also uses LDAP will crash on exit.]) +fi]) + + + if test "$with_ldap" = yes ; then if test "$PORTNAME" != "win32"; then AC_CHECK_HEADERS(ldap.h, [], [AC_MSG_ERROR([header file is required for LDAP])]) + PGAC_LDAP_SAFE else AC_CHECK_HEADERS(winldap.h, [], [AC_MSG_ERROR([header file is required for LDAP])], @@ -1243,7 +1299,7 @@ PGAC_FUNC_GETTIMEOFDAY_1ARG LIBS_including_readline="$LIBS" LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'` -AC_CHECK_FUNCS([cbrt dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll pstat readlink setproctitle setsid shm_open sigprocmask symlink sync_file_range towlower utime utimes wcstombs wcstombs_l]) +AC_CHECK_FUNCS([cbrt dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll pstat pthread_is_threaded_np readlink setproctitle setsid shm_open sigprocmask symlink sync_file_range towlower utime utimes wcstombs wcstombs_l]) AC_REPLACE_FUNCS(fseeko) case $host_os in @@ -1357,7 +1413,7 @@ else AC_CHECK_FUNCS([fpclass fp_class fp_class_d class], [break]) fi -AC_REPLACE_FUNCS([crypt fls getopt getrusage inet_aton random rint srandom strerror strlcat strlcpy]) +AC_REPLACE_FUNCS([crypt fls getopt getrusage inet_aton mkdtemp random rint srandom strerror strlcat strlcpy]) case $host_os in @@ -1405,9 +1461,10 @@ if test "$PORTNAME" = "win32"; then AC_LIBOBJ(getopt_long) fi -# Win32 support +# Win32 (really MinGW) support if test "$PORTNAME" = "win32"; then AC_REPLACE_FUNCS(gettimeofday) + AC_LIBOBJ(dirmod) AC_LIBOBJ(kill) AC_LIBOBJ(open) AC_LIBOBJ(system) @@ 
-1428,6 +1485,11 @@ else AC_SUBST(have_win32_dbghelp,no) fi +# Cygwin needs only a bit of that +if test "$PORTNAME" = "cygwin"; then + AC_LIBOBJ(dirmod) +fi + dnl Cannot use AC_CHECK_FUNC because sigsetjmp may be a macro dnl (especially on GNU libc) dnl See also comments in c.h. @@ -1483,7 +1545,7 @@ LIBS="$LIBS_including_readline" if test "$with_readline" = yes; then PGAC_VAR_RL_COMPLETION_APPEND_CHARACTER - AC_CHECK_FUNCS([rl_completion_matches rl_filename_completion_function]) + AC_CHECK_FUNCS([rl_completion_matches rl_filename_completion_function rl_reset_screen_size]) AC_CHECK_FUNCS([append_history history_truncate_file]) fi @@ -1878,7 +1940,15 @@ AC_CHECK_PROGS(OSX, [osx sgml2xml sx]) # # Check for test tools # -AC_CHECK_PROGS(PROVE, prove) +if test "$enable_tap_tests" = yes; then + AC_CHECK_PROGS(PROVE, prove) + if test -z "$PROVE"; then + AC_MSG_ERROR([prove not found]) + fi + if test -z "$PERL"; then + AC_MSG_ERROR([Perl not found]) + fi +fi # Thread testing @@ -1953,6 +2023,7 @@ AC_DEFINE_UNQUOTED(PG_VERSION_STR, tr '.' ' ' | $AWK '{printf "%d%02d%02d", $1, $2, (NF >= 3) ? $3 : 0}'`"] AC_DEFINE_UNQUOTED(PG_VERSION_NUM, $PG_VERSION_NUM, [PostgreSQL version as a number]) +AC_SUBST(PG_VERSION_NUM) # Begin output steps diff --git a/contrib/auto_explain/auto_explain.c b/contrib/auto_explain/auto_explain.c index cbbd25753f72d..d5fa6ac2d3136 100644 --- a/contrib/auto_explain/auto_explain.c +++ b/contrib/auto_explain/auto_explain.c @@ -26,7 +26,7 @@ static bool auto_explain_log_analyze = false; static bool auto_explain_log_verbose = false; static bool auto_explain_log_buffers = false; static bool auto_explain_log_triggers = false; -static bool auto_explain_log_timing = false; +static bool auto_explain_log_timing = true; static int auto_explain_log_format = EXPLAIN_FORMAT_TEXT; static bool auto_explain_log_nested_statements = false; @@ -200,8 +200,6 @@ explain_ExecutorStart(QueryDesc *queryDesc, int eflags) queryDesc->instrument_options |= INSTRUMENT_TIMER; else queryDesc->instrument_options |= INSTRUMENT_ROWS; - - if (auto_explain_log_buffers) queryDesc->instrument_options |= INSTRUMENT_BUFFERS; } @@ -302,6 +300,8 @@ explain_ExecutorEnd(QueryDesc *queryDesc) es.analyze = (queryDesc->instrument_options && auto_explain_log_analyze); es.verbose = auto_explain_log_verbose; es.buffers = (es.analyze && auto_explain_log_buffers); + es.timing = (es.analyze && auto_explain_log_timing); + es.summary = es.analyze; es.format = auto_explain_log_format; ExplainBeginOutput(&es); diff --git a/contrib/btree_gin/btree_gin--unpackaged--1.0.sql b/contrib/btree_gin/btree_gin--unpackaged--1.0.sql index 8dfafc1e8b925..3dae2dd38ff44 100644 --- a/contrib/btree_gin/btree_gin--unpackaged--1.0.sql +++ b/contrib/btree_gin/btree_gin--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/btree_gin/btree_gin--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION btree_gin" to load this file. \quit +\echo Use "CREATE EXTENSION btree_gin FROM unpackaged" to load this file. 
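The auto_explain changes above flip the default of auto_explain.log_timing to on and make the logged plan honour it (plus an overall summary) whenever log_analyze is enabled. A minimal sketch of driving those settings in a session; the query is arbitrary and any table works:

    LOAD 'auto_explain';
    SET auto_explain.log_min_duration = 0;  -- log a plan for every statement
    SET auto_explain.log_analyze = on;      -- prerequisite for timing/buffer/summary output
    SET auto_explain.log_timing = off;      -- override the new default if per-node timing is too costly
    SELECT count(*) FROM pg_class;          -- plan shows up in the server log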
\quit ALTER EXTENSION btree_gin ADD function gin_btree_consistent(internal,smallint,anyelement,integer,internal,internal); ALTER EXTENSION btree_gin ADD function gin_extract_value_int2(smallint,internal); diff --git a/contrib/btree_gist/btree_gist--unpackaged--1.0.sql b/contrib/btree_gist/btree_gist--unpackaged--1.0.sql index 838ad7ec1e1dd..e9913ab7f2521 100644 --- a/contrib/btree_gist/btree_gist--unpackaged--1.0.sql +++ b/contrib/btree_gist/btree_gist--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/btree_gist/btree_gist--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION btree_gist" to load this file. \quit +\echo Use "CREATE EXTENSION btree_gist FROM unpackaged" to load this file. \quit ALTER EXTENSION btree_gist ADD type gbtreekey4; ALTER EXTENSION btree_gist ADD function gbtreekey4_in(cstring); diff --git a/contrib/btree_gist/btree_ts.c b/contrib/btree_gist/btree_ts.c index a13dcc8beaa3d..88ce35b2cb61d 100644 --- a/contrib/btree_gist/btree_ts.c +++ b/contrib/btree_gist/btree_ts.c @@ -200,27 +200,11 @@ tstz_dist(PG_FUNCTION_ARGS) **************************************************/ -static Timestamp +static inline Timestamp tstz_to_ts_gmt(TimestampTz ts) { - Timestamp gmt; - int val, - tz; - - gmt = ts; - DecodeSpecial(0, "gmt", &val); - - if (ts < DT_NOEND && ts > DT_NOBEGIN) - { - tz = val * 60; - -#ifdef HAVE_INT64_TIMESTAMP - gmt -= (tz * INT64CONST(1000000)); -#else - gmt -= tz; -#endif - } - return gmt; + /* No timezone correction is needed, since GMT is offset 0 by definition */ + return (Timestamp) ts; } @@ -378,7 +362,7 @@ gbt_ts_penalty(PG_FUNCTION_ARGS) newdbl[2]; /* - * We are allways using "double" timestamps here. Precision should be good + * We are always using "double" timestamps here. Precision should be good * enough. */ orgdbl[0] = ((double) origentry->lower); diff --git a/contrib/btree_gist/btree_utils_var.c b/contrib/btree_gist/btree_utils_var.c index b7dd060a944ba..c4340223c0f28 100644 --- a/contrib/btree_gist/btree_utils_var.c +++ b/contrib/btree_gist/btree_utils_var.c @@ -51,7 +51,7 @@ gbt_var_decompress(PG_FUNCTION_ARGS) PG_RETURN_POINTER(entry); } -/* Returns a better readable representaion of variable key ( sets pointer ) */ +/* Returns a better readable representation of variable key ( sets pointer ) */ GBT_VARKEY_R gbt_var_key_readable(const GBT_VARKEY *k) { diff --git a/contrib/chkpass/chkpass--1.0.sql b/contrib/chkpass/chkpass--1.0.sql index d1fbedc4468a1..406a61924cc46 100644 --- a/contrib/chkpass/chkpass--1.0.sql +++ b/contrib/chkpass/chkpass--1.0.sql @@ -10,12 +10,15 @@ CREATE FUNCTION chkpass_in(cstring) RETURNS chkpass AS 'MODULE_PATHNAME' - LANGUAGE C STRICT; + LANGUAGE C STRICT VOLATILE; +-- Note: chkpass_in actually is volatile, because of its use of random(). +-- In hindsight that was a bad idea, but there's no way to change it without +-- breaking some usage patterns. CREATE FUNCTION chkpass_out(chkpass) RETURNS cstring AS 'MODULE_PATHNAME' - LANGUAGE C STRICT; + LANGUAGE C STRICT IMMUTABLE; CREATE TYPE chkpass ( internallength = 16, diff --git a/contrib/chkpass/chkpass--unpackaged--1.0.sql b/contrib/chkpass/chkpass--unpackaged--1.0.sql index 7bbfb142a6a87..8bdecddfa5be8 100644 --- a/contrib/chkpass/chkpass--unpackaged--1.0.sql +++ b/contrib/chkpass/chkpass--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/chkpass/chkpass--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION chkpass" to load this file. 
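The chkpass--1.0.sql change above documents that chkpass_in really is VOLATILE because it salts with random(). A small sketch of the visible effect; the table and passwords are invented for illustration, and the = operator between chkpass and text is assumed to behave as the module defines it (comparison through crypt()):

    CREATE EXTENSION chkpass;
    CREATE TABLE accounts (login text, pw chkpass);
    INSERT INTO accounts VALUES ('alice', 'secret'), ('bob', 'secret');
    SELECT login, pw FROM accounts;                    -- two different stored strings (random salt)
    SELECT login, pw = 'secret' AS ok FROM accounts;   -- but both rows compare true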
\quit +\echo Use "CREATE EXTENSION chkpass FROM unpackaged" to load this file. \quit ALTER EXTENSION chkpass ADD type chkpass; ALTER EXTENSION chkpass ADD function chkpass_in(cstring); diff --git a/contrib/citext/Makefile b/contrib/citext/Makefile index 65942528dd030..e2ca7bb32c895 100644 --- a/contrib/citext/Makefile +++ b/contrib/citext/Makefile @@ -3,7 +3,9 @@ MODULES = citext EXTENSION = citext -DATA = citext--1.0.sql citext--unpackaged--1.0.sql +DATA = citext--1.0.sql citext--1.1.sql citext--1.0--1.1.sql \ + citext--1.1--1.0.sql citext--unpackaged--1.0.sql +PGFILEDESC = "citext - case-insensitive character string data type" REGRESS = citext diff --git a/contrib/citext/citext--1.0--1.1.sql b/contrib/citext/citext--1.0--1.1.sql new file mode 100644 index 0000000000000..e06627e0258cc --- /dev/null +++ b/contrib/citext/citext--1.0--1.1.sql @@ -0,0 +1,21 @@ +/* contrib/citext/citext--1.0--1.1.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION citext UPDATE TO '1.1'" to load this file. \quit + +/* First we have to remove them from the extension */ +ALTER EXTENSION citext DROP FUNCTION regexp_matches( citext, citext ); +ALTER EXTENSION citext DROP FUNCTION regexp_matches( citext, citext, text ); + +/* Then we can drop them */ +DROP FUNCTION regexp_matches( citext, citext ); +DROP FUNCTION regexp_matches( citext, citext, text ); + +/* Now redefine */ +CREATE FUNCTION regexp_matches( citext, citext ) RETURNS SETOF TEXT[] AS $$ + SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, 'i' ); +$$ LANGUAGE SQL IMMUTABLE STRICT ROWS 1; + +CREATE FUNCTION regexp_matches( citext, citext, text ) RETURNS SETOF TEXT[] AS $$ + SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, CASE WHEN pg_catalog.strpos($3, 'c') = 0 THEN $3 || 'i' ELSE $3 END ); +$$ LANGUAGE SQL IMMUTABLE STRICT ROWS 10; diff --git a/contrib/citext/citext--1.1--1.0.sql b/contrib/citext/citext--1.1--1.0.sql new file mode 100644 index 0000000000000..81f243c8421d8 --- /dev/null +++ b/contrib/citext/citext--1.1--1.0.sql @@ -0,0 +1,21 @@ +/* contrib/citext/citext--1.1--1.0.sql */ + +-- complain if script is sourced in psql, rather than via ALTER EXTENSION +\echo Use "ALTER EXTENSION citext UPDATE TO '1.0'" to load this file. \quit + +/* First we have to remove them from the extension */ +ALTER EXTENSION citext DROP FUNCTION regexp_matches( citext, citext ); +ALTER EXTENSION citext DROP FUNCTION regexp_matches( citext, citext, text ); + +/* Then we can drop them */ +DROP FUNCTION regexp_matches( citext, citext ); +DROP FUNCTION regexp_matches( citext, citext, text ); + +/* Now redefine */ +CREATE FUNCTION regexp_matches( citext, citext ) RETURNS TEXT[] AS $$ + SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, 'i' ); +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION regexp_matches( citext, citext, text ) RETURNS TEXT[] AS $$ + SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, CASE WHEN pg_catalog.strpos($3, 'c') = 0 THEN $3 || 'i' ELSE $3 END ); +$$ LANGUAGE SQL IMMUTABLE STRICT; diff --git a/contrib/citext/citext--1.1.sql b/contrib/citext/citext--1.1.sql new file mode 100644 index 0000000000000..9ea7c64709a10 --- /dev/null +++ b/contrib/citext/citext--1.1.sql @@ -0,0 +1,489 @@ +/* contrib/citext/citext--1.1.sql */ + +-- complain if script is sourced in psql, rather than via CREATE EXTENSION +\echo Use "CREATE EXTENSION citext" to load this file. 
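The citext--1.0--1.1.sql script above re-declares the two regexp_matches() wrappers as SETOF TEXT[], so a 'g' match can return every hit rather than only the first array. A minimal sketch of the visible difference after updating; the literal is arbitrary:

    ALTER EXTENSION citext UPDATE TO '1.1';
    SELECT regexp_matches('Abracadabra'::citext, 'a(b)'::citext, 'g');
    -- 1.1: two rows, {b} and {b} (the implicit 'i' flag also matches the leading 'A')
    -- 1.0: only the first array came back, since the wrapper returned a plain TEXT[]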
\quit + +-- +-- PostgreSQL code for CITEXT. +-- +-- Most I/O functions, and a few others, piggyback on the "text" type +-- functions via the implicit cast to text. +-- + +-- +-- Shell type to keep things a bit quieter. +-- + +CREATE TYPE citext; + +-- +-- Input and output functions. +-- +CREATE FUNCTION citextin(cstring) +RETURNS citext +AS 'textin' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION citextout(citext) +RETURNS cstring +AS 'textout' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION citextrecv(internal) +RETURNS citext +AS 'textrecv' +LANGUAGE internal STABLE STRICT; + +CREATE FUNCTION citextsend(citext) +RETURNS bytea +AS 'textsend' +LANGUAGE internal STABLE STRICT; + +-- +-- The type itself. +-- + +CREATE TYPE citext ( + INPUT = citextin, + OUTPUT = citextout, + RECEIVE = citextrecv, + SEND = citextsend, + INTERNALLENGTH = VARIABLE, + STORAGE = extended, + -- make it a non-preferred member of string type category + CATEGORY = 'S', + PREFERRED = false, + COLLATABLE = true +); + +-- +-- Type casting functions for those situations where the I/O casts don't +-- automatically kick in. +-- + +CREATE FUNCTION citext(bpchar) +RETURNS citext +AS 'rtrim1' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION citext(boolean) +RETURNS citext +AS 'booltext' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION citext(inet) +RETURNS citext +AS 'network_show' +LANGUAGE internal IMMUTABLE STRICT; + +-- +-- Implicit and assignment type casts. +-- + +CREATE CAST (citext AS text) WITHOUT FUNCTION AS IMPLICIT; +CREATE CAST (citext AS varchar) WITHOUT FUNCTION AS IMPLICIT; +CREATE CAST (citext AS bpchar) WITHOUT FUNCTION AS ASSIGNMENT; +CREATE CAST (text AS citext) WITHOUT FUNCTION AS ASSIGNMENT; +CREATE CAST (varchar AS citext) WITHOUT FUNCTION AS ASSIGNMENT; +CREATE CAST (bpchar AS citext) WITH FUNCTION citext(bpchar) AS ASSIGNMENT; +CREATE CAST (boolean AS citext) WITH FUNCTION citext(boolean) AS ASSIGNMENT; +CREATE CAST (inet AS citext) WITH FUNCTION citext(inet) AS ASSIGNMENT; + +-- +-- Operator Functions. +-- + +CREATE FUNCTION citext_eq( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION citext_ne( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION citext_lt( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION citext_le( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION citext_gt( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION citext_ge( citext, citext ) +RETURNS bool +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +-- +-- Operators. 
+-- + +CREATE OPERATOR = ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + COMMUTATOR = =, + NEGATOR = <>, + PROCEDURE = citext_eq, + RESTRICT = eqsel, + JOIN = eqjoinsel, + HASHES, + MERGES +); + +CREATE OPERATOR <> ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + NEGATOR = =, + COMMUTATOR = <>, + PROCEDURE = citext_ne, + RESTRICT = neqsel, + JOIN = neqjoinsel +); + +CREATE OPERATOR < ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + NEGATOR = >=, + COMMUTATOR = >, + PROCEDURE = citext_lt, + RESTRICT = scalarltsel, + JOIN = scalarltjoinsel +); + +CREATE OPERATOR <= ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + NEGATOR = >, + COMMUTATOR = >=, + PROCEDURE = citext_le, + RESTRICT = scalarltsel, + JOIN = scalarltjoinsel +); + +CREATE OPERATOR >= ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + NEGATOR = <, + COMMUTATOR = <=, + PROCEDURE = citext_ge, + RESTRICT = scalargtsel, + JOIN = scalargtjoinsel +); + +CREATE OPERATOR > ( + LEFTARG = CITEXT, + RIGHTARG = CITEXT, + NEGATOR = <=, + COMMUTATOR = <, + PROCEDURE = citext_gt, + RESTRICT = scalargtsel, + JOIN = scalargtjoinsel +); + +-- +-- Support functions for indexing. +-- + +CREATE FUNCTION citext_cmp(citext, citext) +RETURNS int4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +CREATE FUNCTION citext_hash(citext) +RETURNS int4 +AS 'MODULE_PATHNAME' +LANGUAGE C STRICT IMMUTABLE; + +-- +-- The btree indexing operator class. +-- + +CREATE OPERATOR CLASS citext_ops +DEFAULT FOR TYPE CITEXT USING btree AS + OPERATOR 1 < (citext, citext), + OPERATOR 2 <= (citext, citext), + OPERATOR 3 = (citext, citext), + OPERATOR 4 >= (citext, citext), + OPERATOR 5 > (citext, citext), + FUNCTION 1 citext_cmp(citext, citext); + +-- +-- The hash indexing operator class. +-- + +CREATE OPERATOR CLASS citext_ops +DEFAULT FOR TYPE citext USING hash AS + OPERATOR 1 = (citext, citext), + FUNCTION 1 citext_hash(citext); + +-- +-- Aggregates. +-- + +CREATE FUNCTION citext_smaller(citext, citext) +RETURNS citext +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE FUNCTION citext_larger(citext, citext) +RETURNS citext +AS 'MODULE_PATHNAME' +LANGUAGE C IMMUTABLE STRICT; + +CREATE AGGREGATE min(citext) ( + SFUNC = citext_smaller, + STYPE = citext, + SORTOP = < +); + +CREATE AGGREGATE max(citext) ( + SFUNC = citext_larger, + STYPE = citext, + SORTOP = > +); + +-- +-- CITEXT pattern matching. 
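Everything above, from the comparison operators through the btree/hash operator classes and the min/max aggregates, is what lets citext act as an ordinary key type. A minimal usage sketch; the table and values are invented for illustration:

    CREATE EXTENSION citext;
    CREATE TABLE users (nick citext PRIMARY KEY);
    INSERT INTO users VALUES ('Larry');
    SELECT 'LARRY'::citext = 'larry'::citext;   -- true
    SELECT * FROM users WHERE nick = 'lArRy';   -- finds the row; the PK index sorts with citext_cmp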
+-- + +CREATE FUNCTION texticlike(citext, citext) +RETURNS bool AS 'texticlike' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION texticnlike(citext, citext) +RETURNS bool AS 'texticnlike' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION texticregexeq(citext, citext) +RETURNS bool AS 'texticregexeq' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION texticregexne(citext, citext) +RETURNS bool AS 'texticregexne' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE OPERATOR ~ ( + PROCEDURE = texticregexeq, + LEFTARG = citext, + RIGHTARG = citext, + NEGATOR = !~, + RESTRICT = icregexeqsel, + JOIN = icregexeqjoinsel +); + +CREATE OPERATOR ~* ( + PROCEDURE = texticregexeq, + LEFTARG = citext, + RIGHTARG = citext, + NEGATOR = !~*, + RESTRICT = icregexeqsel, + JOIN = icregexeqjoinsel +); + +CREATE OPERATOR !~ ( + PROCEDURE = texticregexne, + LEFTARG = citext, + RIGHTARG = citext, + NEGATOR = ~, + RESTRICT = icregexnesel, + JOIN = icregexnejoinsel +); + +CREATE OPERATOR !~* ( + PROCEDURE = texticregexne, + LEFTARG = citext, + RIGHTARG = citext, + NEGATOR = ~*, + RESTRICT = icregexnesel, + JOIN = icregexnejoinsel +); + +CREATE OPERATOR ~~ ( + PROCEDURE = texticlike, + LEFTARG = citext, + RIGHTARG = citext, + NEGATOR = !~~, + RESTRICT = iclikesel, + JOIN = iclikejoinsel +); + +CREATE OPERATOR ~~* ( + PROCEDURE = texticlike, + LEFTARG = citext, + RIGHTARG = citext, + NEGATOR = !~~*, + RESTRICT = iclikesel, + JOIN = iclikejoinsel +); + +CREATE OPERATOR !~~ ( + PROCEDURE = texticnlike, + LEFTARG = citext, + RIGHTARG = citext, + NEGATOR = ~~, + RESTRICT = icnlikesel, + JOIN = icnlikejoinsel +); + +CREATE OPERATOR !~~* ( + PROCEDURE = texticnlike, + LEFTARG = citext, + RIGHTARG = citext, + NEGATOR = ~~*, + RESTRICT = icnlikesel, + JOIN = icnlikejoinsel +); + +-- +-- Matching citext to text. 
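The operators above bind citext to the case-insensitive texticlike/texticregexeq internals, so plain LIKE and POSIX regex comparisons fold case without needing ILIKE or ~*. A small sketch; the strings are arbitrary:

    SELECT 'Stratford-on-Avon'::citext LIKE '%avon';   -- true
    SELECT 'Stratford-on-Avon'::citext ~ 'avon$';      -- true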
+-- + +CREATE FUNCTION texticlike(citext, text) +RETURNS bool AS 'texticlike' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION texticnlike(citext, text) +RETURNS bool AS 'texticnlike' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION texticregexeq(citext, text) +RETURNS bool AS 'texticregexeq' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE FUNCTION texticregexne(citext, text) +RETURNS bool AS 'texticregexne' +LANGUAGE internal IMMUTABLE STRICT; + +CREATE OPERATOR ~ ( + PROCEDURE = texticregexeq, + LEFTARG = citext, + RIGHTARG = text, + NEGATOR = !~, + RESTRICT = icregexeqsel, + JOIN = icregexeqjoinsel +); + +CREATE OPERATOR ~* ( + PROCEDURE = texticregexeq, + LEFTARG = citext, + RIGHTARG = text, + NEGATOR = !~*, + RESTRICT = icregexeqsel, + JOIN = icregexeqjoinsel +); + +CREATE OPERATOR !~ ( + PROCEDURE = texticregexne, + LEFTARG = citext, + RIGHTARG = text, + NEGATOR = ~, + RESTRICT = icregexnesel, + JOIN = icregexnejoinsel +); + +CREATE OPERATOR !~* ( + PROCEDURE = texticregexne, + LEFTARG = citext, + RIGHTARG = text, + NEGATOR = ~*, + RESTRICT = icregexnesel, + JOIN = icregexnejoinsel +); + +CREATE OPERATOR ~~ ( + PROCEDURE = texticlike, + LEFTARG = citext, + RIGHTARG = text, + NEGATOR = !~~, + RESTRICT = iclikesel, + JOIN = iclikejoinsel +); + +CREATE OPERATOR ~~* ( + PROCEDURE = texticlike, + LEFTARG = citext, + RIGHTARG = text, + NEGATOR = !~~*, + RESTRICT = iclikesel, + JOIN = iclikejoinsel +); + +CREATE OPERATOR !~~ ( + PROCEDURE = texticnlike, + LEFTARG = citext, + RIGHTARG = text, + NEGATOR = ~~, + RESTRICT = icnlikesel, + JOIN = icnlikejoinsel +); + +CREATE OPERATOR !~~* ( + PROCEDURE = texticnlike, + LEFTARG = citext, + RIGHTARG = text, + NEGATOR = ~~*, + RESTRICT = icnlikesel, + JOIN = icnlikejoinsel +); + +-- +-- Matching citext in string comparison functions. +-- XXX TODO Ideally these would be implemented in C. 
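The SQL-language wrappers that follow re-point strpos(), replace(), split_part(), translate() and the regexp family at their text counterparts with case folded. A small sketch of the resulting behaviour; the literals are arbitrary:

    SELECT strpos('Hello, World'::citext, 'WORLD'::citext);       -- 8
    SELECT replace('Dog bog'::citext, 'O'::citext, 'a'::citext);  -- 'Dag bag'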
+-- + +CREATE FUNCTION regexp_matches( citext, citext ) RETURNS SETOF TEXT[] AS $$ + SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, 'i' ); +$$ LANGUAGE SQL IMMUTABLE STRICT ROWS 1; + +CREATE FUNCTION regexp_matches( citext, citext, text ) RETURNS SETOF TEXT[] AS $$ + SELECT pg_catalog.regexp_matches( $1::pg_catalog.text, $2::pg_catalog.text, CASE WHEN pg_catalog.strpos($3, 'c') = 0 THEN $3 || 'i' ELSE $3 END ); +$$ LANGUAGE SQL IMMUTABLE STRICT ROWS 10; + +CREATE FUNCTION regexp_replace( citext, citext, text ) returns TEXT AS $$ + SELECT pg_catalog.regexp_replace( $1::pg_catalog.text, $2::pg_catalog.text, $3, 'i'); +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION regexp_replace( citext, citext, text, text ) returns TEXT AS $$ + SELECT pg_catalog.regexp_replace( $1::pg_catalog.text, $2::pg_catalog.text, $3, CASE WHEN pg_catalog.strpos($4, 'c') = 0 THEN $4 || 'i' ELSE $4 END); +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION regexp_split_to_array( citext, citext ) RETURNS TEXT[] AS $$ + SELECT pg_catalog.regexp_split_to_array( $1::pg_catalog.text, $2::pg_catalog.text, 'i' ); +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION regexp_split_to_array( citext, citext, text ) RETURNS TEXT[] AS $$ + SELECT pg_catalog.regexp_split_to_array( $1::pg_catalog.text, $2::pg_catalog.text, CASE WHEN pg_catalog.strpos($3, 'c') = 0 THEN $3 || 'i' ELSE $3 END ); +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION regexp_split_to_table( citext, citext ) RETURNS SETOF TEXT AS $$ + SELECT pg_catalog.regexp_split_to_table( $1::pg_catalog.text, $2::pg_catalog.text, 'i' ); +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION regexp_split_to_table( citext, citext, text ) RETURNS SETOF TEXT AS $$ + SELECT pg_catalog.regexp_split_to_table( $1::pg_catalog.text, $2::pg_catalog.text, CASE WHEN pg_catalog.strpos($3, 'c') = 0 THEN $3 || 'i' ELSE $3 END ); +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION strpos( citext, citext ) RETURNS INT AS $$ + SELECT pg_catalog.strpos( pg_catalog.lower( $1::pg_catalog.text ), pg_catalog.lower( $2::pg_catalog.text ) ); +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION replace( citext, citext, citext ) RETURNS TEXT AS $$ + SELECT pg_catalog.regexp_replace( $1::pg_catalog.text, pg_catalog.regexp_replace($2::pg_catalog.text, '([^a-zA-Z_0-9])', E'\\\\\\1', 'g'), $3::pg_catalog.text, 'gi' ); +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION split_part( citext, citext, int ) RETURNS TEXT AS $$ + SELECT (pg_catalog.regexp_split_to_array( $1::pg_catalog.text, pg_catalog.regexp_replace($2::pg_catalog.text, '([^a-zA-Z_0-9])', E'\\\\\\1', 'g'), 'i'))[$3]; +$$ LANGUAGE SQL IMMUTABLE STRICT; + +CREATE FUNCTION translate( citext, citext, text ) RETURNS TEXT AS $$ + SELECT pg_catalog.translate( pg_catalog.translate( $1::pg_catalog.text, pg_catalog.lower($2::pg_catalog.text), $3), pg_catalog.upper($2::pg_catalog.text), $3); +$$ LANGUAGE SQL IMMUTABLE STRICT; diff --git a/contrib/citext/citext--unpackaged--1.0.sql b/contrib/citext/citext--unpackaged--1.0.sql index 102743c5281d5..ef6d6b0639601 100644 --- a/contrib/citext/citext--unpackaged--1.0.sql +++ b/contrib/citext/citext--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/citext/citext--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION citext" to load this file. \quit +\echo Use "CREATE EXTENSION citext FROM unpackaged" to load this file. 
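In the unpackaged-upgrade script below, each indcollation slot has to be patched through the oidvector's text representation, because SQL offers no assignment to an individual oidvector element. A tiny illustration of the regexp_replace trick on a made-up two-column value (non-collatable first column, citext second column, 100 being the default collation's OID):

    SELECT pg_catalog.regexp_replace('0 0', E'^(\\d+) 0', E'\\1 100')::pg_catalog.oidvector;
    -- gives '0 100': only the second position picks up collation 100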
\quit ALTER EXTENSION citext ADD type citext; ALTER EXTENSION citext ADD function citextin(cstring); @@ -105,7 +105,12 @@ UPDATE pg_catalog.pg_attribute SET attcollation = 100 FROM typeoids WHERE atttypid = typeoids.typoid; -UPDATE pg_catalog.pg_index SET indcollation[0] = 100 +-- Updating the index indcollations is particularly tedious, but since we +-- don't currently allow SQL assignment to individual elements of oidvectors, +-- there's little choice. + +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, '^0', '100')::pg_catalog.oidvector WHERE indclass[0] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -115,7 +120,8 @@ WHERE indclass[0] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[1] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[1] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -125,7 +131,8 @@ WHERE indclass[1] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[2] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[2] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -135,7 +142,8 @@ WHERE indclass[2] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[3] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[3] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -145,7 +153,8 @@ WHERE indclass[3] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[4] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[4] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -155,7 +164,8 @@ WHERE indclass[4] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[5] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[5] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -165,7 +175,8 @@ WHERE indclass[5] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[6] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+ \\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[6] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION @@ -175,7 +186,8 @@ WHERE indclass[6] IN ( WHERE opcintype = typeoids.typoid ); -UPDATE pg_catalog.pg_index SET indcollation[7] = 100 +UPDATE pg_catalog.pg_index SET indcollation = + pg_catalog.regexp_replace(indcollation::pg_catalog.text, E'^(\\d+ \\d+ \\d+ \\d+ \\d+ \\d+ \\d+) 0', E'\\1 100')::pg_catalog.oidvector WHERE indclass[7] IN ( WITH RECURSIVE typeoids(typoid) AS ( SELECT 'citext'::pg_catalog.regtype UNION diff --git 
a/contrib/cube/cube--unpackaged--1.0.sql b/contrib/cube/cube--unpackaged--1.0.sql index 6859682786164..1065512a290f0 100644 --- a/contrib/cube/cube--unpackaged--1.0.sql +++ b/contrib/cube/cube--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/cube/cube--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION cube" to load this file. \quit +\echo Use "CREATE EXTENSION cube FROM unpackaged" to load this file. \quit ALTER EXTENSION cube ADD type cube; ALTER EXTENSION cube ADD function cube_in(cstring); diff --git a/contrib/cube/cube.c b/contrib/cube/cube.c index b0305ef431dca..f8c80eded2b6f 100644 --- a/contrib/cube/cube.c +++ b/contrib/cube/cube.c @@ -819,7 +819,7 @@ cube_inter(PG_FUNCTION_ARGS) Max(LL_COORD(b, i), UR_COORD(b, i)) ); } - /* continue on the higher dimemsions only present in 'a' */ + /* continue on the higher dimensions only present in 'a' */ for (; i < DIM(a); i++) { result->x[i] = Max(0, diff --git a/contrib/dblink/Makefile b/contrib/dblink/Makefile index 32314a0abb48e..e833b9203aca3 100644 --- a/contrib/dblink/Makefile +++ b/contrib/dblink/Makefile @@ -4,12 +4,14 @@ MODULE_big = dblink OBJS = dblink.o PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK = $(libpq) -SHLIB_PREREQS = submake-libpq EXTENSION = dblink DATA = dblink--1.1.sql dblink--1.0--1.1.sql dblink--unpackaged--1.0.sql -REGRESS = dblink +REGRESS = paths dblink +REGRESS_OPTS = --dlpath=$(top_builddir)/src/test/regress \ + --create-role=dblink_regression_test +EXTRA_CLEAN = sql/paths.sql expected/paths.out # the db name is hard-coded in the tests override USE_MODULE_DB = @@ -19,6 +21,7 @@ PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else +SHLIB_PREREQS = submake-libpq subdir = contrib/dblink top_builddir = ../.. include $(top_builddir)/src/Makefile.global diff --git a/contrib/dblink/dblink--unpackaged--1.0.sql b/contrib/dblink/dblink--unpackaged--1.0.sql index 29f5bed0c13a2..f3923b5b350be 100644 --- a/contrib/dblink/dblink--unpackaged--1.0.sql +++ b/contrib/dblink/dblink--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/dblink/dblink--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION dblink" to load this file. \quit +\echo Use "CREATE EXTENSION dblink FROM unpackaged" to load this file. 
\quit ALTER EXTENSION dblink ADD function dblink_connect(text); ALTER EXTENSION dblink ADD function dblink_connect(text,text); diff --git a/contrib/dblink/dblink.c b/contrib/dblink/dblink.c index a81853fa91181..009b877e47161 100644 --- a/contrib/dblink/dblink.c +++ b/contrib/dblink/dblink.c @@ -94,8 +94,8 @@ static void materializeQueryResult(FunctionCallInfo fcinfo, const char *conname, const char *sql, bool fail); -static PGresult *storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql); -static void storeRow(storeInfo *sinfo, PGresult *res, bool first); +static PGresult *storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql); +static void storeRow(volatile storeInfo *sinfo, PGresult *res, bool first); static remoteConn *getConnectionByName(const char *name); static HTAB *createConnHash(void); static void createNewConnection(const char *name, remoteConn *rconn); @@ -966,17 +966,24 @@ materializeQueryResult(FunctionCallInfo fcinfo, { ReturnSetInfo *rsinfo = (ReturnSetInfo *) fcinfo->resultinfo; PGresult *volatile res = NULL; - storeInfo sinfo; + volatile storeInfo sinfo; /* prepTuplestoreResult must have been called previously */ Assert(rsinfo->returnMode == SFRM_Materialize); /* initialize storeInfo to empty */ - memset(&sinfo, 0, sizeof(sinfo)); + memset((void *) &sinfo, 0, sizeof(sinfo)); sinfo.fcinfo = fcinfo; PG_TRY(); { + /* Create short-lived memory context for data conversions */ + sinfo.tmpcontext = AllocSetContextCreate(CurrentMemoryContext, + "dblink temporary context", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); + /* execute query, collecting any tuples into the tuplestore */ res = storeQueryResult(&sinfo, conn, sql); @@ -1041,6 +1048,12 @@ materializeQueryResult(FunctionCallInfo fcinfo, PQclear(res); res = NULL; } + + /* clean up data conversion short-lived memory context */ + if (sinfo.tmpcontext != NULL) + MemoryContextDelete(sinfo.tmpcontext); + sinfo.tmpcontext = NULL; + PQclear(sinfo.last_res); sinfo.last_res = NULL; PQclear(sinfo.cur_res); @@ -1064,7 +1077,7 @@ materializeQueryResult(FunctionCallInfo fcinfo, * Execute query, and send any result rows to sinfo->tuplestore. */ static PGresult * -storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql) +storeQueryResult(volatile storeInfo *sinfo, PGconn *conn, const char *sql) { bool first = true; int nestlevel = -1; @@ -1132,7 +1145,7 @@ storeQueryResult(storeInfo *sinfo, PGconn *conn, const char *sql) * (in this case the PGresult might contain either zero or one row). 
*/ static void -storeRow(storeInfo *sinfo, PGresult *res, bool first) +storeRow(volatile storeInfo *sinfo, PGresult *res, bool first) { int nfields = PQnfields(res); HeapTuple tuple; @@ -1204,15 +1217,6 @@ storeRow(storeInfo *sinfo, PGresult *res, bool first) if (sinfo->cstrs) pfree(sinfo->cstrs); sinfo->cstrs = (char **) palloc(nfields * sizeof(char *)); - - /* Create short-lived memory context for data conversions */ - if (!sinfo->tmpcontext) - sinfo->tmpcontext = - AllocSetContextCreate(CurrentMemoryContext, - "dblink temporary context", - ALLOCSET_DEFAULT_MINSIZE, - ALLOCSET_DEFAULT_INITSIZE, - ALLOCSET_DEFAULT_MAXSIZE); } /* Should have a single-row result if we get here */ diff --git a/contrib/dblink/expected/.gitignore b/contrib/dblink/expected/.gitignore new file mode 100644 index 0000000000000..d9c7942c6467c --- /dev/null +++ b/contrib/dblink/expected/.gitignore @@ -0,0 +1 @@ +/paths.out diff --git a/contrib/dblink/expected/dblink.out b/contrib/dblink/expected/dblink.out index f237c43d3d421..87eb142bd30b3 100644 --- a/contrib/dblink/expected/dblink.out +++ b/contrib/dblink/expected/dblink.out @@ -103,6 +103,33 @@ SELECT * FROM dblink('SELECT * FROM foo') AS t(a int, b text, c text[]) WHERE t.a > 7; ERROR: connection not available +-- The first-level connection's backend will crash on exit given OpenLDAP +-- [2.4.24, 2.4.31]. We won't see evidence of any crash until the victim +-- process terminates and the postmaster responds. If process termination +-- entails writing a core dump, that can take awhile. Wait for the process to +-- vanish. At that point, the postmaster has called waitpid() on the crashed +-- process, and it will accept no new connections until it has reinitialized +-- the cluster. (We can't exploit pg_stat_activity, because the crash happens +-- after the backend updates shared memory to reflect its impending exit.) +DO $pl$ +DECLARE + detail text; +BEGIN + PERFORM wait_pid(crash_pid) + FROM dblink('dbname=contrib_regression', $$ + SELECT pg_backend_pid() FROM dblink( + 'service=test_ldap dbname=contrib_regression', + -- This string concatenation is a hack to shoehorn a + -- set_pgservicefile call into the SQL statement. + 'SELECT 1' || set_pgservicefile('pg_service.conf') + ) t(c int) + $$) AS t(crash_pid int); +EXCEPTION WHEN OTHERS THEN + GET STACKED DIAGNOSTICS detail = PG_EXCEPTION_DETAIL; + -- Expected error in a non-LDAP build. + IF NOT detail LIKE 'syntax error in service file%' THEN RAISE; END IF; +END +$pl$; -- create a persistent connection SELECT dblink_connect('dbname=contrib_regression'); dblink_connect @@ -782,7 +809,6 @@ SELECT dblink_disconnect('dtest1'); (1 row) -- test foreign data wrapper functionality -CREATE USER dblink_regression_test; CREATE SERVER fdtest FOREIGN DATA WRAPPER dblink_fdw OPTIONS (dbname 'contrib_regression'); CREATE USER MAPPING FOR public SERVER fdtest @@ -824,7 +850,6 @@ SELECT * FROM dblink('myconn','SELECT * FROM foo') AS t(a int, b text, c text[]) \c - :ORIGINAL_USER REVOKE USAGE ON FOREIGN SERVER fdtest FROM dblink_regression_test; REVOKE EXECUTE ON FUNCTION dblink_connect_u(text, text) FROM dblink_regression_test; -DROP USER dblink_regression_test; DROP USER MAPPING FOR public SERVER fdtest; DROP SERVER fdtest; -- test asynchronous notifications diff --git a/contrib/dblink/input/paths.source b/contrib/dblink/input/paths.source new file mode 100644 index 0000000000000..aab3a3b2bfb42 --- /dev/null +++ b/contrib/dblink/input/paths.source @@ -0,0 +1,14 @@ +-- Initialization that requires path substitution. 
+ +CREATE FUNCTION putenv(text) + RETURNS void + AS '@libdir@/regress@DLSUFFIX@', 'regress_putenv' + LANGUAGE C STRICT; + +CREATE FUNCTION wait_pid(int) + RETURNS void + AS '@libdir@/regress@DLSUFFIX@' + LANGUAGE C STRICT; + +CREATE FUNCTION set_pgservicefile(text) RETURNS void LANGUAGE SQL + AS $$SELECT putenv('PGSERVICEFILE=@abs_srcdir@/' || $1)$$; diff --git a/contrib/dblink/output/paths.source b/contrib/dblink/output/paths.source new file mode 100644 index 0000000000000..e1097f0996fea --- /dev/null +++ b/contrib/dblink/output/paths.source @@ -0,0 +1,11 @@ +-- Initialization that requires path substitution. +CREATE FUNCTION putenv(text) + RETURNS void + AS '@libdir@/regress@DLSUFFIX@', 'regress_putenv' + LANGUAGE C STRICT; +CREATE FUNCTION wait_pid(int) + RETURNS void + AS '@libdir@/regress@DLSUFFIX@' + LANGUAGE C STRICT; +CREATE FUNCTION set_pgservicefile(text) RETURNS void LANGUAGE SQL + AS $$SELECT putenv('PGSERVICEFILE=@abs_srcdir@/' || $1)$$; diff --git a/contrib/dblink/pg_service.conf b/contrib/dblink/pg_service.conf new file mode 100644 index 0000000000000..92201f0ad460a --- /dev/null +++ b/contrib/dblink/pg_service.conf @@ -0,0 +1,7 @@ +# pg_service.conf for minimally exercising libpq use of LDAP. + +# Having failed to reach an LDAP server, libpq essentially ignores the +# "service=test_ldap" in its connection string. Contact the "discard" +# service; the test works whether or not it answers. +[test_ldap] +ldap://127.0.0.1:9/base?attribute?one?filter diff --git a/contrib/dblink/sql/.gitignore b/contrib/dblink/sql/.gitignore new file mode 100644 index 0000000000000..d17507846d03c --- /dev/null +++ b/contrib/dblink/sql/.gitignore @@ -0,0 +1 @@ +/paths.sql diff --git a/contrib/dblink/sql/dblink.sql b/contrib/dblink/sql/dblink.sql index 2a107601c5565..5305d5a8f5b0b 100644 --- a/contrib/dblink/sql/dblink.sql +++ b/contrib/dblink/sql/dblink.sql @@ -65,6 +65,34 @@ SELECT * FROM dblink('SELECT * FROM foo') AS t(a int, b text, c text[]) WHERE t.a > 7; +-- The first-level connection's backend will crash on exit given OpenLDAP +-- [2.4.24, 2.4.31]. We won't see evidence of any crash until the victim +-- process terminates and the postmaster responds. If process termination +-- entails writing a core dump, that can take awhile. Wait for the process to +-- vanish. At that point, the postmaster has called waitpid() on the crashed +-- process, and it will accept no new connections until it has reinitialized +-- the cluster. (We can't exploit pg_stat_activity, because the crash happens +-- after the backend updates shared memory to reflect its impending exit.) +DO $pl$ +DECLARE + detail text; +BEGIN + PERFORM wait_pid(crash_pid) + FROM dblink('dbname=contrib_regression', $$ + SELECT pg_backend_pid() FROM dblink( + 'service=test_ldap dbname=contrib_regression', + -- This string concatenation is a hack to shoehorn a + -- set_pgservicefile call into the SQL statement. + 'SELECT 1' || set_pgservicefile('pg_service.conf') + ) t(c int) + $$) AS t(crash_pid int); +EXCEPTION WHEN OTHERS THEN + GET STACKED DIAGNOSTICS detail = PG_EXCEPTION_DETAIL; + -- Expected error in a non-LDAP build. 
+ IF NOT detail LIKE 'syntax error in service file%' THEN RAISE; END IF; +END +$pl$; + -- create a persistent connection SELECT dblink_connect('dbname=contrib_regression'); @@ -359,7 +387,6 @@ SELECT dblink_error_message('dtest1'); SELECT dblink_disconnect('dtest1'); -- test foreign data wrapper functionality -CREATE USER dblink_regression_test; CREATE SERVER fdtest FOREIGN DATA WRAPPER dblink_fdw OPTIONS (dbname 'contrib_regression'); CREATE USER MAPPING FOR public SERVER fdtest @@ -380,7 +407,6 @@ SELECT * FROM dblink('myconn','SELECT * FROM foo') AS t(a int, b text, c text[]) \c - :ORIGINAL_USER REVOKE USAGE ON FOREIGN SERVER fdtest FROM dblink_regression_test; REVOKE EXECUTE ON FUNCTION dblink_connect_u(text, text) FROM dblink_regression_test; -DROP USER dblink_regression_test; DROP USER MAPPING FOR public SERVER fdtest; DROP SERVER fdtest; diff --git a/contrib/dict_int/dict_int--unpackaged--1.0.sql b/contrib/dict_int/dict_int--unpackaged--1.0.sql index ef59b046ee67f..1b2d862e1f63f 100644 --- a/contrib/dict_int/dict_int--unpackaged--1.0.sql +++ b/contrib/dict_int/dict_int--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/dict_int/dict_int--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION dict_int" to load this file. \quit +\echo Use "CREATE EXTENSION dict_int FROM unpackaged" to load this file. \quit ALTER EXTENSION dict_int ADD function dintdict_init(internal); ALTER EXTENSION dict_int ADD function dintdict_lexize(internal,internal,internal,internal); diff --git a/contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql b/contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql index 1d193f7981a42..7533da1902244 100644 --- a/contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql +++ b/contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/dict_xsyn/dict_xsyn--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION dict_xsyn" to load this file. \quit +\echo Use "CREATE EXTENSION dict_xsyn FROM unpackaged" to load this file. \quit ALTER EXTENSION dict_xsyn ADD function dxsyn_init(internal); ALTER EXTENSION dict_xsyn ADD function dxsyn_lexize(internal,internal,internal,internal); diff --git a/contrib/earthdistance/earthdistance--unpackaged--1.0.sql b/contrib/earthdistance/earthdistance--unpackaged--1.0.sql index 362e0ac1071c3..ae787f68775e7 100644 --- a/contrib/earthdistance/earthdistance--unpackaged--1.0.sql +++ b/contrib/earthdistance/earthdistance--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/earthdistance/earthdistance--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION earthdistance" to load this file. \quit +\echo Use "CREATE EXTENSION earthdistance FROM unpackaged" to load this file. 
\quit ALTER EXTENSION earthdistance ADD function earth(); ALTER EXTENSION earthdistance ADD type earth; diff --git a/contrib/file_fdw/output/file_fdw.source b/contrib/file_fdw/output/file_fdw.source index bc183b88744d9..3af3cf2afed5c 100644 --- a/contrib/file_fdw/output/file_fdw.source +++ b/contrib/file_fdw/output/file_fdw.source @@ -265,9 +265,9 @@ RESET ROLE; DROP EXTENSION file_fdw CASCADE; NOTICE: drop cascades to 8 other objects DETAIL: drop cascades to server file_server -drop cascades to user mapping for file_fdw_user -drop cascades to user mapping for file_fdw_superuser -drop cascades to user mapping for no_priv_user +drop cascades to user mapping for file_fdw_user on server file_server +drop cascades to user mapping for file_fdw_superuser on server file_server +drop cascades to user mapping for no_priv_user on server file_server drop cascades to foreign table agg_text drop cascades to foreign table agg_csv drop cascades to foreign table agg_bad diff --git a/contrib/fuzzystrmatch/dmetaphone.c b/contrib/fuzzystrmatch/dmetaphone.c index 5001288bb6424..dcbc7782d4567 100644 --- a/contrib/fuzzystrmatch/dmetaphone.c +++ b/contrib/fuzzystrmatch/dmetaphone.c @@ -359,7 +359,10 @@ StringAt(metastring *s, int start, int length,...) { test = va_arg(ap, char *); if (*test && (strncmp(pos, test, length) == 0)) + { + va_end(ap); return 1; + } } while (strcmp(test, "") != 0); diff --git a/contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql b/contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql index b9a805a4fe517..14491a9fa72bc 100644 --- a/contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql +++ b/contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/fuzzystrmatch/fuzzystrmatch--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION fuzzystrmatch" to load this file. \quit +\echo Use "CREATE EXTENSION fuzzystrmatch FROM unpackaged" to load this file. 
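The dmetaphone.c hunk above adds a va_end() before the early return in StringAt(): once va_start() has been invoked, every exit path needs a matching va_end(), or the behavior is undefined on some platforms. A small self-contained sketch of the same pattern (the helper name and NULL-terminated argument convention are illustrative, not StringAt() itself):

#include <stdarg.h>
#include <stdio.h>
#include <string.h>

/*
 * Return 1 if "needle" equals any of the variadic candidates (the list is
 * terminated by a NULL pointer).
 */
static int
matches_any(const char *needle, ...)
{
    va_list ap;
    const char *candidate;

    va_start(ap, needle);
    while ((candidate = va_arg(ap, const char *)) != NULL)
    {
        if (strcmp(candidate, needle) == 0)
        {
            va_end(ap);         /* clean up before the early return */
            return 1;
        }
    }
    va_end(ap);                 /* and on the fall-through path too */
    return 0;
}

int
main(void)
{
    printf("%d\n", matches_any("GH", "CH", "GH", "PH", (char *) NULL));
    return 0;
}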
\quit ALTER EXTENSION fuzzystrmatch ADD function levenshtein(text,text); ALTER EXTENSION fuzzystrmatch ADD function levenshtein(text,text,integer,integer,integer); diff --git a/contrib/hstore/expected/hstore.out b/contrib/hstore/expected/hstore.out index 9749e45c14392..6773a2b72f3d8 100644 --- a/contrib/hstore/expected/hstore.out +++ b/contrib/hstore/expected/hstore.out @@ -1466,10 +1466,10 @@ select cast( hstore '"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f= {"b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4", "a key": "1"} (1 row) -select hstore_to_json_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'); - hstore_to_json_loose ------------------------------------------------------------------------------------------- - {"b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 2.345e+4, "a key": 1} +select hstore_to_json_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4, h=> "2016-01-01"'); + hstore_to_json_loose +------------------------------------------------------------------------------------------------------------- + {"b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 2.345e+4, "h": "2016-01-01", "a key": 1} (1 row) select hstore_to_jsonb('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'); @@ -1484,10 +1484,10 @@ select cast( hstore '"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f= {"b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4", "a key": "1"} (1 row) -select hstore_to_jsonb_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'); - hstore_to_jsonb_loose ---------------------------------------------------------------------------------------- - {"b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 23450, "a key": 1} +select hstore_to_jsonb_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4, h=> "2016-01-01"'); + hstore_to_jsonb_loose +---------------------------------------------------------------------------------------------------------- + {"b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 23450, "h": "2016-01-01", "a key": 1} (1 row) create table test_json_agg (f1 text, f2 hstore); diff --git a/contrib/hstore/hstore--unpackaged--1.0.sql b/contrib/hstore/hstore--unpackaged--1.0.sql index b7e73f41232cd..19a78028051a4 100644 --- a/contrib/hstore/hstore--unpackaged--1.0.sql +++ b/contrib/hstore/hstore--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/hstore/hstore--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION hstore" to load this file. \quit +\echo Use "CREATE EXTENSION hstore FROM unpackaged" to load this file. 
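hstore_to_json_loose() and hstore_to_jsonb_loose() must decide whether a value can be emitted unquoted as a JSON number; the regression output above shows date-like strings such as "2016-01-01" staying quoted, and the hstore_io.c hunk that follows delegates the decision to the JSON parser's IsValidJsonNumber() instead of strtol()/strtod() probing. The sketch below, a hypothetical strtod-based acceptance test rather than the removed code verbatim, shows why C numeric parsing is a poor proxy for JSON's number grammar: strtod() accepts forms such as "+1", ".5" and "NaN" that are not legal JSON numbers.

#include <stdio.h>
#include <stdlib.h>

/*
 * Hypothetical check in the spirit of the removed heuristic: treat the
 * value as numeric if strtod() consumes the whole string.
 */
static int
strtod_accepts(const char *s)
{
    char *end;

    if (*s == '\0')
        return 0;
    strtod(s, &end);
    return *end == '\0';
}

int
main(void)
{
    const char *probes[] = {"12345", "2.345e+4", "+1", ".5", "NaN", "2016-01-01"};

    for (int i = 0; i < 6; i++)
        printf("%-12s -> %s\n", probes[i],
               strtod_accepts(probes[i]) ? "looks numeric" : "must be quoted");
    return 0;
}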
\quit ALTER EXTENSION hstore ADD type hstore; ALTER EXTENSION hstore ADD function hstore_in(cstring); diff --git a/contrib/hstore/hstore_io.c b/contrib/hstore/hstore_io.c index 6ce3047215ddf..079f6627484e5 100644 --- a/contrib/hstore/hstore_io.c +++ b/contrib/hstore/hstore_io.c @@ -12,6 +12,7 @@ #include "libpq/pqformat.h" #include "utils/builtins.h" #include "utils/json.h" +#include "utils/jsonapi.h" #include "utils/jsonb.h" #include "utils/lsyscache.h" #include "utils/memutils.h" @@ -1240,7 +1241,6 @@ hstore_to_json_loose(PG_FUNCTION_ARGS) int count = HS_COUNT(in); char *base = STRPTR(in); HEntry *entries = ARRPTR(in); - bool is_number; StringInfoData tmp, dst; @@ -1267,48 +1267,9 @@ hstore_to_json_loose(PG_FUNCTION_ARGS) appendStringInfoString(&dst, "false"); else { - is_number = false; resetStringInfo(&tmp); appendBinaryStringInfo(&tmp, HS_VAL(entries, base, i), HS_VALLEN(entries, i)); - - /* - * don't treat something with a leading zero followed by another - * digit as numeric - could be a zip code or similar - */ - if (tmp.len > 0 && - !(tmp.data[0] == '0' && - isdigit((unsigned char) tmp.data[1])) && - strspn(tmp.data, "+-0123456789Ee.") == tmp.len) - { - /* - * might be a number. See if we can input it as a numeric - * value. Ignore any actual parsed value. - */ - char *endptr = "junk"; - long lval; - - lval = strtol(tmp.data, &endptr, 10); - (void) lval; - if (*endptr == '\0') - { - /* - * strol man page says this means the whole string is - * valid - */ - is_number = true; - } - else - { - /* not an int - try a double */ - double dval; - - dval = strtod(tmp.data, &endptr); - (void) dval; - if (*endptr == '\0') - is_number = true; - } - } - if (is_number) + if (IsValidJsonNumber(tmp.data, tmp.len)) appendBinaryStringInfo(&dst, tmp.data, tmp.len); else escape_json(&dst, tmp.data); @@ -1377,7 +1338,7 @@ hstore_to_jsonb(PG_FUNCTION_ARGS) JsonbParseState *state = NULL; JsonbValue *res; - res = pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL); + (void) pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL); for (i = 0; i < count; i++) { @@ -1388,7 +1349,7 @@ hstore_to_jsonb(PG_FUNCTION_ARGS) key.val.string.len = HS_KEYLEN(entries, i); key.val.string.val = HS_KEY(entries, base, i); - res = pushJsonbValue(&state, WJB_KEY, &key); + (void) pushJsonbValue(&state, WJB_KEY, &key); if (HS_VALISNULL(entries, i)) { @@ -1400,7 +1361,7 @@ hstore_to_jsonb(PG_FUNCTION_ARGS) val.val.string.len = HS_VALLEN(entries, i); val.val.string.val = HS_VAL(entries, base, i); } - res = pushJsonbValue(&state, WJB_VALUE, &val); + (void) pushJsonbValue(&state, WJB_VALUE, &val); } res = pushJsonbValue(&state, WJB_END_OBJECT, NULL); @@ -1424,7 +1385,7 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS) initStringInfo(&tmp); - res = pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL); + (void) pushJsonbValue(&state, WJB_BEGIN_OBJECT, NULL); for (i = 0; i < count; i++) { @@ -1435,7 +1396,7 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS) key.val.string.len = HS_KEYLEN(entries, i); key.val.string.val = HS_KEY(entries, base, i); - res = pushJsonbValue(&state, WJB_KEY, &key); + (void) pushJsonbValue(&state, WJB_KEY, &key); if (HS_VALISNULL(entries, i)) { @@ -1510,7 +1471,7 @@ hstore_to_jsonb_loose(PG_FUNCTION_ARGS) val.val.string.val = HS_VAL(entries, base, i); } } - res = pushJsonbValue(&state, WJB_VALUE, &val); + (void) pushJsonbValue(&state, WJB_VALUE, &val); } res = pushJsonbValue(&state, WJB_END_OBJECT, NULL); diff --git a/contrib/hstore/sql/hstore.sql b/contrib/hstore/sql/hstore.sql index 5a9e9ee5ae8b2..48514789e647c 100644 --- 
a/contrib/hstore/sql/hstore.sql +++ b/contrib/hstore/sql/hstore.sql @@ -334,11 +334,11 @@ select count(*) from testhstore where h = 'pos=>98, line=>371, node=>CBA, indexe -- json and jsonb select hstore_to_json('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'); select cast( hstore '"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4' as json); -select hstore_to_json_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'); +select hstore_to_json_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4, h=> "2016-01-01"'); select hstore_to_jsonb('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'); select cast( hstore '"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4' as jsonb); -select hstore_to_jsonb_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'); +select hstore_to_jsonb_loose('"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4, h=> "2016-01-01"'); create table test_json_agg (f1 text, f2 hstore); insert into test_json_agg values ('rec1','"a key" =>1, b => t, c => null, d=> 12345, e => 012345, f=> 1.234, g=> 2.345e+4'), diff --git a/contrib/intagg/intagg--unpackaged--1.0.sql b/contrib/intagg/intagg--unpackaged--1.0.sql index 6a6663d092c66..a0b13f3f69143 100644 --- a/contrib/intagg/intagg--unpackaged--1.0.sql +++ b/contrib/intagg/intagg--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/intagg/intagg--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION intagg" to load this file. \quit +\echo Use "CREATE EXTENSION intagg FROM unpackaged" to load this file. \quit ALTER EXTENSION intagg ADD function int_agg_state(internal,integer); ALTER EXTENSION intagg ADD function int_agg_final_array(internal); diff --git a/contrib/intarray/_int_bool.c b/contrib/intarray/_int_bool.c index c3c39d194bc89..5d9e676660fd9 100644 --- a/contrib/intarray/_int_bool.c +++ b/contrib/intarray/_int_bool.c @@ -564,6 +564,9 @@ typedef struct static void infix(INFIX *in, bool first) { + /* since this function recurses, it could be driven to stack overflow. */ + check_stack_depth(); + if (in->curpol->type == VAL) { RESIZEBUF(in, 11); diff --git a/contrib/intarray/_int_tool.c b/contrib/intarray/_int_tool.c index 511c7acb54e81..3c52912bbf450 100644 --- a/contrib/intarray/_int_tool.c +++ b/contrib/intarray/_int_tool.c @@ -184,40 +184,34 @@ rt__int_size(ArrayType *a, float *size) *size = (float) ARRNELEMS(a); } +/* qsort_arg comparison function for isort() */ +static int +isort_cmp(const void *a, const void *b, void *arg) +{ + int32 aval = *((const int32 *) a); + int32 bval = *((const int32 *) b); + + if (aval < bval) + return -1; + if (aval > bval) + return 1; + + /* + * Report if we have any duplicates. If there are equal keys, qsort must + * compare them at some point, else it wouldn't know whether one should go + * before or after the other. + */ + *((bool *) arg) = true; + return 0; +} + /* Sort the given data (len >= 2). Return true if any duplicates found */ bool isort(int32 *a, int len) { - int32 cur, - prev; - int32 *pcur, - *pprev, - *end; - bool r = FALSE; + bool r = false; - /* - * We use a simple insertion sort. While this is O(N^2) in the worst - * case, it's quite fast if the input is already sorted or nearly so. 
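The isort_cmp() comparator added above reports duplicates through qsort_arg()'s pass-through pointer while sorting; the hand-rolled insertion sort it replaces is removed in the continuation of this hunk. The same trick in portable C, where standard qsort() has no extra argument, so a file-scope flag stands in for qsort_arg()'s arg pointer:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static bool found_duplicate;    /* set by the comparator */

static int
cmp_int_flag_dups(const void *a, const void *b)
{
    int av = *(const int *) a;
    int bv = *(const int *) b;

    if (av < bv)
        return -1;
    if (av > bv)
        return 1;
    /*
     * A zero result means two equal keys were compared directly, i.e. a
     * duplicate exists; the intarray comment relies on the sort comparing
     * equal keys at some point to establish their order.
     */
    found_duplicate = true;
    return 0;
}

int
main(void)
{
    int data[] = {5, 3, 9, 3, 1};

    found_duplicate = false;
    qsort(data, 5, sizeof(int), cmp_int_flag_dups);
    printf("duplicates: %s\n", found_duplicate ? "yes" : "no");
    return 0;
}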
- * Also, for not-too-large inputs it's faster than more complex methods - * anyhow. - */ - end = a + len; - for (pcur = a + 1; pcur < end; pcur++) - { - cur = *pcur; - for (pprev = pcur - 1; pprev >= a; pprev--) - { - prev = *pprev; - if (prev <= cur) - { - if (prev == cur) - r = TRUE; - break; - } - pprev[1] = prev; - } - pprev[1] = cur; - } + qsort_arg(a, len, sizeof(int32), isort_cmp, (void *) &r); return r; } diff --git a/contrib/intarray/intarray--unpackaged--1.0.sql b/contrib/intarray/intarray--unpackaged--1.0.sql index 5de64bf0aba18..63814cef980ac 100644 --- a/contrib/intarray/intarray--unpackaged--1.0.sql +++ b/contrib/intarray/intarray--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/intarray/intarray--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION intarray" to load this file. \quit +\echo Use "CREATE EXTENSION intarray FROM unpackaged" to load this file. \quit ALTER EXTENSION intarray ADD type query_int; ALTER EXTENSION intarray ADD function bqarr_in(cstring); diff --git a/contrib/isn/.gitignore b/contrib/isn/.gitignore new file mode 100644 index 0000000000000..5dcb3ff972350 --- /dev/null +++ b/contrib/isn/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ diff --git a/contrib/isn/Makefile b/contrib/isn/Makefile index bd8f193e9383f..0caf729db35cd 100644 --- a/contrib/isn/Makefile +++ b/contrib/isn/Makefile @@ -5,6 +5,8 @@ MODULES = isn EXTENSION = isn DATA = isn--1.0.sql isn--unpackaged--1.0.sql +REGRESS = isn + ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) diff --git a/contrib/isn/expected/isn.out b/contrib/isn/expected/isn.out new file mode 100644 index 0000000000000..140bc86656147 --- /dev/null +++ b/contrib/isn/expected/isn.out @@ -0,0 +1,222 @@ +-- +-- Test ISN extension +-- +CREATE EXTENSION isn; +-- +-- test valid conversions +-- +SELECT '9780123456786'::EAN13, -- old book + '9790123456785'::EAN13, -- music + '9791234567896'::EAN13, -- new book + '9771234567898'::EAN13, -- serial + '0123456789012'::EAN13, -- upc + '1234567890128'::EAN13; + ean13 | ean13 | ean13 | ean13 | ean13 | ean13 +-------------------+-------------------+-----------------+-------------------+-----------------+----------------- + 978-0-12-345678-6 | 979-0-1234-5678-5 | 979-123456789-6 | 977-1234-567-89-8 | 012-345678901-2 | 123-456789012-8 +(1 row) + +SELECT '9780123456786'::ISBN, + '123456789X'::ISBN, + '9780123456786'::ISBN13::ISBN, + '9780123456786'::EAN13::ISBN; + isbn | isbn | isbn | isbn +---------------+---------------+---------------+--------------- + 0-12-345678-9 | 1-234-56789-X | 0-12-345678-9 | 0-12-345678-9 +(1 row) + +SELECT -- new books, shown as ISBN13 even for ISBN... 
+ '9791234567896'::ISBN, + '9791234567896'::ISBN13::ISBN, + '9791234567896'::EAN13::ISBN; + isbn | isbn | isbn +-----------------+-----------------+----------------- + 979-123456789-6 | 979-123456789-6 | 979-123456789-6 +(1 row) + +SELECT '9780123456786'::ISBN13, + '123456789X'::ISBN13, + '9791234567896'::ISBN13, + '9791234567896'::EAN13::ISBN13; + isbn13 | isbn13 | isbn13 | isbn13 +-------------------+-------------------+-----------------+----------------- + 978-0-12-345678-6 | 978-1-234-56789-7 | 979-123456789-6 | 979-123456789-6 +(1 row) + +SELECT '9790123456785'::ISMN, + '9790123456785'::EAN13::ISMN, + 'M123456785'::ISMN, + 'M-1234-5678-5'::ISMN; + ismn | ismn | ismn | ismn +---------------+---------------+---------------+--------------- + M-1234-5678-5 | M-1234-5678-5 | M-1234-5678-5 | M-1234-5678-5 +(1 row) + +SELECT '9790123456785'::ISMN13, + 'M123456785'::ISMN13, + 'M-1234-5678-5'::ISMN13; + ismn13 | ismn13 | ismn13 +-------------------+-------------------+------------------- + 979-0-1234-5678-5 | 979-0-1234-5678-5 | 979-0-1234-5678-5 +(1 row) + +SELECT '9771234567003'::ISSN, + '12345679'::ISSN; + issn | issn +-----------+----------- + 1234-5679 | 1234-5679 +(1 row) + +SELECT '9771234567003'::ISSN13, + '12345679'::ISSN13, + '9771234567898'::ISSN13, + '9771234567898'::EAN13::ISSN13; + issn13 | issn13 | issn13 | issn13 +-------------------+-------------------+-------------------+------------------- + 977-1234-567-00-3 | 977-1234-567-00-3 | 977-1234-567-89-8 | 977-1234-567-89-8 +(1 row) + +SELECT '0123456789012'::UPC, + '0123456789012'::EAN13::UPC; + upc | upc +--------------+-------------- + 123456789012 | 123456789012 +(1 row) + +-- +-- test invalid checksums +-- +SELECT '1234567890'::ISBN; +ERROR: invalid check digit for ISBN number: "1234567890", should be X +LINE 1: SELECT '1234567890'::ISBN; + ^ +SELECT 'M123456780'::ISMN; +ERROR: invalid check digit for ISMN number: "M123456780", should be 5 +LINE 1: SELECT 'M123456780'::ISMN; + ^ +SELECT '12345670'::ISSN; +ERROR: invalid check digit for ISSN number: "12345670", should be 9 +LINE 1: SELECT '12345670'::ISSN; + ^ +SELECT '9780123456780'::ISBN; +ERROR: invalid check digit for ISBN number: "9780123456780", should be 6 +LINE 1: SELECT '9780123456780'::ISBN; + ^ +SELECT '9791234567890'::ISBN13; +ERROR: invalid check digit for ISBN number: "9791234567890", should be 6 +LINE 1: SELECT '9791234567890'::ISBN13; + ^ +SELECT '0123456789010'::UPC; +ERROR: invalid check digit for UPC number: "0123456789010", should be 2 +LINE 1: SELECT '0123456789010'::UPC; + ^ +SELECT '1234567890120'::EAN13; +ERROR: invalid check digit for EAN13 number: "1234567890120", should be 8 +LINE 1: SELECT '1234567890120'::EAN13; + ^ +-- +-- test invalid conversions +-- +SELECT '9790123456785'::ISBN; -- not a book +ERROR: cannot cast ISMN to ISBN for number: "9790123456785" +LINE 1: SELECT '9790123456785'::ISBN; + ^ +SELECT '9771234567898'::ISBN; -- not a book +ERROR: cannot cast ISSN to ISBN for number: "9771234567898" +LINE 1: SELECT '9771234567898'::ISBN; + ^ +SELECT '0123456789012'::ISBN; -- not a book +ERROR: cannot cast UPC to ISBN for number: "0123456789012" +LINE 1: SELECT '0123456789012'::ISBN; + ^ +SELECT '9790123456785'::ISBN13; -- not a book +ERROR: cannot cast ISMN to ISBN for number: "9790123456785" +LINE 1: SELECT '9790123456785'::ISBN13; + ^ +SELECT '9771234567898'::ISBN13; -- not a book +ERROR: cannot cast ISSN to ISBN for number: "9771234567898" +LINE 1: SELECT '9771234567898'::ISBN13; + ^ +SELECT '0123456789012'::ISBN13; -- not a book +ERROR: 
cannot cast UPC to ISBN for number: "0123456789012" +LINE 1: SELECT '0123456789012'::ISBN13; + ^ +SELECT '9780123456786'::ISMN; -- not music +ERROR: cannot cast ISBN to ISMN for number: "9780123456786" +LINE 1: SELECT '9780123456786'::ISMN; + ^ +SELECT '9771234567898'::ISMN; -- not music +ERROR: cannot cast ISSN to ISMN for number: "9771234567898" +LINE 1: SELECT '9771234567898'::ISMN; + ^ +SELECT '9791234567896'::ISMN; -- not music +ERROR: cannot cast ISBN to ISMN for number: "9791234567896" +LINE 1: SELECT '9791234567896'::ISMN; + ^ +SELECT '0123456789012'::ISMN; -- not music +ERROR: cannot cast UPC to ISMN for number: "0123456789012" +LINE 1: SELECT '0123456789012'::ISMN; + ^ +SELECT '9780123456786'::ISSN; -- not serial +ERROR: cannot cast ISBN to ISSN for number: "9780123456786" +LINE 1: SELECT '9780123456786'::ISSN; + ^ +SELECT '9790123456785'::ISSN; -- not serial +ERROR: cannot cast ISMN to ISSN for number: "9790123456785" +LINE 1: SELECT '9790123456785'::ISSN; + ^ +SELECT '9791234567896'::ISSN; -- not serial +ERROR: cannot cast ISBN to ISSN for number: "9791234567896" +LINE 1: SELECT '9791234567896'::ISSN; + ^ +SELECT '0123456789012'::ISSN; -- not serial +ERROR: cannot cast UPC to ISSN for number: "0123456789012" +LINE 1: SELECT '0123456789012'::ISSN; + ^ +SELECT '9780123456786'::UPC; -- not a product +ERROR: cannot cast ISBN to UPC for number: "9780123456786" +LINE 1: SELECT '9780123456786'::UPC; + ^ +SELECT '9771234567898'::UPC; -- not a product +ERROR: cannot cast ISSN to UPC for number: "9771234567898" +LINE 1: SELECT '9771234567898'::UPC; + ^ +SELECT '9790123456785'::UPC; -- not a product +ERROR: cannot cast ISMN to UPC for number: "9790123456785" +LINE 1: SELECT '9790123456785'::UPC; + ^ +SELECT '9791234567896'::UPC; -- not a product +ERROR: cannot cast ISBN to UPC for number: "9791234567896" +LINE 1: SELECT '9791234567896'::UPC; + ^ +SELECT 'postgresql...'::EAN13; +ERROR: invalid input syntax for EAN13 number: "postgresql..." +LINE 1: SELECT 'postgresql...'::EAN13; + ^ +SELECT 'postgresql...'::ISBN; +ERROR: invalid input syntax for ISBN number: "postgresql..." +LINE 1: SELECT 'postgresql...'::ISBN; + ^ +SELECT 9780123456786::EAN13; +ERROR: cannot cast type bigint to ean13 +LINE 1: SELECT 9780123456786::EAN13; + ^ +SELECT 9780123456786::ISBN; +ERROR: cannot cast type bigint to isbn +LINE 1: SELECT 9780123456786::ISBN; + ^ +-- +-- test some comparisons, must yield true +-- +SELECT '12345679'::ISSN = '9771234567003'::EAN13 AS "ok", + 'M-1234-5678-5'::ISMN = '9790123456785'::EAN13 AS "ok", + '9791234567896'::EAN13 != '123456789X'::ISBN AS "nope"; + ok | ok | nope +----+----+------ + t | t | t +(1 row) + +-- +-- cleanup +-- +DROP EXTENSION isn; diff --git a/contrib/isn/isn--unpackaged--1.0.sql b/contrib/isn/isn--unpackaged--1.0.sql index 30e5012156602..8a19d6a475d11 100644 --- a/contrib/isn/isn--unpackaged--1.0.sql +++ b/contrib/isn/isn--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/isn/isn--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION isn" to load this file. \quit +\echo Use "CREATE EXTENSION isn FROM unpackaged" to load this file. 
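The expected output above shows 978-prefixed EAN-13 values collapsing to the short ISBN-10 form (9780123456786 becomes 0-12-345678-9) while 979-prefixed values keep the 13-digit form, which is what the strncmp("978-", ...) guard in the ean2ISBN() hunk below enforces. For reference, a standalone sketch of the ISBN-10 check-digit arithmetic (weights 10 down to 2, with a remainder of 10 rendered as 'X'); the helper is illustrative, not the module's weight_checkdig():

#include <stdio.h>

/*
 * Compute the ISBN-10 check character for the nine digits that follow a
 * "978" EAN-13 prefix.  979-prefixed ISBNs have no 10-digit form, so a
 * caller would skip them, mirroring the guard added in ean2ISBN().
 */
static char
isbn10_check_digit(const char *digits9)
{
    int sum = 0;
    int check;

    for (int i = 0; i < 9; i++)
        sum += (10 - i) * (digits9[i] - '0');

    check = (11 - sum % 11) % 11;
    return (check == 10) ? 'X' : (char) (check + '0');
}

int
main(void)
{
    /* the regression test expects 0-12-345678-9 and 1-234-56789-X */
    printf("check digits: %c %c\n",
           isbn10_check_digit("012345678"),
           isbn10_check_digit("123456789"));
    return 0;
}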
\quit ALTER EXTENSION isn ADD type ean13; ALTER EXTENSION isn ADD function ean13_in(cstring); diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c index 11247449799a8..9e62374acdb20 100644 --- a/contrib/isn/isn.c +++ b/contrib/isn/isn.c @@ -443,16 +443,23 @@ ean2ISBN(char *isn) char *aux; unsigned check; - /* the number should come in this format: 978-0-000-00000-0 */ - /* Strip the first part and calculate the new check digit */ - hyphenate(isn, isn + 4, NULL, NULL); - check = weight_checkdig(isn, 10); - aux = strchr(isn, '\0'); - while (!isdigit((unsigned char) *--aux)); - if (check == 10) - *aux = 'X'; - else - *aux = check + '0'; + /* + * The number should come in this format: 978-0-000-00000-0 + * or may be an ISBN-13 number, 979-..., which does not have a short + * representation. Do the short output version if possible. + */ + if (strncmp("978-", isn, 4) == 0) + { + /* Strip the first part and calculate the new check digit */ + hyphenate(isn, isn + 4, NULL, NULL); + check = weight_checkdig(isn, 10); + aux = strchr(isn, '\0'); + while (!isdigit((unsigned char) *--aux)); + if (check == 10) + *aux = 'X'; + else + *aux = check + '0'; + } } static inline void diff --git a/contrib/isn/sql/isn.sql b/contrib/isn/sql/isn.sql new file mode 100644 index 0000000000000..5ef6d8aa3bee2 --- /dev/null +++ b/contrib/isn/sql/isn.sql @@ -0,0 +1,104 @@ +-- +-- Test ISN extension +-- + +CREATE EXTENSION isn; + +-- +-- test valid conversions +-- +SELECT '9780123456786'::EAN13, -- old book + '9790123456785'::EAN13, -- music + '9791234567896'::EAN13, -- new book + '9771234567898'::EAN13, -- serial + '0123456789012'::EAN13, -- upc + '1234567890128'::EAN13; + +SELECT '9780123456786'::ISBN, + '123456789X'::ISBN, + '9780123456786'::ISBN13::ISBN, + '9780123456786'::EAN13::ISBN; + +SELECT -- new books, shown as ISBN13 even for ISBN... 
+ '9791234567896'::ISBN, + '9791234567896'::ISBN13::ISBN, + '9791234567896'::EAN13::ISBN; + +SELECT '9780123456786'::ISBN13, + '123456789X'::ISBN13, + '9791234567896'::ISBN13, + '9791234567896'::EAN13::ISBN13; + +SELECT '9790123456785'::ISMN, + '9790123456785'::EAN13::ISMN, + 'M123456785'::ISMN, + 'M-1234-5678-5'::ISMN; + +SELECT '9790123456785'::ISMN13, + 'M123456785'::ISMN13, + 'M-1234-5678-5'::ISMN13; + +SELECT '9771234567003'::ISSN, + '12345679'::ISSN; + +SELECT '9771234567003'::ISSN13, + '12345679'::ISSN13, + '9771234567898'::ISSN13, + '9771234567898'::EAN13::ISSN13; + +SELECT '0123456789012'::UPC, + '0123456789012'::EAN13::UPC; + +-- +-- test invalid checksums +-- +SELECT '1234567890'::ISBN; +SELECT 'M123456780'::ISMN; +SELECT '12345670'::ISSN; +SELECT '9780123456780'::ISBN; +SELECT '9791234567890'::ISBN13; +SELECT '0123456789010'::UPC; +SELECT '1234567890120'::EAN13; + +-- +-- test invalid conversions +-- +SELECT '9790123456785'::ISBN; -- not a book +SELECT '9771234567898'::ISBN; -- not a book +SELECT '0123456789012'::ISBN; -- not a book + +SELECT '9790123456785'::ISBN13; -- not a book +SELECT '9771234567898'::ISBN13; -- not a book +SELECT '0123456789012'::ISBN13; -- not a book + +SELECT '9780123456786'::ISMN; -- not music +SELECT '9771234567898'::ISMN; -- not music +SELECT '9791234567896'::ISMN; -- not music +SELECT '0123456789012'::ISMN; -- not music + +SELECT '9780123456786'::ISSN; -- not serial +SELECT '9790123456785'::ISSN; -- not serial +SELECT '9791234567896'::ISSN; -- not serial +SELECT '0123456789012'::ISSN; -- not serial + +SELECT '9780123456786'::UPC; -- not a product +SELECT '9771234567898'::UPC; -- not a product +SELECT '9790123456785'::UPC; -- not a product +SELECT '9791234567896'::UPC; -- not a product + +SELECT 'postgresql...'::EAN13; +SELECT 'postgresql...'::ISBN; +SELECT 9780123456786::EAN13; +SELECT 9780123456786::ISBN; + +-- +-- test some comparisons, must yield true +-- +SELECT '12345679'::ISSN = '9771234567003'::EAN13 AS "ok", + 'M-1234-5678-5'::ISMN = '9790123456785'::EAN13 AS "ok", + '9791234567896'::EAN13 != '123456789X'::ISBN AS "nope"; + +-- +-- cleanup +-- +DROP EXTENSION isn; diff --git a/contrib/lo/lo--unpackaged--1.0.sql b/contrib/lo/lo--unpackaged--1.0.sql index 053185ba1d857..d6bcf1a46e284 100644 --- a/contrib/lo/lo--unpackaged--1.0.sql +++ b/contrib/lo/lo--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/lo/lo--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION lo" to load this file. \quit +\echo Use "CREATE EXTENSION lo FROM unpackaged" to load this file. \quit ALTER EXTENSION lo ADD domain lo; ALTER EXTENSION lo ADD function lo_oid(lo); diff --git a/contrib/ltree/_ltree_gist.c b/contrib/ltree/_ltree_gist.c index 41be68d7ee669..9272618f8c077 100644 --- a/contrib/ltree/_ltree_gist.c +++ b/contrib/ltree/_ltree_gist.c @@ -85,7 +85,7 @@ _ltree_compress(PG_FUNCTION_ARGS) (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), errmsg("array must not contain nulls"))); - key = (ltree_gist *) palloc(len); + key = (ltree_gist *) palloc0(len); SET_VARSIZE(key, len); key->flag = 0; @@ -116,7 +116,7 @@ _ltree_compress(PG_FUNCTION_ARGS) PG_RETURN_POINTER(retval); } len = LTG_HDRSIZE; - key = (ltree_gist *) palloc(len); + key = (ltree_gist *) palloc0(len); SET_VARSIZE(key, len); key->flag = LTG_ALLTRUE; @@ -196,7 +196,7 @@ _ltree_union(PG_FUNCTION_ARGS) } len = LTG_HDRSIZE + ((flag & LTG_ALLTRUE) ? 
0 : ASIGLEN); - result = (ltree_gist *) palloc(len); + result = (ltree_gist *) palloc0(len); SET_VARSIZE(result, len); result->flag = flag; if (!LTG_ISALLTRUE(result)) @@ -333,26 +333,26 @@ _ltree_picksplit(PG_FUNCTION_ARGS) /* form initial .. */ if (LTG_ISALLTRUE(GETENTRY(entryvec, seed_1))) { - datum_l = (ltree_gist *) palloc(LTG_HDRSIZE); + datum_l = (ltree_gist *) palloc0(LTG_HDRSIZE); SET_VARSIZE(datum_l, LTG_HDRSIZE); datum_l->flag = LTG_ALLTRUE; } else { - datum_l = (ltree_gist *) palloc(LTG_HDRSIZE + ASIGLEN); + datum_l = (ltree_gist *) palloc0(LTG_HDRSIZE + ASIGLEN); SET_VARSIZE(datum_l, LTG_HDRSIZE + ASIGLEN); datum_l->flag = 0; memcpy((void *) LTG_SIGN(datum_l), (void *) LTG_SIGN(GETENTRY(entryvec, seed_1)), sizeof(ABITVEC)); } if (LTG_ISALLTRUE(GETENTRY(entryvec, seed_2))) { - datum_r = (ltree_gist *) palloc(LTG_HDRSIZE); + datum_r = (ltree_gist *) palloc0(LTG_HDRSIZE); SET_VARSIZE(datum_r, LTG_HDRSIZE); datum_r->flag = LTG_ALLTRUE; } else { - datum_r = (ltree_gist *) palloc(LTG_HDRSIZE + ASIGLEN); + datum_r = (ltree_gist *) palloc0(LTG_HDRSIZE + ASIGLEN); SET_VARSIZE(datum_r, LTG_HDRSIZE + ASIGLEN); datum_r->flag = 0; memcpy((void *) LTG_SIGN(datum_r), (void *) LTG_SIGN(GETENTRY(entryvec, seed_2)), sizeof(ABITVEC)); diff --git a/contrib/ltree/_ltree_op.c b/contrib/ltree/_ltree_op.c index 44270d4614a68..c0c56a40d4fb7 100644 --- a/contrib/ltree/_ltree_op.c +++ b/contrib/ltree/_ltree_op.c @@ -211,7 +211,7 @@ _ltree_extract_isparent(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } - item = (ltree *) palloc(VARSIZE(found)); + item = (ltree *) palloc0(VARSIZE(found)); memcpy(item, found, VARSIZE(found)); PG_FREE_IF_COPY(la, 0); @@ -234,7 +234,7 @@ _ltree_extract_risparent(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } - item = (ltree *) palloc(VARSIZE(found)); + item = (ltree *) palloc0(VARSIZE(found)); memcpy(item, found, VARSIZE(found)); PG_FREE_IF_COPY(la, 0); @@ -257,7 +257,7 @@ _ltq_extract_regex(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } - item = (ltree *) palloc(VARSIZE(found)); + item = (ltree *) palloc0(VARSIZE(found)); memcpy(item, found, VARSIZE(found)); PG_FREE_IF_COPY(la, 0); @@ -280,7 +280,7 @@ _ltxtq_extract_exec(PG_FUNCTION_ARGS) PG_RETURN_NULL(); } - item = (ltree *) palloc(VARSIZE(found)); + item = (ltree *) palloc0(VARSIZE(found)); memcpy(item, found, VARSIZE(found)); PG_FREE_IF_COPY(la, 0); diff --git a/contrib/ltree/ltree--1.0.sql b/contrib/ltree/ltree--1.0.sql index 5a2f375a4f3f4..7d55fc603f63e 100644 --- a/contrib/ltree/ltree--1.0.sql +++ b/contrib/ltree/ltree--1.0.sql @@ -6,12 +6,12 @@ CREATE FUNCTION ltree_in(cstring) RETURNS ltree AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION ltree_out(ltree) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE ltree ( INTERNALLENGTH = -1, @@ -303,12 +303,12 @@ CREATE OPERATOR CLASS ltree_ops CREATE FUNCTION lquery_in(cstring) RETURNS lquery AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION lquery_out(lquery) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE lquery ( INTERNALLENGTH = -1, @@ -414,12 +414,12 @@ CREATE OPERATOR ^? 
( CREATE FUNCTION ltxtq_in(cstring) RETURNS ltxtquery AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION ltxtq_out(ltxtquery) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE ltxtquery ( INTERNALLENGTH = -1, @@ -481,12 +481,12 @@ CREATE OPERATOR ^@ ( CREATE FUNCTION ltree_gist_in(cstring) RETURNS ltree_gist AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION ltree_gist_out(ltree_gist) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE ltree_gist ( internallength = -1, diff --git a/contrib/ltree/ltree--unpackaged--1.0.sql b/contrib/ltree/ltree--unpackaged--1.0.sql index 1e24fa56c688c..30a94c2fc5cd6 100644 --- a/contrib/ltree/ltree--unpackaged--1.0.sql +++ b/contrib/ltree/ltree--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/ltree/ltree--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION ltree" to load this file. \quit +\echo Use "CREATE EXTENSION ltree FROM unpackaged" to load this file. \quit ALTER EXTENSION ltree ADD type ltree; ALTER EXTENSION ltree ADD function ltree_in(cstring); diff --git a/contrib/ltree/ltree_gist.c b/contrib/ltree/ltree_gist.c index 2d89f1aed4c6f..e4df3e50b7826 100644 --- a/contrib/ltree/ltree_gist.c +++ b/contrib/ltree/ltree_gist.c @@ -56,7 +56,7 @@ ltree_compress(PG_FUNCTION_ARGS) ltree *val = (ltree *) DatumGetPointer(PG_DETOAST_DATUM(entry->key)); int32 len = LTG_HDRSIZE + VARSIZE(val); - key = (ltree_gist *) palloc(len); + key = (ltree_gist *) palloc0(len); SET_VARSIZE(key, len); key->flag = LTG_ONENODE; memcpy((void *) LTG_NODE(key), (void *) val, VARSIZE(val)); @@ -213,7 +213,7 @@ ltree_union(PG_FUNCTION_ARGS) isleqr = (left == right || ISEQ(left, right)) ? true : false; *size = LTG_HDRSIZE + ((isalltrue) ? 0 : SIGLEN) + VARSIZE(left) + ((isleqr) ? 0 : VARSIZE(right)); - result = (ltree_gist *) palloc(*size); + result = (ltree_gist *) palloc0(*size); SET_VARSIZE(result, *size); result->flag = 0; @@ -386,7 +386,7 @@ ltree_picksplit(PG_FUNCTION_ARGS) lu_l = LTG_GETLNODE(GETENTRY(entryvec, array[FirstOffsetNumber].index)); isleqr = (lu_l == lu_r || ISEQ(lu_l, lu_r)) ? true : false; size = LTG_HDRSIZE + ((lisat) ? 0 : SIGLEN) + VARSIZE(lu_l) + ((isleqr) ? 0 : VARSIZE(lu_r)); - lu = (ltree_gist *) palloc(size); + lu = (ltree_gist *) palloc0(size); SET_VARSIZE(lu, size); lu->flag = 0; if (lisat) @@ -403,7 +403,7 @@ ltree_picksplit(PG_FUNCTION_ARGS) ru_l = LTG_GETLNODE(GETENTRY(entryvec, array[1 + ((maxoff - FirstOffsetNumber + 1) / 2)].index)); isleqr = (ru_l == ru_r || ISEQ(ru_l, ru_r)) ? true : false; size = LTG_HDRSIZE + ((risat) ? 0 : SIGLEN) + VARSIZE(ru_l) + ((isleqr) ? 
0 : VARSIZE(ru_r)); - ru = (ltree_gist *) palloc(size); + ru = (ltree_gist *) palloc0(size); SET_VARSIZE(ru, size); ru->flag = 0; if (risat) @@ -445,7 +445,7 @@ gist_isparent(ltree_gist *key, ltree *query) static ltree * copy_ltree(ltree *src) { - ltree *dst = (ltree *) palloc(VARSIZE(src)); + ltree *dst = (ltree *) palloc0(VARSIZE(src)); memcpy(dst, src, VARSIZE(src)); return dst; diff --git a/contrib/ltree/ltree_op.c b/contrib/ltree/ltree_op.c index 4561073fa0933..aa1e9918befb5 100644 --- a/contrib/ltree/ltree_op.c +++ b/contrib/ltree/ltree_op.c @@ -211,7 +211,7 @@ inner_subltree(ltree *t, int32 startpos, int32 endpos) ptr = LEVEL_NEXT(ptr); } - res = (ltree *) palloc(LTREE_HDRSIZE + (end - start)); + res = (ltree *) palloc0(LTREE_HDRSIZE + (end - start)); SET_VARSIZE(res, LTREE_HDRSIZE + (end - start)); res->numlevel = endpos - startpos; @@ -268,7 +268,7 @@ ltree_concat(ltree *a, ltree *b) { ltree *r; - r = (ltree *) palloc(VARSIZE(a) + VARSIZE(b) - LTREE_HDRSIZE); + r = (ltree *) palloc0(VARSIZE(a) + VARSIZE(b) - LTREE_HDRSIZE); SET_VARSIZE(r, VARSIZE(a) + VARSIZE(b) - LTREE_HDRSIZE); r->numlevel = a->numlevel + b->numlevel; @@ -450,7 +450,7 @@ lca_inner(ltree **a, int len) l1 = LEVEL_NEXT(l1); } - res = (ltree *) palloc(reslen); + res = (ltree *) palloc0(reslen); SET_VARSIZE(res, reslen); res->numlevel = num; diff --git a/contrib/ltree/ltxtquery_io.c b/contrib/ltree/ltxtquery_io.c index ddc63d7b66b19..befda1344d5e9 100644 --- a/contrib/ltree/ltxtquery_io.c +++ b/contrib/ltree/ltxtquery_io.c @@ -350,7 +350,7 @@ queryin(char *buf) errmsg("ltxtquery is too large"))); commonlen = COMPUTESIZE(state.num, state.sumlen); - query = (ltxtquery *) palloc(commonlen); + query = (ltxtquery *) palloc0(commonlen); SET_VARSIZE(query, commonlen); query->size = state.num; ptr = GETQUERY(query); @@ -416,6 +416,9 @@ while( ( (inf)->cur - (inf)->buf ) + (addsize) + 1 >= (inf)->buflen ) \ static void infix(INFIX *in, bool first) { + /* since this function recurses, it could be driven to stack overflow. */ + check_stack_depth(); + if (in->curpol->type == VAL) { char *op = in->op + in->curpol->distance; diff --git a/contrib/ltree/ltxtquery_op.c b/contrib/ltree/ltxtquery_op.c index 64f9d219f7678..1428c8b47806f 100644 --- a/contrib/ltree/ltxtquery_op.c +++ b/contrib/ltree/ltxtquery_op.c @@ -8,6 +8,7 @@ #include #include "ltree.h" +#include "miscadmin.h" PG_FUNCTION_INFO_V1(ltxtq_exec); PG_FUNCTION_INFO_V1(ltxtq_rexec); @@ -18,6 +19,9 @@ PG_FUNCTION_INFO_V1(ltxtq_rexec); bool ltree_execute(ITEM *curitem, void *checkval, bool calcnot, bool (*chkcond) (void *checkval, ITEM *val)) { + /* since this function recurses, it could be driven to stack overflow */ + check_stack_depth(); + if (curitem->type == VAL) return (*chkcond) (checkval, curitem); else if (curitem->val == (int32) '!') diff --git a/contrib/pageinspect/pageinspect--1.0--1.1.sql b/contrib/pageinspect/pageinspect--1.0--1.1.sql index 49e83264d337f..0e2c3f45b35b0 100644 --- a/contrib/pageinspect/pageinspect--1.0--1.1.sql +++ b/contrib/pageinspect/pageinspect--1.0--1.1.sql @@ -1,7 +1,7 @@ /* contrib/pageinspect/pageinspect--1.0--1.1.sql */ -- complain if script is sourced in psql, rather than via ALTER EXTENSION -\echo Use "ALTER EXTENSION pageinspect UPDATE TO 1.1" to load this file. \quit +\echo Use "ALTER EXTENSION pageinspect UPDATE TO '1.1'" to load this file. 
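infix() in ltxtquery_io.c and ltree_execute() in ltxtquery_op.c above gain check_stack_depth() calls because both recurse over user-supplied query trees and could otherwise be driven to stack overflow. Outside the backend the same guard can be approximated with an explicit depth counter; a toy sketch under that assumption (MAX_DEPTH and the parenthesis walker are made up for illustration, whereas the backend sizes its limit from max_stack_depth):

#include <stdio.h>

#define MAX_DEPTH 1000          /* illustrative limit */

/*
 * Count how many '(' open the string, refusing inputs nested deeply enough
 * to threaten the stack before recursing further.
 */
static int
nesting_depth(const char *s, int depth)
{
    if (depth > MAX_DEPTH)
    {
        fprintf(stderr, "expression too deeply nested\n");
        return -1;
    }
    if (*s == '(')
        return nesting_depth(s + 1, depth + 1);
    return depth;
}

int
main(void)
{
    printf("depth = %d\n", nesting_depth("((()))", 0));
    return 0;
}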
\quit DROP FUNCTION page_header(bytea); CREATE FUNCTION page_header(IN page bytea, diff --git a/contrib/pageinspect/pageinspect--1.1--1.2.sql b/contrib/pageinspect/pageinspect--1.1--1.2.sql index 5e23ca4dd5914..31ffbb0eb53da 100644 --- a/contrib/pageinspect/pageinspect--1.1--1.2.sql +++ b/contrib/pageinspect/pageinspect--1.1--1.2.sql @@ -1,7 +1,7 @@ /* contrib/pageinspect/pageinspect--1.1--1.2.sql */ -- complain if script is sourced in psql, rather than via ALTER EXTENSION -\echo Use "ALTER EXTENSION pageinspect UPDATE TO 1.2" to load this file. \quit +\echo Use "ALTER EXTENSION pageinspect UPDATE TO '1.2'" to load this file. \quit DROP FUNCTION page_header(bytea); CREATE FUNCTION page_header(IN page bytea, diff --git a/contrib/pageinspect/pageinspect--unpackaged--1.0.sql b/contrib/pageinspect/pageinspect--unpackaged--1.0.sql index 13e2167dfc1ac..1bf6bccb79bac 100644 --- a/contrib/pageinspect/pageinspect--unpackaged--1.0.sql +++ b/contrib/pageinspect/pageinspect--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pageinspect/pageinspect--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pageinspect" to load this file. \quit +\echo Use "CREATE EXTENSION pageinspect FROM unpackaged" to load this file. \quit DROP FUNCTION heap_page_items(bytea); CREATE FUNCTION heap_page_items(IN page bytea, diff --git a/contrib/pageinspect/rawpage.c b/contrib/pageinspect/rawpage.c index cc66fc8b04d99..fe60fadaf30dc 100644 --- a/contrib/pageinspect/rawpage.c +++ b/contrib/pageinspect/rawpage.c @@ -131,9 +131,11 @@ get_raw_page_internal(text *relname, ForkNumber forknum, BlockNumber blkno) (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), errmsg("cannot access temporary tables of other sessions"))); - if (blkno >= RelationGetNumberOfBlocks(rel)) - elog(ERROR, "block number %u is out of range for relation \"%s\"", - blkno, RelationGetRelationName(rel)); + if (blkno >= RelationGetNumberOfBlocksInFork(rel, forknum)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("block number %u is out of range for relation \"%s\"", + blkno, RelationGetRelationName(rel)))); /* Initialize buffer to copy to */ raw_page = (bytea *) palloc(BLCKSZ + VARHDRSZ); diff --git a/contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql b/contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql index bfe6e52f8f40e..dc1cbdd6fe503 100644 --- a/contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql +++ b/contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pg_buffercache/pg_buffercache--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pg_buffercache" to load this file. \quit +\echo Use "CREATE EXTENSION pg_buffercache FROM unpackaged" to load this file. \quit ALTER EXTENSION pg_buffercache ADD function pg_buffercache_pages(); ALTER EXTENSION pg_buffercache ADD view pg_buffercache; diff --git a/contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql b/contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql index 5e8d7e472ee90..865137380016b 100644 --- a/contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql +++ b/contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pg_freespacemap/pg_freespacemap--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pg_freespacemap" to load this file. 
\quit +\echo Use "CREATE EXTENSION pg_freespacemap FROM unpackaged" to load this file. \quit ALTER EXTENSION pg_freespacemap ADD function pg_freespace(regclass,bigint); ALTER EXTENSION pg_freespacemap ADD function pg_freespace(regclass); diff --git a/contrib/pg_prewarm/pg_prewarm.c b/contrib/pg_prewarm/pg_prewarm.c index df20e888eff08..32c724e5ce2fd 100644 --- a/contrib/pg_prewarm/pg_prewarm.c +++ b/contrib/pg_prewarm/pg_prewarm.c @@ -159,6 +159,7 @@ pg_prewarm(PG_FUNCTION_ARGS) */ for (block = first_block; block <= last_block; ++block) { + CHECK_FOR_INTERRUPTS(); PrefetchBuffer(rel, forkNumber, block); ++blocks_done; } @@ -177,6 +178,7 @@ pg_prewarm(PG_FUNCTION_ARGS) */ for (block = first_block; block <= last_block; ++block) { + CHECK_FOR_INTERRUPTS(); smgrread(rel->rd_smgr, forkNumber, block, blockbuffer); ++blocks_done; } @@ -190,6 +192,7 @@ pg_prewarm(PG_FUNCTION_ARGS) { Buffer buf; + CHECK_FOR_INTERRUPTS(); buf = ReadBufferExtended(rel, forkNumber, block, RBM_NORMAL, NULL); ReleaseBuffer(buf); ++blocks_done; diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c index d6b169264c343..2f9f2b4d2e920 100644 --- a/contrib/pg_standby/pg_standby.c +++ b/contrib/pg_standby/pg_standby.c @@ -418,7 +418,7 @@ CheckForExternalTrigger(void) return; } - if ((len = read(fd, buf, sizeof(buf))) < 0) + if ((len = read(fd, buf, sizeof(buf) - 1)) < 0) { fprintf(stderr, "WARNING: could not read \"%s\": %s\n", triggerPath, strerror(errno)); diff --git a/contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql b/contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql index e84a3cbafc2c7..116e95834db44 100644 --- a/contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql +++ b/contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pg_stat_statements/pg_stat_statements--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pg_stat_statements" to load this file. \quit +\echo Use "CREATE EXTENSION pg_stat_statements FROM unpackaged" to load this file. \quit ALTER EXTENSION pg_stat_statements ADD function pg_stat_statements_reset(); ALTER EXTENSION pg_stat_statements ADD function pg_stat_statements(); diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index a3e8c595b8129..c3ba6058fb05d 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -165,7 +165,7 @@ typedef struct pgssEntry pgssHashKey key; /* hash key of entry - MUST BE FIRST */ Counters counters; /* the statistics for this query */ Size query_offset; /* query text offset in external file */ - int query_len; /* # of valid bytes in query string */ + int query_len; /* # of valid bytes in query string, or -1 */ int encoding; /* query text encoding */ slock_t mutex; /* protects the counters only */ } pgssEntry; @@ -735,11 +735,7 @@ pgss_shmem_shutdown(int code, Datum arg) /* * Rename file into place, so we atomically replace any old one. 
*/ - if (rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE) != 0) - ereport(LOG, - (errcode_for_file_access(), - errmsg("could not rename pg_stat_statement file \"%s\": %m", - PGSS_DUMP_FILE ".tmp"))); + (void) durable_rename(PGSS_DUMP_FILE ".tmp", PGSS_DUMP_FILE, LOG); /* Unlink query-texts file; it's not needed while shutdown */ unlink(PGSS_TEXT_FILE); @@ -955,10 +951,13 @@ pgss_ProcessUtility(Node *parsetree, const char *queryString, * calculated from the query tree) would be used to accumulate costs of * ensuing EXECUTEs. This would be confusing, and inconsistent with other * cases where planning time is not included at all. + * + * Likewise, we don't track execution of DEALLOCATE. */ if (pgss_track_utility && pgss_enabled() && !IsA(parsetree, ExecuteStmt) && - !IsA(parsetree, PrepareStmt)) + !IsA(parsetree, PrepareStmt) && + !IsA(parsetree, DeallocateStmt)) { instr_time start; instr_time duration; @@ -1636,7 +1635,8 @@ entry_cmp(const void *lhs, const void *rhs) } /* - * Deallocate least used entries. + * Deallocate least-used entries. + * * Caller must hold an exclusive lock on pgss->lock. */ static void @@ -1647,17 +1647,27 @@ entry_dealloc(void) pgssEntry *entry; int nvictims; int i; - Size totlen = 0; + Size tottextlen; + int nvalidtexts; /* * Sort entries by usage and deallocate USAGE_DEALLOC_PERCENT of them. * While we're scanning the table, apply the decay factor to the usage - * values. + * values, and update the mean query length. + * + * Note that the mean query length is almost immediately obsolete, since + * we compute it before not after discarding the least-used entries. + * Hopefully, that doesn't affect the mean too much; it doesn't seem worth + * making two passes to get a more current result. Likewise, the new + * cur_median_usage includes the entries we're about to zap. */ entries = palloc(hash_get_num_entries(pgss_hash) * sizeof(pgssEntry *)); i = 0; + tottextlen = 0; + nvalidtexts = 0; + hash_seq_init(&hash_seq, pgss_hash); while ((entry = hash_seq_search(&hash_seq)) != NULL) { @@ -1667,20 +1677,27 @@ entry_dealloc(void) entry->counters.usage *= STICKY_DECREASE_FACTOR; else entry->counters.usage *= USAGE_DECREASE_FACTOR; - /* Accumulate total size, too. */ - totlen += entry->query_len + 1; + /* In the mean length computation, ignore dropped texts. 
*/ + if (entry->query_len >= 0) + { + tottextlen += entry->query_len + 1; + nvalidtexts++; + } } + /* Sort into increasing order by usage */ qsort(entries, i, sizeof(pgssEntry *), entry_cmp); + /* Record the (approximate) median usage */ if (i > 0) - { - /* Record the (approximate) median usage */ pgss->cur_median_usage = entries[i / 2]->counters.usage; - /* Record the mean query length */ - pgss->mean_query_len = totlen / i; - } + /* Record the mean query length */ + if (nvalidtexts > 0) + pgss->mean_query_len = tottextlen / nvalidtexts; + else + pgss->mean_query_len = ASSUMED_LENGTH_INIT; + /* Now zap an appropriate fraction of lowest-usage entries */ nvictims = Max(10, i * USAGE_DEALLOC_PERCENT / 100); nvictims = Min(nvictims, i); @@ -1823,7 +1840,7 @@ qtext_load_file(Size *buffer_size) } /* Allocate buffer; beware that off_t might be wider than size_t */ - if (stat.st_size <= MaxAllocSize) + if (stat.st_size <= MaxAllocHugeSize) buf = (char *) malloc(stat.st_size); else buf = NULL; @@ -1831,7 +1848,9 @@ qtext_load_file(Size *buffer_size) { ereport(LOG, (errcode(ERRCODE_OUT_OF_MEMORY), - errmsg("out of memory"))); + errmsg("out of memory"), + errdetail("Could not allocate enough memory to read pg_stat_statement file \"%s\".", + PGSS_TEXT_FILE))); CloseTransientFile(fd); return NULL; } @@ -1933,13 +1952,17 @@ need_gc_qtexts(void) * occur in the foreseeable future. * * The caller must hold an exclusive lock on pgss->lock. + * + * At the first sign of trouble we unlink the query text file to get a clean + * slate (although existing statistics are retained), rather than risk + * thrashing by allowing the same problem case to recur indefinitely. */ static void gc_qtexts(void) { char *qbuffer; Size qbuffer_size; - FILE *qfile; + FILE *qfile = NULL; HASH_SEQ_STATUS hash_seq; pgssEntry *entry; Size extent; @@ -1954,12 +1977,15 @@ gc_qtexts(void) return; /* - * Load the old texts file. If we fail (out of memory, for instance) just - * skip the garbage collection. + * Load the old texts file. If we fail (out of memory, for instance), + * invalidate query texts. Hopefully this is rare. It might seem better + * to leave things alone on an OOM failure, but the problem is that the + * file is only going to get bigger; hoping for a future non-OOM result is + * risky and can easily lead to complete denial of service. */ qbuffer = qtext_load_file(&qbuffer_size); if (qbuffer == NULL) - return; + goto gc_fail; /* * We overwrite the query texts file in place, so as to reduce the risk of @@ -1994,6 +2020,7 @@ gc_qtexts(void) /* Trouble ... drop the text */ entry->query_offset = 0; entry->query_len = -1; + /* entry will not be counted in mean query length computation */ continue; } @@ -2078,7 +2105,36 @@ gc_qtexts(void) entry->query_len = -1; } - /* Seems like a good idea to bump the GC count even though we failed */ + /* + * Destroy the query text file and create a new, empty one + */ + (void) unlink(PGSS_TEXT_FILE); + qfile = AllocateFile(PGSS_TEXT_FILE, PG_BINARY_W); + if (qfile == NULL) + ereport(LOG, + (errcode_for_file_access(), + errmsg("could not write new pg_stat_statement file \"%s\": %m", + PGSS_TEXT_FILE))); + else + FreeFile(qfile); + + /* Reset the shared extent pointer */ + pgss->extent = 0; + + /* Reset mean_query_len to match the new state */ + pgss->mean_query_len = ASSUMED_LENGTH_INIT; + + /* + * Bump the GC count even though we failed. + * + * This is needed to make concurrent readers of file without any lock on + * pgss->lock notice existence of new version of file. 
Once readers + * subsequently observe a change in GC count with pgss->lock held, that + * forces a safe reopen of file. Writers also require that we bump here, + * of course. (As required by locking protocol, readers and writers don't + * trust earlier file contents until gc_count is found unchanged after + * pgss->lock acquired in shared or exclusive mode respectively.) + */ record_gc_qtexts(); } diff --git a/contrib/pg_test_fsync/pg_test_fsync.c b/contrib/pg_test_fsync/pg_test_fsync.c index 842295ae3d976..37298be56e623 100644 --- a/contrib/pg_test_fsync/pg_test_fsync.c +++ b/contrib/pg_test_fsync/pg_test_fsync.c @@ -259,8 +259,6 @@ test_sync(int writes_per_op) } else { - if ((tmpfile = open(filename, O_RDWR | O_DSYNC | PG_O_DIRECT, 0)) == -1) - die("could not open output file"); START_TIMER; for (ops = 0; alarm_triggered == false; ops++) { diff --git a/contrib/pg_test_timing/pg_test_timing.c b/contrib/pg_test_timing/pg_test_timing.c index e44c535d09305..e5c11de6bb44d 100644 --- a/contrib/pg_test_timing/pg_test_timing.c +++ b/contrib/pg_test_timing/pg_test_timing.c @@ -115,7 +115,7 @@ test_timing(int32 duration) end_time, temp; - total_time = duration > 0 ? duration * 1000000 : 0; + total_time = duration > 0 ? duration * INT64CONST(1000000) : 0; INSTR_TIME_SET_CURRENT(start_time); cur = INSTR_TIME_GET_MICROSEC(start_time); diff --git a/contrib/pg_trgm/pg_trgm--1.1.sql b/contrib/pg_trgm/pg_trgm--1.1.sql index 1fff7af2c480f..34b37e478721a 100644 --- a/contrib/pg_trgm/pg_trgm--1.1.sql +++ b/contrib/pg_trgm/pg_trgm--1.1.sql @@ -53,12 +53,12 @@ CREATE OPERATOR <-> ( CREATE FUNCTION gtrgm_in(cstring) RETURNS gtrgm AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE FUNCTION gtrgm_out(gtrgm) RETURNS cstring AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; +LANGUAGE C STRICT IMMUTABLE; CREATE TYPE gtrgm ( INTERNALLENGTH = -1, diff --git a/contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql b/contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql index 7243a6a410f93..d3eab97d41993 100644 --- a/contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql +++ b/contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pg_trgm/pg_trgm--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pg_trgm" to load this file. \quit +\echo Use "CREATE EXTENSION pg_trgm FROM unpackaged" to load this file. 
\quit ALTER EXTENSION pg_trgm ADD function set_limit(real); ALTER EXTENSION pg_trgm ADD function show_limit(); diff --git a/contrib/pg_upgrade/.gitignore b/contrib/pg_upgrade/.gitignore index 9555f54e859e1..d24ec60184fe7 100644 --- a/contrib/pg_upgrade/.gitignore +++ b/contrib/pg_upgrade/.gitignore @@ -1,6 +1,8 @@ /pg_upgrade # Generated by test suite -analyze_new_cluster.sh -delete_old_cluster.sh +/analyze_new_cluster.sh +/delete_old_cluster.sh +/analyze_new_cluster.bat +/delete_old_cluster.bat /log/ /tmp_check/ diff --git a/contrib/pg_upgrade/check.c b/contrib/pg_upgrade/check.c index edfe7e114bd1b..0e1a1605a024b 100644 --- a/contrib/pg_upgrade/check.c +++ b/contrib/pg_upgrade/check.c @@ -17,12 +17,14 @@ static void set_locale_and_encoding(ClusterInfo *cluster); static void check_new_cluster_is_empty(void); static void check_locale_and_encoding(ControlData *oldctrl, ControlData *newctrl); -static bool equivalent_locale(const char *loca, const char *locb); +static bool equivalent_locale(int category, const char *loca, const char *locb); static bool equivalent_encoding(const char *chara, const char *charb); static void check_is_super_user(ClusterInfo *cluster); +static void check_proper_datallowconn(ClusterInfo *cluster); static void check_for_prepared_transactions(ClusterInfo *cluster); static void check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster); static void check_for_reg_data_type_usage(ClusterInfo *cluster); +static void check_for_jsonb_9_4_usage(ClusterInfo *cluster); static void get_bin_version(ClusterInfo *cluster); static char *get_canonical_locale_name(int category, const char *locale); @@ -95,10 +97,15 @@ check_and_dump_old_cluster(bool live_check, char **sequence_script_file_name) * Check for various failure cases */ check_is_super_user(&old_cluster); + check_proper_datallowconn(&old_cluster); check_for_prepared_transactions(&old_cluster); check_for_reg_data_type_usage(&old_cluster); check_for_isn_and_int8_passing_mismatch(&old_cluster); + if (GET_MAJOR_VERSION(old_cluster.major_version) == 904 && + old_cluster.controldata.cat_ver < JSONB_FORMAT_CHANGE_CAT_VER) + check_for_jsonb_9_4_usage(&old_cluster); + /* old = PG 8.3 checks? */ if (GET_MAJOR_VERSION(old_cluster.major_version) <= 803) { @@ -256,9 +263,10 @@ output_completion_banner(char *analyze_script_file_name, deletion_script_file_name); else pg_log(PG_REPORT, - "Could not create a script to delete the old cluster's data\n" - "files because user-defined tablespaces exist in the old cluster\n" - "directory. The old cluster's contents must be deleted manually.\n"); + "Could not create a script to delete the old cluster's data files\n" + "because user-defined tablespaces or the new cluster's data directory\n" + "exist in the old cluster directory. The old cluster's contents must\n" + "be deleted manually.\n"); } @@ -285,9 +293,9 @@ check_cluster_versions(void) PG_MAJORVERSION); /* - * We can't allow downgrading because we use the target pg_dumpall, and - * pg_dumpall cannot operate on new database versions, only older - * versions. + * We can't allow downgrading because we use the target pg_dump, and + * pg_dump cannot operate on newer database versions, only current and + * older versions. 
*/ if (old_cluster.major_version > new_cluster.major_version) pg_fatal("This utility cannot be used to downgrade to older major PostgreSQL versions.\n"); @@ -365,23 +373,8 @@ set_locale_and_encoding(ClusterInfo *cluster) i_datcollate = PQfnumber(res, "datcollate"); i_datctype = PQfnumber(res, "datctype"); - if (GET_MAJOR_VERSION(cluster->major_version) < 902) - { - /* - * Pre-9.2 did not canonicalize the supplied locale names to match - * what the system returns, while 9.2+ does, so convert pre-9.2 to - * match. - */ - ctrl->lc_collate = get_canonical_locale_name(LC_COLLATE, - pg_strdup(PQgetvalue(res, 0, i_datcollate))); - ctrl->lc_ctype = get_canonical_locale_name(LC_CTYPE, - pg_strdup(PQgetvalue(res, 0, i_datctype))); - } - else - { - ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate)); - ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype)); - } + ctrl->lc_collate = pg_strdup(PQgetvalue(res, 0, i_datcollate)); + ctrl->lc_ctype = pg_strdup(PQgetvalue(res, 0, i_datctype)); PQclear(res); } @@ -413,10 +406,10 @@ static void check_locale_and_encoding(ControlData *oldctrl, ControlData *newctrl) { - if (!equivalent_locale(oldctrl->lc_collate, newctrl->lc_collate)) + if (!equivalent_locale(LC_COLLATE, oldctrl->lc_collate, newctrl->lc_collate)) pg_fatal("lc_collate cluster values do not match: old \"%s\", new \"%s\"\n", oldctrl->lc_collate, newctrl->lc_collate); - if (!equivalent_locale(oldctrl->lc_ctype, newctrl->lc_ctype)) + if (!equivalent_locale(LC_CTYPE, oldctrl->lc_ctype, newctrl->lc_ctype)) pg_fatal("lc_ctype cluster values do not match: old \"%s\", new \"%s\"\n", oldctrl->lc_ctype, newctrl->lc_ctype); if (!equivalent_encoding(oldctrl->encoding, newctrl->encoding)) @@ -429,39 +422,46 @@ check_locale_and_encoding(ControlData *oldctrl, * * Best effort locale-name comparison. Return false if we are not 100% sure * the locales are equivalent. + * + * Note: The encoding parts of the names are ignored. This function is + * currently used to compare locale names stored in pg_database, and + * pg_database contains a separate encoding field. That's compared directly + * in check_locale_and_encoding(). */ static bool -equivalent_locale(const char *loca, const char *locb) +equivalent_locale(int category, const char *loca, const char *locb) { - const char *chara = strrchr(loca, '.'); - const char *charb = strrchr(locb, '.'); - int lencmp; - - /* If they don't both contain an encoding part, just do strcasecmp(). */ - if (!chara || !charb) - return (pg_strcasecmp(loca, locb) == 0); + const char *chara; + const char *charb; + char *canona; + char *canonb; + int lena; + int lenb; /* - * Compare the encoding parts. Windows tends to use code page numbers for - * the encoding part, which equivalent_encoding() won't like, so accept if - * the strings are case-insensitive equal; otherwise use - * equivalent_encoding() to compare. + * If the names are equal, the locales are equivalent. Checking this + * first avoids calling setlocale() in the common case that the names + * are equal. That's a good thing, if setlocale() is buggy, for example. */ - if (pg_strcasecmp(chara + 1, charb + 1) != 0 && - !equivalent_encoding(chara + 1, charb + 1)) - return false; + if (pg_strcasecmp(loca, locb) == 0) + return true; /* - * OK, compare the locale identifiers (e.g. en_US part of en_US.utf8). - * - * It's tempting to ignore non-alphanumeric chars here, but for now it's - * not clear that that's necessary; just do case-insensitive comparison. + * Not identical. 
Canonicalize both names, remove the encoding parts, + * and try again. */ - lencmp = chara - loca; - if (lencmp != charb - locb) - return false; + canona = get_canonical_locale_name(category, loca); + chara = strrchr(canona, '.'); + lena = chara ? (chara - canona) : strlen(canona); - return (pg_strncasecmp(loca, locb, lencmp) == 0); + canonb = get_canonical_locale_name(category, locb); + charb = strrchr(canonb, '.'); + lenb = charb ? (charb - canonb) : strlen(canonb); + + if (lena == lenb && pg_strncasecmp(canona, canonb, lena) == 0) + return true; + + return false; } /* @@ -530,7 +530,7 @@ create_script_for_cluster_analyze(char **analyze_script_file_name) if ((script = fopen_priv(*analyze_script_file_name, "w")) == NULL) pg_fatal("Could not open file \"%s\": %s\n", - *analyze_script_file_name, getErrorText(errno)); + *analyze_script_file_name, getErrorText()); #ifndef WIN32 /* add shebang header */ @@ -585,7 +585,7 @@ create_script_for_cluster_analyze(char **analyze_script_file_name) #ifndef WIN32 if (chmod(*analyze_script_file_name, S_IRWXU) != 0) pg_fatal("Could not add execute permission to file \"%s\": %s\n", - *analyze_script_file_name, getErrorText(errno)); + *analyze_script_file_name, getErrorText()); #endif if (os_info.user_specified) @@ -595,6 +595,58 @@ create_script_for_cluster_analyze(char **analyze_script_file_name) } +static void +check_proper_datallowconn(ClusterInfo *cluster) +{ + int dbnum; + PGconn *conn_template1; + PGresult *dbres; + int ntups; + int i_datname; + int i_datallowconn; + + prep_status("Checking database connection settings"); + + conn_template1 = connectToServer(cluster, "template1"); + + /* get database names */ + dbres = executeQueryOrDie(conn_template1, + "SELECT datname, datallowconn " + "FROM pg_catalog.pg_database"); + + i_datname = PQfnumber(dbres, "datname"); + i_datallowconn = PQfnumber(dbres, "datallowconn"); + + ntups = PQntuples(dbres); + for (dbnum = 0; dbnum < ntups; dbnum++) + { + char *datname = PQgetvalue(dbres, dbnum, i_datname); + char *datallowconn = PQgetvalue(dbres, dbnum, i_datallowconn); + + if (strcmp(datname, "template0") == 0) + { + /* avoid restore failure when pg_dumpall tries to create template0 */ + if (strcmp(datallowconn, "t") == 0) + pg_fatal("template0 must not allow connections, " + "i.e. its pg_database.datallowconn must be false\n"); + } + else + { + /* avoid datallowconn == false databases from being skipped on restore */ + if (strcmp(datallowconn, "f") == 0) + pg_fatal("All non-template0 databases must allow connections, " + "i.e. their pg_database.datallowconn must be true\n"); + } + } + + PQclear(dbres); + + PQfinish(conn_template1); + + check_ok(); +} + + /* * create_script_for_old_cluster_deletion() * @@ -605,17 +657,34 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) { FILE *script = NULL; int tblnum; - char old_cluster_pgdata[MAXPGPATH]; + char old_cluster_pgdata[MAXPGPATH], new_cluster_pgdata[MAXPGPATH]; *deletion_script_file_name = psprintf("delete_old_cluster.%s", SCRIPT_EXT); + strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH); + canonicalize_path(old_cluster_pgdata); + + strlcpy(new_cluster_pgdata, new_cluster.pgdata, MAXPGPATH); + canonicalize_path(new_cluster_pgdata); + + /* Some people put the new data directory inside the old one. */ + if (path_is_prefix_of_path(old_cluster_pgdata, new_cluster_pgdata)) + { + pg_log(PG_WARNING, + "\nWARNING: new data directory should not be inside the old data directory, e.g. 
%s\n", old_cluster_pgdata); + + /* Unlink file in case it is left over from a previous run. */ + unlink(*deletion_script_file_name); + pg_free(*deletion_script_file_name); + *deletion_script_file_name = NULL; + return; + } + /* * Some users (oddly) create tablespaces inside the cluster data * directory. We can't create a proper old cluster delete script in that * case. */ - strlcpy(old_cluster_pgdata, old_cluster.pgdata, MAXPGPATH); - canonicalize_path(old_cluster_pgdata); for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) { char old_tablespace_dir[MAXPGPATH]; @@ -636,7 +705,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) if ((script = fopen_priv(*deletion_script_file_name, "w")) == NULL) pg_fatal("Could not open file \"%s\": %s\n", - *deletion_script_file_name, getErrorText(errno)); + *deletion_script_file_name, getErrorText()); #ifndef WIN32 /* add shebang header */ @@ -644,7 +713,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) #endif /* delete old cluster's default tablespace */ - fprintf(script, RMDIR_CMD " %s\n", fix_path_separator(old_cluster.pgdata)); + fprintf(script, RMDIR_CMD " \"%s\"\n", fix_path_separator(old_cluster.pgdata)); /* delete old cluster's alternate tablespaces */ for (tblnum = 0; tblnum < os_info.num_old_tablespaces; tblnum++) @@ -666,7 +735,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) PATH_SEPARATOR); for (dbnum = 0; dbnum < old_cluster.dbarr.ndbs; dbnum++) - fprintf(script, RMDIR_CMD " %s%c%d\n", + fprintf(script, RMDIR_CMD " \"%s%c%d\"\n", fix_path_separator(os_info.old_tablespaces[tblnum]), PATH_SEPARATOR, old_cluster.dbarr.dbs[dbnum].db_oid); } @@ -678,7 +747,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) * Simply delete the tablespace directory, which might be ".old" * or a version-specific subdirectory. */ - fprintf(script, RMDIR_CMD " %s%s\n", + fprintf(script, RMDIR_CMD " \"%s%s\"\n", fix_path_separator(os_info.old_tablespaces[tblnum]), fix_path_separator(suffix_path)); pfree(suffix_path); @@ -690,7 +759,7 @@ create_script_for_old_cluster_deletion(char **deletion_script_file_name) #ifndef WIN32 if (chmod(*deletion_script_file_name, S_IRWXU) != 0) pg_fatal("Could not add execute permission to file \"%s\": %s\n", - *deletion_script_file_name, getErrorText(errno)); + *deletion_script_file_name, getErrorText()); #endif check_ok(); @@ -826,7 +895,7 @@ check_for_isn_and_int8_passing_mismatch(ClusterInfo *cluster) found = true; if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); + output_path, getErrorText()); if (!db_used) { fprintf(script, "Database: %s\n", active_db->db_name); @@ -929,7 +998,7 @@ check_for_reg_data_type_usage(ClusterInfo *cluster) found = true; if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); + output_path, getErrorText()); if (!db_used) { fprintf(script, "Database: %s\n", active_db->db_name); @@ -964,6 +1033,96 @@ check_for_reg_data_type_usage(ClusterInfo *cluster) } +/* + * check_for_jsonb_9_4_usage() + * + * JSONB changed its storage format during 9.4 beta, so check for it. 
+ */ +static void +check_for_jsonb_9_4_usage(ClusterInfo *cluster) +{ + int dbnum; + FILE *script = NULL; + bool found = false; + char output_path[MAXPGPATH]; + + prep_status("Checking for JSONB user data types"); + + snprintf(output_path, sizeof(output_path), "tables_using_jsonb.txt"); + + for (dbnum = 0; dbnum < cluster->dbarr.ndbs; dbnum++) + { + PGresult *res; + bool db_used = false; + int ntups; + int rowno; + int i_nspname, + i_relname, + i_attname; + DbInfo *active_db = &cluster->dbarr.dbs[dbnum]; + PGconn *conn = connectToServer(cluster, active_db->db_name); + + /* + * While several relkinds don't store any data, e.g. views, they can + * be used to define data types of other columns, so we check all + * relkinds. + */ + res = executeQueryOrDie(conn, + "SELECT n.nspname, c.relname, a.attname " + "FROM pg_catalog.pg_class c, " + " pg_catalog.pg_namespace n, " + " pg_catalog.pg_attribute a " + "WHERE c.oid = a.attrelid AND " + " NOT a.attisdropped AND " + " a.atttypid = 'pg_catalog.jsonb'::pg_catalog.regtype AND " + " c.relnamespace = n.oid AND " + /* exclude possible orphaned temp tables */ + " n.nspname !~ '^pg_temp_' AND " + " n.nspname NOT IN ('pg_catalog', 'information_schema')"); + + ntups = PQntuples(res); + i_nspname = PQfnumber(res, "nspname"); + i_relname = PQfnumber(res, "relname"); + i_attname = PQfnumber(res, "attname"); + for (rowno = 0; rowno < ntups; rowno++) + { + found = true; + if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) + pg_fatal("Could not open file \"%s\": %s\n", + output_path, getErrorText()); + if (!db_used) + { + fprintf(script, "Database: %s\n", active_db->db_name); + db_used = true; + } + fprintf(script, " %s.%s.%s\n", + PQgetvalue(res, rowno, i_nspname), + PQgetvalue(res, rowno, i_relname), + PQgetvalue(res, rowno, i_attname)); + } + + PQclear(res); + + PQfinish(conn); + } + + if (script) + fclose(script); + + if (found) + { + pg_log(PG_REPORT, "fatal\n"); + pg_fatal("Your installation contains one of the JSONB data types in user tables.\n" + "The internal format of JSONB changed during 9.4 beta so this cluster cannot currently\n" + "be upgraded. You can remove the problem tables and restart the upgrade. A list\n" + "of the problem columns is in the file:\n" + " %s\n\n", output_path); + } + else + check_ok(); +} + + static void get_bin_version(ClusterInfo *cluster) { @@ -978,7 +1137,7 @@ get_bin_version(ClusterInfo *cluster) if ((output = popen(cmd, "r")) == NULL || fgets(cmd_output, sizeof(cmd_output), output) == NULL) pg_fatal("Could not get pg_ctl version data using %s: %s\n", - cmd, getErrorText(errno)); + cmd, getErrorText()); pclose(output); diff --git a/contrib/pg_upgrade/controldata.c b/contrib/pg_upgrade/controldata.c index 2906ccbf8c8f7..400549c9bfcdc 100644 --- a/contrib/pg_upgrade/controldata.c +++ b/contrib/pg_upgrade/controldata.c @@ -119,7 +119,7 @@ get_control_data(ClusterInfo *cluster, bool live_check) if ((output = popen(cmd, "r")) == NULL) pg_fatal("Could not get control data using %s: %s\n", - cmd, getErrorText(errno)); + cmd, getErrorText()); /* Only pre-8.4 has these so if they are not set below we will check later */ cluster->controldata.lc_collate = NULL; @@ -154,7 +154,7 @@ get_control_data(ClusterInfo *cluster, bool live_check) if (GET_MAJOR_VERSION(cluster->major_version) <= 803) { for (p = bufin; *p; p++) - if (!isascii(*p)) + if (!isascii((unsigned char) *p)) pg_fatal("The 8.3 cluster's pg_controldata is incapable of outputting ASCII, even\n" "with LANG=C. 
You must upgrade this cluster to a newer version of PostgreSQL\n" "8.3 to fix this bug. PostgreSQL 8.3.7 and later are known to work properly.\n"); @@ -228,21 +228,25 @@ get_control_data(ClusterInfo *cluster, bool live_check) pg_fatal("%d: controldata retrieval problem\n", __LINE__); p++; /* removing ':' char */ - cluster->controldata.chkpnt_tli = str2uint(p); + tli = str2uint(p); got_tli = true; } else if ((p = strstr(bufin, "Latest checkpoint's NextXID:")) != NULL) { - char *op = strchr(p, '/'); + p = strchr(p, ':'); - if (op == NULL) - op = strchr(p, ':'); + if (p == NULL || strlen(p) <= 1) + pg_fatal("%d: controldata retrieval problem\n", __LINE__); - if (op == NULL || strlen(op) <= 1) + p++; /* removing ':' char */ + cluster->controldata.chkpnt_nxtepoch = str2uint(p); + + p = strchr(p, '/'); + if (p == NULL || strlen(p) <= 1) pg_fatal("%d: controldata retrieval problem\n", __LINE__); - op++; /* removing ':' char */ - cluster->controldata.chkpnt_nxtxid = str2uint(op); + p++; /* removing '/' char */ + cluster->controldata.chkpnt_nxtxid = str2uint(p); got_xid = true; } else if ((p = strstr(bufin, "Latest checkpoint's NextOID:")) != NULL) @@ -474,11 +478,11 @@ get_control_data(ClusterInfo *cluster, bool live_check) * Before 9.3, pg_resetxlog reported the xlogid and segno of the first log * file after reset as separate lines. Starting with 9.3, it reports the * WAL file name. If the old cluster is older than 9.3, we construct the - * WAL file name from the xlogid and segno. + * WAL file name from the tli, xlogid, and segno. */ if (GET_MAJOR_VERSION(cluster->major_version) <= 902) { - if (got_log_id && got_log_seg) + if (got_tli && got_log_id && got_log_seg) { snprintf(cluster->controldata.nextxlogfile, 25, "%08X%08X%08X", tli, logid, segno); @@ -492,7 +496,6 @@ get_control_data(ClusterInfo *cluster, bool live_check) (!got_oldestmulti && cluster->controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) || (!live_check && !got_nextxlogfile) || - !got_tli || !got_align || !got_blocksz || !got_largesz || !got_walsz || !got_walseg || !got_ident || !got_index || !got_toast || !got_date_is_int || !got_float8_pass_by_value || !got_data_checksum_version) @@ -520,9 +523,6 @@ get_control_data(ClusterInfo *cluster, bool live_check) if (!live_check && !got_nextxlogfile) pg_log(PG_REPORT, " first WAL segment after reset\n"); - if (!got_tli) - pg_log(PG_REPORT, " latest checkpoint timeline ID\n"); - if (!got_align) pg_log(PG_REPORT, " maximum alignment\n"); @@ -580,7 +580,7 @@ check_control_data(ControlData *oldctrl, pg_fatal("old and new pg_controldata block sizes are invalid or do not match\n"); if (oldctrl->largesz == 0 || oldctrl->largesz != newctrl->largesz) - pg_fatal("old and new pg_controldata maximum relation segement sizes are invalid or do not match\n"); + pg_fatal("old and new pg_controldata maximum relation segment sizes are invalid or do not match\n"); if (oldctrl->walsz == 0 || oldctrl->walsz != newctrl->walsz) pg_fatal("old and new pg_controldata WAL block sizes are invalid or do not match\n"); diff --git a/contrib/pg_upgrade/dump.c b/contrib/pg_upgrade/dump.c index 6c7661049c7be..e623a22632772 100644 --- a/contrib/pg_upgrade/dump.c +++ b/contrib/pg_upgrade/dump.c @@ -12,6 +12,8 @@ #include "pg_upgrade.h" #include +#include "catalog/binary_upgrade.h" + void generate_old_dump(void) @@ -23,8 +25,8 @@ generate_old_dump(void) /* run new pg_dumpall binary for globals */ exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_dumpall\" %s --schema-only --globals-only " - "--quote-all-identifiers 
--binary-upgrade %s -f %s", + "\"%s/pg_dumpall\" %s --globals-only --quote-all-identifiers " + "--binary-upgrade %s -f %s", new_cluster.bindir, cluster_conn_opts(&old_cluster), log_opts.verbose ? "--verbose" : "", GLOBALS_DUMP_FILE); @@ -67,3 +69,71 @@ generate_old_dump(void) end_progress_output(); check_ok(); } + + +/* + * It is possible for there to be a mismatch in the need for TOAST tables + * between the old and new servers, e.g. some pre-9.1 tables didn't need + * TOAST tables but will need them in 9.1+. (There are also opposite cases, + * but these are handled by setting binary_upgrade_next_toast_pg_class_oid.) + * + * We can't allow the TOAST table to be created by pg_dump with a + * pg_dump-assigned oid because it might conflict with a later table that + * uses that oid, causing a "file exists" error for pg_class conflicts, and + * a "duplicate oid" error for pg_type conflicts. (TOAST tables need pg_type + * entries.) + * + * Therefore, a backend in binary-upgrade mode will not create a TOAST + * table unless an OID as passed in via pg_upgrade_support functions. + * This function is called after the restore and uses ALTER TABLE to + * auto-create any needed TOAST tables which will not conflict with + * restored oids. + */ +void +optionally_create_toast_tables(void) +{ + int dbnum; + + prep_status("Creating newly-required TOAST tables"); + + for (dbnum = 0; dbnum < new_cluster.dbarr.ndbs; dbnum++) + { + PGresult *res; + int ntups; + int rowno; + int i_nspname, + i_relname; + DbInfo *active_db = &new_cluster.dbarr.dbs[dbnum]; + PGconn *conn = connectToServer(&new_cluster, active_db->db_name); + + res = executeQueryOrDie(conn, + "SELECT n.nspname, c.relname " + "FROM pg_catalog.pg_class c, " + " pg_catalog.pg_namespace n " + "WHERE c.relnamespace = n.oid AND " + " n.nspname NOT IN ('pg_catalog', 'information_schema') AND " + "c.relkind IN ('r', 'm') AND " + "c.reltoastrelid = 0"); + + ntups = PQntuples(res); + i_nspname = PQfnumber(res, "nspname"); + i_relname = PQfnumber(res, "relname"); + for (rowno = 0; rowno < ntups; rowno++) + { + /* enable auto-oid-numbered TOAST creation if needed */ + PQclear(executeQueryOrDie(conn, "SELECT binary_upgrade.set_next_toast_pg_class_oid('%d'::pg_catalog.oid);", + OPTIONALLY_CREATE_TOAST_OID)); + + /* dummy command that also triggers check for required TOAST table */ + PQclear(executeQueryOrDie(conn, "ALTER TABLE %s.%s RESET (binary_upgrade_dummy_option);", + quote_identifier(PQgetvalue(res, rowno, i_nspname)), + quote_identifier(PQgetvalue(res, rowno, i_relname)))); + } + + PQclear(res); + + PQfinish(conn); + } + + check_ok(); +} diff --git a/contrib/pg_upgrade/exec.c b/contrib/pg_upgrade/exec.c index 6c217c902d86c..b38d801b5c17b 100644 --- a/contrib/pg_upgrade/exec.c +++ b/contrib/pg_upgrade/exec.c @@ -191,7 +191,7 @@ pid_lock_file_exists(const char *datadir) /* ENOTDIR means we will throw a more useful error later */ if (errno != ENOENT && errno != ENOTDIR) pg_fatal("could not open file \"%s\" for reading: %s\n", - path, getErrorText(errno)); + path, getErrorText()); return false; } @@ -285,7 +285,7 @@ check_data_dir(const char *pg_data) if (stat(subDirName, &statBuf) != 0) report_status(PG_FATAL, "check for \"%s\" failed: %s\n", - subDirName, getErrorText(errno)); + subDirName, getErrorText()); else if (!S_ISDIR(statBuf.st_mode)) report_status(PG_FATAL, "%s is not a directory\n", subDirName); @@ -309,7 +309,7 @@ check_bin_dir(ClusterInfo *cluster) /* check bindir */ if (stat(cluster->bindir, &statBuf) != 0) report_status(PG_FATAL, "check for 
\"%s\" failed: %s\n", - cluster->bindir, getErrorText(errno)); + cluster->bindir, getErrorText()); else if (!S_ISDIR(statBuf.st_mode)) report_status(PG_FATAL, "%s is not a directory\n", cluster->bindir); @@ -321,6 +321,7 @@ check_bin_dir(ClusterInfo *cluster) { /* these are only needed in the new cluster */ validate_exec(cluster->bindir, "psql"); + validate_exec(cluster->bindir, "pg_dump"); validate_exec(cluster->bindir, "pg_dumpall"); } } @@ -351,7 +352,7 @@ validate_exec(const char *dir, const char *cmdName) */ if (stat(path, &buf) < 0) pg_fatal("check for \"%s\" failed: %s\n", - path, getErrorText(errno)); + path, getErrorText()); else if (!S_ISREG(buf.st_mode)) pg_fatal("check for \"%s\" failed: not an executable file\n", path); diff --git a/contrib/pg_upgrade/file.c b/contrib/pg_upgrade/file.c index ab9d1edcb6d86..e2f8f3d2d1f5a 100644 --- a/contrib/pg_upgrade/file.c +++ b/contrib/pg_upgrade/file.c @@ -34,8 +34,12 @@ copyAndUpdateFile(pageCnvCtx *pageConverter, { if (pageConverter == NULL) { - if (pg_copy_file(src, dst, force) == -1) - return getErrorText(errno); +#ifndef WIN32 + if (copy_file(src, dst, force) == -1) +#else + if (CopyFile(src, dst, !force) == 0) +#endif + return getErrorText(); else return NULL; } @@ -117,7 +121,7 @@ linkAndUpdateFile(pageCnvCtx *pageConverter, return "Cannot in-place update this cluster, page-by-page conversion is required"; if (pg_link_file(src, dst) == -1) - return getErrorText(errno); + return getErrorText(); else return NULL; } @@ -215,7 +219,7 @@ check_hard_link(void) { pg_fatal("Could not create hard link between old and new data directories: %s\n" "In link mode the old and new data directories must be on the same file system volume.\n", - getErrorText(errno)); + getErrorText()); } unlink(new_link_file); } diff --git a/contrib/pg_upgrade/function.c b/contrib/pg_upgrade/function.c index f2cd4716c7295..f6e11a069953b 100644 --- a/contrib/pg_upgrade/function.c +++ b/contrib/pg_upgrade/function.c @@ -161,7 +161,7 @@ get_loadable_libraries(void) /* * Systems that install plpython before 8.1 have * plpython_call_handler() defined in the "public" schema, causing - * pg_dumpall to dump it. However that function still references + * pg_dump to dump it. However that function still references * "plpython" (no "2"), so it throws an error on restore. This code * checks for the problem function, reports affected databases to the * user and explains how to remove them. 8.1 git commit: @@ -327,7 +327,7 @@ check_loadable_libraries(void) if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); + output_path, getErrorText()); fprintf(script, "Could not load library \"%s\"\n%s\n", lib, PQerrorMessage(conn)); diff --git a/contrib/pg_upgrade/info.c b/contrib/pg_upgrade/info.c index d2968b479a930..0a8467294dc6b 100644 --- a/contrib/pg_upgrade/info.c +++ b/contrib/pg_upgrade/info.c @@ -38,21 +38,61 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, int *nmaps, const char *old_pgdata, const char *new_pgdata) { FileNameMap *maps; - int relnum; + int old_relnum, new_relnum; int num_maps = 0; maps = (FileNameMap *) pg_malloc(sizeof(FileNameMap) * old_db->rel_arr.nrels); - for (relnum = 0; relnum < Min(old_db->rel_arr.nrels, new_db->rel_arr.nrels); - relnum++) + /* + * The old database shouldn't have more relations than the new one. + * We force the new cluster to have a TOAST table if the old table + * had one. 
+ */ + if (old_db->rel_arr.nrels > new_db->rel_arr.nrels) + pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n", + old_db->db_name); + + /* Drive the loop using new_relnum, which might be higher. */ + for (old_relnum = new_relnum = 0; new_relnum < new_db->rel_arr.nrels; + new_relnum++) { - RelInfo *old_rel = &old_db->rel_arr.rels[relnum]; - RelInfo *new_rel = &new_db->rel_arr.rels[relnum]; + RelInfo *old_rel; + RelInfo *new_rel = &new_db->rel_arr.rels[new_relnum]; + + /* + * It is possible that the new cluster has a TOAST table for a table + * that didn't need one in the old cluster, e.g. 9.0 to 9.1 changed the + * NUMERIC length computation. Therefore, if we have a TOAST table + * in the new cluster that doesn't match, skip over it and continue + * processing. It is possible this TOAST table used an OID that was + * reserved in the old cluster, but we have no way of testing that, + * and we would have already gotten an error at the new cluster schema + * creation stage. Fortunately, since we only restore the OID counter + * after schema restore, and restore in OID order via pg_dump, a + * conflict would only happen if the new TOAST table had a very low + * OID. However, TOAST tables created long after initial table + * creation can have any OID, particularly after OID wraparound. + */ + if (old_relnum == old_db->rel_arr.nrels) + { + if (strcmp(new_rel->nspname, "pg_toast") == 0) + continue; + else + pg_fatal("Extra non-TOAST relation found in database \"%s\": new OID %d\n", + old_db->db_name, new_rel->reloid); + } + + old_rel = &old_db->rel_arr.rels[old_relnum]; if (old_rel->reloid != new_rel->reloid) - pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n", - old_db->db_name, old_rel->reloid, new_rel->reloid); + { + if (strcmp(new_rel->nspname, "pg_toast") == 0) + continue; + else + pg_fatal("Mismatch of relation OID in database \"%s\": old OID %d, new OID %d\n", + old_db->db_name, old_rel->reloid, new_rel->reloid); + } /* * TOAST table names initially match the heap pg_class oid. In @@ -76,14 +116,12 @@ gen_db_file_maps(DbInfo *old_db, DbInfo *new_db, create_rel_filename_map(old_pgdata, new_pgdata, old_db, new_db, old_rel, new_rel, maps + num_maps); num_maps++; + old_relnum++; } - /* - * Do this check after the loop so hopefully we will produce a clearer - * error above - */ - if (old_db->rel_arr.nrels != new_db->rel_arr.nrels) - pg_fatal("old and new databases \"%s\" have a different number of relations\n", + /* Did we fail to exhaust the old array? */ + if (old_relnum != old_db->rel_arr.nrels) + pg_fatal("old and new databases \"%s\" have a mismatched number of relations\n", old_db->db_name); *nmaps = num_maps; @@ -102,6 +140,7 @@ create_rel_filename_map(const char *old_data, const char *new_data, const RelInfo *old_rel, const RelInfo *new_rel, FileNameMap *map) { + /* In case old/new tablespaces don't match, do them separately. */ if (strlen(old_rel->tablespace) == 0) { /* @@ -109,16 +148,24 @@ create_rel_filename_map(const char *old_data, const char *new_data, * exist in the data directories. 
*/ map->old_tablespace = old_data; - map->new_tablespace = new_data; map->old_tablespace_suffix = "/base"; - map->new_tablespace_suffix = "/base"; } else { /* relation belongs to a tablespace, so use the tablespace location */ map->old_tablespace = old_rel->tablespace; - map->new_tablespace = new_rel->tablespace; map->old_tablespace_suffix = old_cluster.tablespace_suffix; + } + + /* Do the same for new tablespaces */ + if (strlen(new_rel->tablespace) == 0) + { + map->new_tablespace = new_data; + map->new_tablespace_suffix = "/base"; + } + else + { + map->new_tablespace = new_rel->tablespace; map->new_tablespace_suffix = new_cluster.tablespace_suffix; } @@ -274,7 +321,7 @@ get_rel_infos(ClusterInfo *cluster, DbInfo *dbinfo) *last_tablespace = NULL; /* - * pg_largeobject contains user data that does not appear in pg_dumpall + * pg_largeobject contains user data that does not appear in pg_dump * --schema-only output, so we have to copy that system table heap and * index. We could grab the pg_largeobject oids from template1, but it is * easy to treat it as a normal table. Order by oid so we can join old/new diff --git a/contrib/pg_upgrade/option.c b/contrib/pg_upgrade/option.c index b81010a813083..a608036cf446e 100644 --- a/contrib/pg_upgrade/option.c +++ b/contrib/pg_upgrade/option.c @@ -240,7 +240,7 @@ usage(void) pg_upgrade [OPTION]...\n\ \n\ Options:\n\ - -b, --old-bindir=BINDIR old cluster executable directory\n\ + -b, --old-bindir=BINDIR old cluster executable directory\n\ -B, --new-bindir=BINDIR new cluster executable directory\n\ -c, --check check clusters only, don't change any data\n\ -d, --old-datadir=DATADIR old cluster data directory\n\ @@ -383,7 +383,7 @@ adjust_data_dir(ClusterInfo *cluster) if ((output = popen(cmd, "r")) == NULL || fgets(cmd_output, sizeof(cmd_output), output) == NULL) pg_fatal("Could not get data directory using %s: %s\n", - cmd, getErrorText(errno)); + cmd, getErrorText()); pclose(output); diff --git a/contrib/pg_upgrade/pg_upgrade.c b/contrib/pg_upgrade/pg_upgrade.c index 773bb07e04eb9..9d5ac03a2277e 100644 --- a/contrib/pg_upgrade/pg_upgrade.c +++ b/contrib/pg_upgrade/pg_upgrade.c @@ -46,9 +46,14 @@ static void prepare_new_cluster(void); static void prepare_new_databases(void); static void create_new_objects(void); static void copy_clog_xlog_xid(void); -static void set_frozenxids(void); +static void set_frozenxids(bool minmxid_only); static void setup(char *argv0, bool *live_check); static void cleanup(void); +static void get_restricted_token(const char *progname); + +#ifdef WIN32 +static int CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, const char *progname); +#endif ClusterInfo old_cluster, new_cluster; @@ -65,6 +70,9 @@ char *output_files[] = { NULL }; +#ifdef WIN32 +static char *restrict_env; +#endif int main(int argc, char **argv) @@ -76,6 +84,8 @@ main(int argc, char **argv) parseCommandLine(argc, argv); + get_restricted_token(os_info.progname); + adjust_data_dir(&old_cluster); adjust_data_dir(&new_cluster); @@ -174,6 +184,162 @@ main(int argc, char **argv) return 0; } +#ifdef WIN32 +typedef BOOL(WINAPI * __CreateRestrictedToken) (HANDLE, DWORD, DWORD, PSID_AND_ATTRIBUTES, DWORD, PLUID_AND_ATTRIBUTES, DWORD, PSID_AND_ATTRIBUTES, PHANDLE); + +/* Windows API define missing from some versions of MingW headers */ +#ifndef DISABLE_MAX_PRIVILEGE +#define DISABLE_MAX_PRIVILEGE 0x1 +#endif + +/* +* Create a restricted token and execute the specified process with it. 
+* +* Returns 0 on failure, non-zero on success, same as CreateProcess(). +* +* On NT4, or any other system not containing the required functions, will +* NOT execute anything. +*/ +static int +CreateRestrictedProcess(char *cmd, PROCESS_INFORMATION *processInfo, const char *progname) +{ + BOOL b; + STARTUPINFO si; + HANDLE origToken; + HANDLE restrictedToken; + SID_IDENTIFIER_AUTHORITY NtAuthority = { SECURITY_NT_AUTHORITY }; + SID_AND_ATTRIBUTES dropSids[2]; + __CreateRestrictedToken _CreateRestrictedToken = NULL; + HANDLE Advapi32Handle; + + ZeroMemory(&si, sizeof(si)); + si.cb = sizeof(si); + + Advapi32Handle = LoadLibrary("ADVAPI32.DLL"); + if (Advapi32Handle != NULL) + { + _CreateRestrictedToken = (__CreateRestrictedToken)GetProcAddress(Advapi32Handle, "CreateRestrictedToken"); + } + + if (_CreateRestrictedToken == NULL) + { + fprintf(stderr, _("%s: WARNING: cannot create restricted tokens on this platform\n"), progname); + if (Advapi32Handle != NULL) + FreeLibrary(Advapi32Handle); + return 0; + } + + /* Open the current token to use as a base for the restricted one */ + if (!OpenProcessToken(GetCurrentProcess(), TOKEN_ALL_ACCESS, &origToken)) + { + fprintf(stderr, _("%s: could not open process token: error code %lu\n"), progname, GetLastError()); + return 0; + } + + /* Allocate list of SIDs to remove */ + ZeroMemory(&dropSids, sizeof(dropSids)); + if (!AllocateAndInitializeSid(&NtAuthority, 2, + SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_ADMINS, 0, 0, 0, 0, 0, + 0, &dropSids[0].Sid) || + !AllocateAndInitializeSid(&NtAuthority, 2, + SECURITY_BUILTIN_DOMAIN_RID, DOMAIN_ALIAS_RID_POWER_USERS, 0, 0, 0, 0, 0, + 0, &dropSids[1].Sid)) + { + fprintf(stderr, _("%s: could not allocate SIDs: error code %lu\n"), progname, GetLastError()); + return 0; + } + + b = _CreateRestrictedToken(origToken, + DISABLE_MAX_PRIVILEGE, + sizeof(dropSids) / sizeof(dropSids[0]), + dropSids, + 0, NULL, + 0, NULL, + &restrictedToken); + + FreeSid(dropSids[1].Sid); + FreeSid(dropSids[0].Sid); + CloseHandle(origToken); + FreeLibrary(Advapi32Handle); + + if (!b) + { + fprintf(stderr, _("%s: could not create restricted token: error code %lu\n"), progname, GetLastError()); + return 0; + } + +#ifndef __CYGWIN__ + AddUserToTokenDacl(restrictedToken); +#endif + + if (!CreateProcessAsUser(restrictedToken, + NULL, + cmd, + NULL, + NULL, + TRUE, + CREATE_SUSPENDED, + NULL, + NULL, + &si, + processInfo)) + + { + fprintf(stderr, _("%s: could not start process for command \"%s\": error code %lu\n"), progname, cmd, GetLastError()); + return 0; + } + + return ResumeThread(processInfo->hThread); +} +#endif + +static void +get_restricted_token(const char *progname) +{ +#ifdef WIN32 + + /* + * Before we execute another program, make sure that we are running with a + * restricted token. If not, re-execute ourselves with one. + */ + + if ((restrict_env = getenv("PG_RESTRICT_EXEC")) == NULL + || strcmp(restrict_env, "1") != 0) + { + PROCESS_INFORMATION pi; + char *cmdline; + + ZeroMemory(&pi, sizeof(pi)); + + cmdline = pg_strdup(GetCommandLine()); + + putenv("PG_RESTRICT_EXEC=1"); + + if (!CreateRestrictedProcess(cmdline, &pi, progname)) + { + fprintf(stderr, _("%s: could not re-execute with restricted token: error code %lu\n"), progname, GetLastError()); + } + else + { + /* + * Successfully re-execed. Now wait for child process to capture + * exitcode. 
+ */ + DWORD x; + + CloseHandle(pi.hThread); + WaitForSingleObject(pi.hProcess, INFINITE); + + if (!GetExitCodeProcess(pi.hProcess, &x)) + { + fprintf(stderr, _("%s: could not get exit code from subprocess: error code %lu\n"), progname, GetLastError()); + exit(1); + } + exit(x); + } + } +#endif +} static void setup(char *argv0, bool *live_check) @@ -223,7 +389,7 @@ setup(char *argv0, bool *live_check) /* get path to pg_upgrade executable */ if (find_my_exec(argv0, exec_path) < 0) - pg_fatal("Could not get path name to pg_upgrade: %s\n", getErrorText(errno)); + pg_fatal("Could not get path name to pg_upgrade: %s\n", getErrorText()); /* Trim off program name and keep just path */ *last_dir_separator(exec_path) = '\0'; @@ -250,8 +416,8 @@ prepare_new_cluster(void) /* * We do freeze after analyze so pg_statistic is also frozen. template0 is * not frozen here, but data rows were frozen by initdb, and we set its - * datfrozenxid and relfrozenxids later to match the new xid counter - * later. + * datfrozenxid, relfrozenxids, and relminmxid later to match the new xid + * counter later. */ prep_status("Freezing all rows on the new cluster"); exec_prog(UTILITY_LOG_FILE, NULL, true, @@ -273,7 +439,7 @@ prepare_new_databases(void) * set. */ - set_frozenxids(); + set_frozenxids(false); prep_status("Restoring global objects in the new cluster"); @@ -356,6 +522,15 @@ create_new_objects(void) end_progress_output(); check_ok(); + /* + * We don't have minmxids for databases or relations in pre-9.3 + * clusters, so set those after we have restored the schemas. + */ + if (GET_MAJOR_VERSION(old_cluster.major_version) < 903) + set_frozenxids(true); + + optionally_create_toast_tables(); + /* regenerate now that we have objects in the databases */ get_db_and_rel_infos(&new_cluster); @@ -363,22 +538,35 @@ create_new_objects(void) } /* - * Delete the given subdirectory contents from the new cluster, and copy the - * files from the old cluster into it. 
+ * Delete the given subdirectory contents from the new cluster */ static void -copy_subdir_files(char *subdir) +remove_new_subdir(char *subdir, bool rmtopdir) { - char old_path[MAXPGPATH]; char new_path[MAXPGPATH]; prep_status("Deleting files from new %s", subdir); - snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir); snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir); - if (!rmtree(new_path, true)) + if (!rmtree(new_path, rmtopdir)) pg_fatal("could not delete directory \"%s\"\n", new_path); + check_ok(); +} + +/* + * Copy the files from the old cluster into it + */ +static void +copy_subdir_files(char *subdir) +{ + char old_path[MAXPGPATH]; + char new_path[MAXPGPATH]; + + remove_new_subdir(subdir, true); + + snprintf(old_path, sizeof(old_path), "%s/%s", old_cluster.pgdata, subdir); + snprintf(new_path, sizeof(new_path), "%s/%s", new_cluster.pgdata, subdir); prep_status("Copying old %s to new server", subdir); @@ -400,12 +588,16 @@ copy_clog_xlog_xid(void) /* copy old commit logs to new data dir */ copy_subdir_files("pg_clog"); - /* set the next transaction id of the new cluster */ - prep_status("Setting next transaction ID for new cluster"); + /* set the next transaction id and epoch of the new cluster */ + prep_status("Setting next transaction ID and epoch for new cluster"); exec_prog(UTILITY_LOG_FILE, NULL, true, "\"%s/pg_resetxlog\" -f -x %u \"%s\"", new_cluster.bindir, old_cluster.controldata.chkpnt_nxtxid, new_cluster.pgdata); + exec_prog(UTILITY_LOG_FILE, NULL, true, + "\"%s/pg_resetxlog\" -f -e %u \"%s\"", + new_cluster.bindir, old_cluster.controldata.chkpnt_nxtepoch, + new_cluster.pgdata); check_ok(); /* @@ -419,6 +611,7 @@ copy_clog_xlog_xid(void) { copy_subdir_files("pg_multixact/offsets"); copy_subdir_files("pg_multixact/members"); + prep_status("Setting next multixact ID and offset for new cluster"); /* @@ -436,6 +629,13 @@ copy_clog_xlog_xid(void) } else if (new_cluster.controldata.cat_ver >= MULTIXACT_FORMATCHANGE_CAT_VER) { + /* + * Remove offsets/0000 file created by initdb that no longer matches + * the new multi-xid value. "members" starts at zero so no need to + * remove it. + */ + remove_new_subdir("pg_multixact/offsets", false); + prep_status("Setting oldest multixact ID on new cluster"); /* @@ -458,8 +658,9 @@ copy_clog_xlog_xid(void) /* now reset the wal archives in the new cluster */ prep_status("Resetting WAL archives"); exec_prog(UTILITY_LOG_FILE, NULL, true, - "\"%s/pg_resetxlog\" -l %s \"%s\"", new_cluster.bindir, - old_cluster.controldata.nextxlogfile, + /* use timeline 1 to match controldata and no WAL history file */ + "\"%s/pg_resetxlog\" -l 00000001%s \"%s\"", new_cluster.bindir, + old_cluster.controldata.nextxlogfile + 8, new_cluster.pgdata); check_ok(); } @@ -468,15 +669,15 @@ copy_clog_xlog_xid(void) /* * set_frozenxids() * - * We have frozen all xids, so set relfrozenxid and datfrozenxid - * to be the old cluster's xid counter, which we just set in the new - * cluster. User-table frozenxid values will be set by pg_dumpall - * --binary-upgrade, but objects not set by the pg_dump must have - * proper frozen counters. + * We have frozen all xids, so set datfrozenxid, relfrozenxid, and + * relminmxid to be the old cluster's xid counter, which we just set + * in the new cluster. User-table frozenxid and minmxid values will + * be set by pg_dump --binary-upgrade, but objects not set by the pg_dump + * must have proper frozen counters. 
*/ static void -set_frozenxids(void) +set_frozenxids(bool minmxid_only) { int dbnum; PGconn *conn, @@ -486,15 +687,25 @@ set_frozenxids(void) int i_datname; int i_datallowconn; - prep_status("Setting frozenxid counters in new cluster"); + if (!minmxid_only) + prep_status("Setting frozenxid and minmxid counters in new cluster"); + else + prep_status("Setting minmxid counter in new cluster"); conn_template1 = connectToServer(&new_cluster, "template1"); - /* set pg_database.datfrozenxid */ + if (!minmxid_only) + /* set pg_database.datfrozenxid */ + PQclear(executeQueryOrDie(conn_template1, + "UPDATE pg_catalog.pg_database " + "SET datfrozenxid = '%u'", + old_cluster.controldata.chkpnt_nxtxid)); + + /* set pg_database.datminmxid */ PQclear(executeQueryOrDie(conn_template1, "UPDATE pg_catalog.pg_database " - "SET datfrozenxid = '%u'", - old_cluster.controldata.chkpnt_nxtxid)); + "SET datminmxid = '%u'", + old_cluster.controldata.chkpnt_nxtmulti)); /* get database names */ dbres = executeQueryOrDie(conn_template1, @@ -512,10 +723,10 @@ set_frozenxids(void) /* * We must update databases where datallowconn = false, e.g. - * template0, because autovacuum increments their datfrozenxids and - * relfrozenxids even if autovacuum is turned off, and even though all - * the data rows are already frozen To enable this, we temporarily - * change datallowconn. + * template0, because autovacuum increments their datfrozenxids, + * relfrozenxids, and relminmxid even if autovacuum is turned off, + * and even though all the data rows are already frozen. To enable + * this, we temporarily change datallowconn. */ if (strcmp(datallowconn, "f") == 0) PQclear(executeQueryOrDie(conn_template1, @@ -525,13 +736,22 @@ set_frozenxids(void) conn = connectToServer(&new_cluster, datname); - /* set pg_class.relfrozenxid */ + if (!minmxid_only) + /* set pg_class.relfrozenxid */ + PQclear(executeQueryOrDie(conn, + "UPDATE pg_catalog.pg_class " + "SET relfrozenxid = '%u' " + /* only heap, materialized view, and TOAST are vacuumed */ + "WHERE relkind IN ('r', 'm', 't')", + old_cluster.controldata.chkpnt_nxtxid)); + + /* set pg_class.relminmxid */ PQclear(executeQueryOrDie(conn, "UPDATE pg_catalog.pg_class " - "SET relfrozenxid = '%u' " + "SET relminmxid = '%u' " /* only heap, materialized view, and TOAST are vacuumed */ "WHERE relkind IN ('r', 'm', 't')", - old_cluster.controldata.chkpnt_nxtxid)); + old_cluster.controldata.chkpnt_nxtmulti)); PQfinish(conn); /* Reset datallowconn flag */ diff --git a/contrib/pg_upgrade/pg_upgrade.h b/contrib/pg_upgrade/pg_upgrade.h index 0410b02293d65..e7c0f637c4a04 100644 --- a/contrib/pg_upgrade/pg_upgrade.h +++ b/contrib/pg_upgrade/pg_upgrade.h @@ -70,7 +70,6 @@ extern char *output_files[]; #ifndef WIN32 -#define pg_copy_file copy_file #define pg_mv_file rename #define pg_link_file link #define PATH_SEPARATOR '/' @@ -80,7 +79,6 @@ extern char *output_files[]; #define ECHO_QUOTE "'" #define ECHO_BLANK "" #else -#define pg_copy_file CopyFile #define pg_mv_file pgrename #define pg_link_file win32_pghardlink #define PATH_SEPARATOR '\\' @@ -107,6 +105,11 @@ extern char *output_files[]; */ #define VISIBILITY_MAP_CRASHSAFE_CAT_VER 201107031 +/* + * change in JSONB format during 9.4 beta + */ +#define JSONB_FORMAT_CHANGE_CAT_VER 201409291 + /* * pg_multixact format changed in 9.3 commit 0ac5ad5134f2769ccbaefec73844f85, * ("Improve concurrency of foreign key locking") which also updated catalog @@ -188,8 +191,8 @@ typedef struct uint32 ctrl_ver; uint32 cat_ver; char nextxlogfile[25]; - uint32 chkpnt_tli; 
uint32 chkpnt_nxtxid; + uint32 chkpnt_nxtepoch; uint32 chkpnt_nxtoid; uint32 chkpnt_nxtmulti; uint32 chkpnt_nxtmxoff; @@ -338,6 +341,7 @@ void disable_old_cluster(void); /* dump.c */ void generate_old_dump(void); +void optionally_create_toast_tables(void); /* exec.c */ @@ -461,7 +465,7 @@ void prep_status(const char *fmt,...) __attribute__((format(PG_PRINTF_ATTRIBUTE, 1, 2))); void check_ok(void); -const char *getErrorText(int errNum); +const char *getErrorText(void); unsigned int str2uint(const char *str); void pg_putenv(const char *var, const char *val); diff --git a/contrib/pg_upgrade/relfilenode.c b/contrib/pg_upgrade/relfilenode.c index aa6aafde5e97d..05c375e0c38c4 100644 --- a/contrib/pg_upgrade/relfilenode.c +++ b/contrib/pg_upgrade/relfilenode.c @@ -260,7 +260,7 @@ transfer_relfile(pageCnvCtx *pageConverter, FileNameMap *map, else pg_fatal("error while checking for file existence \"%s.%s\" (\"%s\" to \"%s\"): %s\n", map->nspname, map->relname, old_file, new_file, - getErrorText(errno)); + getErrorText()); } close(fd); } diff --git a/contrib/pg_upgrade/server.c b/contrib/pg_upgrade/server.c index 5f4b5307cbad7..3d041efbfd0a6 100644 --- a/contrib/pg_upgrade/server.c +++ b/contrib/pg_upgrade/server.c @@ -202,11 +202,13 @@ start_postmaster(ClusterInfo *cluster, bool throw_error) #endif /* - * Using autovacuum=off disables cleanup vacuum and analyze, but freeze - * vacuums can still happen, so we set autovacuum_freeze_max_age to its - * maximum. We assume all datfrozenxid and relfrozen values are less than - * a gap of 2000000000 from the current xid counter, so autovacuum will - * not touch them. + * Since PG 9.1, we have used -b to disable autovacuum. For earlier + * releases, setting autovacuum=off disables cleanup vacuum and analyze, + * but freeze vacuums can still happen, so we set autovacuum_freeze_max_age + * to its maximum. (autovacuum_multixact_freeze_max_age was introduced + * after 9.1, so there is no need to set that.) We assume all datfrozenxid + * and relfrozenxid values are less than a gap of 2000000000 from the current + * xid counter, so autovacuum will not touch them. * * Turn off durability requirements to improve object creation speed, and * we only modify the new cluster, so only use it there. If there is a diff --git a/contrib/pg_upgrade/tablespace.c b/contrib/pg_upgrade/tablespace.c index 68e9cb241c75d..4086e53cf56da 100644 --- a/contrib/pg_upgrade/tablespace.c +++ b/contrib/pg_upgrade/tablespace.c @@ -91,7 +91,7 @@ get_tablespace_paths(void) else report_status(PG_FATAL, "cannot stat() tablespace directory \"%s\": %s\n", - os_info.old_tablespaces[tblnum], getErrorText(errno)); + os_info.old_tablespaces[tblnum], getErrorText()); } if (!S_ISDIR(statBuf.st_mode)) report_status(PG_FATAL, diff --git a/contrib/pg_upgrade/test.sh b/contrib/pg_upgrade/test.sh index baa7d4748b6f5..b6aaff2d97820 100644 --- a/contrib/pg_upgrade/test.sh +++ b/contrib/pg_upgrade/test.sh @@ -17,17 +17,57 @@ set -e unset MAKEFLAGS unset MAKELEVEL -# Set listen_addresses desirably +# Run a given "initdb" binary and overlay the regression testing +# authentication configuration. 
+standard_initdb() { + "$1" -N + if [ -n "$TEMP_CONFIG" -a -r "$TEMP_CONFIG" ] + then + cat "$TEMP_CONFIG" >> "$PGDATA/postgresql.conf" + fi + ../../src/test/regress/pg_regress --config-auth "$PGDATA" +} + +# Establish how the server will listen for connections testhost=`uname -s` case $testhost in - MINGW*) LISTEN_ADDRESSES="localhost" ;; - *) LISTEN_ADDRESSES="" ;; + MINGW*) + LISTEN_ADDRESSES="localhost" + PGHOST=localhost + ;; + *) + LISTEN_ADDRESSES="" + # Select a socket directory. The algorithm is from the "configure" + # script; the outcome mimics pg_regress.c:make_temp_sockdir(). + PGHOST=$PG_REGRESS_SOCK_DIR + if [ "x$PGHOST" = x ]; then + { + dir=`(umask 077 && + mktemp -d /tmp/pg_upgrade_check-XXXXXX) 2>/dev/null` && + [ -d "$dir" ] + } || + { + dir=/tmp/pg_upgrade_check-$$-$RANDOM + (umask 077 && mkdir "$dir") + } || + { + echo "could not create socket temporary directory in \"/tmp\"" + exit 1 + } + + PGHOST=$dir + trap 'rm -rf "$PGHOST"' 0 + trap 'exit 3' 1 2 13 15 + fi + ;; esac -POSTMASTER_OPTS="-F -c listen_addresses=$LISTEN_ADDRESSES" +POSTMASTER_OPTS="-F -c listen_addresses=$LISTEN_ADDRESSES -k \"$PGHOST\"" +export PGHOST -temp_root=$PWD/tmp_check +# don't rely on $PWD here, as old shells don't set it +temp_root=`pwd`/tmp_check if [ "$1" = '--install' ]; then temp_install=$temp_root/install @@ -70,7 +110,7 @@ PGDATA="$BASE_PGDATA.old" export PGDATA rm -rf "$BASE_PGDATA" "$PGDATA" -logdir=$PWD/log +logdir=`pwd`/log rm -rf "$logdir" mkdir "$logdir" @@ -86,7 +126,6 @@ PGSERVICE=""; unset PGSERVICE PGSSLMODE=""; unset PGSSLMODE PGREQUIRESSL=""; unset PGREQUIRESSL PGCONNECT_TIMEOUT=""; unset PGCONNECT_TIMEOUT -PGHOST=""; unset PGHOST PGHOSTADDR=""; unset PGHOSTADDR # Select a non-conflicting port number, similarly to pg_regress.c @@ -114,7 +153,7 @@ export EXTRA_REGRESS_OPTS # enable echo so the user can see what is being executed set -x -$oldbindir/initdb -N +standard_initdb "$oldbindir"/initdb $oldbindir/pg_ctl start -l "$logdir/postmaster1.log" -o "$POSTMASTER_OPTS" -w if "$MAKE" -C "$oldsrc" installcheck; then pg_dumpall -f "$temp_root"/dump1.sql || pg_dumpall1_status=$? @@ -154,7 +193,7 @@ fi PGDATA=$BASE_PGDATA -initdb -N +standard_initdb 'initdb' pg_upgrade $PG_UPGRADE_OPTS -d "${PGDATA}.old" -D "${PGDATA}" -b "$oldbindir" -B "$bindir" -p "$PGPORT" -P "$PGPORT" @@ -182,10 +221,11 @@ case $testhost in *) sh ./delete_old_cluster.sh ;; esac -if diff -q "$temp_root"/dump1.sql "$temp_root"/dump2.sql; then +if diff "$temp_root"/dump1.sql "$temp_root"/dump2.sql >/dev/null; then echo PASSED exit 0 else + echo "Files $temp_root/dump1.sql and $temp_root/dump2.sql differ" echo "dumps were not identical" exit 1 fi diff --git a/contrib/pg_upgrade/util.c b/contrib/pg_upgrade/util.c index 3b94057696d0a..84a02ecaf465c 100644 --- a/contrib/pg_upgrade/util.c +++ b/contrib/pg_upgrade/util.c @@ -234,18 +234,15 @@ get_user_info(char **user_name_p) /* * getErrorText() * - * Returns the text of the error message for the given error number - * - * This feature is factored into a separate function because it is - * system-dependent. 
+ * Returns the text of the most recent error */ const char * -getErrorText(int errNum) +getErrorText(void) { #ifdef WIN32 _dosmaperr(GetLastError()); #endif - return pg_strdup(strerror(errNum)); + return pg_strdup(strerror(errno)); } diff --git a/contrib/pg_upgrade/version.c b/contrib/pg_upgrade/version.c index 0f9dc079b21e6..339241b6e37e7 100644 --- a/contrib/pg_upgrade/version.c +++ b/contrib/pg_upgrade/version.c @@ -49,7 +49,7 @@ new_9_0_populate_pg_largeobject_metadata(ClusterInfo *cluster, bool check_mode) if (!check_mode) { if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); + pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText()); fprintf(script, "\\connect %s\n", quote_identifier(active_db->db_name)); fprintf(script, @@ -143,7 +143,7 @@ old_9_3_check_for_line_data_type_usage(ClusterInfo *cluster) { found = true; if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); + pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText()); if (!db_used) { fprintf(script, "Database: %s\n", active_db->db_name); diff --git a/contrib/pg_upgrade/version_old_8_3.c b/contrib/pg_upgrade/version_old_8_3.c index 07e79bd609a5f..40d86871e0905 100644 --- a/contrib/pg_upgrade/version_old_8_3.c +++ b/contrib/pg_upgrade/version_old_8_3.c @@ -73,7 +73,7 @@ old_8_3_check_for_name_data_type_usage(ClusterInfo *cluster) { found = true; if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); + pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText()); if (!db_used) { fprintf(script, "Database: %s\n", active_db->db_name); @@ -163,7 +163,7 @@ old_8_3_check_for_tsquery_usage(ClusterInfo *cluster) { found = true; if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); + pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText()); if (!db_used) { fprintf(script, "Database: %s\n", active_db->db_name); @@ -242,7 +242,7 @@ old_8_3_check_ltree_usage(ClusterInfo *cluster) found = true; if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) pg_fatal("Could not open file \"%s\": %s\n", - output_path, getErrorText(errno)); + output_path, getErrorText()); if (!db_used) { fprintf(script, "Database: %s\n", active_db->db_name); @@ -365,7 +365,7 @@ old_8_3_rebuild_tsvector_tables(ClusterInfo *cluster, bool check_mode) if (!check_mode) { if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); + pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText()); if (!db_used) { fprintf(script, "\\connect %s\n\n", @@ -481,7 +481,7 @@ old_8_3_invalidate_hash_gin_indexes(ClusterInfo *cluster, bool check_mode) if (!check_mode) { if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); + pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText()); if (!db_used) { fprintf(script, "\\connect %s\n", @@ -600,7 +600,7 @@ old_8_3_invalidate_bpchar_pattern_ops_indexes(ClusterInfo *cluster, if (!check_mode) { if (script == NULL && (script = fopen_priv(output_path, "w")) == 
NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); + pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText()); if (!db_used) { fprintf(script, "\\connect %s\n", @@ -722,7 +722,7 @@ old_8_3_create_sequence_script(ClusterInfo *cluster) found = true; if (script == NULL && (script = fopen_priv(output_path, "w")) == NULL) - pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText(errno)); + pg_fatal("could not open file \"%s\": %s\n", output_path, getErrorText()); if (!db_used) { fprintf(script, "\\connect %s\n\n", diff --git a/contrib/pg_xlogdump/Makefile b/contrib/pg_xlogdump/Makefile index ada261c4dd001..c226fa260007f 100644 --- a/contrib/pg_xlogdump/Makefile +++ b/contrib/pg_xlogdump/Makefile @@ -7,7 +7,7 @@ PROGRAM = pg_xlogdump OBJS = pg_xlogdump.o compat.o xlogreader.o rmgrdesc.o \ $(RMGRDESCOBJS) $(WIN32RES) -RMGRDESCSOURCES = $(notdir $(wildcard $(top_srcdir)/src/backend/access/rmgrdesc/*desc.c)) +RMGRDESCSOURCES = $(sort $(notdir $(wildcard $(top_srcdir)/src/backend/access/rmgrdesc/*desc.c))) RMGRDESCOBJS = $(patsubst %.c,%.o,$(RMGRDESCSOURCES)) EXTRA_CLEAN = $(RMGRDESCSOURCES) xlogreader.c diff --git a/contrib/pg_xlogdump/pg_xlogdump.c b/contrib/pg_xlogdump/pg_xlogdump.c index 824b8c393c9f6..37a4c44acd933 100644 --- a/contrib/pg_xlogdump/pg_xlogdump.c +++ b/contrib/pg_xlogdump/pg_xlogdump.c @@ -152,7 +152,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fname, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; /* XLOGDIR / fname */ @@ -161,7 +161,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fpath, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; datadir = getenv("PGDATA"); @@ -173,7 +173,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fpath, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; } } @@ -185,7 +185,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fpath, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; /* directory / XLOGDIR / fname */ @@ -194,7 +194,7 @@ fuzzy_open_file(const char *directory, const char *fname) fd = open(fpath, O_RDONLY | PG_BINARY, 0); if (fd < 0 && errno != ENOENT) return -1; - else if (fd > 0) + else if (fd >= 0) return fd; } return -1; diff --git a/contrib/pgbench/pgbench.c b/contrib/pgbench/pgbench.c index 4aa8a5031a09b..6c36ad2528a7f 100644 --- a/contrib/pgbench/pgbench.c +++ b/contrib/pgbench/pgbench.c @@ -52,6 +52,10 @@ #ifndef INT64_MAX #define INT64_MAX INT64CONST(0x7FFFFFFFFFFFFFFF) #endif +#ifndef INT64_MIN +#define INT64_MIN (-INT64CONST(0x7FFFFFFFFFFFFFFF) - 1) +#endif + /* * Multi-platform pthread implementations @@ -204,10 +208,10 @@ typedef struct * sent */ int sleeping; /* 1 indicates that the client is napping */ bool throttling; /* whether nap is for throttling */ - int64 until; /* napping until (usec) */ Variable *variables; /* array of variable definitions */ int nvariables; - instr_time txn_begin; /* used for measuring transaction latencies */ + int64 txn_scheduled; /* scheduled start time of transaction (usec) */ + instr_time txn_begin; /* used for measuring schedule lag times */ instr_time stmt_begin; /* used for measuring statement latencies */ int64 txn_latencies; /* cumulated latencies */ int64 
txn_sqlats; /* cumulated square latencies */ @@ -278,12 +282,17 @@ typedef struct long start_time; /* when does the interval start */ int cnt; /* number of transactions */ - double min_duration; /* min/max durations */ - double max_duration; - double sum; /* sum(duration), sum(duration^2) - for + + double min_latency; /* min/max latencies */ + double max_latency; + double sum_latency; /* sum(latency), sum(latency^2) - for * estimates */ - double sum2; + double sum2_latency; + double min_lag; + double max_lag; + double sum_lag; /* sum(lag) */ + double sum2_lag; /* sum(lag*lag) */ } AggVals; static Command **sql_files[MAX_FILES]; /* SQL script files */ @@ -471,6 +480,25 @@ getrand(TState *thread, int64 min, int64 max) return min + (int64) ((max - min + 1) * pg_erand48(thread->random_state)); } +/* + * random number generator: generate a value, such that the series of values + * will approximate a Poisson distribution centered on the given value. + */ +static int64 +getPoissonRand(TState *thread, int64 center) +{ + /* + * Use inverse transform sampling to generate a value > 0, such that the + * expected (i.e. average) value is the given argument. + */ + double uniform; + + /* erand in [0, 1), uniform in (0, 1] */ + uniform = 1.0 - pg_erand48(thread->random_state); + + return (int64) (-log(uniform) * ((double) center) + 0.5); +} + /* call PQexec() and exit() on failure */ static void executeStatement(PGconn *con, const char *sql) @@ -839,6 +867,7 @@ runShellCommand(CState *st, char *variable, char **argv, int argc) { if (!timer_exceeded) fprintf(stderr, "%s: cannot read the result\n", argv[0]); + (void) pclose(fp); return false; } if (pclose(fp) < 0) @@ -886,18 +915,23 @@ clientDone(CState *st, bool ok) return false; /* always false */ } -static -void +static void agg_vals_init(AggVals *aggs, instr_time start) { /* basic counters */ aggs->cnt = 0; /* number of transactions */ - aggs->sum = 0; /* SUM(duration) */ - aggs->sum2 = 0; /* SUM(duration*duration) */ + aggs->sum_latency = 0; /* SUM(latency) */ + aggs->sum2_latency = 0; /* SUM(latency*latency) */ /* min and max transaction duration */ - aggs->min_duration = 0; - aggs->max_duration = 0; + aggs->min_latency = 0; + aggs->max_latency = 0; + + /* schedule lag counters */ + aggs->sum_lag = 0; + aggs->sum2_lag = 0; + aggs->min_lag = 0; + aggs->max_lag = 0; /* start of the current interval */ aggs->start_time = INSTR_TIME_GET_DOUBLE(start); @@ -922,25 +956,17 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa if (throttle_delay && !st->is_throttled) { /* - * Use inverse transform sampling to randomly generate a delay, such - * that the series of delays will approximate a Poisson distribution - * centered on the throttle_delay time. - * - * 10000 implies a 9.2 (-log(1/10000)) to 0.0 (log 1) delay - * multiplier, and results in a 0.055 % target underestimation bias: - * - * SELECT 1.0/AVG(-LN(i/10000.0)) FROM generate_series(1,10000) AS i; - * = 1.000552717032611116335474 + * Generate a delay such that the series of delays will approximate a + * Poisson distribution centered on the throttle_delay time. * * If transactions are too slow or a given wait is shorter than a * transaction, the next transaction will start right away. 
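getPoissonRand() above draws each throttling wait by inverse transform sampling: take a uniform draw in (0, 1], negate its logarithm, and scale by the requested center, which yields exponentially distributed inter-arrival times and hence a Poisson-like stream of transaction starts. A minimal standalone sketch of the same idea, using libc drand48() in place of pgbench's per-thread pg_erand48 state (that substitution is only for illustration):

#include <math.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* One wait in microseconds with expected value "center", computed as getPoissonRand() does. */
static int64_t
poisson_wait(int64_t center)
{
    /* drand48() is in [0, 1); flip it so log() gets a value in (0, 1] */
    double uniform = 1.0 - drand48();

    return (int64_t) (-log(uniform) * (double) center + 0.5);
}

int
main(void)
{
    int64_t total = 0;
    int     n = 1000000;

    srand48(42);
    for (int i = 0; i < n; i++)
        total += poisson_wait(1000);

    /* The average approaches the requested 1000 usec center. */
    printf("average wait: %.1f usec\n", (double) total / n);
    return 0;
}

Individual waits vary widely (the median is about 693 usec for a 1000 usec center), which is why the scheduled start times, rather than per-transaction timers, are what the latency and lag figures are measured against.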
*/ - int64 wait = (int64) (throttle_delay * - 1.00055271703 * -log(getrand(thread, 1, 10000) / 10000.0)); + int64 wait = getPoissonRand(thread, throttle_delay); thread->throttle_trigger += wait; - st->until = thread->throttle_trigger; + st->txn_scheduled = thread->throttle_trigger; st->sleeping = 1; st->throttling = true; st->is_throttled = true; @@ -956,13 +982,13 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa INSTR_TIME_SET_CURRENT(now); now_us = INSTR_TIME_GET_MICROSEC(now); - if (st->until <= now_us) + if (st->txn_scheduled <= now_us) { st->sleeping = 0; /* Done sleeping, go ahead with next command */ if (st->throttling) { /* Measure lag of throttled transaction relative to target */ - int64 lag = now_us - st->until; + int64 lag = now_us - st->txn_scheduled; thread->throttle_lag += lag; if (lag > thread->throttle_lag_max) @@ -976,6 +1002,11 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa if (st->listen) { /* are we receiver? */ + instr_time now; + bool now_valid = false; + + INSTR_TIME_SET_ZERO(now); /* initialize to keep compiler quiet */ + if (commands[st->state]->type == SQL_COMMAND) { if (debug) @@ -995,10 +1026,13 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa */ if (is_latencies) { - instr_time now; int cnum = commands[st->state]->command_num; - INSTR_TIME_SET_CURRENT(now); + if (!now_valid) + { + INSTR_TIME_SET_CURRENT(now); + now_valid = true; + } INSTR_TIME_ACCUM_DIFF(thread->exec_elapsed[cnum], now, st->stmt_begin); thread->exec_count[cnum]++; @@ -1007,12 +1041,16 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa /* transaction finished: record latency under progress or throttling */ if ((progress || throttle_delay) && commands[st->state + 1] == NULL) { - instr_time diff; int64 latency; - INSTR_TIME_SET_CURRENT(diff); - INSTR_TIME_SUBTRACT(diff, st->txn_begin); - latency = INSTR_TIME_GET_MICROSEC(diff); + if (!now_valid) + { + INSTR_TIME_SET_CURRENT(now); + now_valid = true; + } + + latency = INSTR_TIME_GET_MICROSEC(now) - st->txn_scheduled; + st->txn_latencies += latency; /* @@ -1030,9 +1068,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa */ if (logfile && commands[st->state + 1] == NULL) { - instr_time now; - instr_time diff; - double usec; + double lag; + double latency; /* * write the log entry if this row belongs to the random sample, @@ -1041,10 +1078,13 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa if (sample_rate == 0.0 || pg_erand48(thread->random_state) <= sample_rate) { - INSTR_TIME_SET_CURRENT(now); - diff = now; - INSTR_TIME_SUBTRACT(diff, st->txn_begin); - usec = (double) INSTR_TIME_GET_MICROSEC(diff); + if (!now_valid) + { + INSTR_TIME_SET_CURRENT(now); + now_valid = true; + } + latency = (double) (INSTR_TIME_GET_MICROSEC(now) - st->txn_scheduled); + lag = (double) (INSTR_TIME_GET_MICROSEC(st->txn_begin) - st->txn_scheduled); /* should we aggregate the results or not? 
*/ if (agg_interval > 0) @@ -1056,15 +1096,27 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa if (agg->start_time + agg_interval >= INSTR_TIME_GET_DOUBLE(now)) { agg->cnt += 1; - agg->sum += usec; - agg->sum2 += usec * usec; + agg->sum_latency += latency; + agg->sum2_latency += latency * latency; /* first in this aggregation interval */ - if ((agg->cnt == 1) || (usec < agg->min_duration)) - agg->min_duration = usec; + if ((agg->cnt == 1) || (latency < agg->min_latency)) + agg->min_latency = latency; - if ((agg->cnt == 1) || (usec > agg->max_duration)) - agg->max_duration = usec; + if ((agg->cnt == 1) || (latency > agg->max_latency)) + agg->max_latency = latency; + + /* and the same for schedule lag */ + if (throttle_delay) + { + agg->sum_lag += lag; + agg->sum2_lag += lag * lag; + + if ((agg->cnt == 1) || (lag < agg->min_lag)) + agg->min_lag = lag; + if ((agg->cnt == 1) || (lag > agg->max_lag)) + agg->max_lag = lag; + } } else { @@ -1080,23 +1132,34 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa * ifdef in usage), so we don't need to handle * this in a special way (see below). */ - fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f\n", + fprintf(logfile, "%ld %d %.0f %.0f %.0f %.0f", agg->start_time, agg->cnt, - agg->sum, - agg->sum2, - agg->min_duration, - agg->max_duration); + agg->sum_latency, + agg->sum2_latency, + agg->min_latency, + agg->max_latency); + if (throttle_delay) + fprintf(logfile, " %.0f %.0f %.0f %.0f", + agg->sum_lag, + agg->sum2_lag, + agg->min_lag, + agg->max_lag); + fputc('\n', logfile); /* move to the next inteval */ agg->start_time = agg->start_time + agg_interval; /* reset for "no transaction" intervals */ agg->cnt = 0; - agg->min_duration = 0; - agg->max_duration = 0; - agg->sum = 0; - agg->sum2 = 0; + agg->min_latency = 0; + agg->max_latency = 0; + agg->sum_latency = 0; + agg->sum2_latency = 0; + agg->min_lag = 0; + agg->max_lag = 0; + agg->sum_lag = 0; + agg->sum2_lag = 0; } /* @@ -1104,10 +1167,14 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa * current) */ agg->cnt = 1; - agg->min_duration = usec; - agg->max_duration = usec; - agg->sum = usec; - agg->sum2 = usec * usec; + agg->min_latency = latency; + agg->max_latency = latency; + agg->sum_latency = latency; + agg->sum2_latency = latency * latency; + agg->min_lag = lag; + agg->max_lag = lag; + agg->sum_lag = lag; + agg->sum2_lag = lag * lag; } } else @@ -1119,8 +1186,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa * This is more than we really ought to know about * instr_time */ - fprintf(logfile, "%d %d %.0f %d %ld %ld\n", - st->id, st->cnt, usec, st->use_file, + fprintf(logfile, "%d %d %.0f %d %ld %ld", + st->id, st->cnt, latency, st->use_file, (long) now.tv_sec, (long) now.tv_usec); #else @@ -1128,9 +1195,12 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa * On Windows, instr_time doesn't provide a timestamp * anyway */ - fprintf(logfile, "%d %d %.0f %d 0 0\n", - st->id, st->cnt, usec, st->use_file); + fprintf(logfile, "%d %d %.0f %d 0 0", + st->id, st->cnt, latency, st->use_file); #endif + if (throttle_delay) + fprintf(logfile, " %.0f", lag); + fputc('\n', logfile); } } } @@ -1219,8 +1289,17 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa /* Record transaction start time under logging, progress or throttling */ if ((logfile || progress || throttle_delay) && st->state == 0) + { 
INSTR_TIME_SET_CURRENT(st->txn_begin); + /* + * When not throttling, this is also the transaction's scheduled start + * time. + */ + if (!throttle_delay) + st->txn_scheduled = INSTR_TIME_GET_MICROSEC(st->txn_begin); + } + /* Record statement start time if per-command latencies are requested */ if (is_latencies) INSTR_TIME_SET_CURRENT(st->stmt_begin); @@ -1435,13 +1514,37 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa snprintf(res, sizeof(res), INT64_FORMAT, ope1 * ope2); else if (strcmp(argv[3], "/") == 0) { + int64 operes; + if (ope2 == 0) { fprintf(stderr, "%s: division by zero\n", argv[0]); st->ecnt++; return true; } - snprintf(res, sizeof(res), INT64_FORMAT, ope1 / ope2); + /* + * INT64_MIN / -1 is problematic, since the result can't + * be represented on a two's-complement machine. Some + * machines produce INT64_MIN, some produce zero, some + * throw an exception. We can dodge the problem by + * recognizing that division by -1 is the same as + * negation. + */ + if (ope2 == -1) + { + operes = -ope1; + + /* overflow check (needed for INT64_MIN) */ + if (ope1 == INT64_MIN) + { + fprintf(stderr, "bigint out of range\n"); + st->ecnt++; + return true; + } + } + else + operes = ope1 / ope2; + snprintf(res, sizeof(res), INT64_FORMAT, operes); } else { @@ -1489,7 +1592,7 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa usec *= 1000000; INSTR_TIME_SET_CURRENT(now); - st->until = INSTR_TIME_GET_MICROSEC(now) + usec; + st->txn_scheduled = INSTR_TIME_GET_MICROSEC(now) + usec; st->sleeping = 1; st->listen = 1; @@ -1853,6 +1956,7 @@ parseQuery(Command *cmd, const char *raw_sql) if (cmd->argc >= MAX_ARGS) { fprintf(stderr, "statement has too many arguments (maximum is %d): %s\n", MAX_ARGS - 1, raw_sql); + pg_free(name); return false; } @@ -2093,6 +2197,7 @@ process_file(char *filename) else if ((fd = fopen(filename, "r")) == NULL) { fprintf(stderr, "%s: %s\n", filename, strerror(errno)); + pg_free(my_commands); return false; } @@ -2223,6 +2328,10 @@ printResults(int ttype, int64 normal_xacts, int nclients, normal_xacts); } + /* Remaining stats are nonsensical if we failed to execute any xacts */ + if (normal_xacts <= 0) + return; + if (throttle_delay || progress) { /* compute and show latency average and standard deviation */ @@ -2551,7 +2660,7 @@ main(int argc, char **argv) case 'M': if (num_files > 0) { - fprintf(stderr, "query mode (-M) should be specifiled before transaction scripts (-f)\n"); + fprintf(stderr, "query mode (-M) should be specified before transaction scripts (-f)\n"); exit(1); } for (querymode = 0; querymode < NUM_QUERYMODE; querymode++) @@ -3108,7 +3217,7 @@ threadRun(void *arg) now_usec = INSTR_TIME_GET_MICROSEC(now); } - this_usec = st->until - now_usec; + this_usec = st->txn_scheduled - now_usec; if (min_usec > this_usec) min_usec = this_usec; } @@ -3122,7 +3231,7 @@ threadRun(void *arg) sock = PQsocket(st->con); if (sock < 0) { - fprintf(stderr, "bad socket: %s\n", strerror(errno)); + fprintf(stderr, "bad socket: %s", PQerrorMessage(st->con)); goto done; } @@ -3132,6 +3241,33 @@ threadRun(void *arg) maxsock = sock; } + /* also wake up to print the next progress report on time */ + if (progress && min_usec > 0 +#if !defined(PTHREAD_FORK_EMULATION) + && thread->tid == 0 +#endif /* !PTHREAD_FORK_EMULATION */ + ) + { + /* get current time if needed */ + if (now_usec == 0) + { + instr_time now; + + INSTR_TIME_SET_CURRENT(now); + now_usec = INSTR_TIME_GET_MICROSEC(now); + } + + if (now_usec >= next_report) + 
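The division branch added in the \set arithmetic hunk above special-cases a divisor of -1 because INT64_MIN / -1 is the one int64 division whose true result, 2^63, cannot be represented; on many platforms it traps instead of returning a value. A self-contained sketch of the same guard outside pgbench (safe_div64 is an invented name used only for this illustration):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Divide a by b, refusing the two cases that have no int64_t result. */
static bool
safe_div64(int64_t a, int64_t b, int64_t *result)
{
    if (b == 0)
        return false;           /* division by zero */
    if (b == -1)
    {
        /* a / -1 is just -a, except that -INT64_MIN does not fit */
        if (a == INT64_MIN)
            return false;       /* "bigint out of range" */
        *result = -a;
        return true;
    }
    *result = a / b;
    return true;
}

int
main(void)
{
    int64_t r;

    if (!safe_div64(INT64_MIN, -1, &r))
        printf("bigint out of range\n");
    if (safe_div64(INT64_MIN, 2, &r))
        printf("halved: %lld\n", (long long) r);
    return 0;
}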
min_usec = 0; + else if ((next_report - now_usec) < min_usec) + min_usec = next_report - now_usec; + } + + /* + * Sleep until we receive data from the server, or a nap-time + * specified in the script ends, or it's time to print a progress + * report. + */ if (min_usec > 0 && maxsock != -1) { int nsocks; /* return from select(2) */ @@ -3163,11 +3299,21 @@ threadRun(void *arg) Command **commands = sql_files[st->use_file]; int prev_ecnt = st->ecnt; - if (st->con && (FD_ISSET(PQsocket(st->con), &input_mask) - || commands[st->state]->type == META_COMMAND)) + if (st->con) { - if (!doCustom(thread, st, &result->conn_time, logfile, &aggs)) - remains--; /* I've aborted */ + int sock = PQsocket(st->con); + + if (sock < 0) + { + fprintf(stderr, "bad socket: %s", PQerrorMessage(st->con)); + goto done; + } + if (FD_ISSET(sock, &input_mask) || + commands[st->state]->type == META_COMMAND) + { + if (!doCustom(thread, st, &result->conn_time, logfile, &aggs)) + remains--; /* I've aborted */ + } } if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND) @@ -3233,7 +3379,15 @@ threadRun(void *arg) last_sqlats = sqlats; last_lags = lags; last_report = now; - next_report += (int64) progress *1000000; + + /* + * Ensure that the next report is in the future, in case + * pgbench/postgres got stuck somewhere. + */ + do + { + next_report += (int64) progress *1000000; + } while (now >= next_report); } } #else @@ -3293,7 +3447,15 @@ threadRun(void *arg) last_sqlats = sqlats; last_lags = lags; last_report = now; - next_report += (int64) progress *1000000; + + /* + * Ensure that the next report is in the future, in case + * pgbench/postgres got stuck somewhere. + */ + do + { + next_report += (int64) progress *1000000; + } while (now >= next_report); } } #endif /* PTHREAD_FORK_EMULATION */ diff --git a/contrib/pgcrypto/crypt-blowfish.c b/contrib/pgcrypto/crypt-blowfish.c index fbaa3d776a08f..6feaefcf7be10 100644 --- a/contrib/pgcrypto/crypt-blowfish.c +++ b/contrib/pgcrypto/crypt-blowfish.c @@ -33,6 +33,7 @@ */ #include "postgres.h" +#include "miscadmin.h" #include "px-crypt.h" #include "px.h" @@ -602,6 +603,17 @@ _crypt_blowfish_rn(const char *key, const char *setting, if (size < 7 + 22 + 31 + 1) return NULL; + /* + * Blowfish salt value must be formatted as follows: "$2a$" or "$2x$", a + * two digit cost parameter, "$", and 22 digits from the alphabet + * "./0-9A-Za-z". -- from the PHP crypt docs. Apparently we enforce a few + * more restrictions on the count in the salt as well. 
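Per the comment above, a well-formed Blowfish setting is 29 characters: "$2a$" or "$2x$", two decimal cost digits, "$", then 22 characters from the "./0-9A-Za-z" alphabet. A rough standalone shape check along those lines (a sketch only; the real _crypt_blowfish_rn() additionally range-checks the cost and decodes the salt bits):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Shape check for a setting like "$2a$06$RQiOJ.3ELirrXwxIZY8q0O". */
static bool
bf_setting_looks_valid(const char *setting)
{
    static const char valid_chars[] =
    "./0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

    if (strlen(setting) < 29)
        return false;           /* salt too short */
    if (setting[0] != '$' || setting[1] != '2' ||
        (setting[2] != 'a' && setting[2] != 'x') || setting[3] != '$')
        return false;
    if (setting[4] < '0' || setting[4] > '9' ||
        setting[5] < '0' || setting[5] > '9' || setting[6] != '$')
        return false;
    for (int i = 7; i < 29; i++)
        if (strchr(valid_chars, setting[i]) == NULL)
            return false;
    return true;
}

int
main(void)
{
    printf("%d\n", bf_setting_looks_valid("$2a$06$RQiOJ.3ELirrXwxIZY8q0O"));    /* 1 */
    printf("%d\n", bf_setting_looks_valid("$2a$"));                             /* 0 */
    return 0;
}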
+ */ + if (strlen(setting) < 29) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid salt"))); + if (setting[0] != '$' || setting[1] != '2' || (setting[2] != 'a' && setting[2] != 'x') || @@ -611,14 +623,18 @@ _crypt_blowfish_rn(const char *key, const char *setting, (setting[4] == '3' && setting[5] > '1') || setting[6] != '$') { - return NULL; + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid salt"))); } count = (BF_word) 1 << ((setting[4] - '0') * 10 + (setting[5] - '0')); if (count < 16 || BF_decode(data.binary.salt, &setting[7], 16)) { px_memset(data.binary.salt, 0, sizeof(data.binary.salt)); - return NULL; + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid salt"))); } BF_swap(data.binary.salt, 4); @@ -655,6 +671,8 @@ _crypt_blowfish_rn(const char *key, const char *setting, do { + CHECK_FOR_INTERRUPTS(); + data.ctx.P[0] ^= data.expanded_key[0]; data.ctx.P[1] ^= data.expanded_key[1]; data.ctx.P[2] ^= data.expanded_key[2]; diff --git a/contrib/pgcrypto/crypt-des.c b/contrib/pgcrypto/crypt-des.c index 4ed44beeff517..56a267fb6c94e 100644 --- a/contrib/pgcrypto/crypt-des.c +++ b/contrib/pgcrypto/crypt-des.c @@ -61,6 +61,7 @@ */ #include "postgres.h" +#include "miscadmin.h" #include "px-crypt.h" @@ -540,6 +541,8 @@ do_des(uint32 l_in, uint32 r_in, uint32 *l_out, uint32 *r_out, int count) while (count--) { + CHECK_FOR_INTERRUPTS(); + /* * Do each round. */ @@ -681,9 +684,19 @@ px_crypt_des(const char *key, const char *setting) if (*setting == _PASSWORD_EFMT1) { /* - * "new"-style: setting - underscore, 4 bytes of count, 4 bytes of - * salt key - unlimited characters + * "new"-style: setting must be a 9-character (underscore, then 4 + * bytes of count, then 4 bytes of salt) string. See CRYPT(3) under + * the "Extended crypt" heading for further details. + * + * Unlimited characters of the input key are used. This is known as + * the "Extended crypt" DES method. + * */ + if (strlen(setting) < 9) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid salt"))); + for (i = 1, count = 0L; i < 5; i++) count |= ascii_to_bin(setting[i]) << (i - 1) * 6; @@ -723,10 +736,16 @@ px_crypt_des(const char *key, const char *setting) #endif /* !DISABLE_XDES */ { /* - * "old"-style: setting - 2 bytes of salt key - up to 8 characters + * "old"-style: setting - 2 bytes of salt key - only up to the first 8 + * characters of the input key are used. 
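For the extended ("_"-prefixed) DES format described above, the four characters after the underscore encode the iteration count, six bits per character and least significant character first, in the same "./0-9A-Za-z" alphabet. A small worked sketch of that decoding (char_to_bin is a local stand-in for the ascii_to_bin() used above); for the test setting "_J9..j2zz" it gives 21 + 11*64 = 725 iterations:

#include <stdio.h>

/* Map './0-9A-Za-z' to 0..63, mirroring the ascii_to_bin() used above. */
static long
char_to_bin(char ch)
{
    if (ch >= 'a' && ch <= 'z')
        return ch - 'a' + 38;
    if (ch >= 'A' && ch <= 'Z')
        return ch - 'A' + 12;
    if (ch >= '.' && ch <= '9')
        return ch - '.';
    return 0;
}

int
main(void)
{
    const char *setting = "_J9..j2zz";
    long        count = 0;

    /* setting[1..4] holds the count, low six bits first */
    for (int i = 1; i < 5; i++)
        count |= char_to_bin(setting[i]) << ((i - 1) * 6);

    printf("iteration count for \"%s\": %ld\n", setting, count);    /* 725 */
    return 0;
}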
*/ count = 25; + if (strlen(setting) < 2) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid salt"))); + salt = (ascii_to_bin(setting[1]) << 6) | ascii_to_bin(setting[0]); diff --git a/contrib/pgcrypto/expected/crypt-blowfish.out b/contrib/pgcrypto/expected/crypt-blowfish.out index 329d78f625462..d79b0c047b4c7 100644 --- a/contrib/pgcrypto/expected/crypt-blowfish.out +++ b/contrib/pgcrypto/expected/crypt-blowfish.out @@ -13,6 +13,15 @@ SELECT crypt('foox', '$2a$06$RQiOJ.3ELirrXwxIZY8q0O'); $2a$06$RQiOJ.3ELirrXwxIZY8q0OR3CVJrAfda1z26CCHPnB6mmVZD8p0/C (1 row) +-- error, salt too short: +SELECT crypt('foox', '$2a$'); +ERROR: invalid salt +-- error, first digit of count in salt invalid +SELECT crypt('foox', '$2a$40$RQiOJ.3ELirrXwxIZY8q0O'); +ERROR: invalid salt +-- error, count in salt too small +SELECT crypt('foox', '$2a$00$RQiOJ.3ELirrXwxIZY8q0O'); +ERROR: invalid salt CREATE TABLE ctest (data text, res text, salt text); INSERT INTO ctest VALUES ('password', '', ''); UPDATE ctest SET salt = gen_salt('bf', 8); diff --git a/contrib/pgcrypto/expected/crypt-des.out b/contrib/pgcrypto/expected/crypt-des.out index b8b605037d48b..a462dcd580a89 100644 --- a/contrib/pgcrypto/expected/crypt-des.out +++ b/contrib/pgcrypto/expected/crypt-des.out @@ -13,6 +13,10 @@ SELECT crypt('foox', 'NB'); NB53EGGqrrb5E (1 row) +-- We are supposed to pass in a 2-character salt. +-- error since salt is too short: +SELECT crypt('password', 'a'); +ERROR: invalid salt CREATE TABLE ctest (data text, res text, salt text); INSERT INTO ctest VALUES ('password', '', ''); UPDATE ctest SET salt = gen_salt('des'); diff --git a/contrib/pgcrypto/expected/crypt-xdes.out b/contrib/pgcrypto/expected/crypt-xdes.out index cdcdefb199699..8cf907512f6f7 100644 --- a/contrib/pgcrypto/expected/crypt-xdes.out +++ b/contrib/pgcrypto/expected/crypt-xdes.out @@ -13,6 +13,30 @@ SELECT crypt('foox', '_J9..j2zz'); _J9..j2zzAYKMvO2BYRY (1 row) +-- check XDES handling of keys longer than 8 chars +SELECT crypt('longlongpassword', '_J9..j2zz'); + crypt +---------------------- + _J9..j2zz4BeseiQNwUg +(1 row) + +-- error, salt too short +SELECT crypt('foox', '_J9..BWH'); +ERROR: invalid salt +-- error, count specified in the second argument is 0 +SELECT crypt('password', '_........'); +ERROR: crypt(3) returned NULL +-- error, count will wind up still being 0 due to invalid encoding +-- of the count: only chars ``./0-9A-Za-z' are valid +SELECT crypt('password', '_..!!!!!!'); +ERROR: crypt(3) returned NULL +-- count should be non-zero here, will work +SELECT crypt('password', '_/!!!!!!!'); + crypt +---------------------- + _/!!!!!!!zqM49hRzxko +(1 row) + CREATE TABLE ctest (data text, res text, salt text); INSERT INTO ctest VALUES ('password', '', ''); UPDATE ctest SET salt = gen_salt('xdes', 1001); diff --git a/contrib/pgcrypto/expected/pgp-decrypt.out b/contrib/pgcrypto/expected/pgp-decrypt.out index 859f4d681b430..2dabfaf7b0e5f 100644 --- a/contrib/pgcrypto/expected/pgp-decrypt.out +++ b/contrib/pgcrypto/expected/pgp-decrypt.out @@ -364,3 +364,62 @@ a3nsOzKTXUfS9VyaXo8IrncM6n7fdaXpwba/3tNsAhJG4lDv1k4g9v8Ix2dfv6Rs (1 row) -- expected: 7efefcab38467f7484d6fa43dc86cf5281bd78e2 +-- check BUG #11905, problem with messages 6 less than a power of 2. +select pgp_sym_decrypt(pgp_sym_encrypt(repeat('x',65530),'1'),'1') = repeat('x',65530); + ?column? 
+---------- + t +(1 row) + +-- expected: true +-- Negative tests +-- Decryption with a certain incorrect key yields an apparent Literal Data +-- packet reporting its content to be binary data. Ciphertext source: +-- iterative pgp_sym_encrypt('secret', 'key') until the random prefix gave +-- rise to that property. +select pgp_sym_decrypt(dearmor(' +-----BEGIN PGP MESSAGE----- + +ww0EBwMCxf8PTrQBmJdl0jcB6y2joE7GSLKRv7trbNsF5Z8ou5NISLUg31llVH/S0B2wl4bvzZjV +VsxxqLSPzNLAeIspJk5G +=mSd/ +-----END PGP MESSAGE----- +'), 'wrong-key', 'debug=1'); +NOTICE: dbg: prefix_init: corrupt prefix +NOTICE: dbg: parse_literal_data: data type=b +NOTICE: dbg: mdcbuf_finish: bad MDC pkt hdr +ERROR: Wrong key or corrupt data +-- Routine text/binary mismatch. +select pgp_sym_decrypt(pgp_sym_encrypt_bytea('P', 'key'), 'key', 'debug=1'); +NOTICE: dbg: parse_literal_data: data type=b +ERROR: Not text data +-- Decryption with a certain incorrect key yields an apparent BZip2-compressed +-- plaintext. Ciphertext source: iterative pgp_sym_encrypt('secret', 'key') +-- until the random prefix gave rise to that property. +select pgp_sym_decrypt(dearmor(' +-----BEGIN PGP MESSAGE----- + +ww0EBwMC9rK/dMkF5Zlt0jcBlzAQ1mQY2qYbKYbw8h3EZ5Jk0K2IiY92R82TRhWzBIF/8cmXDPtP +GXsd65oYJZp3Khz0qfyn +=Nmpq +-----END PGP MESSAGE----- +'), 'wrong-key', 'debug=1'); +NOTICE: dbg: prefix_init: corrupt prefix +NOTICE: dbg: parse_compressed_data: bzip2 unsupported +NOTICE: dbg: mdcbuf_finish: bad MDC pkt hdr +ERROR: Wrong key or corrupt data +-- Routine use of BZip2 compression. Ciphertext source: +-- echo x | gpg --homedir /nonexistent --personal-compress-preferences bzip2 \ +-- --personal-cipher-preferences aes --no-emit-version --batch \ +-- --symmetric --passphrase key --armor +select pgp_sym_decrypt(dearmor(' +-----BEGIN PGP MESSAGE----- + +jA0EBwMCRhFrAKNcLVJg0mMBLJG1cCASNk/x/3dt1zJ+2eo7jHfjgg3N6wpB3XIe +QCwkWJwlBG5pzbO5gu7xuPQN+TbPJ7aQ2sLx3bAHhtYb0i3vV9RO10Gw++yUyd4R +UCAAw2JRIISttRHMfDpDuZJpvYo= +=AZ9M +-----END PGP MESSAGE----- +'), 'key', 'debug=1'); +NOTICE: dbg: parse_compressed_data: bzip2 unsupported +ERROR: Unsupported compression algorithm diff --git a/contrib/pgcrypto/expected/pgp-info.out b/contrib/pgcrypto/expected/pgp-info.out index 1fe008890fbd1..90648383730c0 100644 --- a/contrib/pgcrypto/expected/pgp-info.out +++ b/contrib/pgcrypto/expected/pgp-info.out @@ -74,5 +74,6 @@ from encdata order by id; 2C226E1FFE5CC7D4 B68504FD128E1FF9 FD0206C409B74875 -(4 rows) + FD0206C409B74875 +(5 rows) diff --git a/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out b/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out index 61e09b9a86cbf..b4b6810a3c5af 100644 --- a/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out +++ b/contrib/pgcrypto/expected/pgp-pubkey-decrypt.out @@ -564,6 +564,27 @@ GQ== =XHkF -----END PGP MESSAGE----- '); +-- rsaenc2048 / aes128 (not from gnupg) +insert into encdata (id, data) values (5, ' +-----BEGIN PGP MESSAGE----- + +wcBMA/0CBsQJt0h1AQgAzxZ8j+OTeZ8IlLxfZ/mVd28/gUsCY+xigWBk/anZlK3T +p2tNU2idHzKdAttH2Hu/PWbZp4kwjl9spezYxMqCeBZqtfGED88Y+rqK0n/ul30A +7jjFHaw0XUOqFNlST1v6H2i7UXndnp+kcLfHPhnO5BIYWxB2CYBehItqtrn75eqr +C7trGzU/cr74efcWagbCDSNjiAV7GlEptlzmgVMmNikyI6w0ojEUx8lCLc/OsFz9 +pJUAX8xuwjxDVv+W7xk6c96grQiQlm+FLDYGiGNXoAzx3Wi/howu3uV40dXfY+jx +3WBrhEew5Pkpt1SsWoFnJWOfJ8GLd0ec8vfRCqAIVdLgAeS7NyawQYtd6wuVrEAj +5SMg4Thb4d+g45RksuGLHUUr4qO9tiXglODa4InhmJfgNuLk+RGz4LXjq8wepEmW +vRbgFOG54+Cf4C/gC+HkreDm5JKSKjvvw4B/jC6CDxq+JoziEe2Z1uEjCuEcr+Es +/eGzeOi36BejXPMHeKxXejj5qBBHKV0pHVhZSgffR0TtlXdB967Yl/5agV0R89hI 
+7Gw52emfnH4Z0Y4V0au2H0k1dR/2IxXdJEWSTG7Be1JHT59p9ei2gSEOrdBMIOjP +tbYYUlmmbvD49bHfThkDiC+oc9947LgQsk3kOOLbNHcjkbrjH8R5kjII4m/SEZA1 +g09T+338SzevBcVXh/cFrQ6/Et+lyyO2LJRUMs69g/HyzJOVWT2Iu8E0eS9MWevY +Qtrkrhrpkl3Y02qEp/j6M03Yu2t6ZF7dp51aJ5VhO2mmmtHaTnCyCc8Fcf72LmD8 +blH2nKZC9d6fi4YzSYMepZpMOFR65M80MCMiDUGnZBB8sEADu2/iVtqDUeG8mAA= +=PHJ1 +-----END PGP MESSAGE----- +'); -- successful decrypt select pgp_pub_decrypt(dearmor(data), dearmor(seckey)) from keytbl, encdata where keytbl.id=1 and encdata.id=1; @@ -604,7 +625,7 @@ ERROR: No encryption key found -- rsa: password-protected secret key, wrong password select pgp_pub_decrypt(dearmor(data), dearmor(seckey), '123') from keytbl, encdata where keytbl.id=7 and encdata.id=4; -ERROR: Corrupt data +ERROR: Wrong key or corrupt data -- rsa: password-protected secret key, right password select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 'parool') from keytbl, encdata where keytbl.id=7 and encdata.id=4; @@ -620,7 +641,7 @@ ERROR: Need password for secret key -- password-protected secret key, wrong password select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 'foo') from keytbl, encdata where keytbl.id=5 and encdata.id=1; -ERROR: Corrupt data +ERROR: Wrong key or corrupt data -- password-protected secret key, right password select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 'parool') from keytbl, encdata where keytbl.id=5 and encdata.id=1; @@ -629,3 +650,7 @@ from keytbl, encdata where keytbl.id=5 and encdata.id=1; Secret msg (1 row) +-- test for a short read from prefix_init +select pgp_pub_decrypt(dearmor(data), dearmor(seckey)) +from keytbl, encdata where keytbl.id=6 and encdata.id=5; +ERROR: Wrong key or corrupt data diff --git a/contrib/pgcrypto/imath.c b/contrib/pgcrypto/imath.c index 5c6ebebfe2108..61a01e2b71002 100644 --- a/contrib/pgcrypto/imath.c +++ b/contrib/pgcrypto/imath.c @@ -818,7 +818,8 @@ mp_int_mul(mp_int a, mp_int b, mp_int c) */ ua = MP_USED(a); ub = MP_USED(b); - osize = ua + ub; + osize = MAX(ua, ub); + osize = 4 * ((osize + 1) / 2); if (c == a || c == b) { @@ -907,7 +908,7 @@ mp_int_sqr(mp_int a, mp_int c) CHECK(a != NULL && c != NULL); /* Get a temporary buffer big enough to hold the result */ - osize = (mp_size) 2 *MP_USED(a); + osize = (mp_size) 4 *((MP_USED(a) + 1) / 2); if (a == c) { @@ -2605,8 +2606,8 @@ s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc, * Now we'll get t1 = a0b0 and t2 = a1b1, and subtract them out so * that we're left with only the pieces we want: t3 = a1b0 + a0b1 */ - ZERO(t1, bot_size + 1); - ZERO(t2, bot_size + 1); + ZERO(t1, buf_size); + ZERO(t2, buf_size); (void) s_kmul(da, db, t1, bot_size, bot_size); /* t1 = a0 * b0 */ (void) s_kmul(a_top, b_top, t2, at_size, bt_size); /* t2 = a1 * b1 */ @@ -2616,11 +2617,13 @@ s_kmul(mp_digit *da, mp_digit *db, mp_digit *dc, /* Assemble the output value */ COPY(t1, dc, buf_size); - (void) s_uadd(t3, dc + bot_size, dc + bot_size, - buf_size + 1, buf_size + 1); + carry = s_uadd(t3, dc + bot_size, dc + bot_size, + buf_size + 1, buf_size); + assert(carry == 0); - (void) s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size, - buf_size, buf_size); + carry = s_uadd(t2, dc + 2 * bot_size, dc + 2 * bot_size, + buf_size, buf_size); + assert(carry == 0); s_free(t1); /* note t2 and t3 are just internal pointers * to t1 */ @@ -3307,7 +3310,10 @@ s_embar(mp_int a, mp_int b, mp_int m, mp_int mu, mp_int c) dbt = db + MP_USED(b) - 1; while (last < 3) - SETUP(mp_int_init_size(TEMP(last), 2 * umu), last); + { + SETUP(mp_int_init_size(TEMP(last), 4 * umu), last); + 
ZERO(MP_DIGITS(TEMP(last - 1)), MP_ALLOC(TEMP(last - 1))); + } (void) mp_int_set_value(c, 1); diff --git a/contrib/pgcrypto/mbuf.c b/contrib/pgcrypto/mbuf.c index 6124e4513c7cb..44d9adcd2ab95 100644 --- a/contrib/pgcrypto/mbuf.c +++ b/contrib/pgcrypto/mbuf.c @@ -305,6 +305,7 @@ pullf_read_max(PullFilter *pf, int len, uint8 **data_p, uint8 *tmpbuf) break; memcpy(tmpbuf + total, tmp, res); total += res; + len -= res; } return total; } @@ -324,7 +325,7 @@ pullf_read_fixed(PullFilter *src, int len, uint8 *dst) if (res != len) { px_debug("pullf_read_fixed: need=%d got=%d", len, res); - return PXE_MBUF_SHORT_READ; + return PXE_PGP_CORRUPT_DATA; } if (p != dst) memcpy(dst, p, len); diff --git a/contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql b/contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql index fe8d4c4e72ce9..8154e85f44d63 100644 --- a/contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql +++ b/contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pgcrypto/pgcrypto--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pgcrypto" to load this file. \quit +\echo Use "CREATE EXTENSION pgcrypto FROM unpackaged" to load this file. \quit ALTER EXTENSION pgcrypto ADD function digest(text,text); ALTER EXTENSION pgcrypto ADD function digest(bytea,text); diff --git a/contrib/pgcrypto/pgp-decrypt.c b/contrib/pgcrypto/pgp-decrypt.c index e03ee7f5f02fe..55119e1e112d8 100644 --- a/contrib/pgcrypto/pgp-decrypt.c +++ b/contrib/pgcrypto/pgp-decrypt.c @@ -182,7 +182,7 @@ pktreader_pull(void *priv, PullFilter *src, int len, if (pkt->type == PKT_CONTEXT) return pullf_read(src, len, data_p); - if (pkt->len == 0) + while (pkt->len == 0) { /* this was last chunk in stream */ if (pkt->type == PKT_NORMAL) @@ -236,6 +236,8 @@ pgp_create_pkt_reader(PullFilter **pf_p, PullFilter *src, int len, /* * Prefix check filter + * https://tools.ietf.org/html/rfc4880#section-5.7 + * https://tools.ietf.org/html/rfc4880#section-5.13 */ static int @@ -264,20 +266,7 @@ prefix_init(void **priv_p, void *arg, PullFilter *src) if (buf[len - 2] != buf[len] || buf[len - 1] != buf[len + 1]) { px_debug("prefix_init: corrupt prefix"); - - /* - * The original purpose of the 2-byte check was to show user a - * friendly "wrong key" message. This made following possible: - * - * "An Attack on CFB Mode Encryption As Used By OpenPGP" by Serge - * Mister and Robert Zuccherato - * - * To avoid being 'oracle', we delay reporting, which basically means - * we prefer to run into corrupt packet header. - * - * We _could_ throw PXE_PGP_CORRUPT_DATA here, but there is - * possibility of attack via timing, so we don't. 
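The "quick check" referred to in that removed comment is the OpenPGP random-prefix test (RFC 4880, sections 5.7 and 5.13): the first block_size bytes of decrypted data are random, and the next two must repeat the last two of them, so a mismatch almost always means the wrong key was used. A sketch of just that comparison, assuming block_size + 2 bytes have already been decrypted into buf:

#include <stdbool.h>
#include <stddef.h>

/*
 * Return true if the decrypted prefix passes the OpenPGP quick check,
 * i.e. bytes [block_size - 2] and [block_size - 1] are repeated at
 * [block_size] and [block_size + 1].
 */
static bool
prefix_quick_check(const unsigned char *buf, size_t block_size)
{
    return buf[block_size - 2] == buf[block_size] &&
           buf[block_size - 1] == buf[block_size + 1];
}

int
main(void)
{
    /* 8-byte "random" prefix, then a copy of its last two bytes */
    unsigned char ok[10] = {1, 2, 3, 4, 5, 6, 7, 8, 7, 8};
    unsigned char bad[10] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 9};

    return (prefix_quick_check(ok, 8) && !prefix_quick_check(bad, 8)) ? 0 : 1;
}

As the surrounding change shows, pgcrypto records a failed check in corrupt_prefix and reports it only later, so the error cannot serve as a byte-by-byte decryption oracle.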
- */ + /* report error in pgp_decrypt() */ ctx->corrupt_prefix = 1; } px_memset(tmpbuf, 0, sizeof(tmpbuf)); @@ -351,37 +340,33 @@ mdc_free(void *priv) } static int -mdc_finish(PGP_Context *ctx, PullFilter *src, - int len, uint8 **data_p) +mdc_finish(PGP_Context *ctx, PullFilter *src, int len) { int res; uint8 hash[20]; - uint8 tmpbuf[22]; + uint8 tmpbuf[20]; + uint8 *data; - if (len + 1 > sizeof(tmpbuf)) + /* should not happen */ + if (ctx->use_mdcbuf_filter) return PXE_BUG; + /* It's SHA1 */ + if (len != 20) + return PXE_PGP_CORRUPT_DATA; + + /* mdc_read should not call md_update */ + ctx->in_mdc_pkt = 1; + /* read data */ - res = pullf_read_max(src, len + 1, data_p, tmpbuf); + res = pullf_read_max(src, len, &data, tmpbuf); if (res < 0) return res; if (res == 0) { - if (ctx->mdc_checked == 0) - { - px_debug("no mdc"); - return PXE_PGP_CORRUPT_DATA; - } - return 0; - } - - /* safety check */ - if (ctx->in_mdc_pkt > 1) - { - px_debug("mdc_finish: several times here?"); + px_debug("no mdc"); return PXE_PGP_CORRUPT_DATA; } - ctx->in_mdc_pkt++; /* is the packet sane? */ if (res != 20) @@ -394,7 +379,7 @@ mdc_finish(PGP_Context *ctx, PullFilter *src, * ok, we got the hash, now check */ px_md_finish(ctx->mdc_ctx, hash); - res = memcmp(hash, *data_p, 20); + res = memcmp(hash, data, 20); px_memset(hash, 0, 20); px_memset(tmpbuf, 0, sizeof(tmpbuf)); if (res != 0) @@ -403,7 +388,7 @@ mdc_finish(PGP_Context *ctx, PullFilter *src, return PXE_PGP_CORRUPT_DATA; } ctx->mdc_checked = 1; - return len; + return 0; } static int @@ -414,12 +399,9 @@ mdc_read(void *priv, PullFilter *src, int len, PGP_Context *ctx = priv; /* skip this filter? */ - if (ctx->use_mdcbuf_filter) + if (ctx->use_mdcbuf_filter || ctx->in_mdc_pkt) return pullf_read(src, len, data_p); - if (ctx->in_mdc_pkt) - return mdc_finish(ctx, src, len, data_p); - res = pullf_read(src, len, data_p); if (res < 0) return res; @@ -795,12 +777,15 @@ parse_literal_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt) } px_memset(tmpbuf, 0, 4); - /* check if text */ + /* + * If called from an SQL function that returns text, pgp_decrypt() rejects + * inputs not self-identifying as text. + */ if (ctx->text_mode) if (type != 't' && type != 'u') { px_debug("parse_literal_data: data type=%c", type); - return PXE_PGP_NOT_TEXT; + ctx->unexpected_binary = true; } ctx->unicode_mode = (type == 'u') ? 1 : 0; @@ -834,6 +819,7 @@ parse_compressed_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt) int res; uint8 type; PullFilter *pf_decompr; + uint8 *discard_buf; GETBYTE(pkt, type); @@ -857,7 +843,20 @@ parse_compressed_data(PGP_Context *ctx, MBuf *dst, PullFilter *pkt) case PGP_COMPR_BZIP2: px_debug("parse_compressed_data: bzip2 unsupported"); - res = PXE_PGP_UNSUPPORTED_COMPR; + /* report error in pgp_decrypt() */ + ctx->unsupported_compr = 1; + + /* + * Discard the compressed data, allowing it to first affect any + * MDC digest computation. 
+ */ + while (1) + { + res = pullf_read(pkt, 32 * 1024, &discard_buf); + if (res <= 0) + break; + } + break; default: @@ -878,7 +877,6 @@ process_data_packets(PGP_Context *ctx, MBuf *dst, PullFilter *src, int got_data = 0; int got_mdc = 0; PullFilter *pkt = NULL; - uint8 *tmp; while (1) { @@ -937,11 +935,8 @@ process_data_packets(PGP_Context *ctx, MBuf *dst, PullFilter *src, break; } - /* notify mdc_filter */ - ctx->in_mdc_pkt = 1; - - res = pullf_read(pkt, 8192, &tmp); - if (res > 0) + res = mdc_finish(ctx, pkt, len); + if (res >= 0) got_mdc = 1; break; default: @@ -1182,8 +1177,36 @@ pgp_decrypt(PGP_Context *ctx, MBuf *msrc, MBuf *mdst) if (res < 0) return res; + /* + * Report a failure of the prefix_init() "quick check" now, rather than + * upon detection, to hinder timing attacks. pgcrypto is not generally + * secure against timing attacks, but this helps. + */ if (!got_data || ctx->corrupt_prefix) - res = PXE_PGP_CORRUPT_DATA; + return PXE_PGP_CORRUPT_DATA; + + /* + * Code interpreting purportedly-decrypted data prior to this stage shall + * report no error other than PXE_PGP_CORRUPT_DATA. (PXE_BUG is okay so + * long as it remains unreachable.) This ensures that an attacker able to + * choose a ciphertext and receive a corresponding decryption error + * message cannot use that oracle to gather clues about the decryption + * key. See "An Attack on CFB Mode Encryption As Used By OpenPGP" by + * Serge Mister and Robert Zuccherato. + * + * A problematic value in the first octet of a Literal Data or Compressed + * Data packet may indicate a simple user error, such as the need to call + * pgp_sym_decrypt_bytea instead of pgp_sym_decrypt. Occasionally, + * though, it is the first symptom of the encryption key not matching the + * decryption key. When this was the only problem encountered, report a + * specific error to guide the user; otherwise, we will have reported + * PXE_PGP_CORRUPT_DATA before now. A key mismatch makes the other errors + * into red herrings, and this avoids leaking clues to attackers. + */ + if (ctx->unsupported_compr) + return PXE_PGP_UNSUPPORTED_COMPR; + if (ctx->unexpected_binary) + return PXE_PGP_NOT_TEXT; return res; } diff --git a/contrib/pgcrypto/pgp-pgsql.c b/contrib/pgcrypto/pgp-pgsql.c index ad1fd084276a3..3f39e53559491 100644 --- a/contrib/pgcrypto/pgp-pgsql.c +++ b/contrib/pgcrypto/pgp-pgsql.c @@ -241,7 +241,10 @@ set_arg(PGP_Context *ctx, char *key, char *val, res = pgp_set_convert_crlf(ctx, atoi(val)); else if (strcmp(key, "unicode-mode") == 0) res = pgp_set_unicode_mode(ctx, atoi(val)); - /* decrypt debug */ + /* + * The remaining options are for debugging/testing and are therefore not + * documented in the user-facing docs. 
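The pgp_decrypt() hunk above makes the error policy explicit: problems found while interpreting purportedly-decrypted data are only recorded in flags (corrupt_prefix, unsupported_compr, unexpected_binary), and a single decision is made after the whole message has been processed, so a chosen-ciphertext attacker always sees the generic corruption error first. A condensed sketch of that decision order, with invented names standing in for the real PGP_Context fields and PXE_* codes:

#include <stdio.h>

enum decrypt_error
{
    DEC_OK = 0,
    DEC_CORRUPT_DATA,           /* generic "wrong key or corrupt data" */
    DEC_UNSUPPORTED_COMPR,
    DEC_NOT_TEXT
};

struct decrypt_flags
{
    int got_data;
    int corrupt_prefix;
    int unsupported_compr;
    int unexpected_binary;
};

/* Decide on one error only after the whole message has been processed. */
static enum decrypt_error
finish_decrypt(const struct decrypt_flags *f)
{
    if (!f->got_data || f->corrupt_prefix)
        return DEC_CORRUPT_DATA;        /* masks any other hint */
    if (f->unsupported_compr)
        return DEC_UNSUPPORTED_COMPR;
    if (f->unexpected_binary)
        return DEC_NOT_TEXT;
    return DEC_OK;
}

int
main(void)
{
    /* wrong key: the prefix check failed, so the bzip2 hint must not leak */
    struct decrypt_flags f = {1, 1, 1, 0};

    printf("%d\n", finish_decrypt(&f));     /* 1: generic corruption only */
    return 0;
}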
+ */ else if (ex != NULL && strcmp(key, "debug") == 0) ex->debug = atoi(val); else if (ex != NULL && strcmp(key, "expect-cipher-algo") == 0) diff --git a/contrib/pgcrypto/pgp.h b/contrib/pgcrypto/pgp.h index 8d4ab9862dfd5..1381eda49a2f8 100644 --- a/contrib/pgcrypto/pgp.h +++ b/contrib/pgcrypto/pgp.h @@ -151,7 +151,9 @@ struct PGP_Context * internal variables */ int mdc_checked; - int corrupt_prefix; + int corrupt_prefix; /* prefix failed RFC 4880 "quick check" */ + int unsupported_compr; /* has bzip2 compression */ + int unexpected_binary; /* binary data seen in text_mode */ int in_mdc_pkt; int use_mdcbuf_filter; PX_MD *mdc_ctx; diff --git a/contrib/pgcrypto/px-crypt.c b/contrib/pgcrypto/px-crypt.c index 7b003a76ca663..e3246fc5b9d5f 100644 --- a/contrib/pgcrypto/px-crypt.c +++ b/contrib/pgcrypto/px-crypt.c @@ -42,7 +42,7 @@ run_crypt_des(const char *psw, const char *salt, char *res; res = px_crypt_des(psw, salt); - if (strlen(res) > len - 1) + if (res == NULL || strlen(res) > len - 1) return NULL; strcpy(buf, res); return buf; diff --git a/contrib/pgcrypto/px.c b/contrib/pgcrypto/px.c index 93c436daa0dba..cfb3b50985a49 100644 --- a/contrib/pgcrypto/px.c +++ b/contrib/pgcrypto/px.c @@ -87,9 +87,6 @@ static const struct error_desc px_err_list[] = { {PXE_PGP_UNSUPPORTED_PUBALGO, "Unsupported public key algorithm"}, {PXE_PGP_MULTIPLE_SUBKEYS, "Several subkeys not supported"}, - /* fake this as PXE_PGP_CORRUPT_DATA */ - {PXE_MBUF_SHORT_READ, "Corrupt data"}, - {0, NULL}, }; diff --git a/contrib/pgcrypto/px.h b/contrib/pgcrypto/px.h index a01a58e29c0b3..d237d97017dcf 100644 --- a/contrib/pgcrypto/px.h +++ b/contrib/pgcrypto/px.h @@ -80,8 +80,6 @@ void px_free(void *p); #define PXE_NO_RANDOM -17 #define PXE_DECRYPT_FAILED -18 -#define PXE_MBUF_SHORT_READ -50 - #define PXE_PGP_CORRUPT_DATA -100 #define PXE_PGP_CORRUPT_ARMOR -101 #define PXE_PGP_UNSUPPORTED_COMPR -102 diff --git a/contrib/pgcrypto/random.c b/contrib/pgcrypto/random.c index 3f092ca346114..d72679e412d40 100644 --- a/contrib/pgcrypto/random.c +++ b/contrib/pgcrypto/random.c @@ -32,6 +32,7 @@ #include "postgres.h" #include "px.h" +#include "utils/memdebug.h" /* how many bytes to ask from system random provider */ #define RND_BYTES 32 @@ -195,7 +196,7 @@ try_unix_std(uint8 *dst) memcpy(dst, (uint8 *) &x, sizeof(x)); dst += sizeof(x); - /* let's be desperate */ + /* hash of uninitialized stack and heap allocations */ res = px_find_digest("sha1", &md); if (res >= 0) { @@ -203,8 +204,10 @@ try_unix_std(uint8 *dst) uint8 stack[8192]; int alloc = 32 * 1024; + VALGRIND_MAKE_MEM_DEFINED(stack, sizeof(stack)); px_md_update(md, stack, sizeof(stack)); ptr = px_alloc(alloc); + VALGRIND_MAKE_MEM_DEFINED(ptr, alloc); px_md_update(md, ptr, alloc); px_free(ptr); diff --git a/contrib/pgcrypto/sql/crypt-blowfish.sql b/contrib/pgcrypto/sql/crypt-blowfish.sql index 60c1140055520..3b5a681c3f5cd 100644 --- a/contrib/pgcrypto/sql/crypt-blowfish.sql +++ b/contrib/pgcrypto/sql/crypt-blowfish.sql @@ -6,6 +6,15 @@ SELECT crypt('', '$2a$06$RQiOJ.3ELirrXwxIZY8q0O'); SELECT crypt('foox', '$2a$06$RQiOJ.3ELirrXwxIZY8q0O'); +-- error, salt too short: +SELECT crypt('foox', '$2a$'); + +-- error, first digit of count in salt invalid +SELECT crypt('foox', '$2a$40$RQiOJ.3ELirrXwxIZY8q0O'); + +-- error, count in salt too small +SELECT crypt('foox', '$2a$00$RQiOJ.3ELirrXwxIZY8q0O'); + CREATE TABLE ctest (data text, res text, salt text); INSERT INTO ctest VALUES ('password', '', ''); diff --git a/contrib/pgcrypto/sql/crypt-des.sql b/contrib/pgcrypto/sql/crypt-des.sql index 
fabdc652fc986..a85ec1e655510 100644 --- a/contrib/pgcrypto/sql/crypt-des.sql +++ b/contrib/pgcrypto/sql/crypt-des.sql @@ -6,6 +6,10 @@ SELECT crypt('', 'NB'); SELECT crypt('foox', 'NB'); +-- We are supposed to pass in a 2-character salt. +-- error since salt is too short: +SELECT crypt('password', 'a'); + CREATE TABLE ctest (data text, res text, salt text); INSERT INTO ctest VALUES ('password', '', ''); diff --git a/contrib/pgcrypto/sql/crypt-xdes.sql b/contrib/pgcrypto/sql/crypt-xdes.sql index d4a74f76bde52..8171cd872be21 100644 --- a/contrib/pgcrypto/sql/crypt-xdes.sql +++ b/contrib/pgcrypto/sql/crypt-xdes.sql @@ -6,6 +6,22 @@ SELECT crypt('', '_J9..j2zz'); SELECT crypt('foox', '_J9..j2zz'); +-- check XDES handling of keys longer than 8 chars +SELECT crypt('longlongpassword', '_J9..j2zz'); + +-- error, salt too short +SELECT crypt('foox', '_J9..BWH'); + +-- error, count specified in the second argument is 0 +SELECT crypt('password', '_........'); + +-- error, count will wind up still being 0 due to invalid encoding +-- of the count: only chars ``./0-9A-Za-z' are valid +SELECT crypt('password', '_..!!!!!!'); + +-- count should be non-zero here, will work +SELECT crypt('password', '_/!!!!!!!'); + CREATE TABLE ctest (data text, res text, salt text); INSERT INTO ctest VALUES ('password', '', ''); diff --git a/contrib/pgcrypto/sql/pgp-decrypt.sql b/contrib/pgcrypto/sql/pgp-decrypt.sql index 93535ab016ad7..f46a18f8cfd3c 100644 --- a/contrib/pgcrypto/sql/pgp-decrypt.sql +++ b/contrib/pgcrypto/sql/pgp-decrypt.sql @@ -264,3 +264,52 @@ a3nsOzKTXUfS9VyaXo8IrncM6n7fdaXpwba/3tNsAhJG4lDv1k4g9v8Ix2dfv6Rs -----END PGP MESSAGE----- '), 'key', 'convert-crlf=1'), 'sha1'), 'hex'); -- expected: 7efefcab38467f7484d6fa43dc86cf5281bd78e2 + +-- check BUG #11905, problem with messages 6 less than a power of 2. +select pgp_sym_decrypt(pgp_sym_encrypt(repeat('x',65530),'1'),'1') = repeat('x',65530); +-- expected: true + + +-- Negative tests + +-- Decryption with a certain incorrect key yields an apparent Literal Data +-- packet reporting its content to be binary data. Ciphertext source: +-- iterative pgp_sym_encrypt('secret', 'key') until the random prefix gave +-- rise to that property. +select pgp_sym_decrypt(dearmor(' +-----BEGIN PGP MESSAGE----- + +ww0EBwMCxf8PTrQBmJdl0jcB6y2joE7GSLKRv7trbNsF5Z8ou5NISLUg31llVH/S0B2wl4bvzZjV +VsxxqLSPzNLAeIspJk5G +=mSd/ +-----END PGP MESSAGE----- +'), 'wrong-key', 'debug=1'); + +-- Routine text/binary mismatch. +select pgp_sym_decrypt(pgp_sym_encrypt_bytea('P', 'key'), 'key', 'debug=1'); + +-- Decryption with a certain incorrect key yields an apparent BZip2-compressed +-- plaintext. Ciphertext source: iterative pgp_sym_encrypt('secret', 'key') +-- until the random prefix gave rise to that property. +select pgp_sym_decrypt(dearmor(' +-----BEGIN PGP MESSAGE----- + +ww0EBwMC9rK/dMkF5Zlt0jcBlzAQ1mQY2qYbKYbw8h3EZ5Jk0K2IiY92R82TRhWzBIF/8cmXDPtP +GXsd65oYJZp3Khz0qfyn +=Nmpq +-----END PGP MESSAGE----- +'), 'wrong-key', 'debug=1'); + +-- Routine use of BZip2 compression. 
Ciphertext source: +-- echo x | gpg --homedir /nonexistent --personal-compress-preferences bzip2 \ +-- --personal-cipher-preferences aes --no-emit-version --batch \ +-- --symmetric --passphrase key --armor +select pgp_sym_decrypt(dearmor(' +-----BEGIN PGP MESSAGE----- + +jA0EBwMCRhFrAKNcLVJg0mMBLJG1cCASNk/x/3dt1zJ+2eo7jHfjgg3N6wpB3XIe +QCwkWJwlBG5pzbO5gu7xuPQN+TbPJ7aQ2sLx3bAHhtYb0i3vV9RO10Gw++yUyd4R +UCAAw2JRIISttRHMfDpDuZJpvYo= +=AZ9M +-----END PGP MESSAGE----- +'), 'key', 'debug=1'); diff --git a/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql b/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql index f8495d1e540ef..3f2bae9e40bd4 100644 --- a/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql +++ b/contrib/pgcrypto/sql/pgp-pubkey-decrypt.sql @@ -579,6 +579,28 @@ GQ== -----END PGP MESSAGE----- '); +-- rsaenc2048 / aes128 (not from gnupg) +insert into encdata (id, data) values (5, ' +-----BEGIN PGP MESSAGE----- + +wcBMA/0CBsQJt0h1AQgAzxZ8j+OTeZ8IlLxfZ/mVd28/gUsCY+xigWBk/anZlK3T +p2tNU2idHzKdAttH2Hu/PWbZp4kwjl9spezYxMqCeBZqtfGED88Y+rqK0n/ul30A +7jjFHaw0XUOqFNlST1v6H2i7UXndnp+kcLfHPhnO5BIYWxB2CYBehItqtrn75eqr +C7trGzU/cr74efcWagbCDSNjiAV7GlEptlzmgVMmNikyI6w0ojEUx8lCLc/OsFz9 +pJUAX8xuwjxDVv+W7xk6c96grQiQlm+FLDYGiGNXoAzx3Wi/howu3uV40dXfY+jx +3WBrhEew5Pkpt1SsWoFnJWOfJ8GLd0ec8vfRCqAIVdLgAeS7NyawQYtd6wuVrEAj +5SMg4Thb4d+g45RksuGLHUUr4qO9tiXglODa4InhmJfgNuLk+RGz4LXjq8wepEmW +vRbgFOG54+Cf4C/gC+HkreDm5JKSKjvvw4B/jC6CDxq+JoziEe2Z1uEjCuEcr+Es +/eGzeOi36BejXPMHeKxXejj5qBBHKV0pHVhZSgffR0TtlXdB967Yl/5agV0R89hI +7Gw52emfnH4Z0Y4V0au2H0k1dR/2IxXdJEWSTG7Be1JHT59p9ei2gSEOrdBMIOjP +tbYYUlmmbvD49bHfThkDiC+oc9947LgQsk3kOOLbNHcjkbrjH8R5kjII4m/SEZA1 +g09T+338SzevBcVXh/cFrQ6/Et+lyyO2LJRUMs69g/HyzJOVWT2Iu8E0eS9MWevY +Qtrkrhrpkl3Y02qEp/j6M03Yu2t6ZF7dp51aJ5VhO2mmmtHaTnCyCc8Fcf72LmD8 +blH2nKZC9d6fi4YzSYMepZpMOFR65M80MCMiDUGnZBB8sEADu2/iVtqDUeG8mAA= +=PHJ1 +-----END PGP MESSAGE----- +'); + -- successful decrypt select pgp_pub_decrypt(dearmor(data), dearmor(seckey)) from keytbl, encdata where keytbl.id=1 and encdata.id=1; @@ -619,3 +641,7 @@ from keytbl, encdata where keytbl.id=5 and encdata.id=1; -- password-protected secret key, right password select pgp_pub_decrypt(dearmor(data), dearmor(seckey), 'parool') from keytbl, encdata where keytbl.id=5 and encdata.id=1; + +-- test for a short read from prefix_init +select pgp_pub_decrypt(dearmor(data), dearmor(seckey)) +from keytbl, encdata where keytbl.id=6 and encdata.id=5; diff --git a/contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql b/contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql index b8c3faf1c7678..bfa9855825515 100644 --- a/contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql +++ b/contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql @@ -1,6 +1,6 @@ /* contrib/pgrowlocks/pgrowlocks--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pgrowlocks" to load this file. \quit +\echo Use "CREATE EXTENSION pgrowlocks FROM unpackaged" to load this file. 
\quit ALTER EXTENSION pgrowlocks ADD function pgrowlocks(text); diff --git a/contrib/pgstattuple/expected/pgstattuple.out b/contrib/pgstattuple/expected/pgstattuple.out index d769f6d49431f..e920234488e11 100644 --- a/contrib/pgstattuple/expected/pgstattuple.out +++ b/contrib/pgstattuple/expected/pgstattuple.out @@ -41,40 +41,44 @@ select pgstattuple(relname) from pg_class where relname = 'test'; (0,0,0,0,0,0,0,0,0) (1 row) -select * from pgstatindex('test_pkey'); +select version, tree_level, + index_size / current_setting('block_size')::int as index_size, + root_block_no, internal_pages, leaf_pages, empty_pages, deleted_pages, + avg_leaf_density, leaf_fragmentation + from pgstatindex('test_pkey'); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) -select * from pgstatindex('test_pkey'::text); +select version, tree_level, + index_size / current_setting('block_size')::int as index_size, + root_block_no, internal_pages, leaf_pages, empty_pages, deleted_pages, + avg_leaf_density, leaf_fragmentation + from pgstatindex('test_pkey'::text); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) -select * from pgstatindex('test_pkey'::name); +select version, tree_level, + index_size / current_setting('block_size')::int as index_size, + root_block_no, internal_pages, leaf_pages, empty_pages, deleted_pages, + avg_leaf_density, leaf_fragmentation + from pgstatindex('test_pkey'::name); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | NaN | NaN + 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) -select * from pgstatindex('test_pkey'::regclass); +select version, tree_level, + index_size / current_setting('block_size')::int as index_size, + root_block_no, internal_pages, leaf_pages, empty_pages, deleted_pages, + avg_leaf_density, leaf_fragmentation + from pgstatindex('test_pkey'::regclass); version | tree_level | index_size | root_block_no | internal_pages | leaf_pages | empty_pages | deleted_pages | avg_leaf_density | leaf_fragmentation ---------+------------+------------+---------------+----------------+------------+-------------+---------------+------------------+-------------------- - 2 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | NaN | NaN -(1 row) - -select pgstatindex(oid) from pg_class where relname = 'test_pkey'; - pgstatindex ---------------------------- - (2,0,0,0,0,0,0,0,NaN,NaN) -(1 row) - -select pgstatindex(relname) from pg_class where relname = 'test_pkey'; - pgstatindex ---------------------------- - (2,0,0,0,0,0,0,0,NaN,NaN) + 2 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | NaN | NaN (1 row) select pg_relpages('test'); diff --git 
a/contrib/pgstattuple/pgstatindex.c b/contrib/pgstattuple/pgstatindex.c index a2ea5d709cf98..8d9d81610ec2f 100644 --- a/contrib/pgstattuple/pgstatindex.c +++ b/contrib/pgstattuple/pgstatindex.c @@ -78,7 +78,6 @@ typedef struct BTIndexStat uint32 level; BlockNumber root_blkno; - uint64 root_pages; uint64 internal_pages; uint64 leaf_pages; uint64 empty_pages; @@ -184,7 +183,6 @@ pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo) } /* -- init counters -- */ - indexStat.root_pages = 0; indexStat.internal_pages = 0; indexStat.leaf_pages = 0; indexStat.empty_pages = 0; @@ -217,7 +215,11 @@ pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo) /* Determine page type, and update totals */ - if (P_ISLEAF(opaque)) + if (P_ISDELETED(opaque)) + indexStat.deleted_pages++; + else if (P_IGNORE(opaque)) + indexStat.empty_pages++; /* this is the "half dead" state */ + else if (P_ISLEAF(opaque)) { int max_avail; @@ -234,12 +236,6 @@ pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo) if (opaque->btpo_next != P_NONE && opaque->btpo_next < blkno) indexStat.fragments++; } - else if (P_ISDELETED(opaque)) - indexStat.deleted_pages++; - else if (P_IGNORE(opaque)) - indexStat.empty_pages++; - else if (P_ISROOT(opaque)) - indexStat.root_pages++; else indexStat.internal_pages++; @@ -268,7 +264,7 @@ pgstatindex_impl(Relation rel, FunctionCallInfo fcinfo) values[j++] = psprintf("%d", indexStat.version); values[j++] = psprintf("%d", indexStat.level); values[j++] = psprintf(INT64_FORMAT, - (indexStat.root_pages + + (1 + /* include the metapage in index_size */ indexStat.leaf_pages + indexStat.internal_pages + indexStat.deleted_pages + diff --git a/contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql b/contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql index 14b63cafcf5fe..ef71000a32931 100644 --- a/contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql +++ b/contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/pgstattuple/pgstattuple--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION pgstattuple" to load this file. \quit +\echo Use "CREATE EXTENSION pgstattuple FROM unpackaged" to load this file. 
\quit ALTER EXTENSION pgstattuple ADD function pgstattuple(text); ALTER EXTENSION pgstattuple ADD function pgstattuple(oid); diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index edc603f6a1b09..10077483d3587 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -274,7 +274,6 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) BlockNumber tupblock; Buffer buffer; pgstattuple_type stat = {0}; - BufferAccessStrategy bstrategy; SnapshotData SnapshotDirty; /* Disable syncscan because we assume we scan from block zero upwards */ @@ -283,10 +282,6 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) nblocks = scan->rs_nblocks; /* # blocks to be scanned */ - /* prepare access strategy for this table */ - bstrategy = GetAccessStrategy(BAS_BULKREAD); - scan->rs_strategy = bstrategy; - /* scan the relation */ while ((tuple = heap_getnext(scan, ForwardScanDirection)) != NULL) { @@ -320,26 +315,28 @@ pgstat_heap(Relation rel, FunctionCallInfo fcinfo) { CHECK_FOR_INTERRUPTS(); - buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, RBM_NORMAL, bstrategy); + buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, + RBM_NORMAL, scan->rs_strategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); stat.free_space += PageGetHeapFreeSpace((Page) BufferGetPage(buffer)); UnlockReleaseBuffer(buffer); block++; } } - heap_endscan(scan); while (block < nblocks) { CHECK_FOR_INTERRUPTS(); - buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, RBM_NORMAL, bstrategy); + buffer = ReadBufferExtended(rel, MAIN_FORKNUM, block, + RBM_NORMAL, scan->rs_strategy); LockBuffer(buffer, BUFFER_LOCK_SHARE); stat.free_space += PageGetHeapFreeSpace((Page) BufferGetPage(buffer)); UnlockReleaseBuffer(buffer); block++; } + heap_endscan(scan); relation_close(rel, AccessShareLock); stat.table_len = (uint64) nblocks *BLCKSZ; diff --git a/contrib/pgstattuple/sql/pgstattuple.sql b/contrib/pgstattuple/sql/pgstattuple.sql index 0e0ad0e15d5a7..d22c9f1c46dc1 100644 --- a/contrib/pgstattuple/sql/pgstattuple.sql +++ b/contrib/pgstattuple/sql/pgstattuple.sql @@ -15,12 +15,26 @@ select * from pgstattuple('test'::regclass); select pgstattuple(oid) from pg_class where relname = 'test'; select pgstattuple(relname) from pg_class where relname = 'test'; -select * from pgstatindex('test_pkey'); -select * from pgstatindex('test_pkey'::text); -select * from pgstatindex('test_pkey'::name); -select * from pgstatindex('test_pkey'::regclass); -select pgstatindex(oid) from pg_class where relname = 'test_pkey'; -select pgstatindex(relname) from pg_class where relname = 'test_pkey'; +select version, tree_level, + index_size / current_setting('block_size')::int as index_size, + root_block_no, internal_pages, leaf_pages, empty_pages, deleted_pages, + avg_leaf_density, leaf_fragmentation + from pgstatindex('test_pkey'); +select version, tree_level, + index_size / current_setting('block_size')::int as index_size, + root_block_no, internal_pages, leaf_pages, empty_pages, deleted_pages, + avg_leaf_density, leaf_fragmentation + from pgstatindex('test_pkey'::text); +select version, tree_level, + index_size / current_setting('block_size')::int as index_size, + root_block_no, internal_pages, leaf_pages, empty_pages, deleted_pages, + avg_leaf_density, leaf_fragmentation + from pgstatindex('test_pkey'::name); +select version, tree_level, + index_size / current_setting('block_size')::int as index_size, + root_block_no, internal_pages, leaf_pages, empty_pages, deleted_pages, + avg_leaf_density, leaf_fragmentation + 
from pgstatindex('test_pkey'::regclass); select pg_relpages('test'); select pg_relpages('test_pkey'); diff --git a/contrib/postgres_fdw/Makefile b/contrib/postgres_fdw/Makefile index 8c497201d0e77..c0f4160f6ba40 100644 --- a/contrib/postgres_fdw/Makefile +++ b/contrib/postgres_fdw/Makefile @@ -5,7 +5,6 @@ OBJS = postgres_fdw.o option.o deparse.o connection.o PG_CPPFLAGS = -I$(libpq_srcdir) SHLIB_LINK = $(libpq) -SHLIB_PREREQS = submake-libpq EXTENSION = postgres_fdw DATA = postgres_fdw--1.0.sql @@ -20,6 +19,7 @@ PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) include $(PGXS) else +SHLIB_PREREQS = submake-libpq subdir = contrib/postgres_fdw top_builddir = ../.. include $(top_builddir)/src/Makefile.global diff --git a/contrib/postgres_fdw/deparse.c b/contrib/postgres_fdw/deparse.c index d7d9b9c77d254..c13694a2d5e2d 100644 --- a/contrib/postgres_fdw/deparse.c +++ b/contrib/postgres_fdw/deparse.c @@ -17,11 +17,12 @@ * We do not consider that it is ever safe to send COLLATE expressions to * the remote server: it might not have the same collation names we do. * (Later we might consider it safe to send COLLATE "C", but even that would - * fail on old remote servers.) An expression is considered safe to send only - * if all collations used in it are traceable to Var(s) of the foreign table. - * That implies that if the remote server gets a different answer than we do, - * the foreign table's columns are not marked with collations that match the - * remote table's columns, which we can consider to be user error. + * fail on old remote servers.) An expression is considered safe to send + * only if all operator/function input collations used in it are traceable to + * Var(s) of the foreign table. That implies that if the remote server gets + * a different answer than we do, the foreign table's columns are not marked + * with collations that match the remote table's columns, which we can + * consider to be user error. * * Portions Copyright (c) 2012-2014, PostgreSQL Global Development Group * @@ -68,9 +69,12 @@ typedef struct foreign_glob_cxt */ typedef enum { - FDW_COLLATE_NONE, /* expression is of a noncollatable type */ + FDW_COLLATE_NONE, /* expression is of a noncollatable type, or + * it has default collation that is not + * traceable to a foreign Var */ FDW_COLLATE_SAFE, /* collation derives from a foreign Var */ - FDW_COLLATE_UNSAFE /* collation derives from something else */ + FDW_COLLATE_UNSAFE /* collation is non-default and derives from + * something other than a foreign Var */ } FDWCollateState; typedef struct foreign_loc_cxt @@ -106,6 +110,7 @@ static void deparseTargetList(StringInfo buf, PlannerInfo *root, Index rtindex, Relation rel, + bool is_returning, Bitmapset *attrs_used, List **retrieved_attrs); static void deparseReturningList(StringInfo buf, PlannerInfo *root, @@ -254,19 +259,42 @@ foreign_expr_walker(Node *node, var->varlevelsup == 0) { /* Var belongs to foreign table */ + + /* + * System columns other than ctid should not be sent to + * the remote, since we don't make any effort to ensure + * that local and remote values match (tableoid, in + * particular, almost certainly doesn't match). + */ + if (var->varattno < 0 && + var->varattno != SelfItemPointerAttributeNumber) + return false; + + /* Else check the collation */ collation = var->varcollid; state = OidIsValid(collation) ? 
FDW_COLLATE_SAFE : FDW_COLLATE_NONE; } else { /* Var belongs to some other table */ - if (var->varcollid != InvalidOid && - var->varcollid != DEFAULT_COLLATION_OID) - return false; - - /* We can consider that it doesn't set collation */ - collation = InvalidOid; - state = FDW_COLLATE_NONE; + collation = var->varcollid; + if (collation == InvalidOid || + collation == DEFAULT_COLLATION_OID) + { + /* + * It's noncollatable, or it's safe to combine with a + * collatable foreign Var, so set state to NONE. + */ + state = FDW_COLLATE_NONE; + } + else + { + /* + * Do not fail right away, since the Var might appear + * in a collation-insensitive context. + */ + state = FDW_COLLATE_UNSAFE; + } } } break; @@ -276,16 +304,16 @@ foreign_expr_walker(Node *node, /* * If the constant has nondefault collation, either it's of a - * non-builtin type, or it reflects folding of a CollateExpr; - * either way, it's unsafe to send to the remote. + * non-builtin type, or it reflects folding of a CollateExpr. + * It's unsafe to send to the remote unless it's used in a + * non-collation-sensitive context. */ - if (c->constcollid != InvalidOid && - c->constcollid != DEFAULT_COLLATION_OID) - return false; - - /* Otherwise, we can consider that it doesn't set collation */ - collation = InvalidOid; - state = FDW_COLLATE_NONE; + collation = c->constcollid; + if (collation == InvalidOid || + collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; } break; case T_Param: @@ -293,19 +321,19 @@ foreign_expr_walker(Node *node, Param *p = (Param *) node; /* - * Collation handling is same as for Consts. + * Collation rule is same as for Consts and non-foreign Vars. */ - if (p->paramcollid != InvalidOid && - p->paramcollid != DEFAULT_COLLATION_OID) - return false; - - collation = InvalidOid; - state = FDW_COLLATE_NONE; + collation = p->paramcollid; + if (collation == InvalidOid || + collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; + else + state = FDW_COLLATE_UNSAFE; } break; case T_ArrayRef: { - ArrayRef *ar = (ArrayRef *) node;; + ArrayRef *ar = (ArrayRef *) node; /* Assignment should not be in restrictions. */ if (ar->refassgnexpr != NULL) @@ -336,6 +364,8 @@ foreign_expr_walker(Node *node, else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; else state = FDW_COLLATE_UNSAFE; } @@ -381,6 +411,8 @@ foreign_expr_walker(Node *node, else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; else state = FDW_COLLATE_UNSAFE; } @@ -422,6 +454,8 @@ foreign_expr_walker(Node *node, else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; else state = FDW_COLLATE_UNSAFE; } @@ -471,7 +505,7 @@ foreign_expr_walker(Node *node, /* * RelabelType must not introduce a collation not derived from - * an input foreign Var. + * an input foreign Var (same logic as for a real function). 
*/ collation = r->resultcollid; if (collation == InvalidOid) @@ -479,6 +513,8 @@ foreign_expr_walker(Node *node, else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; else state = FDW_COLLATE_UNSAFE; } @@ -528,7 +564,7 @@ foreign_expr_walker(Node *node, /* * ArrayExpr must not introduce a collation not derived from - * an input foreign Var. + * an input foreign Var (same logic as for a function). */ collation = a->array_collid; if (collation == InvalidOid) @@ -536,6 +572,8 @@ foreign_expr_walker(Node *node, else if (inner_cxt.state == FDW_COLLATE_SAFE && collation == inner_cxt.collation) state = FDW_COLLATE_SAFE; + else if (collation == DEFAULT_COLLATION_OID) + state = FDW_COLLATE_NONE; else state = FDW_COLLATE_UNSAFE; } @@ -684,7 +722,7 @@ deparseSelectSql(StringInfo buf, * Construct SELECT list */ appendStringInfoString(buf, "SELECT "); - deparseTargetList(buf, root, baserel->relid, rel, attrs_used, + deparseTargetList(buf, root, baserel->relid, rel, false, attrs_used, retrieved_attrs); /* @@ -698,7 +736,8 @@ deparseSelectSql(StringInfo buf, /* * Emit a target list that retrieves the columns specified in attrs_used. - * This is used for both SELECT and RETURNING targetlists. + * This is used for both SELECT and RETURNING targetlists; the is_returning + * parameter is true only for a RETURNING targetlist. * * The tlist text is appended to buf, and we also create an integer List * of the columns being retrieved, which is returned to *retrieved_attrs. @@ -708,6 +747,7 @@ deparseTargetList(StringInfo buf, PlannerInfo *root, Index rtindex, Relation rel, + bool is_returning, Bitmapset *attrs_used, List **retrieved_attrs) { @@ -737,6 +777,8 @@ deparseTargetList(StringInfo buf, { if (!first) appendStringInfoString(buf, ", "); + else if (is_returning) + appendStringInfoString(buf, " RETURNING "); first = false; deparseColumnRef(buf, rtindex, i, root); @@ -754,6 +796,8 @@ deparseTargetList(StringInfo buf, { if (!first) appendStringInfoString(buf, ", "); + else if (is_returning) + appendStringInfoString(buf, " RETURNING "); first = false; appendStringInfoString(buf, "ctid"); @@ -763,7 +807,7 @@ deparseTargetList(StringInfo buf, } /* Don't generate bad syntax if no undropped columns */ - if (first) + if (first && !is_returning) appendStringInfoString(buf, "NULL"); } @@ -979,11 +1023,8 @@ deparseReturningList(StringInfo buf, PlannerInfo *root, } if (attrs_used != NULL) - { - appendStringInfoString(buf, " RETURNING "); - deparseTargetList(buf, root, rtindex, rel, attrs_used, + deparseTargetList(buf, root, rtindex, rel, true, attrs_used, retrieved_attrs); - } else *retrieved_attrs = NIL; } diff --git a/contrib/postgres_fdw/expected/postgres_fdw.out b/contrib/postgres_fdw/expected/postgres_fdw.out index 2e49ee317a290..aceef406ae5e9 100644 --- a/contrib/postgres_fdw/expected/postgres_fdw.out +++ b/contrib/postgres_fdw/expected/postgres_fdw.out @@ -231,6 +231,39 @@ SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1'; 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo (1 row) +-- with FOR UPDATE/SHARE +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE; + QUERY PLAN +---------------------------------------------------------------------------------------------------------------- + LockRows + Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.* + -> Foreign Scan on public.ft1 t1 + Output: c1, c2, c3, c4, 
c5, c6, c7, c8, t1.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 101)) FOR UPDATE +(5 rows) + +SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +-----+----+-------+------------------------------+--------------------------+----+------------+----- + 101 | 1 | 00101 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo +(1 row) + +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE; + QUERY PLAN +--------------------------------------------------------------------------------------------------------------- + LockRows + Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.* + -> Foreign Scan on public.ft1 t1 + Output: c1, c2, c3, c4, c5, c6, c7, c8, t1.* + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE (("C 1" = 102)) FOR SHARE +(5 rows) + +SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +-----+----+-------+------------------------------+--------------------------+----+------------+----- + 102 | 2 | 00102 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo +(1 row) + -- aggregate SELECT COUNT(*) FROM ft1 t1; count @@ -826,6 +859,74 @@ DEALLOCATE st2; DEALLOCATE st3; DEALLOCATE st4; DEALLOCATE st5; +-- System columns, except ctid, should not be sent to remote +EXPLAIN (VERBOSE, COSTS false) +SELECT * FROM ft1 t1 WHERE t1.tableoid = 'pg_class'::regclass LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------- + Limit + Output: c1, c2, c3, c4, c5, c6, c7, c8 + -> Foreign Scan on public.ft1 t1 + Output: c1, c2, c3, c4, c5, c6, c7, c8 + Filter: (t1.tableoid = 1259::oid) + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" +(6 rows) + +SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIMIT 1; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +----+----+-------+------------------------------+--------------------------+----+------------+----- + 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo +(1 row) + +EXPLAIN (VERBOSE, COSTS false) +SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------- + Limit + Output: ((tableoid)::regclass), c1, c2, c3, c4, c5, c6, c7, c8 + -> Foreign Scan on public.ft1 t1 + Output: (tableoid)::regclass, c1, c2, c3, c4, c5, c6, c7, c8 + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" +(5 rows) + +SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1; + tableoid | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +----------+----+----+-------+------------------------------+--------------------------+----+------------+----- + ft1 | 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo +(1 row) + +EXPLAIN (VERBOSE, COSTS false) +SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)'; + QUERY PLAN +------------------------------------------------------------------------------------------------------- + Foreign Scan on public.ft1 t1 + Output: c1, c2, c3, c4, c5, c6, c7, c8 + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8 FROM "S 1"."T 1" WHERE ((ctid = '(0,2)'::tid)) +(3 rows) + +SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)'; + c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +----+----+-------+------------------------------+--------------------------+----+------------+----- + 2 | 2 | 00002 | Sat Jan 03 00:00:00 1970 PST | Sat Jan 03 00:00:00 1970 | 2 | 2 | foo +(1 row) + 
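The reason ctid can be shipped while tableoid cannot is that ctid identifies the same physical tuple on the remote side, whereas the local foreign table and the remote table are distinct relations with unrelated OIDs, so a remotely evaluated tableoid qual would compare against the wrong value. A minimal sketch, assuming the loopback setup used by these tests (ft1 pointing at "S 1"."T 1" in the same database), showing that the two OIDs differ:

    -- OID a local tableoid filter compares against vs. the OID the remote server would see
    SELECT 'ft1'::regclass::oid          AS local_oid,
           '"S 1"."T 1"'::regclass::oid  AS remote_oid;
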
+EXPLAIN (VERBOSE, COSTS false) +SELECT ctid, * FROM ft1 t1 LIMIT 1; + QUERY PLAN +------------------------------------------------------------------------------------- + Limit + Output: ctid, c1, c2, c3, c4, c5, c6, c7, c8 + -> Foreign Scan on public.ft1 t1 + Output: ctid, c1, c2, c3, c4, c5, c6, c7, c8 + Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" +(5 rows) + +SELECT ctid, * FROM ft1 t1 LIMIT 1; + ctid | c1 | c2 | c3 | c4 | c5 | c6 | c7 | c8 +-------+----+----+-------+------------------------------+--------------------------+----+------------+----- + (0,1) | 1 | 1 | 00001 | Fri Jan 02 00:00:00 1970 PST | Fri Jan 02 00:00:00 1970 | 1 | 1 | foo +(1 row) + -- =================================================================== -- used in pl/pgsql function -- =================================================================== @@ -898,71 +999,110 @@ COMMIT; -- =================================================================== -- test handling of collations -- =================================================================== -create table loct3 (f1 text collate "C", f2 text); -create foreign table ft3 (f1 text collate "C", f2 text) - server loopback options (table_name 'loct3'); +create table loct3 (f1 text collate "C" unique, f2 text, f3 varchar(10) unique); +create foreign table ft3 (f1 text collate "C", f2 text, f3 varchar(10)) + server loopback options (table_name 'loct3', use_remote_estimate 'true'); -- can be sent to remote explain (verbose, costs off) select * from ft3 where f1 = 'foo'; - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------ Foreign Scan on public.ft3 - Output: f1, f2 - Remote SQL: SELECT f1, f2 FROM public.loct3 WHERE ((f1 = 'foo'::text)) + Output: f1, f2, f3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f1 = 'foo'::text)) (3 rows) explain (verbose, costs off) select * from ft3 where f1 COLLATE "C" = 'foo'; - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------ Foreign Scan on public.ft3 - Output: f1, f2 - Remote SQL: SELECT f1, f2 FROM public.loct3 WHERE ((f1 = 'foo'::text)) + Output: f1, f2, f3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f1 = 'foo'::text)) (3 rows) explain (verbose, costs off) select * from ft3 where f2 = 'foo'; - QUERY PLAN --------------------------------------------------------------------------- + QUERY PLAN +------------------------------------------------------------------------------ Foreign Scan on public.ft3 - Output: f1, f2 - Remote SQL: SELECT f1, f2 FROM public.loct3 WHERE ((f2 = 'foo'::text)) + Output: f1, f2, f3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f2 = 'foo'::text)) (3 rows) +explain (verbose, costs off) select * from ft3 where f3 = 'foo'; + QUERY PLAN +------------------------------------------------------------------------------ + Foreign Scan on public.ft3 + Output: f1, f2, f3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE ((f3 = 'foo'::text)) +(3 rows) + +explain (verbose, costs off) select * from ft3 f, loct3 l + where f.f3 = l.f3 and l.f1 = 'foo'; + QUERY PLAN +-------------------------------------------------------------------------------------------------- + Nested Loop + Output: f.f1, f.f2, f.f3, l.f1, l.f2, l.f3 + -> Index Scan using loct3_f1_key on public.loct3 l + Output: l.f1, l.f2, 
l.f3 + Index Cond: (l.f1 = 'foo'::text) + -> Foreign Scan on public.ft3 f + Output: f.f1, f.f2, f.f3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 WHERE (($1::character varying(10) = f3)) +(8 rows) + -- can't be sent to remote explain (verbose, costs off) select * from ft3 where f1 COLLATE "POSIX" = 'foo'; - QUERY PLAN ------------------------------------------------ + QUERY PLAN +--------------------------------------------------- Foreign Scan on public.ft3 - Output: f1, f2 + Output: f1, f2, f3 Filter: ((ft3.f1)::text = 'foo'::text) - Remote SQL: SELECT f1, f2 FROM public.loct3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 (4 rows) explain (verbose, costs off) select * from ft3 where f1 = 'foo' COLLATE "C"; - QUERY PLAN ------------------------------------------------ + QUERY PLAN +--------------------------------------------------- Foreign Scan on public.ft3 - Output: f1, f2 + Output: f1, f2, f3 Filter: (ft3.f1 = 'foo'::text COLLATE "C") - Remote SQL: SELECT f1, f2 FROM public.loct3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 (4 rows) explain (verbose, costs off) select * from ft3 where f2 COLLATE "C" = 'foo'; - QUERY PLAN ------------------------------------------------ + QUERY PLAN +--------------------------------------------------- Foreign Scan on public.ft3 - Output: f1, f2 + Output: f1, f2, f3 Filter: ((ft3.f2)::text = 'foo'::text) - Remote SQL: SELECT f1, f2 FROM public.loct3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 (4 rows) explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C"; - QUERY PLAN ------------------------------------------------ + QUERY PLAN +--------------------------------------------------- Foreign Scan on public.ft3 - Output: f1, f2 + Output: f1, f2, f3 Filter: (ft3.f2 = 'foo'::text COLLATE "C") - Remote SQL: SELECT f1, f2 FROM public.loct3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 (4 rows) +explain (verbose, costs off) select * from ft3 f, loct3 l + where f.f3 = l.f3 COLLATE "POSIX" and l.f1 = 'foo'; + QUERY PLAN +------------------------------------------------------------- + Hash Join + Output: f.f1, f.f2, f.f3, l.f1, l.f2, l.f3 + Hash Cond: ((f.f3)::text = (l.f3)::text) + -> Foreign Scan on public.ft3 f + Output: f.f1, f.f2, f.f3 + Remote SQL: SELECT f1, f2, f3 FROM public.loct3 + -> Hash + Output: l.f1, l.f2, l.f3 + -> Index Scan using loct3_f1_key on public.loct3 l + Output: l.f1, l.f2, l.f3 + Index Cond: (l.f1 = 'foo'::text) +(11 rows) + -- =================================================================== -- test writable foreign table stuff -- =================================================================== @@ -2086,6 +2226,59 @@ SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1; 1104 | 204 | ddd | (819 rows) +EXPLAIN (verbose, costs off) +INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + Insert on public.ft2 + Output: (tableoid)::regclass + Remote SQL: INSERT INTO "S 1"."T 1"("C 1", c2, c3, c4, c5, c6, c7, c8) VALUES ($1, $2, $3, $4, $5, $6, $7, $8) + -> Result + Output: 9999, 999, NULL::integer, 'foo'::text, NULL::timestamp with time zone, NULL::timestamp without time zone, NULL::character varying, 'ft2 '::character(10), NULL::user_enum +(5 rows) + +INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass; + tableoid +---------- + ft2 +(1 row) + 
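With these changes, a RETURNING list on foreign-table DML can freely mix columns fetched back through the remote RETURNING clause with expressions computed locally, such as tableoid (which, as shown above, is never shipped). A minimal sketch, assuming the ft2 foreign table from the tests above (the row values are arbitrary):

    INSERT INTO ft2 (c1, c2, c3)
      VALUES (10000, 1000, 'baz')
      RETURNING c1, c3, tableoid::regclass;  -- c1, c3 come back via remote RETURNING; tableoid is computed locally
    DELETE FROM ft2 WHERE c1 = 10000;        -- remove the sketch row
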
+EXPLAIN (verbose, costs off) +UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------------------------------------------- + Update on public.ft2 + Output: (tableoid)::regclass + Remote SQL: UPDATE "S 1"."T 1" SET c3 = $2 WHERE ctid = $1 + -> Foreign Scan on public.ft2 + Output: c1, c2, NULL::integer, 'bar'::text, c4, c5, c6, c7, c8, ctid + Remote SQL: SELECT "C 1", c2, c4, c5, c6, c7, c8, ctid FROM "S 1"."T 1" WHERE (("C 1" = 9999)) FOR UPDATE +(6 rows) + +UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; + tableoid +---------- + ft2 +(1 row) + +EXPLAIN (verbose, costs off) +DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; + QUERY PLAN +------------------------------------------------------------------------------------ + Delete on public.ft2 + Output: (tableoid)::regclass + Remote SQL: DELETE FROM "S 1"."T 1" WHERE ctid = $1 + -> Foreign Scan on public.ft2 + Output: ctid + Remote SQL: SELECT ctid FROM "S 1"."T 1" WHERE (("C 1" = 9999)) FOR UPDATE +(6 rows) + +DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; + tableoid +---------- + ft2 +(1 row) + -- Test that trigger on remote table works as expected CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$ BEGIN diff --git a/contrib/postgres_fdw/sql/postgres_fdw.sql b/contrib/postgres_fdw/sql/postgres_fdw.sql index 6187839453c0a..4bf66ad36a028 100644 --- a/contrib/postgres_fdw/sql/postgres_fdw.sql +++ b/contrib/postgres_fdw/sql/postgres_fdw.sql @@ -145,6 +145,11 @@ SELECT * FROM ft1 WHERE false; -- with WHERE clause EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1'; SELECT * FROM ft1 t1 WHERE t1.c1 = 101 AND t1.c6 = '1' AND t1.c7 >= '1'; +-- with FOR UPDATE/SHARE +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE; +SELECT * FROM ft1 t1 WHERE c1 = 101 FOR UPDATE; +EXPLAIN (VERBOSE, COSTS false) SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE; +SELECT * FROM ft1 t1 WHERE c1 = 102 FOR SHARE; -- aggregate SELECT COUNT(*) FROM ft1 t1; -- join two tables @@ -248,6 +253,20 @@ DEALLOCATE st3; DEALLOCATE st4; DEALLOCATE st5; +-- System columns, except ctid, should not be sent to remote +EXPLAIN (VERBOSE, COSTS false) +SELECT * FROM ft1 t1 WHERE t1.tableoid = 'pg_class'::regclass LIMIT 1; +SELECT * FROM ft1 t1 WHERE t1.tableoid = 'ft1'::regclass LIMIT 1; +EXPLAIN (VERBOSE, COSTS false) +SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1; +SELECT tableoid::regclass, * FROM ft1 t1 LIMIT 1; +EXPLAIN (VERBOSE, COSTS false) +SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)'; +SELECT * FROM ft1 t1 WHERE t1.ctid = '(0,2)'; +EXPLAIN (VERBOSE, COSTS false) +SELECT ctid, * FROM ft1 t1 LIMIT 1; +SELECT ctid, * FROM ft1 t1 LIMIT 1; + -- =================================================================== -- used in pl/pgsql function -- =================================================================== @@ -291,19 +310,24 @@ COMMIT; -- =================================================================== -- test handling of collations -- =================================================================== -create table loct3 (f1 text collate "C", f2 text); -create foreign table ft3 (f1 text collate "C", f2 text) - server loopback options (table_name 'loct3'); +create table loct3 (f1 text collate "C" unique, f2 text, f3 varchar(10) unique); +create foreign table ft3 (f1 text collate "C", f2 text, f3 varchar(10)) + server loopback 
options (table_name 'loct3', use_remote_estimate 'true'); -- can be sent to remote explain (verbose, costs off) select * from ft3 where f1 = 'foo'; explain (verbose, costs off) select * from ft3 where f1 COLLATE "C" = 'foo'; explain (verbose, costs off) select * from ft3 where f2 = 'foo'; +explain (verbose, costs off) select * from ft3 where f3 = 'foo'; +explain (verbose, costs off) select * from ft3 f, loct3 l + where f.f3 = l.f3 and l.f1 = 'foo'; -- can't be sent to remote explain (verbose, costs off) select * from ft3 where f1 COLLATE "POSIX" = 'foo'; explain (verbose, costs off) select * from ft3 where f1 = 'foo' COLLATE "C"; explain (verbose, costs off) select * from ft3 where f2 COLLATE "C" = 'foo'; explain (verbose, costs off) select * from ft3 where f2 = 'foo' COLLATE "C"; +explain (verbose, costs off) select * from ft3 f, loct3 l + where f.f3 = l.f3 COLLATE "POSIX" and l.f1 = 'foo'; -- =================================================================== -- test writable foreign table stuff @@ -328,6 +352,15 @@ EXPLAIN (verbose, costs off) DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; DELETE FROM ft2 USING ft1 WHERE ft1.c1 = ft2.c2 AND ft1.c1 % 10 = 2; SELECT c1,c2,c3,c4 FROM ft2 ORDER BY c1; +EXPLAIN (verbose, costs off) +INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass; +INSERT INTO ft2 (c1,c2,c3) VALUES (9999,999,'foo') RETURNING tableoid::regclass; +EXPLAIN (verbose, costs off) +UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; +UPDATE ft2 SET c3 = 'bar' WHERE c1 = 9999 RETURNING tableoid::regclass; +EXPLAIN (verbose, costs off) +DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; +DELETE FROM ft2 WHERE c1 = 9999 RETURNING tableoid::regclass; -- Test that trigger on remote table works as expected CREATE OR REPLACE FUNCTION "S 1".F_BRTRIG() RETURNS trigger AS $$ diff --git a/contrib/seg/seg--unpackaged--1.0.sql b/contrib/seg/seg--unpackaged--1.0.sql index ebd6b3bc5b53c..3987ebf3ddff3 100644 --- a/contrib/seg/seg--unpackaged--1.0.sql +++ b/contrib/seg/seg--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/seg/seg--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION seg" to load this file. \quit +\echo Use "CREATE EXTENSION seg FROM unpackaged" to load this file. \quit ALTER EXTENSION seg ADD type seg; ALTER EXTENSION seg ADD function seg_in(cstring); diff --git a/contrib/sepgsql/expected/alter.out b/contrib/sepgsql/expected/alter.out index 124f862cec3d9..e67cc2dc0c80d 100644 --- a/contrib/sepgsql/expected/alter.out +++ b/contrib/sepgsql/expected/alter.out @@ -8,9 +8,9 @@ DROP DATABASE IF EXISTS regtest_sepgsql_test_database; DROP USER IF EXISTS regtest_sepgsql_test_user; RESET client_min_messages; SELECT sepgsql_getcon(); -- confirm client privilege - sepgsql_getcon -------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0 + sepgsql_getcon +---------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 (1 row) -- @@ -40,140 +40,136 @@ SET client_min_messages = LOG; -- owner is not actually changed. 
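Most of the remaining changes in this expected-output file follow from one thing: the sepgsql regression tests now run under the sepgsql_regtest_superuser_t domain rather than unconfined_t, so the scontext shown in the expected audit lines below changes accordingly. The test's own way of confirming the active domain, usable interactively as well (assuming a sepgsql-enabled server with the module loaded), is simply:

    SELECT sepgsql_getcon();
    -- expected with the updated test policy: unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0
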
-- ALTER DATABASE regtest_sepgsql_test_database_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database_1" ALTER DATABASE regtest_sepgsql_test_database_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database_1" ALTER SCHEMA regtest_schema_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" ALTER SCHEMA regtest_schema_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" ALTER TABLE regtest_table_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_1.regtest_table_1" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_1.regtest_table_1" ALTER TABLE regtest_table_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 
tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_1.regtest_table_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_1.regtest_table_1" ALTER SEQUENCE regtest_seq_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_1.regtest_seq_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_1.regtest_seq_1" ALTER SEQUENCE regtest_seq_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_1.regtest_seq_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_1.regtest_seq_1" ALTER VIEW regtest_view_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_1.regtest_view_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_1.regtest_view_1" ALTER VIEW regtest_view_1 OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_1.regtest_view_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_1.regtest_view_1" ALTER FUNCTION regtest_func_1(text) OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_1.regtest_func_1(pg_catalog.text)" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_1.regtest_func_1(pg_catalog.text)" ALTER FUNCTION regtest_func_1(text) OWNER TO regtest_sepgsql_test_user; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_1.regtest_func_1(pg_catalog.text)" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_1.regtest_func_1(pg_catalog.text)" -- -- ALTER xxx SET SCHEMA -- ALTER TABLE regtest_table_1 SET SCHEMA regtest_schema_2; -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 
tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_1.regtest_table_1" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_1.regtest_table_1" ALTER SEQUENCE regtest_seq_1 SET SCHEMA regtest_schema_2; -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_1.regtest_seq_1" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_1.regtest_seq_1" ALTER VIEW regtest_view_1 SET SCHEMA regtest_schema_2; -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_1.regtest_view_1" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_1.regtest_view_1" ALTER FUNCTION regtest_func_1(text) SET SCHEMA regtest_schema_2; -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" -LOG: SELinux: 
allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_1.regtest_func_1(pg_catalog.text)" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_1.regtest_func_1(pg_catalog.text)" -- -- ALTER xxx RENAME TO -- ALTER DATABASE regtest_sepgsql_test_database_1 RENAME TO regtest_sepgsql_test_database; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database_1" ALTER SCHEMA regtest_schema_1 RENAME TO regtest_schema; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_1" ALTER TABLE regtest_table_1 RENAME TO regtest_table; -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" -LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table_1" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" +LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table_1" ALTER SEQUENCE regtest_seq_1 RENAME TO regtest_seq; 
-LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_2.regtest_seq_1" +LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_2.regtest_seq_1" ALTER VIEW regtest_view_1 RENAME TO regtest_view; -LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_2.regtest_view_1" +LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_2.regtest_view_1" ALTER FUNCTION regtest_func_1(text) RENAME TO regtest_func; -LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_2.regtest_func_1(pg_catalog.text)" +LOG: SELinux: allowed { add_name remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema_2.regtest_func_1(pg_catalog.text)" SET search_path = regtest_schema, regtest_schema_2, public; -- -- misc ALTER commands -- ALTER DATABASE regtest_sepgsql_test_database CONNECTION LIMIT 999; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database" ALTER DATABASE regtest_sepgsql_test_database SET search_path TO regtest_schema, public; -- not supported yet ALTER TABLE regtest_table ADD COLUMN d float; -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 
tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.d" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.d" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.d" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.d" ALTER TABLE regtest_table DROP COLUMN d; -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.d" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.d" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.d" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.d" ALTER TABLE regtest_table ALTER b SET DEFAULT 'abcd'; -- not supported yet ALTER TABLE regtest_table ALTER b SET 
DEFAULT 'XYZ'; -- not supported yet ALTER TABLE regtest_table ALTER b DROP DEFAULT; -- not supported yet ALTER TABLE regtest_table ALTER b SET NOT NULL; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" ALTER TABLE regtest_table ALTER b DROP NOT NULL; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" ALTER TABLE regtest_table ALTER b SET STATISTICS -1; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" ALTER TABLE regtest_table ALTER b SET (n_distinct = 999); -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" ALTER TABLE regtest_table ALTER b SET STORAGE PLAIN; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column 
name="regtest_schema.regtest_table_2.b" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.b" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" ALTER TABLE regtest_table ADD CONSTRAINT test_fk FOREIGN KEY (a) REFERENCES regtest_table_3(x); -- not supported -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 
tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema_2" LINE 1: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" f... ^ QUERY: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL) -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" LINE 1: ...schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_s... 
^ QUERY: SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL) -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table column a" CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x" +LOG: SELinux: allowed { select } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table regtest_table_3 column x" CONTEXT: SQL statement "SELECT fk."a" FROM ONLY "regtest_schema_2"."regtest_table" fk LEFT OUTER JOIN ONLY "regtest_schema"."regtest_table_3" pk ON ( pk."x" OPERATOR(pg_catalog.=) fk."a") WHERE pk."x" IS NULL AND (fk."a" IS NOT NULL)" ALTER TABLE regtest_table ADD CONSTRAINT test_ck CHECK (b like '%abc%') NOT VALID; -- not supported ALTER TABLE regtest_table VALIDATE CONSTRAINT test_ck; -- not supported @@ -186,23 +182,23 @@ CREATE RULE regtest_test_rule AS ON INSERT TO regtest_table_3 DO ALSO NOTHING; ALTER TABLE regtest_table_3 DISABLE RULE regtest_test_rule; -- not supported ALTER TABLE regtest_table_3 ENABLE RULE regtest_test_rule; -- not supported ALTER TABLE regtest_table SET WITH OIDS; -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.oid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.oid" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid" ALTER TABLE regtest_table SET WITHOUT OIDS; -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.oid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema_2.regtest_table.oid" ALTER TABLE regtest_table SET (fillfactor = 75); -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" ALTER TABLE regtest_table RESET (fillfactor); -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" ALTER TABLE regtest_table_2 NO INHERIT regtest_table; -- not supported ALTER TABLE regtest_table_2 INHERIT regtest_table; -- not supported ALTER 
TABLE regtest_table SET TABLESPACE pg_default; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema_2.regtest_table" ALTER VIEW regtest_view SET (security_barrier); -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_2.regtest_view" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema_2.regtest_view" ALTER SEQUENCE regtest_seq INCREMENT BY 10 START WITH 1000; -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_2.regtest_seq" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema_2.regtest_seq" -- -- clean-up objects -- diff --git a/contrib/sepgsql/expected/ddl.out b/contrib/sepgsql/expected/ddl.out index 08cd6d5e01dbb..deb26ee6bb401 100644 --- a/contrib/sepgsql/expected/ddl.out +++ b/contrib/sepgsql/expected/ddl.out @@ -8,9 +8,9 @@ DROP USER IF EXISTS regtest_sepgsql_test_user; RESET client_min_messages; -- confirm required permissions using audit messages SELECT sepgsql_getcon(); -- confirm client privilege - sepgsql_getcon -------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0 + sepgsql_getcon +---------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 (1 row) SET sepgsql.debug_audit = true; @@ -19,257 +19,247 @@ SET client_min_messages = LOG; -- CREATE Permission checks -- CREATE DATABASE regtest_sepgsql_test_database; -LOG: SELinux: allowed { getattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_db_t:s0 tclass=db_database name="template1" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database" +LOG: SELinux: allowed { getattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_db_t:s0 tclass=db_database name="template1" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database" CREATE USER regtest_sepgsql_test_user; CREATE SCHEMA regtest_schema; -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" GRANT ALL ON SCHEMA regtest_schema TO regtest_sepgsql_test_user; SET search_path = regtest_schema, public; CREATE TABLE regtest_table (x serial primary key, y text); -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.tableoid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmax" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmax" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmin" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmin" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.ctid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.x" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.y" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 
tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LINE 1: CREATE TABLE regtest_table (x serial primary key, y text); - ^ -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.tableoid" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmax" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmax" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column 
name="regtest_schema.regtest_table.cmin" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmin" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.ctid" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.x" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.y" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq" ALTER TABLE regtest_table ADD COLUMN z int; -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.z" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { 
create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.z" CREATE TABLE regtest_table_2 (a int) WITH OIDS; -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_2" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.tableoid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmax" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmax" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmin" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmin" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.ctid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.a" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_2" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.tableoid" +LOG: SELinux: allowed { create } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmax" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmax" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmin" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmin" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.ctid" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.a" -- corresponding toast table should not have label and permission checks ALTER TABLE regtest_table_2 ADD COLUMN b text; -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" -- VACUUM FULL internally create a new table and swap them later. 
VACUUM FULL regtest_table; CREATE VIEW regtest_view AS SELECT * FROM regtest_table WHERE x < 100; -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view" CREATE SEQUENCE regtest_seq; -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_seq" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_seq" CREATE TYPE regtest_comptype AS (a int, b text); -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" CREATE FUNCTION regtest_func(text,int[]) RETURNS bool LANGUAGE plpgsql AS 'BEGIN RAISE NOTICE ''regtest_func => %'', $1; RETURN true; END'; -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func(pg_catalog.text,integer[])" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func(pg_catalog.text,integer[])" CREATE AGGREGATE regtest_agg ( sfunc1 = int4pl, basetype = int4, stype1 = int4, initcond1 = '0' ); -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_agg(integer)" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_agg(integer)" -- CREATE objects owned by others SET SESSION AUTHORIZATION regtest_sepgsql_test_user; SET search_path = regtest_schema, public; CREATE TABLE regtest_table_3 (x int, y serial); -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_3_y_seq" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.tableoid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmax" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmax" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmin" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 
tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmin" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.ctid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.x" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.y" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_3_y_seq" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_3_y_seq" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.tableoid" +LOG: SELinux: allowed { create } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmax" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmax" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmin" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmin" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.ctid" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.x" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.y" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_3_y_seq" CREATE VIEW regtest_view_2 AS SELECT * FROM regtest_table_3 WHERE x < y; -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view_2" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view_2" CREATE FUNCTION regtest_func_2(int) RETURNS bool LANGUAGE plpgsql AS 'BEGIN RETURN $1 * $1 < 100; END'; -LOG: SELinux: allowed { search } 
scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func_2(integer)" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func_2(integer)" RESET SESSION AUTHORIZATION; -- -- ALTER and CREATE/DROP extra attribute permissions -- CREATE TABLE regtest_table_4 (x int primary key, y int, z int); -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.tableoid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmax" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column 
name="regtest_schema.regtest_table_4.xmax" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmin" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmin" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.ctid" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.x" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.y" -LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.z" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LINE 1: CREATE TABLE regtest_table_4 (x int primary key, y int, z in... 
- ^ -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.tableoid" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmax" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmax" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmin" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmin" +LOG: SELinux: allowed { create } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.ctid" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.x" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.y" +LOG: SELinux: allowed { create } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.z" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" CREATE INDEX regtest_index_tbl4_y ON regtest_table_4(y); -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" CREATE INDEX regtest_index_tbl4_z ON regtest_table_4(z); -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" ALTER TABLE regtest_table_4 ALTER COLUMN y TYPE float; -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 
tclass=db_column name="regtest_schema.regtest_table_4.y" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.y" DROP INDEX regtest_index_tbl4_y; -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" ALTER TABLE regtest_table_4 ADD CONSTRAINT regtest_tbl4_con EXCLUDE USING btree (z WITH =); -LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { add_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" DROP TABLE regtest_table_4 CASCADE; -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 
tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.tableoid" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmax" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmax" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmin" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmin" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.ctid" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.x" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.y" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.z" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 
tclass=db_table name="regtest_schema.regtest_table_4" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.tableoid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmax" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmax" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.cmin" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.xmin" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.ctid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.x" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.y" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_4.z" -- -- DROP Permission checks (with clean-up) -- DROP FUNCTION regtest_func(text,int[]); -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func(pg_catalog.text,integer[])" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func(pg_catalog.text,integer[])" DROP AGGREGATE regtest_agg(int); -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema 
name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_agg(integer)" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="pg_catalog" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_agg(integer)" DROP SEQUENCE regtest_seq; -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_seq" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_seq" DROP VIEW regtest_view; -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view" ALTER TABLE regtest_table DROP COLUMN y; -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.y" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.y" ALTER TABLE regtest_table_2 SET WITHOUT OIDS; -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.oid" DROP TABLE regtest_table; -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 
tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.tableoid" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmax" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmax" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmin" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmin" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.ctid" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.x" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.z" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_x_seq" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { setattr } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table 
name="regtest_schema.regtest_table" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.tableoid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmax" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmax" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.cmin" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.xmin" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.ctid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.x" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table.z" DROP OWNED BY regtest_sepgsql_test_user; -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func_2(integer)" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view_2" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_3_y_seq" -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" -LOG: 
SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.tableoid" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmax" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmax" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmin" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmin" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.ctid" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.x" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.y" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="regtest_schema.regtest_func_2(integer)" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_view_t:s0 tclass=db_view name="regtest_schema.regtest_view_2" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_seq_t:s0 tclass=db_sequence name="regtest_schema.regtest_table_3_y_seq" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_3" +LOG: SELinux: allowed { drop } 
scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.tableoid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmax" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmax" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.cmin" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.xmin" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.ctid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.x" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_3.y" DROP DATABASE regtest_sepgsql_test_database; -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_db_t:s0 tclass=db_database name="regtest_sepgsql_test_database" DROP USER regtest_sepgsql_test_user; DROP SCHEMA IF EXISTS regtest_schema CASCADE; -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { search } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=system_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="public" NOTICE: drop cascades to 2 other objects DETAIL: drop cascades to table regtest_table_2 drop cascades to type regtest_comptype -LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_2" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.tableoid" -LOG: 
SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmax" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmax" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmin" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmin" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.ctid" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.a" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.b" -LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:unconfined_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { remove_name } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="regtest_schema.regtest_table_2" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.tableoid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmax" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmax" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.cmin" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.xmin" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.ctid" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="regtest_schema.regtest_table_2.a" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column 
name="regtest_schema.regtest_table_2.b" +LOG: SELinux: allowed { drop } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 tcontext=unconfined_u:object_r:sepgsql_schema_t:s0 tclass=db_schema name="regtest_schema" diff --git a/contrib/sepgsql/expected/dml.out b/contrib/sepgsql/expected/dml.out index 3b90f8934714b..8716ac735d5a2 100644 --- a/contrib/sepgsql/expected/dml.out +++ b/contrib/sepgsql/expected/dml.out @@ -192,9 +192,9 @@ LINE 1: SELECT * FROM my_schema_2.ts2; -- Clean up -- SELECT sepgsql_getcon(); -- confirm client privilege - sepgsql_getcon ------------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c255 + sepgsql_getcon +--------------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 (1 row) DROP TABLE IF EXISTS t1 CASCADE; diff --git a/contrib/sepgsql/expected/label.out b/contrib/sepgsql/expected/label.out index 9d1f90437a222..fad1954b41336 100644 --- a/contrib/sepgsql/expected/label.out +++ b/contrib/sepgsql/expected/label.out @@ -175,138 +175,138 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_re -- -- validation of transaction aware dynamic-transition SELECT sepgsql_getcon(); -- confirm client privilege - sepgsql_getcon --------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c25 + sepgsql_getcon +----------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c25 (1 row) -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c15'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c15'); sepgsql_setcon ---------------- t (1 row) SELECT sepgsql_getcon(); - sepgsql_getcon --------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c15 + sepgsql_getcon +----------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c15 (1 row) SELECT sepgsql_setcon(NULL); -- failed to reset ERROR: SELinux: security policy violation SELECT sepgsql_getcon(); - sepgsql_getcon --------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c15 + sepgsql_getcon +----------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c15 (1 row) BEGIN; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c12'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c12'); sepgsql_setcon ---------------- t (1 row) SELECT sepgsql_getcon(); - sepgsql_getcon --------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c12 + sepgsql_getcon +----------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c12 (1 row) SAVEPOINT svpt_1; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c9'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c9'); sepgsql_setcon ---------------- t (1 row) SELECT sepgsql_getcon(); - sepgsql_getcon -------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c9 + sepgsql_getcon +---------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c9 (1 row) SAVEPOINT svpt_2; 
-SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c6'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c6'); sepgsql_setcon ---------------- t (1 row) SELECT sepgsql_getcon(); - sepgsql_getcon -------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c6 + sepgsql_getcon +---------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c6 (1 row) SAVEPOINT svpt_3; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c3'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c3'); sepgsql_setcon ---------------- t (1 row) SELECT sepgsql_getcon(); - sepgsql_getcon -------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c3 + sepgsql_getcon +---------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c3 (1 row) ROLLBACK TO SAVEPOINT svpt_2; SELECT sepgsql_getcon(); -- should be 's0:c0.c9' - sepgsql_getcon -------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c9 + sepgsql_getcon +---------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c9 (1 row) ROLLBACK TO SAVEPOINT svpt_1; SELECT sepgsql_getcon(); -- should be 's0:c0.c12' - sepgsql_getcon --------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c12 + sepgsql_getcon +----------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c12 (1 row) ABORT; SELECT sepgsql_getcon(); -- should be 's0:c0.c15' - sepgsql_getcon --------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c15 + sepgsql_getcon +----------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c15 (1 row) BEGIN; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c8'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c8'); sepgsql_setcon ---------------- t (1 row) SELECT sepgsql_getcon(); - sepgsql_getcon -------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c8 + sepgsql_getcon +---------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c8 (1 row) SAVEPOINT svpt_1; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c4'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c4'); sepgsql_setcon ---------------- t (1 row) SELECT sepgsql_getcon(); - sepgsql_getcon -------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c4 + sepgsql_getcon +---------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c4 (1 row) ROLLBACK TO SAVEPOINT svpt_1; SELECT sepgsql_getcon(); -- should be 's0:c0.c8' - sepgsql_getcon -------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c8 + sepgsql_getcon +---------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c8 (1 row) -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c6'); +SELECT 
sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c6'); sepgsql_setcon ---------------- t @@ -314,9 +314,9 @@ SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c6'); COMMIT; SELECT sepgsql_getcon(); -- should be 's0:c0.c6' - sepgsql_getcon -------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0:c0.c6 + sepgsql_getcon +---------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c6 (1 row) -- sepgsql_regtest_user_t is not available dynamic-transition, @@ -493,9 +493,9 @@ SELECT sepgsql_getcon(); -- Clean up -- SELECT sepgsql_getcon(); -- confirm client privilege - sepgsql_getcon ------------------------------------------------------- - unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c255 + sepgsql_getcon +--------------------------------------------------------------------- + unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 (1 row) DROP TABLE IF EXISTS t1 CASCADE; diff --git a/contrib/sepgsql/expected/misc.out b/contrib/sepgsql/expected/misc.out index 5904840163359..1ce47c48b01fb 100644 --- a/contrib/sepgsql/expected/misc.out +++ b/contrib/sepgsql/expected/misc.out @@ -12,11 +12,11 @@ SET sepgsql.debug_audit = on; SET client_min_messages = log; -- regular function and operators SELECT * FROM t1 WHERE x > 50 AND y like '%64%'; -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4gt(integer,integer)" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" x | y -----+---------------------------------- 77 | 
28dd2c7955ce926456240b2ff0100bde @@ -29,13 +29,13 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined -- aggregate function SELECT MIN(x), AVG(x) FROM t1; -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.avg(integer)" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4_avg_accum(bigint[],integer)" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int8_avg(bigint[])" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.min(integer)" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.int4smaller(integer,integer)" min | avg -----+--------------------- 1 | 50.5000000000000000 @@ -43,11 +43,11 @@ LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined -- window function SELECT row_number() OVER (order by x), * FROM t1 WHERE y like '%86%'; -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1" -LOG: SELinux: allowed { 
select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x" -LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()" -LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c1023 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_table name="public.t1" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column x" +LOG: SELinux: allowed { select } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=unconfined_u:object_r:sepgsql_table_t:s0 tclass=db_column name="table t1 column y" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.row_number()" +LOG: SELinux: allowed { execute } scontext=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 tcontext=system_u:object_r:sepgsql_proc_exec_t:s0 tclass=db_procedure name="pg_catalog.textlike(pg_catalog.text,pg_catalog.text)" row_number | x | y ------------+----+---------------------------------- 1 | 2 | c81e728d9d4c2f636f067f89cc14862c diff --git a/contrib/sepgsql/launcher b/contrib/sepgsql/launcher index 62a6c2737d204..2a377e10dc077 100755 --- a/contrib/sepgsql/launcher +++ b/contrib/sepgsql/launcher @@ -21,7 +21,7 @@ fi # Read SQL from stdin # TEMP=`mktemp` -CONTEXT="" +CONTEXT="unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255" while IFS='\\n' read LINE do diff --git a/contrib/sepgsql/sepgsql-regtest.te b/contrib/sepgsql/sepgsql-regtest.te index 8727523ca5554..e5d65243e6be3 100644 --- a/contrib/sepgsql/sepgsql-regtest.te +++ b/contrib/sepgsql/sepgsql-regtest.te @@ -1,4 +1,4 @@ -policy_module(sepgsql-regtest, 1.07) +policy_module(sepgsql-regtest, 1.08) gen_require(` all_userspace_class_perms @@ -23,6 +23,35 @@ postgresql_procedure_object(sepgsql_nosuch_trusted_proc_exec_t) type sepgsql_regtest_invisible_schema_t; postgresql_schema_object(sepgsql_regtest_invisible_schema_t); +# +# Test domains for self defined unconfined / superuser +# +role sepgsql_regtest_superuser_r; +userdom_base_user_template(sepgsql_regtest_superuser) +userdom_manage_home_role(sepgsql_regtest_superuser_r, sepgsql_regtest_superuser_t) +userdom_exec_user_home_content_files(sepgsql_regtest_superuser_t) +userdom_write_user_tmp_sockets(sepgsql_regtest_superuser_t) +optional_policy(` + postgresql_stream_connect(sepgsql_regtest_superuser_t) + postgresql_unconfined(sepgsql_regtest_superuser_t) +') +optional_policy(` + unconfined_stream_connect(sepgsql_regtest_superuser_t) + unconfined_rw_pipes(sepgsql_regtest_superuser_t) +') +optional_policy(` + gen_require(` + attribute sepgsql_client_type; + ') + allow 
sepgsql_regtest_superuser_t self : process { setcurrent }; + allow sepgsql_regtest_superuser_t { self sepgsql_client_type } : process { dyntransition }; +') + +# Type transition rules +allow sepgsql_regtest_user_t sepgsql_regtest_dba_t : process { transition }; +type_transition sepgsql_regtest_user_t sepgsql_regtest_trusted_proc_exec_t:process sepgsql_regtest_dba_t; +type_transition sepgsql_regtest_user_t sepgsql_nosuch_trusted_proc_exec_t:process sepgsql_regtest_nosuch_t; + # # Test domains for database administrators # @@ -156,10 +185,12 @@ optional_policy(` tunable_policy(`sepgsql_regression_test_mode',` allow unconfined_t self : process { setcurrent dyntransition }; allow unconfined_t sepgsql_regtest_dba_t : process { transition dyntransition }; + allow unconfined_t sepgsql_regtest_superuser_t : process { transition dyntransition }; allow unconfined_t sepgsql_regtest_user_t : process { transition dyntransition }; allow unconfined_t sepgsql_regtest_pool_t : process { transition dyntransition }; ') role unconfined_r types sepgsql_regtest_dba_t; + role unconfined_r types sepgsql_regtest_superuser_t; role unconfined_r types sepgsql_regtest_user_t; role unconfined_r types sepgsql_regtest_nosuch_t; role unconfined_r types sepgsql_trusted_proc_t; @@ -169,6 +200,32 @@ optional_policy(` role unconfined_r types sepgsql_regtest_var_t; ') +# +# Rules to make the MCS policy work in the regression test +# +# NOTE: MCS (multi-category security) policy used to be enabled by default +# in the previous SELinux policy, to allow DAC-style access control. +# Its definition was later changed so that only a limited number of +# applications, mainly for container features, are restricted by the MCS +# policy. The rules below enable the MCS policy for the regression test +# domains as well, even when the base security policy does not apply it. +# If the base policy is old and MCS is enabled by default, the rules below do nothing. 
+# +optional_policy(` + gen_require(` + type sepgsql_trusted_proc_t; + ') + mcs_constrained(sepgsql_regtest_dba_t) + mcs_constrained(sepgsql_regtest_superuser_t) + mcs_constrained(sepgsql_regtest_user_t) + mcs_constrained(sepgsql_regtest_nosuch_t) + mcs_constrained(sepgsql_trusted_proc_t) + + mcs_constrained(sepgsql_regtest_pool_t) + mcs_constrained(sepgsql_regtest_foo_t) + mcs_constrained(sepgsql_regtest_var_t) +') + # # Rule to execute original trusted procedures # diff --git a/contrib/sepgsql/sql/alter.sql b/contrib/sepgsql/sql/alter.sql index 4bded7ead5c65..3682b3e92ad6a 100644 --- a/contrib/sepgsql/sql/alter.sql +++ b/contrib/sepgsql/sql/alter.sql @@ -9,7 +9,7 @@ DROP DATABASE IF EXISTS regtest_sepgsql_test_database; DROP USER IF EXISTS regtest_sepgsql_test_user; RESET client_min_messages; --- @SECURITY-CONTEXT=unconfined_u:unconfined_r:unconfined_t:s0 +-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 -- -- CREATE Objects to be altered (with debug_audit being silent) diff --git a/contrib/sepgsql/sql/ddl.sql b/contrib/sepgsql/sql/ddl.sql index c91c4cf572f15..c0de3f6b8c2c3 100644 --- a/contrib/sepgsql/sql/ddl.sql +++ b/contrib/sepgsql/sql/ddl.sql @@ -9,7 +9,7 @@ DROP USER IF EXISTS regtest_sepgsql_test_user; RESET client_min_messages; -- confirm required permissions using audit messages --- @SECURITY-CONTEXT=unconfined_u:unconfined_r:unconfined_t:s0 +-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0 SET sepgsql.debug_audit = true; SET client_min_messages = LOG; diff --git a/contrib/sepgsql/sql/dml.sql b/contrib/sepgsql/sql/dml.sql index 97e01c3e3c410..7a64b9e21327c 100644 --- a/contrib/sepgsql/sql/dml.sql +++ b/contrib/sepgsql/sql/dml.sql @@ -126,7 +126,7 @@ SELECT * FROM my_schema_2.ts2; -- failed (policy violation) -- -- Clean up -- --- @SECURITY-CONTEXT=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c255 +-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 DROP TABLE IF EXISTS t1 CASCADE; DROP TABLE IF EXISTS t2 CASCADE; DROP TABLE IF EXISTS t3 CASCADE; diff --git a/contrib/sepgsql/sql/label.sql b/contrib/sepgsql/sql/label.sql index 7a05c248ebb65..04085e57a4dcf 100644 --- a/contrib/sepgsql/sql/label.sql +++ b/contrib/sepgsql/sql/label.sql @@ -110,27 +110,27 @@ SELECT sepgsql_getcon(); -- client's label must be restored -- -- validation of transaction aware dynamic-transition --- @SECURITY-CONTEXT=unconfined_u:unconfined_r:unconfined_t:s0:c0.c25 -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c15'); +-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c25 +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c15'); SELECT sepgsql_getcon(); SELECT sepgsql_setcon(NULL); -- failed to reset SELECT sepgsql_getcon(); BEGIN; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c12'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c12'); SELECT sepgsql_getcon(); SAVEPOINT svpt_1; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c9'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c9'); SELECT sepgsql_getcon(); SAVEPOINT svpt_2; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c6'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c6'); SELECT sepgsql_getcon(); SAVEPOINT svpt_3; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c3'); +SELECT 
sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c3'); SELECT sepgsql_getcon(); ROLLBACK TO SAVEPOINT svpt_2; @@ -143,16 +143,16 @@ ABORT; SELECT sepgsql_getcon(); -- should be 's0:c0.c15' BEGIN; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c8'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c8'); SELECT sepgsql_getcon(); SAVEPOINT svpt_1; -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c4'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c4'); SELECT sepgsql_getcon(); ROLLBACK TO SAVEPOINT svpt_1; SELECT sepgsql_getcon(); -- should be 's0:c0.c8' -SELECT sepgsql_setcon('unconfined_u:unconfined_r:unconfined_t:s0:c0.c6'); +SELECT sepgsql_setcon('unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0:c0.c6'); COMMIT; SELECT sepgsql_getcon(); -- should be 's0:c0.c6' @@ -231,7 +231,7 @@ SELECT sepgsql_getcon(); -- -- Clean up -- --- @SECURITY-CONTEXT=unconfined_u:unconfined_r:unconfined_t:s0-s0:c0.c255 +-- @SECURITY-CONTEXT=unconfined_u:unconfined_r:sepgsql_regtest_superuser_t:s0-s0:c0.c255 DROP TABLE IF EXISTS t1 CASCADE; DROP TABLE IF EXISTS t2 CASCADE; DROP TABLE IF EXISTS t3 CASCADE; diff --git a/contrib/spi/autoinc--unpackaged--1.0.sql b/contrib/spi/autoinc--unpackaged--1.0.sql index cfe2065d3d860..e5289e834fcc7 100644 --- a/contrib/spi/autoinc--unpackaged--1.0.sql +++ b/contrib/spi/autoinc--unpackaged--1.0.sql @@ -1,6 +1,6 @@ /* contrib/spi/autoinc--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION autoinc" to load this file. \quit +\echo Use "CREATE EXTENSION autoinc FROM unpackaged" to load this file. \quit ALTER EXTENSION autoinc ADD function autoinc(); diff --git a/contrib/spi/insert_username--unpackaged--1.0.sql b/contrib/spi/insert_username--unpackaged--1.0.sql index 91a5d1f30958c..eb26ba0bd1656 100644 --- a/contrib/spi/insert_username--unpackaged--1.0.sql +++ b/contrib/spi/insert_username--unpackaged--1.0.sql @@ -1,6 +1,6 @@ /* contrib/spi/insert_username--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION insert_username" to load this file. \quit +\echo Use "CREATE EXTENSION insert_username FROM unpackaged" to load this file. \quit ALTER EXTENSION insert_username ADD function insert_username(); diff --git a/contrib/spi/moddatetime--unpackaged--1.0.sql b/contrib/spi/moddatetime--unpackaged--1.0.sql index caa49ce0dc90a..c681fa7ed9508 100644 --- a/contrib/spi/moddatetime--unpackaged--1.0.sql +++ b/contrib/spi/moddatetime--unpackaged--1.0.sql @@ -1,6 +1,6 @@ /* contrib/spi/moddatetime--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION moddatetime" to load this file. \quit +\echo Use "CREATE EXTENSION moddatetime FROM unpackaged" to load this file. \quit ALTER EXTENSION moddatetime ADD function moddatetime(); diff --git a/contrib/spi/refint--unpackaged--1.0.sql b/contrib/spi/refint--unpackaged--1.0.sql index cd9c9b0c3656e..461ed157c30f7 100644 --- a/contrib/spi/refint--unpackaged--1.0.sql +++ b/contrib/spi/refint--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/spi/refint--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION refint" to load this file. \quit +\echo Use "CREATE EXTENSION refint FROM unpackaged" to load this file. 
\quit ALTER EXTENSION refint ADD function check_primary_key(); ALTER EXTENSION refint ADD function check_foreign_key(); diff --git a/contrib/spi/timetravel--unpackaged--1.0.sql b/contrib/spi/timetravel--unpackaged--1.0.sql index dd07a133a5004..121bceba9b2ba 100644 --- a/contrib/spi/timetravel--unpackaged--1.0.sql +++ b/contrib/spi/timetravel--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/spi/timetravel--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION timetravel" to load this file. \quit +\echo Use "CREATE EXTENSION timetravel FROM unpackaged" to load this file. \quit ALTER EXTENSION timetravel ADD function timetravel(); ALTER EXTENSION timetravel ADD function set_timetravel(name,integer); diff --git a/contrib/sslinfo/sslinfo--unpackaged--1.0.sql b/contrib/sslinfo/sslinfo--unpackaged--1.0.sql index e4b868423b305..07407acb54306 100644 --- a/contrib/sslinfo/sslinfo--unpackaged--1.0.sql +++ b/contrib/sslinfo/sslinfo--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/sslinfo/sslinfo--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION sslinfo" to load this file. \quit +\echo Use "CREATE EXTENSION sslinfo FROM unpackaged" to load this file. \quit ALTER EXTENSION sslinfo ADD function ssl_client_serial(); ALTER EXTENSION sslinfo ADD function ssl_is_used(); diff --git a/contrib/sslinfo/sslinfo.c b/contrib/sslinfo/sslinfo.c index db491a4bc806b..0cf7baefd60df 100644 --- a/contrib/sslinfo/sslinfo.c +++ b/contrib/sslinfo/sslinfo.c @@ -140,6 +140,10 @@ ASN1_STRING_to_text(ASN1_STRING *str) text *result; membuf = BIO_new(BIO_s_mem()); + if (membuf == NULL) + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("could not create OpenSSL BIO structure"))); (void) BIO_set_close(membuf, BIO_CLOSE); ASN1_STRING_print_ex(membuf, str, ((ASN1_STRFLGS_RFC2253 & ~ASN1_STRFLGS_ESC_MSB) @@ -152,7 +156,8 @@ ASN1_STRING_to_text(ASN1_STRING *str) result = cstring_to_text(dp); if (dp != sp) pfree(dp); - BIO_free(membuf); + if (BIO_free(membuf) != 1) + elog(ERROR, "could not free OpenSSL BIO structure"); PG_RETURN_TEXT_P(result); } @@ -291,15 +296,28 @@ X509_NAME_to_text(X509_NAME *name) char *dp; text *result; + if (membuf == NULL) + ereport(ERROR, + (errcode(ERRCODE_OUT_OF_MEMORY), + errmsg("could not create OpenSSL BIO structure"))); + (void) BIO_set_close(membuf, BIO_CLOSE); for (i = 0; i < count; i++) { e = X509_NAME_get_entry(name, i); nid = OBJ_obj2nid(X509_NAME_ENTRY_get_object(e)); + if (nid == NID_undef) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not get NID for ASN1_OBJECT object"))); v = X509_NAME_ENTRY_get_data(e); field_name = OBJ_nid2sn(nid); - if (!field_name) + if (field_name == NULL) field_name = OBJ_nid2ln(nid); + if (field_name == NULL) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not convert NID %d to an ASN1_OBJECT structure", nid))); BIO_printf(membuf, "/%s=", field_name); ASN1_STRING_print_ex(membuf, v, ((ASN1_STRFLGS_RFC2253 & ~ASN1_STRFLGS_ESC_MSB) @@ -314,7 +332,8 @@ X509_NAME_to_text(X509_NAME *name) result = cstring_to_text(dp); if (dp != sp) pfree(dp); - BIO_free(membuf); + if (BIO_free(membuf) != 1) + elog(ERROR, "could not free OpenSSL BIO structure"); PG_RETURN_TEXT_P(result); } diff --git a/contrib/start-scripts/osx/PostgreSQL b/contrib/start-scripts/osx/PostgreSQL index 22ed9ff45e84c..24872b0944d99 100755 --- a/contrib/start-scripts/osx/PostgreSQL +++ 
b/contrib/start-scripts/osx/PostgreSQL @@ -4,7 +4,7 @@ # PostgreSQL RDBMS Server ## -# PostgreSQL boot time startup script for Darwin/Mac OS X. To install, change +# PostgreSQL boot time startup script for OS X. To install, change # the "prefix", "PGDATA", "PGUSER", and "PGLOG" variables below as # necessary. Next, create a new directory, "/Library/StartupItems/PostgreSQL". # Then copy this script and the accompanying "StartupParameters.plist" file diff --git a/contrib/tablefunc/expected/tablefunc.out b/contrib/tablefunc/expected/tablefunc.out index 0437ecf90a977..a979e17c0367c 100644 --- a/contrib/tablefunc/expected/tablefunc.out +++ b/contrib/tablefunc/expected/tablefunc.out @@ -376,6 +376,37 @@ SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 4, '~') A 11 | 10 | 4 | 2~5~9~10~11 (8 rows) +-- should fail as first two columns must have the same type +SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') AS t(keyid text, parent_keyid int, level int, branch text); +ERROR: invalid return type +DETAIL: First two columns must be the same type. +-- should fail as key field datatype should match return datatype +SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') AS t(keyid float8, parent_keyid float8, level int, branch text); +ERROR: infinite recursion detected +-- tests for values using custom queries +-- query with one column - failed +SELECT * FROM connectby('connectby_int', '1; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +ERROR: invalid return type +DETAIL: Query must return at least two columns. +-- query with two columns first value as NULL +SELECT * FROM connectby('connectby_int', 'NULL::int, 1::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); + keyid | parent_keyid | level +-------+--------------+------- + 2 | | 0 + | 1 | 1 +(2 rows) + +-- query with two columns second value as NULL +SELECT * FROM connectby('connectby_int', '1::int, NULL::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +ERROR: infinite recursion detected +-- query with two columns, both values as NULL +SELECT * FROM connectby('connectby_int', 'NULL::int, NULL::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); + keyid | parent_keyid | level +-------+--------------+------- + 2 | | 0 + | | 1 +(2 rows) + -- test for falsely detected recursion DROP TABLE connectby_int; CREATE TABLE connectby_int(keyid int, parent_keyid int); diff --git a/contrib/tablefunc/sql/tablefunc.sql b/contrib/tablefunc/sql/tablefunc.sql index bf874f26ad88d..ec375b05c63c9 100644 --- a/contrib/tablefunc/sql/tablefunc.sql +++ b/contrib/tablefunc/sql/tablefunc.sql @@ -179,6 +179,22 @@ SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') A -- infinite recursion failure avoided by depth limit SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 4, '~') AS t(keyid int, parent_keyid int, level int, branch text); +-- should fail as first two columns must have the same type +SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') AS t(keyid text, parent_keyid int, level int, branch text); + +-- should fail as key field datatype should match return datatype +SELECT * FROM connectby('connectby_int', 'keyid', 'parent_keyid', '2', 0, '~') AS t(keyid float8, parent_keyid float8, level int, branch text); + +-- tests for values using custom queries +-- query with one column - failed +SELECT * FROM 
connectby('connectby_int', '1; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +-- query with two columns first value as NULL +SELECT * FROM connectby('connectby_int', 'NULL::int, 1::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +-- query with two columns second value as NULL +SELECT * FROM connectby('connectby_int', '1::int, NULL::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); +-- query with two columns, both values as NULL +SELECT * FROM connectby('connectby_int', 'NULL::int, NULL::int; --', 'parent_keyid', '2', 0) AS t(keyid int, parent_keyid int, level int); + -- test for falsely detected recursion DROP TABLE connectby_int; CREATE TABLE connectby_int(keyid int, parent_keyid int); diff --git a/contrib/tablefunc/tablefunc--unpackaged--1.0.sql b/contrib/tablefunc/tablefunc--unpackaged--1.0.sql index e5e9619c52f93..f0a276a9c4bfb 100644 --- a/contrib/tablefunc/tablefunc--unpackaged--1.0.sql +++ b/contrib/tablefunc/tablefunc--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/tablefunc/tablefunc--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION tablefunc" to load this file. \quit +\echo Use "CREATE EXTENSION tablefunc FROM unpackaged" to load this file. \quit ALTER EXTENSION tablefunc ADD function normal_rand(integer,double precision,double precision); ALTER EXTENSION tablefunc ADD function crosstab(text); diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c index 10ee8c76dbe4c..c312fca83d39e 100644 --- a/contrib/tablefunc/tablefunc.c +++ b/contrib/tablefunc/tablefunc.c @@ -54,7 +54,7 @@ static Tuplestorestate *get_crosstab_tuplestore(char *sql, bool randomAccess); static void validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial); static bool compatCrosstabTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2); -static bool compatConnectbyTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2); +static void compatConnectbyTupleDescs(TupleDesc tupdesc1, TupleDesc tupdesc2); static void get_normal_pair(float8 *x1, float8 *x2); static Tuplestorestate *connectby(char *relname, char *key_fld, @@ -68,7 +68,7 @@ static Tuplestorestate *connectby(char *relname, MemoryContext per_query_ctx, bool randomAccess, AttInMetadata *attinmeta); -static Tuplestorestate *build_tuplestore_recursively(char *key_fld, +static void build_tuplestore_recursively(char *key_fld, char *parent_key_fld, char *relname, char *orderby_fld, @@ -1178,28 +1178,28 @@ connectby(char *relname, MemoryContextSwitchTo(oldcontext); /* now go get the whole tree */ - tupstore = build_tuplestore_recursively(key_fld, - parent_key_fld, - relname, - orderby_fld, - branch_delim, - start_with, - start_with, /* current_branch */ - 0, /* initial level is 0 */ - &serial, /* initial serial is 1 */ - max_depth, - show_branch, - show_serial, - per_query_ctx, - attinmeta, - tupstore); + build_tuplestore_recursively(key_fld, + parent_key_fld, + relname, + orderby_fld, + branch_delim, + start_with, + start_with, /* current_branch */ + 0, /* initial level is 0 */ + &serial, /* initial serial is 1 */ + max_depth, + show_branch, + show_serial, + per_query_ctx, + attinmeta, + tupstore); SPI_finish(); return tupstore; } -static Tuplestorestate * +static void build_tuplestore_recursively(char *key_fld, char *parent_key_fld, char *relname, @@ -1230,7 +1230,7 @@ build_tuplestore_recursively(char *key_fld, HeapTuple tuple; if (max_depth > 0 && level > 
max_depth) - return tupstore; + return; initStringInfo(&sql); @@ -1316,22 +1316,11 @@ build_tuplestore_recursively(char *key_fld, StringInfoData chk_branchstr; StringInfoData chk_current_key; - /* First time through, do a little more setup */ - if (level == 0) - { - /* - * Check that return tupdesc is compatible with the one we got - * from the query, but only at level 0 -- no need to check more - * than once - */ - - if (!compatConnectbyTupleDescs(tupdesc, spi_tupdesc)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("invalid return type"), - errdetail("Return and SQL tuple descriptions are " \ - "incompatible."))); - } + /* + * Check that return tupdesc is compatible with the one we got from + * the query. + */ + compatConnectbyTupleDescs(tupdesc, spi_tupdesc); initStringInfo(&branchstr); initStringInfo(&chk_branchstr); @@ -1346,24 +1335,31 @@ build_tuplestore_recursively(char *key_fld, /* get the next sql result tuple */ spi_tuple = tuptable->vals[i]; - /* get the current key and parent */ + /* get the current key (might be NULL) */ current_key = SPI_getvalue(spi_tuple, spi_tupdesc, 1); - appendStringInfo(&chk_current_key, "%s%s%s", branch_delim, current_key, branch_delim); - current_key_parent = pstrdup(SPI_getvalue(spi_tuple, spi_tupdesc, 2)); + + /* get the parent key (might be NULL) */ + current_key_parent = SPI_getvalue(spi_tuple, spi_tupdesc, 2); /* get the current level */ sprintf(current_level, "%d", level); /* check to see if this key is also an ancestor */ - if (strstr(chk_branchstr.data, chk_current_key.data)) - elog(ERROR, "infinite recursion detected"); + if (current_key) + { + appendStringInfo(&chk_current_key, "%s%s%s", + branch_delim, current_key, branch_delim); + if (strstr(chk_branchstr.data, chk_current_key.data)) + elog(ERROR, "infinite recursion detected"); + } /* OK, extend the branch */ - appendStringInfo(&branchstr, "%s%s", branch_delim, current_key); + if (current_key) + appendStringInfo(&branchstr, "%s%s", branch_delim, current_key); current_branch = branchstr.data; /* build a tuple */ - values[0] = pstrdup(current_key); + values[0] = current_key; values[1] = current_key_parent; values[2] = current_level; if (show_branch) @@ -1379,30 +1375,31 @@ build_tuplestore_recursively(char *key_fld, tuple = BuildTupleFromCStrings(attinmeta, values); - xpfree(current_key); - xpfree(current_key_parent); - /* store the tuple for later use */ tuplestore_puttuple(tupstore, tuple); heap_freetuple(tuple); - /* recurse using current_key_parent as the new start_with */ - tupstore = build_tuplestore_recursively(key_fld, - parent_key_fld, - relname, - orderby_fld, - branch_delim, - values[0], - current_branch, - level + 1, - serial, - max_depth, - show_branch, - show_serial, - per_query_ctx, - attinmeta, - tupstore); + /* recurse using current_key as the new start_with */ + if (current_key) + build_tuplestore_recursively(key_fld, + parent_key_fld, + relname, + orderby_fld, + branch_delim, + current_key, + current_branch, + level + 1, + serial, + max_depth, + show_branch, + show_serial, + per_query_ctx, + attinmeta, + tupstore); + + xpfree(current_key); + xpfree(current_key_parent); /* reset branch for next pass */ resetStringInfo(&branchstr); @@ -1414,8 +1411,6 @@ build_tuplestore_recursively(char *key_fld, xpfree(chk_branchstr.data); xpfree(chk_current_key.data); } - - return tupstore; } /* @@ -1488,34 +1483,25 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial /* * Check if spi sql tupdesc and return tupdesc are compatible */ -static 
bool +static void compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) { - Oid ret_atttypid; - Oid sql_atttypid; - - /* check the key_fld types match */ - ret_atttypid = ret_tupdesc->attrs[0]->atttypid; - sql_atttypid = sql_tupdesc->attrs[0]->atttypid; - if (ret_atttypid != sql_atttypid) + /* + * Result must have at least 2 columns. + */ + if (sql_tupdesc->natts < 2) ereport(ERROR, (errcode(ERRCODE_SYNTAX_ERROR), errmsg("invalid return type"), - errdetail("SQL key field datatype does " \ - "not match return key field datatype."))); + errdetail("Query must return at least two columns."))); - /* check the parent_key_fld types match */ - ret_atttypid = ret_tupdesc->attrs[1]->atttypid; - sql_atttypid = sql_tupdesc->attrs[1]->atttypid; - if (ret_atttypid != sql_atttypid) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("invalid return type"), - errdetail("SQL parent key field datatype does " \ - "not match return parent key field datatype."))); + /* + * We have failed to check datatype match since 2003, so we don't do that + * here. The call will work as long as the datatypes are I/O + * representation compatible. + */ /* OK, the two tupdescs are compatible for our purposes */ - return true; } /* diff --git a/contrib/test_decoding/Makefile b/contrib/test_decoding/Makefile index 58e0f384cbb22..4f022b5a355ea 100644 --- a/contrib/test_decoding/Makefile +++ b/contrib/test_decoding/Makefile @@ -37,7 +37,8 @@ submake-isolation: submake-test_decoding: $(MAKE) -C $(top_builddir)/contrib/test_decoding -REGRESSCHECKS=ddl rewrite toast permissions decoding_in_xact binary prepared +REGRESSCHECKS=ddl xact rewrite toast permissions decoding_in_xact \ + decoding_into_rel binary prepared regresscheck: all | submake-regress submake-test_decoding $(MKDIR_P) regression_output @@ -53,7 +54,7 @@ regresscheck-install-force: | submake-regress submake-test_decoding --extra-install=contrib/test_decoding \ $(REGRESSCHECKS) -ISOLATIONCHECKS=mxact delayed_startup concurrent_ddl_dml +ISOLATIONCHECKS=mxact delayed_startup ondisk_startup concurrent_ddl_dml isolationcheck: all | submake-isolation submake-test_decoding $(MKDIR_P) isolation_output diff --git a/contrib/test_decoding/expected/binary.out b/contrib/test_decoding/expected/binary.out index 4164784ab34e6..d0949201003d6 100644 --- a/contrib/test_decoding/expected/binary.out +++ b/contrib/test_decoding/expected/binary.out @@ -7,22 +7,22 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d (1 row) -- succeeds, textual plugin, textual consumer -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '0', 'skip-empty-xacts', '1'); data ------ (0 rows) -- fails, binary plugin, textual consumer -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '1'); -ERROR: output plugin cannot produce binary output +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '1', 'skip-empty-xacts', '1'); +ERROR: logical decoding output plugin "test_decoding" produces binary output, but "pg_logical_slot_get_changes(name,pg_lsn,integer,text[])" expects textual data -- succeeds, textual plugin, binary consumer -SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '0'); +SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '0', 
'skip-empty-xacts', '1'); data ------ (0 rows) -- succeeds, binary plugin, binary consumer -SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '1'); +SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '1', 'skip-empty-xacts', '1'); data ------ (0 rows) diff --git a/contrib/test_decoding/expected/concurrent_ddl_dml.out b/contrib/test_decoding/expected/concurrent_ddl_dml.out index cc9165655fb8e..a15bfa292ef76 100644 --- a/contrib/test_decoding/expected/concurrent_ddl_dml.out +++ b/contrib/test_decoding/expected/concurrent_ddl_dml.out @@ -10,11 +10,9 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_float: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE float; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[double precision]:1 @@ -34,7 +32,7 @@ step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -58,11 +56,9 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_char: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE character varying; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[character varying]:'1' @@ -82,7 +78,7 @@ step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varyi step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -107,7 +103,7 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; step s1_commit: COMMIT; step s2_alter_tbl1_float: <... 
completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -132,7 +128,7 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -158,11 +154,9 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_float: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; step s1_commit: COMMIT; step s2_alter_tbl1_float: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[double precision]:1 @@ -186,11 +180,9 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; step s1_commit: COMMIT; step s2_alter_tbl1_char: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[character varying]:'1' @@ -213,13 +205,9 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_text: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE text; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[text]:'1' @@ -241,13 +229,9 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s2_alter_tbl1_char: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; step s1_commit: COMMIT; step s2_alter_tbl1_char: <... 
completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[text]:'1' @@ -270,7 +254,7 @@ step s2_alter_tbl2_boolean: ALTER TABLE tbl2 ALTER COLUMN val2 TYPE boolean; ERROR: column "val2" cannot be cast automatically to type boolean step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -295,7 +279,7 @@ step s2_alter_tbl1_boolean: ALTER TABLE tbl1 ALTER COLUMN val2 TYPE boolean; error in steps s1_commit s2_alter_tbl1_boolean: ERROR: column "val2" cannot be cast automatically to type boolean -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -316,11 +300,9 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 @@ -342,7 +324,7 @@ step s1_begin: BEGIN; step s2_alter_tbl2_add_int: ALTER TABLE tbl2 ADD COLUMN val3 INTEGER; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -350,8 +332,6 @@ table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 COMMIT ?column? 
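The recurring change in these expected outputs is that the s2_get_changes step now passes the test_decoding option 'skip-empty-xacts', so the concurrent ALTER TABLE transactions, which produce no decodable rows, no longer show up as bare BEGIN/COMMIT pairs. A minimal sketch of the option's effect follows; the slot and table names (sketch_slot, sketch_tbl) are illustrative and not taken from this patch.
-- assumes a server with contrib/test_decoding installed; names are illustrative
SELECT 'init' FROM pg_create_logical_replication_slot('sketch_slot', 'test_decoding');
CREATE TABLE sketch_tbl(id serial PRIMARY KEY, val integer);   -- catalog-only transaction
INSERT INTO sketch_tbl(val) VALUES (1);                        -- transaction with decodable changes
-- Without 'skip-empty-xacts' the CREATE TABLE would appear as an empty BEGIN/COMMIT pair;
-- with it set to '1', only the INSERT's transaction is returned.
SELECT data FROM pg_logical_slot_get_changes('sketch_slot', NULL, NULL,
              'include-xids', '0', 'skip-empty-xacts', '1');
SELECT pg_drop_replication_slot('sketch_slot');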
@@ -368,11 +348,9 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_add_float: ALTER TABLE tbl2 ADD COLUMN val3 FLOAT; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[double precision]:1 @@ -394,7 +372,7 @@ step s1_begin: BEGIN; step s2_alter_tbl2_add_float: ALTER TABLE tbl2 ADD COLUMN val3 FLOAT; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -402,8 +380,6 @@ table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[double precision]:1 COMMIT ?column? @@ -420,11 +396,9 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1' @@ -446,7 +420,7 @@ step s1_begin: BEGIN; step s2_alter_tbl2_add_char: ALTER TABLE tbl2 ADD COLUMN val3 character varying; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -454,8 +428,6 @@ table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1' COMMIT ?column? @@ -473,16 +445,12 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: <... 
completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 COMMIT -BEGIN -COMMIT ?column? stop @@ -500,18 +468,14 @@ step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: <... completed> step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:null COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT ?column? @@ -529,16 +493,12 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: <... completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[integer]:1 COMMIT -BEGIN -COMMIT step s2_alter_tbl2_add_text: ALTER TABLE tbl2 ADD COLUMN val3 TEXT; step s1_begin: BEGIN; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); @@ -546,20 +506,16 @@ step s2_alter_tbl2_3rd_char: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE character v step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_3rd_char: <... 
completed> -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1' table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1' COMMIT -BEGIN -COMMIT step s2_alter_tbl2_3rd_int: ALTER TABLE tbl2 ALTER COLUMN val3 TYPE int USING val3::integer; step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data BEGIN @@ -588,19 +544,15 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_3rd_text: <... completed> step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1' table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1' COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1' COMMIT ?column? @@ -621,19 +573,15 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_3rd_char: <... completed> step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1' table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1' COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1' COMMIT ?column? 
@@ -653,20 +601,14 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[text]:'1' COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT ?column? @@ -686,20 +628,14 @@ step s1_insert_tbl2_3col: INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1); step s1_commit: COMMIT; step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_insert_tbl2: INSERT INTO tbl2 (val1, val2) VALUES (1, 1); -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 val3[character varying]:'1' COMMIT BEGIN -COMMIT -BEGIN table public.tbl2: INSERT: val1[integer]:1 val2[integer]:1 COMMIT ?column? @@ -717,13 +653,9 @@ step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s2_alter_tbl2_drop_3rd_col: ALTER TABLE tbl2 DROP COLUMN val3; step s1_insert_tbl1: INSERT INTO tbl1 (val1, val2) VALUES (1, 1); step s1_commit: COMMIT; -step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0'); +step s2_get_changes: SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -BEGIN -COMMIT -BEGIN -COMMIT BEGIN table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 table public.tbl1: INSERT: val1[integer]:1 val2[integer]:1 diff --git a/contrib/test_decoding/expected/ddl.out b/contrib/test_decoding/expected/ddl.out index e13a6c737059e..a3ed9a5cbf98d 100644 --- a/contrib/test_decoding/expected/ddl.out +++ b/contrib/test_decoding/expected/ddl.out @@ -12,7 +12,7 @@ ERROR: replication slot "regression_slot" already exists -- fail because of an invalid name SELECT 'init' FROM pg_create_logical_replication_slot('Invalid Name', 'test_decoding'); ERROR: replication slot name "Invalid Name" contains invalid character -HINT: Replication slot names may only contain letters, numbers and the underscore character. +HINT: Replication slot names may only contain lower case letters, numbers, and the underscore character. 
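The reworded HINT matches the slot-name validation rule: only lower case letters, digits, and underscores are accepted. A minimal sketch, where 'valid_slot_1' is an illustrative name not used elsewhere in this patch:
SELECT 'init' FROM pg_create_logical_replication_slot('Invalid Name', 'test_decoding');   -- rejected, as in the test above
SELECT 'init' FROM pg_create_logical_replication_slot('valid_slot_1', 'test_decoding');   -- accepted
SELECT pg_drop_replication_slot('valid_slot_1');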
-- fail twice because of an invalid parameter values SELECT 'init' FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', 'frakbar'); ERROR: could not parse value "frakbar" for parameter "include-xids" @@ -40,7 +40,7 @@ SELECT 'init' FROM pg_create_physical_replication_slot('repl'); init (1 row) -SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); ERROR: cannot use physical replication slot for logical decoding SELECT pg_drop_replication_slot('repl'); pg_drop_replication_slot @@ -89,18 +89,14 @@ COMMIT; ALTER TABLE replication_example RENAME COLUMN text TO somenum; INSERT INTO replication_example(somedata, somenum) VALUES (4, 1); -- collect all changes -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data --------------------------------------------------------------------------------------------------------------------------- - BEGIN - COMMIT BEGIN table public.replication_example: INSERT: id[integer]:1 somedata[integer]:1 text[character varying]:'1' table public.replication_example: INSERT: id[integer]:2 somedata[integer]:1 text[character varying]:'2' COMMIT BEGIN - COMMIT - BEGIN table public.replication_example: INSERT: id[integer]:3 somedata[integer]:2 text[character varying]:'1' bar[integer]:4 COMMIT BEGIN @@ -109,8 +105,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc table public.replication_example: INSERT: id[integer]:6 somedata[integer]:2 text[character varying]:'4' bar[integer]:null COMMIT BEGIN - COMMIT - BEGIN table public.replication_example: INSERT: id[integer]:7 somedata[integer]:3 text[character varying]:'1' COMMIT BEGIN @@ -118,15 +112,13 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc table public.replication_example: INSERT: id[integer]:9 somedata[integer]:3 text[character varying]:'3' COMMIT BEGIN - COMMIT - BEGIN table public.replication_example: INSERT: id[integer]:10 somedata[integer]:4 somenum[character varying]:'1' COMMIT -(30 rows) +(22 rows) ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4); -- throw away changes, they contain oids -SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); count ------- 12 @@ -142,7 +134,7 @@ INSERT INTO replication_example(somedata, somenum, zaphod2) VALUES (6, 3, 1); INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 4, 2); COMMIT; -- show changes -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------------------------------------------------------------------------------------------------------------------------------------------ BEGIN @@ -161,17 +153,17 @@ CREATE TABLE tr_unique(id2 serial unique NOT NULL, data int); INSERT INTO tr_unique(data) VALUES(10); ALTER TABLE tr_unique RENAME TO tr_pkey; ALTER TABLE tr_pkey ADD COLUMN id serial primary key; -SELECT count(data) FROM 
pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); count ------- - 10 + 6 (1 row) INSERT INTO tr_pkey(data) VALUES(1); --show deletion with primary key DELETE FROM tr_pkey; /* display results */ -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ---------------------------------------------------------------------------- BEGIN @@ -194,7 +186,7 @@ UPDATE tr_etoomuch SET data = - data WHERE id > 5000; COMMIT; /* display results, but hide most of the output */ SELECT count(*), min(data), max(data) -FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0') +FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') GROUP BY substring(data, 1, 24) ORDER BY 1,2; count | min | max @@ -204,6 +196,21 @@ ORDER BY 1,2; 20467 | table public.tr_etoomuch: DELETE: id[integer]:1 | table public.tr_etoomuch: UPDATE: id[integer]:9999 data[integer]:-9999 (3 rows) +-- check updates of primary keys work correctly +BEGIN; +CREATE TABLE spoolme AS SELECT g.i FROM generate_series(1, 5000) g(i); +UPDATE tr_etoomuch SET id = -id WHERE id = 5000; +DELETE FROM spoolme; +DROP TABLE spoolme; +COMMIT; +SELECT data +FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') +WHERE data ~ 'UPDATE'; + data +------------------------------------------------------------------------------------------------------------- + table public.tr_etoomuch: UPDATE: old-key: id[integer]:5000 new-tuple: id[integer]:-5000 data[integer]:5000 +(1 row) + /* * check whether we decode subtransactions correctly in relation with each * other @@ -224,11 +231,9 @@ RELEASE SAVEPOINT c; INSERT INTO tr_sub(path) VALUES ('1-top-2-#1'); RELEASE SAVEPOINT b; COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ---------------------------------------------------------------------- - BEGIN - COMMIT BEGIN table public.tr_sub: INSERT: id[integer]:1 path[text]:'1-top-#1' table public.tr_sub: INSERT: id[integer]:2 path[text]:'1-top-1-#1' @@ -237,7 +242,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc table public.tr_sub: INSERT: id[integer]:5 path[text]:'1-top-2-1-#2' table public.tr_sub: INSERT: id[integer]:6 path[text]:'1-top-2-#1' COMMIT -(10 rows) +(8 rows) -- check that we handle xlog assignments correctly BEGIN; @@ -265,7 +270,7 @@ INSERT INTO tr_sub(path) VALUES ('2-top-1...--#3'); RELEASE SAVEPOINT subtop; INSERT INTO tr_sub(path) VALUES ('2-top-#1'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------------------------------------------------------------------------ BEGIN @@ -286,7 +291,7 @@ INSERT INTO tr_sub(path) VALUES ('3-top-2-2-#1'); ROLLBACK TO SAVEPOINT b; INSERT INTO tr_sub(path) VALUES ('3-top-2-#2'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, 
NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ----------------------------------------------------------------------- BEGIN @@ -315,7 +320,7 @@ BEGIN; SAVEPOINT a; INSERT INTO tr_sub(path) VALUES ('5-top-1-#1'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data --------------------------------------------------------------------- BEGIN @@ -395,32 +400,22 @@ Options: user_catalog_table=false INSERT INTO replication_metadata(relation, options) VALUES ('zaphod', NULL); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------------------------------------------------------------------------------------------------------------------------------------ - BEGIN - COMMIT BEGIN table public.replication_metadata: INSERT: id[integer]:1 relation[name]:'foo' options[text[]]:'{a,b}' COMMIT BEGIN - COMMIT - BEGIN table public.replication_metadata: INSERT: id[integer]:2 relation[name]:'bar' options[text[]]:'{a,b}' COMMIT BEGIN - COMMIT - BEGIN table public.replication_metadata: INSERT: id[integer]:3 relation[name]:'blub' options[text[]]:null COMMIT BEGIN - COMMIT - BEGIN - COMMIT - BEGIN table public.replication_metadata: INSERT: id[integer]:4 relation[name]:'zaphod' options[text[]]:null rewritemeornot[integer]:null COMMIT -(22 rows) +(12 rows) /* * check whether we handle updates/deletes correct with & without a pkey @@ -438,6 +433,10 @@ ALTER TABLE table_without_key REPLICA IDENTITY FULL; UPDATE table_without_key SET data = 3 WHERE data = 2; UPDATE table_without_key SET id = -id; UPDATE table_without_key SET id = -id; +-- ensure that FULL correctly deals with new columns +ALTER TABLE table_without_key ADD COLUMN new_column text; +UPDATE table_without_key SET id = -id; +UPDATE table_without_key SET id = -id, new_column = 'someval'; DELETE FROM table_without_key WHERE data = 3; CREATE TABLE table_with_pkey(id serial primary key, data int); INSERT INTO table_with_pkey(data) VALUES(1), (2); @@ -489,11 +488,9 @@ INSERT INTO toasttable(toasted_col2) SELECT repeat(string_agg(to_char(g.i, 'FM00 UPDATE toasttable SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i)) WHERE id = 1; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data 
------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
- BEGIN
- COMMIT
  BEGIN
  table public.table_without_key: INSERT: id[integer]:1 data[integer]:1
  table public.table_without_key: INSERT: id[integer]:2 data[integer]:2
@@ -511,17 +508,19 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
  table public.table_without_key: UPDATE: id[integer]:2 data[integer]:3
  COMMIT
  BEGIN
- COMMIT
- BEGIN
  table public.table_without_key: UPDATE: old-key: id[integer]:2 data[integer]:3 new-tuple: id[integer]:-2 data[integer]:3
  COMMIT
  BEGIN
  table public.table_without_key: UPDATE: old-key: id[integer]:-2 data[integer]:3 new-tuple: id[integer]:2 data[integer]:3
  COMMIT
  BEGIN
- table public.table_without_key: DELETE: id[integer]:2 data[integer]:3
+ table public.table_without_key: UPDATE: old-key: id[integer]:2 data[integer]:3 new-tuple: id[integer]:-2 data[integer]:3 new_column[text]:null
+ COMMIT
+ BEGIN
+ table public.table_without_key: UPDATE: old-key: id[integer]:-2 data[integer]:3 new-tuple: id[integer]:2 data[integer]:3 new_column[text]:'someval'
  COMMIT
  BEGIN
+ table public.table_without_key: DELETE: id[integer]:2 data[integer]:3 new_column[text]:'someval'
  COMMIT
  BEGIN
  table public.table_with_pkey: INSERT: id[integer]:1 data[integer]:1
@@ -540,21 +539,15 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
  table public.table_with_pkey: UPDATE: old-key: id[integer]:-2 new-tuple: id[integer]:2 data[integer]:3
  COMMIT
  BEGIN
- COMMIT
- BEGIN
  table public.table_with_pkey: UPDATE: old-key: id[integer]:2 new-tuple: id[integer]:-2 data[integer]:3
  COMMIT
  BEGIN
- COMMIT
- BEGIN
  table public.table_with_pkey: UPDATE: old-key: id[integer]:-2 new-tuple: id[integer]:2 data[integer]:3
  COMMIT
  BEGIN
  table public.table_with_pkey: DELETE: id[integer]:2
  COMMIT
  BEGIN
- COMMIT
- BEGIN
  table public.table_with_unique_not_null: INSERT: id[integer]:1 data[integer]:1
  table public.table_with_unique_not_null: INSERT: id[integer]:2 data[integer]:2
  COMMIT
@@ -574,8 +567,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
  table public.table_with_unique_not_null: DELETE: (no-tuple-data)
  COMMIT
  BEGIN
- COMMIT
- BEGIN
  table public.table_with_unique_not_null: INSERT: id[integer]:3 data[integer]:1
  table public.table_with_unique_not_null: INSERT: id[integer]:4 data[integer]:2
  COMMIT
@@ -595,8 +586,6 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc
  table public.table_with_unique_not_null: DELETE: id[integer]:4
  COMMIT
  BEGIN
- COMMIT
- BEGIN
  table public.toasttable: INSERT: id[integer]:1
toasted_col1[text]:'1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374757677787980818283848586878889909192939495969798991001011021031041051061071081091101111121131141151161171181191201211221231241251261271281291301311321331341351361371381391401411421431441451461471481491501511521531541551561571581591601611621631641651661671681691701711721731741751761771781791801811821831841851861871881891901911921931941951961971981992002012022032042052062072082092102112122132142152162172182192202212222232242252262272282292302312322332342352362372382392402412422432442452462472482492502512522532542552562572582592602612622632642652662672682692702712722732742752762772782792802812822832842852862872882892902912922932942952962972982993003013023033043053063073083093103113123133143153163173183193203213223233243253263273283293303313323333343353363373383393403413423433443453463473483493503513523533543553563573583593603613623633643653663673683693703713723733743753763773783793803813823833843853863873883893903913923933943953963973983994004014024034044054064074084094104114124134144154164174184194204214224234244254264274284294304314324334344354364374384394404414424434444454464474484494504514524534544554564574584594604614624634644654664674684694704714724734744754764774784794804814824834844854864874884894904914924934944954964974984995005015025035045055065075085095105115125135145155165175185195205215225235245255265275285295305315325335345355365375385395405415425435445455465475485495505515525535545555565575585595605615625635645655665675685695705715725735745755765775785795805815825835845855865875885895905915925935945955965975985996006016026036046056066076086096106116126136146156166176186196206216226236246256266276286296306316326336346356366376386396406416426436446456466476486496506516526536546556566576586596606616626636646656666676686696706716726736746756766776786796806816826836846856866876886896906916926936946956966976986997007017027037047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011
6111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:79 toasted_col2[text]:null rand2[double precision]:1578 COMMIT BEGIN @@ -605,7 +594,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc 
BEGIN table public.toasttable: UPDATE: id[integer]:1 toasted_col1[text]:'12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270370470570670770870971071171271371471571671771871972072172272372472572672772872973073173273373473573673773873974074174274374474574674774874975075175275375475575675775875976076176276376476576676776876977077177277377477577677777877978078178278378478578678778878979079179279379479579679779879980080180280380480580680780880981081181281381481581681781881982082182282382482582682782882983083183283383483583683783883984084184284384484584684784884985085185285385485585685785885986086186286386486586686786886987087187287387487587687787887988088188288388488588688788888989089189289389489589689789889990090190290390490590690790890991091191291391491591691791891992092192292392492592692792892993093193293393493593693793893994094194294394494594694794894995095195295395495595695795895996096196296396496596696796896997097197297397497597697797897998098198298398498598698798898999099199299399499599699799899910001001100210031004100510061007100810091010101110121013101410151016101710181019102010211022102310241025102610271028102910301031103210331034103510361037103810391040104110421043104410451046104710481049105010511052105310541055105610571058105910601061106210631064106510661067106810691070107110721073107410751076107710781079108010811082108310841085108610871088108910901091109210931094109510961097109810991100110111021103110411051106110711081109111011111112111311141115111611171118111911201121112211231124112511261127112811291130113111321133113411351136113711381139114011411142114311441145114611471
148114911501151115211531154115511561157115811591160116111621163116411651166116711681169117011711172117311741175117611771178117911801181118211831184118511861187118811891190119111921193119411951196119711981199120012011202120312041205120612071208120912101211121212131214121512161217121812191220122112221223122412251226122712281229123012311232123312341235123612371238123912401241124212431244124512461247124812491250125112521253125412551256125712581259126012611262126312641265126612671268126912701271127212731274127512761277127812791280128112821283128412851286128712881289129012911292129312941295129612971298129913001301130213031304130513061307130813091310131113121313131413151316131713181319132013211322132313241325132613271328132913301331133213331334133513361337133813391340134113421343134413451346134713481349135013511352135313541355135613571358135913601361136213631364136513661367136813691370137113721373137413751376137713781379138013811382138313841385138613871388138913901391139213931394139513961397139813991400140114021403140414051406140714081409141014111412141314141415141614171418141914201421142214231424142514261427142814291430143114321433143414351436143714381439144014411442144314441445144614471448144914501451145214531454145514561457145814591460146114621463146414651466146714681469147014711472147314741475147614771478147914801481148214831484148514861487148814891490149114921493149414951496149714981499150015011502150315041505150615071508150915101511151215131514151515161517151815191520152115221523152415251526152715281529153015311532153315341535153615371538153915401541154215431544154515461547154815491550155115521553155415551556155715581559156015611562156315641565156615671568156915701571157215731574157515761577157815791580158115821583158415851586158715881589159015911592159315941595159615971598159916001601160216031604160516061607160816091610161116121613161416151616161716181619162016211622162316241625162616271628162916301631163216331634163516361637163816391640164116421643164416451646164716481649165016511652165316541655165616571658165916601661166216631664166516661667166816691670167116721673167416751676167716781679168016811682168316841685168616871688168916901691169216931694169516961697169816991700170117021703170417051706170717081709171017111712171317141715171617171718171917201721172217231724172517261727172817291730173117321733173417351736173717381739174017411742174317441745174617471748174917501751175217531754175517561757175817591760176117621763176417651766176717681769177017711772177317741775177617771778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand1[double precision]:79 toasted_col2[text]:null rand2[double precision]:1578 COMMIT -(113 rows) +(103 rows) INSERT INTO 
toasttable(toasted_col1) SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i);
 -- update of second column, first column unchanged
@@ -614,7 +603,7 @@ UPDATE toasttable
 WHERE id = 1;
 -- make sure we decode correctly even if the toast table is gone
 DROP TABLE toasttable;
-SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0');
+SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1');
 data
--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- BEGIN @@ -623,12 +612,10 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc BEGIN table public.toasttable: UPDATE: id[integer]:1 toasted_col1[text]:unchanged-toast-datum rand1[double precision]:79 toasted_col2[text]:'123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983
984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187
61877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000' rand2[double precision]:1578 COMMIT - BEGIN - COMMIT -(8 rows) +(6 rows) -- done, free logical replication slot -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------ (0 rows) diff --git a/contrib/test_decoding/expected/decoding_in_xact.out b/contrib/test_decoding/expected/decoding_in_xact.out index d15b0b542ba76..456840886aabf 100644 --- a/contrib/test_decoding/expected/decoding_in_xact.out +++ b/contrib/test_decoding/expected/decoding_in_xact.out @@ -58,19 +58,17 @@ SELECT txid_current() = 0; -- don't show yet, haven't committed INSERT INTO nobarf(data) VALUES('2'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ----------------------------------------------------------- - BEGIN - COMMIT BEGIN table public.nobarf: INSERT: id[integer]:1 data[text]:'1' COMMIT -(5 rows) +(3 rows) COMMIT; INSERT INTO nobarf(data) VALUES('3'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ----------------------------------------------------------- BEGIN diff --git a/contrib/test_decoding/expected/decoding_into_rel.out b/contrib/test_decoding/expected/decoding_into_rel.out new file mode 100644 index 0000000000000..be759caa31de8 --- /dev/null +++ b/contrib/test_decoding/expected/decoding_into_rel.out @@ -0,0 +1,86 @@ +-- test that we can insert the result of a get_changes call into a +-- logged relation. That's really not a good idea in practical terms, +-- but provides a nice test. +-- predictability +SET synchronous_commit = on; +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + ?column? 
+---------- + init +(1 row) + +-- slot works +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + data +------ +(0 rows) + +-- create some changes +CREATE TABLE somechange(id serial primary key); +INSERT INTO somechange DEFAULT VALUES; +CREATE TABLE changeresult AS + SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT * FROM changeresult; + data +------------------------------------------------ + BEGIN + table public.somechange: INSERT: id[integer]:1 + COMMIT +(3 rows) + +INSERT INTO changeresult + SELECT data FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +INSERT INTO changeresult + SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT * FROM changeresult; + data +-------------------------------------------------------------------------------------------------------------------------------------------------- + BEGIN + table public.somechange: INSERT: id[integer]:1 + COMMIT + BEGIN + table public.changeresult: INSERT: data[text]:'BEGIN' + table public.changeresult: INSERT: data[text]:'table public.somechange: INSERT: id[integer]:1' + table public.changeresult: INSERT: data[text]:'COMMIT' + COMMIT + BEGIN + table public.changeresult: INSERT: data[text]:'BEGIN' + table public.changeresult: INSERT: data[text]:'table public.somechange: INSERT: id[integer]:1' + table public.changeresult: INSERT: data[text]:'COMMIT' + COMMIT + BEGIN + table public.changeresult: INSERT: data[text]:'BEGIN' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''BEGIN''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.somechange: INSERT: id[integer]:1''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''COMMIT''' + table public.changeresult: INSERT: data[text]:'COMMIT' + COMMIT +(20 rows) + +DROP TABLE changeresult; +DROP TABLE somechange; +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + data +-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- + BEGIN + table public.changeresult: INSERT: data[text]:'BEGIN' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''BEGIN''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.somechange: INSERT: id[integer]:1''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''COMMIT''' + table public.changeresult: INSERT: data[text]:'COMMIT' + table public.changeresult: INSERT: data[text]:'BEGIN' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''BEGIN''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.changeresult: INSERT: data[text]:''''BEGIN''''''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.changeresult: INSERT: data[text]:''''table public.somechange: INSERT: id[integer]:1''''''' + table public.changeresult: 
INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''table public.changeresult: INSERT: data[text]:''''COMMIT''''''' + table public.changeresult: INSERT: data[text]:'table public.changeresult: INSERT: data[text]:''COMMIT''' + table public.changeresult: INSERT: data[text]:'COMMIT' + COMMIT +(14 rows) + +SELECT 'stop' FROM pg_drop_replication_slot('regression_slot'); + ?column? +---------- + stop +(1 row) + diff --git a/contrib/test_decoding/expected/ondisk_startup.out b/contrib/test_decoding/expected/ondisk_startup.out new file mode 100644 index 0000000000000..65115c830a495 --- /dev/null +++ b/contrib/test_decoding/expected/ondisk_startup.out @@ -0,0 +1,43 @@ +Parsed test spec with 3 sessions + +starting permutation: s2txid s1init s3txid s2alter s2c s1insert s1checkpoint s1start s1insert s1alter s1insert s1start +step s2txid: BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT txid_current() IS NULL; +?column? + +f +step s1init: SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); +step s3txid: BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT txid_current() IS NULL; +?column? + +f +step s2alter: ALTER TABLE do_write ADD COLUMN addedbys2 int; +step s2c: COMMIT; +step s1init: <... completed> +?column? + +init +step s1insert: INSERT INTO do_write DEFAULT VALUES; +step s1checkpoint: CHECKPOINT; +step s1start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false'); +data + +BEGIN +table public.do_write: INSERT: id[integer]:1 addedbys2[integer]:null +COMMIT +step s1insert: INSERT INTO do_write DEFAULT VALUES; +step s1alter: ALTER TABLE do_write ADD COLUMN addedbys1 int; +step s1insert: INSERT INTO do_write DEFAULT VALUES; +step s1start: SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false'); +data + +BEGIN +table public.do_write: INSERT: id[integer]:2 addedbys2[integer]:null +COMMIT +BEGIN +COMMIT +BEGIN +table public.do_write: INSERT: id[integer]:3 addedbys2[integer]:null addedbys1[integer]:null +COMMIT +?column? 
+ +stop diff --git a/contrib/test_decoding/expected/permissions.out b/contrib/test_decoding/expected/permissions.out index 85b7f5d6257e0..212fd1df35905 100644 --- a/contrib/test_decoding/expected/permissions.out +++ b/contrib/test_decoding/expected/permissions.out @@ -14,7 +14,7 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d (1 row) INSERT INTO lr_test VALUES('lr_superuser_init'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data -------------------------------------------------------------- BEGIN @@ -39,7 +39,7 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d INSERT INTO lr_test VALUES('lr_superuser_init'); ERROR: permission denied for relation lr_test -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------ (0 rows) @@ -57,7 +57,7 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d ERROR: must be superuser or replication role to use replication slots INSERT INTO lr_test VALUES('lr_superuser_init'); ERROR: permission denied for relation lr_test -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); ERROR: must be superuser or replication role to use replication slots SELECT pg_drop_replication_slot('regression_slot'); ERROR: must be superuser or replication role to use replication slots diff --git a/contrib/test_decoding/expected/prepared.out b/contrib/test_decoding/expected/prepared.out index 8313f8b7aa7e3..46e915d4ffa2f 100644 --- a/contrib/test_decoding/expected/prepared.out +++ b/contrib/test_decoding/expected/prepared.out @@ -39,13 +39,9 @@ INSERT INTO test_prepared2 VALUES (9); DROP TABLE test_prepared1; DROP TABLE test_prepared2; -- show results -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ------------------------------------------------------------------------- - BEGIN - COMMIT - BEGIN - COMMIT BEGIN table public.test_prepared1: INSERT: id[integer]:1 COMMIT @@ -68,11 +64,7 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc BEGIN table public.test_prepared2: INSERT: id[integer]:9 COMMIT - BEGIN - COMMIT - BEGIN - COMMIT -(30 rows) +(22 rows) SELECT pg_drop_replication_slot('regression_slot'); pg_drop_replication_slot diff --git a/contrib/test_decoding/expected/rewrite.out b/contrib/test_decoding/expected/rewrite.out index ec23ab9024a3a..4dcd489543837 100644 --- a/contrib/test_decoding/expected/rewrite.out +++ b/contrib/test_decoding/expected/rewrite.out @@ -9,15 +9,13 @@ SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_d CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120)); INSERT INTO replication_example(somedata) VALUES (1); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', 
NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ---------------------------------------------------------------------------------------------------------- - BEGIN - COMMIT BEGIN table public.replication_example: INSERT: id[integer]:1 somedata[integer]:1 text[character varying]:null COMMIT -(5 rows) +(3 rows) BEGIN; INSERT INTO replication_example(somedata) VALUES (2); @@ -58,7 +56,7 @@ INSERT INTO replication_example(somedata, testcolumn1, testcolumn3) VALUES (7, 5 COMMIT; -- make old files go away CHECKPOINT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); data ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- BEGIN @@ -70,33 +68,13 @@ SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'inc table public.replication_example: INSERT: id[integer]:5 somedata[integer]:4 text[character varying]:null testcolumn1[integer]:2 testcolumn2[integer]:1 COMMIT BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN - COMMIT - BEGIN table public.replication_example: INSERT: id[integer]:6 somedata[integer]:5 text[character varying]:null testcolumn1[integer]:3 testcolumn2[integer]:null COMMIT BEGIN table public.replication_example: INSERT: id[integer]:7 somedata[integer]:6 text[character varying]:null testcolumn1[integer]:4 testcolumn2[integer]:null table public.replication_example: INSERT: id[integer]:8 somedata[integer]:7 text[character varying]:null testcolumn1[integer]:5 testcolumn2[integer]:null testcolumn3[integer]:1 COMMIT -(35 rows) +(15 rows) SELECT pg_drop_replication_slot('regression_slot'); pg_drop_replication_slot diff --git a/contrib/test_decoding/expected/toast.out b/contrib/test_decoding/expected/toast.out index 6adef83f02908..b7bae65ee82d7 100644 --- a/contrib/test_decoding/expected/toast.out +++ b/contrib/test_decoding/expected/toast.out @@ -40,13 +40,17 @@ UPDATE toasted_key SET toasted_col2 = toasted_col1; -- test update of a toasted key, changing it UPDATE toasted_key SET toasted_key = toasted_key || '1'; DELETE FROM toasted_key; -SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +-- Test that HEAP2_MULTI_INSERT insertions with and without toasted +-- columns are handled correctly +CREATE TABLE toasted_copy ( + id int primary key, -- no default, copy didn't use to handle that with multi inserts + data text +); +ALTER TABLE toasted_copy ALTER COLUMN data SET STORAGE EXTERNAL; +\copy toasted_copy FROM STDIN +SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); substr ---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- - BEGIN - COMMIT - BEGIN - COMMIT BEGIN table public.xpto: INSERT: id[integer]:1 toasted_col1[text]:'1234567891011121314151617181920212223242526272829303132333435363738394041424344454647484950515253545556575859606162636465666768697071727374 COMMIT @@ -63,24 +67,281 @@ SELECT substr(data, 1, 200) FROM 
pg_logical_slot_get_changes('regression_slot', table public.xpto: DELETE: id[integer]:1 COMMIT BEGIN + table public.toasted_key: INSERT: id[integer]:1 toasted_key[text]:'1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 COMMIT BEGIN + table public.toasted_key: UPDATE: id[integer]:1 toasted_key[text]:unchanged-toast-datum toasted_col1[text]:unchanged-toast-datum toasted_col2[text]:'987654321098765432109876543210987654321098765432109 COMMIT BEGIN + table public.toasted_key: UPDATE: old-key: toasted_key[text]:'123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678 COMMIT BEGIN - table public.toasted_key: INSERT: id[integer]:1 toasted_key[text]:'1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123 + table public.toasted_key: DELETE: toasted_key[text]:'123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567 COMMIT BEGIN - table public.toasted_key: UPDATE: id[integer]:1 toasted_key[text]:unchanged-toast-datum toasted_col1[text]:unchanged-toast-datum toasted_col2[text]:'987654321098765432109876543210987654321098765432109 + table public.toasted_copy: INSERT: id[integer]:1 data[text]:'untoasted1' + table public.toasted_copy: INSERT: id[integer]:2 data[text]:'toasted1-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + table public.toasted_copy: INSERT: id[integer]:3 data[text]:'untoasted2' + table public.toasted_copy: INSERT: id[integer]:4 data[text]:'toasted2-1234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 + table public.toasted_copy: INSERT: id[integer]:5 data[text]:'untoasted3' + table public.toasted_copy: INSERT: id[integer]:6 data[text]:'untoasted4' + table public.toasted_copy: INSERT: id[integer]:7 data[text]:'untoasted5' + table public.toasted_copy: INSERT: id[integer]:8 data[text]:'untoasted6' + table public.toasted_copy: INSERT: id[integer]:9 data[text]:'untoasted7' + table public.toasted_copy: INSERT: id[integer]:10 data[text]:'untoasted8' + table public.toasted_copy: INSERT: id[integer]:11 data[text]:'untoasted9' + table public.toasted_copy: INSERT: id[integer]:12 data[text]:'untoasted10' + table public.toasted_copy: INSERT: id[integer]:13 data[text]:'untoasted11' + table public.toasted_copy: INSERT: id[integer]:14 data[text]:'untoasted12' + table public.toasted_copy: INSERT: id[integer]:15 data[text]:'untoasted13' + table public.toasted_copy: INSERT: id[integer]:16 data[text]:'untoasted14' + table public.toasted_copy: INSERT: id[integer]:17 data[text]:'untoasted15' + table public.toasted_copy: INSERT: id[integer]:18 data[text]:'untoasted16' + table public.toasted_copy: INSERT: id[integer]:19 data[text]:'untoasted17' + table public.toasted_copy: INSERT: id[integer]:20 data[text]:'untoasted18' + table public.toasted_copy: INSERT: id[integer]:21 data[text]:'untoasted19' + table public.toasted_copy: INSERT: id[integer]:22 data[text]:'untoasted20' + table public.toasted_copy: INSERT: id[integer]:23 data[text]:'untoasted21' + table public.toasted_copy: INSERT: id[integer]:24 data[text]:'untoasted22' + table public.toasted_copy: INSERT: id[integer]:25 data[text]:'untoasted23' + table 
public.toasted_copy: INSERT: id[integer]:26 data[text]:'untoasted24' + table public.toasted_copy: INSERT: id[integer]:27 data[text]:'untoasted25' + table public.toasted_copy: INSERT: id[integer]:28 data[text]:'untoasted26' + table public.toasted_copy: INSERT: id[integer]:29 data[text]:'untoasted27' + table public.toasted_copy: INSERT: id[integer]:30 data[text]:'untoasted28' + table public.toasted_copy: INSERT: id[integer]:31 data[text]:'untoasted29' + table public.toasted_copy: INSERT: id[integer]:32 data[text]:'untoasted30' + table public.toasted_copy: INSERT: id[integer]:33 data[text]:'untoasted31' + table public.toasted_copy: INSERT: id[integer]:34 data[text]:'untoasted32' + table public.toasted_copy: INSERT: id[integer]:35 data[text]:'untoasted33' + table public.toasted_copy: INSERT: id[integer]:36 data[text]:'untoasted34' + table public.toasted_copy: INSERT: id[integer]:37 data[text]:'untoasted35' + table public.toasted_copy: INSERT: id[integer]:38 data[text]:'untoasted36' + table public.toasted_copy: INSERT: id[integer]:39 data[text]:'untoasted37' + table public.toasted_copy: INSERT: id[integer]:40 data[text]:'untoasted38' + table public.toasted_copy: INSERT: id[integer]:41 data[text]:'untoasted39' + table public.toasted_copy: INSERT: id[integer]:42 data[text]:'untoasted40' + table public.toasted_copy: INSERT: id[integer]:43 data[text]:'untoasted41' + table public.toasted_copy: INSERT: id[integer]:44 data[text]:'untoasted42' + table public.toasted_copy: INSERT: id[integer]:45 data[text]:'untoasted43' + table public.toasted_copy: INSERT: id[integer]:46 data[text]:'untoasted44' + table public.toasted_copy: INSERT: id[integer]:47 data[text]:'untoasted45' + table public.toasted_copy: INSERT: id[integer]:48 data[text]:'untoasted46' + table public.toasted_copy: INSERT: id[integer]:49 data[text]:'untoasted47' + table public.toasted_copy: INSERT: id[integer]:50 data[text]:'untoasted48' + table public.toasted_copy: INSERT: id[integer]:51 data[text]:'untoasted49' + table public.toasted_copy: INSERT: id[integer]:52 data[text]:'untoasted50' + table public.toasted_copy: INSERT: id[integer]:53 data[text]:'untoasted51' + table public.toasted_copy: INSERT: id[integer]:54 data[text]:'untoasted52' + table public.toasted_copy: INSERT: id[integer]:55 data[text]:'untoasted53' + table public.toasted_copy: INSERT: id[integer]:56 data[text]:'untoasted54' + table public.toasted_copy: INSERT: id[integer]:57 data[text]:'untoasted55' + table public.toasted_copy: INSERT: id[integer]:58 data[text]:'untoasted56' + table public.toasted_copy: INSERT: id[integer]:59 data[text]:'untoasted57' + table public.toasted_copy: INSERT: id[integer]:60 data[text]:'untoasted58' + table public.toasted_copy: INSERT: id[integer]:61 data[text]:'untoasted59' + table public.toasted_copy: INSERT: id[integer]:62 data[text]:'untoasted60' + table public.toasted_copy: INSERT: id[integer]:63 data[text]:'untoasted61' + table public.toasted_copy: INSERT: id[integer]:64 data[text]:'untoasted62' + table public.toasted_copy: INSERT: id[integer]:65 data[text]:'untoasted63' + table public.toasted_copy: INSERT: id[integer]:66 data[text]:'untoasted64' + table public.toasted_copy: INSERT: id[integer]:67 data[text]:'untoasted65' + table public.toasted_copy: INSERT: id[integer]:68 data[text]:'untoasted66' + table public.toasted_copy: INSERT: id[integer]:69 data[text]:'untoasted67' + table public.toasted_copy: INSERT: id[integer]:70 data[text]:'untoasted68' + table public.toasted_copy: INSERT: id[integer]:71 data[text]:'untoasted69' + table 
public.toasted_copy: INSERT: id[integer]:72 data[text]:'untoasted70' + table public.toasted_copy: INSERT: id[integer]:73 data[text]:'untoasted71' + table public.toasted_copy: INSERT: id[integer]:74 data[text]:'untoasted72' + table public.toasted_copy: INSERT: id[integer]:75 data[text]:'untoasted73' + table public.toasted_copy: INSERT: id[integer]:76 data[text]:'untoasted74' + table public.toasted_copy: INSERT: id[integer]:77 data[text]:'untoasted75' + table public.toasted_copy: INSERT: id[integer]:78 data[text]:'untoasted76' + table public.toasted_copy: INSERT: id[integer]:79 data[text]:'untoasted77' + table public.toasted_copy: INSERT: id[integer]:80 data[text]:'untoasted78' + table public.toasted_copy: INSERT: id[integer]:81 data[text]:'untoasted79' + table public.toasted_copy: INSERT: id[integer]:82 data[text]:'untoasted80' + table public.toasted_copy: INSERT: id[integer]:83 data[text]:'untoasted81' + table public.toasted_copy: INSERT: id[integer]:84 data[text]:'untoasted82' + table public.toasted_copy: INSERT: id[integer]:85 data[text]:'untoasted83' + table public.toasted_copy: INSERT: id[integer]:86 data[text]:'untoasted84' + table public.toasted_copy: INSERT: id[integer]:87 data[text]:'untoasted85' + table public.toasted_copy: INSERT: id[integer]:88 data[text]:'untoasted86' + table public.toasted_copy: INSERT: id[integer]:89 data[text]:'untoasted87' + table public.toasted_copy: INSERT: id[integer]:90 data[text]:'untoasted88' + table public.toasted_copy: INSERT: id[integer]:91 data[text]:'untoasted89' + table public.toasted_copy: INSERT: id[integer]:92 data[text]:'untoasted90' + table public.toasted_copy: INSERT: id[integer]:93 data[text]:'untoasted91' + table public.toasted_copy: INSERT: id[integer]:94 data[text]:'untoasted92' + table public.toasted_copy: INSERT: id[integer]:95 data[text]:'untoasted93' + table public.toasted_copy: INSERT: id[integer]:96 data[text]:'untoasted94' + table public.toasted_copy: INSERT: id[integer]:97 data[text]:'untoasted95' + table public.toasted_copy: INSERT: id[integer]:98 data[text]:'untoasted96' + table public.toasted_copy: INSERT: id[integer]:99 data[text]:'untoasted97' + table public.toasted_copy: INSERT: id[integer]:100 data[text]:'untoasted98' + table public.toasted_copy: INSERT: id[integer]:101 data[text]:'untoasted99' + table public.toasted_copy: INSERT: id[integer]:102 data[text]:'untoasted100' + table public.toasted_copy: INSERT: id[integer]:103 data[text]:'untoasted101' + table public.toasted_copy: INSERT: id[integer]:104 data[text]:'untoasted102' + table public.toasted_copy: INSERT: id[integer]:105 data[text]:'untoasted103' + table public.toasted_copy: INSERT: id[integer]:106 data[text]:'untoasted104' + table public.toasted_copy: INSERT: id[integer]:107 data[text]:'untoasted105' + table public.toasted_copy: INSERT: id[integer]:108 data[text]:'untoasted106' + table public.toasted_copy: INSERT: id[integer]:109 data[text]:'untoasted107' + table public.toasted_copy: INSERT: id[integer]:110 data[text]:'untoasted108' + table public.toasted_copy: INSERT: id[integer]:111 data[text]:'untoasted109' + table public.toasted_copy: INSERT: id[integer]:112 data[text]:'untoasted110' + table public.toasted_copy: INSERT: id[integer]:113 data[text]:'untoasted111' + table public.toasted_copy: INSERT: id[integer]:114 data[text]:'untoasted112' + table public.toasted_copy: INSERT: id[integer]:115 data[text]:'untoasted113' + table public.toasted_copy: INSERT: id[integer]:116 data[text]:'untoasted114' + table public.toasted_copy: INSERT: id[integer]:117 
data[text]:'untoasted115' + table public.toasted_copy: INSERT: id[integer]:118 data[text]:'untoasted116' + table public.toasted_copy: INSERT: id[integer]:119 data[text]:'untoasted117' + table public.toasted_copy: INSERT: id[integer]:120 data[text]:'untoasted118' + table public.toasted_copy: INSERT: id[integer]:121 data[text]:'untoasted119' + table public.toasted_copy: INSERT: id[integer]:122 data[text]:'untoasted120' + table public.toasted_copy: INSERT: id[integer]:123 data[text]:'untoasted121' + table public.toasted_copy: INSERT: id[integer]:124 data[text]:'untoasted122' + table public.toasted_copy: INSERT: id[integer]:125 data[text]:'untoasted123' + table public.toasted_copy: INSERT: id[integer]:126 data[text]:'untoasted124' + table public.toasted_copy: INSERT: id[integer]:127 data[text]:'untoasted125' + table public.toasted_copy: INSERT: id[integer]:128 data[text]:'untoasted126' + table public.toasted_copy: INSERT: id[integer]:129 data[text]:'untoasted127' + table public.toasted_copy: INSERT: id[integer]:130 data[text]:'untoasted128' + table public.toasted_copy: INSERT: id[integer]:131 data[text]:'untoasted129' + table public.toasted_copy: INSERT: id[integer]:132 data[text]:'untoasted130' + table public.toasted_copy: INSERT: id[integer]:133 data[text]:'untoasted131' + table public.toasted_copy: INSERT: id[integer]:134 data[text]:'untoasted132' + table public.toasted_copy: INSERT: id[integer]:135 data[text]:'untoasted133' + table public.toasted_copy: INSERT: id[integer]:136 data[text]:'untoasted134' + table public.toasted_copy: INSERT: id[integer]:137 data[text]:'untoasted135' + table public.toasted_copy: INSERT: id[integer]:138 data[text]:'untoasted136' + table public.toasted_copy: INSERT: id[integer]:139 data[text]:'untoasted137' + table public.toasted_copy: INSERT: id[integer]:140 data[text]:'untoasted138' + table public.toasted_copy: INSERT: id[integer]:141 data[text]:'untoasted139' + table public.toasted_copy: INSERT: id[integer]:142 data[text]:'untoasted140' + table public.toasted_copy: INSERT: id[integer]:143 data[text]:'untoasted141' + table public.toasted_copy: INSERT: id[integer]:144 data[text]:'untoasted142' + table public.toasted_copy: INSERT: id[integer]:145 data[text]:'untoasted143' + table public.toasted_copy: INSERT: id[integer]:146 data[text]:'untoasted144' + table public.toasted_copy: INSERT: id[integer]:147 data[text]:'untoasted145' + table public.toasted_copy: INSERT: id[integer]:148 data[text]:'untoasted146' + table public.toasted_copy: INSERT: id[integer]:149 data[text]:'untoasted147' + table public.toasted_copy: INSERT: id[integer]:150 data[text]:'untoasted148' + table public.toasted_copy: INSERT: id[integer]:151 data[text]:'untoasted149' + table public.toasted_copy: INSERT: id[integer]:152 data[text]:'untoasted150' + table public.toasted_copy: INSERT: id[integer]:153 data[text]:'untoasted151' + table public.toasted_copy: INSERT: id[integer]:154 data[text]:'untoasted152' + table public.toasted_copy: INSERT: id[integer]:155 data[text]:'untoasted153' + table public.toasted_copy: INSERT: id[integer]:156 data[text]:'untoasted154' + table public.toasted_copy: INSERT: id[integer]:157 data[text]:'untoasted155' + table public.toasted_copy: INSERT: id[integer]:158 data[text]:'untoasted156' + table public.toasted_copy: INSERT: id[integer]:159 data[text]:'untoasted157' + table public.toasted_copy: INSERT: id[integer]:160 data[text]:'untoasted158' + table public.toasted_copy: INSERT: id[integer]:161 data[text]:'untoasted159' + table public.toasted_copy: INSERT: id[integer]:162 
data[text]:'untoasted160' + table public.toasted_copy: INSERT: id[integer]:163 data[text]:'untoasted161' + table public.toasted_copy: INSERT: id[integer]:164 data[text]:'untoasted162' + table public.toasted_copy: INSERT: id[integer]:165 data[text]:'untoasted163' + table public.toasted_copy: INSERT: id[integer]:166 data[text]:'untoasted164' + table public.toasted_copy: INSERT: id[integer]:167 data[text]:'untoasted165' + table public.toasted_copy: INSERT: id[integer]:168 data[text]:'untoasted166' + table public.toasted_copy: INSERT: id[integer]:169 data[text]:'untoasted167' + table public.toasted_copy: INSERT: id[integer]:170 data[text]:'untoasted168' + table public.toasted_copy: INSERT: id[integer]:171 data[text]:'untoasted169' + table public.toasted_copy: INSERT: id[integer]:172 data[text]:'untoasted170' + table public.toasted_copy: INSERT: id[integer]:173 data[text]:'untoasted171' + table public.toasted_copy: INSERT: id[integer]:174 data[text]:'untoasted172' + table public.toasted_copy: INSERT: id[integer]:175 data[text]:'untoasted173' + table public.toasted_copy: INSERT: id[integer]:176 data[text]:'untoasted174' + table public.toasted_copy: INSERT: id[integer]:177 data[text]:'untoasted175' + table public.toasted_copy: INSERT: id[integer]:178 data[text]:'untoasted176' + table public.toasted_copy: INSERT: id[integer]:179 data[text]:'untoasted177' + table public.toasted_copy: INSERT: id[integer]:180 data[text]:'untoasted178' + table public.toasted_copy: INSERT: id[integer]:181 data[text]:'untoasted179' + table public.toasted_copy: INSERT: id[integer]:182 data[text]:'untoasted180' + table public.toasted_copy: INSERT: id[integer]:183 data[text]:'untoasted181' + table public.toasted_copy: INSERT: id[integer]:184 data[text]:'untoasted182' + table public.toasted_copy: INSERT: id[integer]:185 data[text]:'untoasted183' + table public.toasted_copy: INSERT: id[integer]:186 data[text]:'untoasted184' + table public.toasted_copy: INSERT: id[integer]:187 data[text]:'untoasted185' + table public.toasted_copy: INSERT: id[integer]:188 data[text]:'untoasted186' + table public.toasted_copy: INSERT: id[integer]:189 data[text]:'untoasted187' + table public.toasted_copy: INSERT: id[integer]:190 data[text]:'untoasted188' + table public.toasted_copy: INSERT: id[integer]:191 data[text]:'untoasted189' + table public.toasted_copy: INSERT: id[integer]:192 data[text]:'untoasted190' + table public.toasted_copy: INSERT: id[integer]:193 data[text]:'untoasted191' + table public.toasted_copy: INSERT: id[integer]:194 data[text]:'untoasted192' + table public.toasted_copy: INSERT: id[integer]:195 data[text]:'untoasted193' + table public.toasted_copy: INSERT: id[integer]:196 data[text]:'untoasted194' + table public.toasted_copy: INSERT: id[integer]:197 data[text]:'untoasted195' + table public.toasted_copy: INSERT: id[integer]:198 data[text]:'untoasted196' + table public.toasted_copy: INSERT: id[integer]:199 data[text]:'untoasted197' + table public.toasted_copy: INSERT: id[integer]:200 data[text]:'untoasted198' + table public.toasted_copy: INSERT: id[integer]:201 data[text]:'toasted3-12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678 + table public.toasted_copy: INSERT: id[integer]:202 data[text]:'untoasted199' + table public.toasted_copy: INSERT: id[integer]:203 data[text]:'untoasted200' + COMMIT +(232 rows) + +-- test we can decode "old" tuples bigger than the max heap tuple size correctly +DROP TABLE IF EXISTS toasted_several; +NOTICE: table 
"toasted_several" does not exist, skipping +CREATE TABLE toasted_several ( + id serial unique not null, + toasted_key text primary key, + toasted_col1 text, + toasted_col2 text +); +ALTER TABLE toasted_several REPLICA IDENTITY FULL; +ALTER TABLE toasted_several ALTER COLUMN toasted_key SET STORAGE EXTERNAL; +ALTER TABLE toasted_several ALTER COLUMN toasted_col1 SET STORAGE EXTERNAL; +ALTER TABLE toasted_several ALTER COLUMN toasted_col2 SET STORAGE EXTERNAL; +INSERT INTO toasted_several(toasted_key) VALUES(repeat('9876543210', 2000)); +SELECT regexp_replace(data, '^(.{100}).*(.{100})$', '\1..\2') FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + regexp_replace +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + BEGIN + table public.toasted_several: INSERT: id[integer]:1 toasted_key[text]:'98765432109876543210987654321..098765432109876543210987654321098765432109876543210' toasted_col1[text]:null toasted_col2[text]:null COMMIT +(3 rows) + +-- test update of a toasted key without changing it +UPDATE toasted_several SET toasted_col1 = toasted_key; +UPDATE toasted_several SET toasted_col2 = toasted_col1; +SELECT regexp_replace(data, '^(.{100}).*(.{100})$', '\1..\2') FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + regexp_replace +------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ BEGIN - table public.toasted_key: UPDATE: old-key: toasted_key[text]:'123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678 + table public.toasted_several: INSERT: id[integer]:1 toasted_key[text]:'98765432109876543210987654321..098765432109876543210987654321098765432109876543210' toasted_col1[text]:null toasted_col2[text]:null COMMIT BEGIN - table public.toasted_key: DELETE: toasted_key[text]:'123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567 + table public.toasted_several: UPDATE: old-key: id[integer]:1 toasted_key[text]:'98765432109876543210..432109876543210987654321098765432109876543210987654321098765432109876543210' toasted_col2[text]:null + COMMIT + BEGIN + table public.toasted_several: UPDATE: old-key: id[integer]:1 toasted_key[text]:'98765432109876543210..876543210987654321098765432109876543210987654321098765432109876543210987654321098765432109876543210' + COMMIT +(9 rows) + +/* + * update with large tuplebuf, in a transaction large enough to force to spool to disk + */ +BEGIN; +INSERT INTO toasted_several(toasted_key) SELECT * FROM generate_series(1, 10234); +UPDATE toasted_several SET toasted_col1 = toasted_col2 WHERE id = 1; +DELETE FROM toasted_several WHERE id = 1; +COMMIT; +DROP TABLE toasted_several; +SELECT regexp_replace(data, '^(.{100}).*(.{100})$', '\1..\2') FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') +WHERE data NOT LIKE '%INSERT: %'; + regexp_replace 
+------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ + BEGIN + table public.toasted_several: UPDATE: old-key: id[integer]:1 toasted_key[text]:'98765432109876543210..7654321098765432109876543210987654321098765432109876543210' toasted_col2[text]:unchanged-toast-datum + table public.toasted_several: DELETE: id[integer]:1 toasted_key[text]:'98765432109876543210987654321..876543210987654321098765432109876543210987654321098765432109876543210987654321098765432109876543210' COMMIT -(37 rows) +(4 rows) SELECT pg_drop_replication_slot('regression_slot'); pg_drop_replication_slot diff --git a/contrib/test_decoding/expected/xact.out b/contrib/test_decoding/expected/xact.out new file mode 100644 index 0000000000000..507b701c3ab59 --- /dev/null +++ b/contrib/test_decoding/expected/xact.out @@ -0,0 +1,42 @@ +-- predictability +SET synchronous_commit = on; +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + ?column? +---------- + init +(1 row) + +-- bug #13844, xids in non-decoded records need to be inspected +CREATE TABLE xact_test(data text); +INSERT INTO xact_test VALUES ('before-test'); +BEGIN; +-- perform operation in xact that creates and logs xid, but isn't decoded +SELECT * FROM xact_test FOR UPDATE; + data +------------- + before-test +(1 row) + +SAVEPOINT foo; +-- and now actually insert in subxact, xid is expected to be known +INSERT INTO xact_test VALUES ('after-assignment'); +COMMIT; +-- and now show those changes +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + data +--------------------------------------------------------------- + BEGIN + table public.xact_test: INSERT: data[text]:'before-test' + COMMIT + BEGIN + table public.xact_test: INSERT: data[text]:'after-assignment' + COMMIT +(6 rows) + +DROP TABLE xact_test; +SELECT pg_drop_replication_slot('regression_slot'); + pg_drop_replication_slot +-------------------------- + +(1 row) + diff --git a/contrib/test_decoding/specs/concurrent_ddl_dml.spec b/contrib/test_decoding/specs/concurrent_ddl_dml.spec index 7c8a7c7977f7a..a27b19fe7f24f 100644 --- a/contrib/test_decoding/specs/concurrent_ddl_dml.spec +++ b/contrib/test_decoding/specs/concurrent_ddl_dml.spec @@ -14,6 +14,8 @@ teardown } session "s1" +setup { SET synchronous_commit=on; } + step "s1_init" { SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding'); } step "s1_begin" { BEGIN; } step "s1_insert_tbl1" { INSERT INTO tbl1 (val1, val2) VALUES (1, 1); } @@ -23,6 +25,8 @@ step "s1_insert_tbl2_3col" { INSERT INTO tbl2 (val1, val2, val3) VALUES (1, 1, 1 step "s1_commit" { COMMIT; } session "s2" +setup { SET synchronous_commit=on; } + step "s2_alter_tbl1_float" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE float; } step "s2_alter_tbl1_char" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE character varying; } step "s2_alter_tbl1_text" { ALTER TABLE tbl1 ALTER COLUMN val2 TYPE text; } @@ -50,7 +54,7 @@ step "s2_alter_tbl2_3rd_char" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE characte step "s2_alter_tbl2_3rd_text" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE text; } step "s2_alter_tbl2_3rd_int" { ALTER TABLE tbl2 ALTER COLUMN val3 TYPE int USING val3::integer; } -step "s2_get_changes" { SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 
'include-xids', '0'); } +step "s2_get_changes" { SELECT regexp_replace(data, 'temp_\d+', 'temp') AS data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); } diff --git a/contrib/test_decoding/specs/ondisk_startup.spec b/contrib/test_decoding/specs/ondisk_startup.spec new file mode 100644 index 0000000000000..39c4a223aeeb2 --- /dev/null +++ b/contrib/test_decoding/specs/ondisk_startup.spec @@ -0,0 +1,43 @@ +# Force usage of ondisk decoding snapshots to test that code path. +setup +{ + DROP TABLE IF EXISTS do_write; + CREATE TABLE do_write(id serial primary key); +} + +teardown +{ + DROP TABLE do_write; + SELECT 'stop' FROM pg_drop_replication_slot('isolation_slot'); +} + + +session "s1" +setup { SET synchronous_commit=on; } + +step "s1init" {SELECT 'init' FROM pg_create_logical_replication_slot('isolation_slot', 'test_decoding');} +step "s1start" {SELECT data FROM pg_logical_slot_get_changes('isolation_slot', NULL, NULL, 'include-xids', 'false');} +step "s1insert" { INSERT INTO do_write DEFAULT VALUES; } +step "s1checkpoint" { CHECKPOINT; } +step "s1alter" { ALTER TABLE do_write ADD COLUMN addedbys1 int; } + +session "s2" +setup { SET synchronous_commit=on; } + +step "s2txid" { BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT txid_current() IS NULL; } +step "s2alter" { ALTER TABLE do_write ADD COLUMN addedbys2 int; } +step "s2c" { COMMIT; } + + +session "s3" +setup { SET synchronous_commit=on; } + +step "s3txid" { BEGIN ISOLATION LEVEL REPEATABLE READ; SELECT txid_current() IS NULL; } +step "s3c" { COMMIT; } + +# Force usage of ondisk snapshot by starting and not finishing a +# transaction with a assigned xid after consistency has been +# reached. In combination with a checkpoint forcing a snapshot to be +# written and a new restart point computed that'll lead to the usage +# of the snapshot. 
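+#
+# In the permutation below, "s2txid" and "s3txid" assign xids inside
+# still-open REPEATABLE READ transactions; the expected output shows
+# "s1init" completing only once "s2c" commits, after which
+# "s1checkpoint" forces the snapshot to be written so that the later
+# "s1start" calls decode using the on-disk copy.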
+permutation "s2txid" "s1init" "s3txid" "s2alter" "s2c" "s1insert" "s1checkpoint" "s1start" "s1insert" "s1alter" "s1insert" "s1start" diff --git a/contrib/test_decoding/sql/binary.sql b/contrib/test_decoding/sql/binary.sql index 619f00b3bc8c9..df1c5fbd42263 100644 --- a/contrib/test_decoding/sql/binary.sql +++ b/contrib/test_decoding/sql/binary.sql @@ -3,12 +3,12 @@ SET synchronous_commit = on; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); -- succeeds, textual plugin, textual consumer -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '0', 'skip-empty-xacts', '1'); -- fails, binary plugin, textual consumer -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '1'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'force-binary', '1', 'skip-empty-xacts', '1'); -- succeeds, textual plugin, binary consumer -SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '0'); +SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '0', 'skip-empty-xacts', '1'); -- succeeds, binary plugin, binary consumer -SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '1'); +SELECT data FROM pg_logical_slot_get_binary_changes('regression_slot', NULL, NULL, 'force-binary', '1', 'skip-empty-xacts', '1'); SELECT 'init' FROM pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/ddl.sql b/contrib/test_decoding/sql/ddl.sql index 87e74c64f31cd..8e3dae629317b 100644 --- a/contrib/test_decoding/sql/ddl.sql +++ b/contrib/test_decoding/sql/ddl.sql @@ -19,7 +19,7 @@ SELECT pg_drop_replication_slot('regression_slot'); -- check that we're detecting a streaming rep slot used for logical decoding SELECT 'init' FROM pg_create_physical_replication_slot('repl'); -SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('repl', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('repl'); @@ -64,11 +64,11 @@ ALTER TABLE replication_example RENAME COLUMN text TO somenum; INSERT INTO replication_example(somedata, somenum) VALUES (4, 1); -- collect all changes -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); ALTER TABLE replication_example ALTER COLUMN somenum TYPE int4 USING (somenum::int4); -- throw away changes, they contain oids -SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); INSERT INTO replication_example(somedata, somenum) VALUES (5, 1); @@ -82,21 +82,21 @@ INSERT INTO replication_example(somedata, somenum, zaphod1) VALUES (6, 4, 2); COMMIT; -- show changes -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- hide changes bc of oid visible in full table rewrites CREATE TABLE 
tr_unique(id2 serial unique NOT NULL, data int); INSERT INTO tr_unique(data) VALUES(10); ALTER TABLE tr_unique RENAME TO tr_pkey; ALTER TABLE tr_pkey ADD COLUMN id serial primary key; -SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT count(data) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); INSERT INTO tr_pkey(data) VALUES(1); --show deletion with primary key DELETE FROM tr_pkey; /* display results */ -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); /* * check that disk spooling works @@ -110,10 +110,22 @@ COMMIT; /* display results, but hide most of the output */ SELECT count(*), min(data), max(data) -FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0') +FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') GROUP BY substring(data, 1, 24) ORDER BY 1,2; +-- check updates of primary keys work correctly +BEGIN; +CREATE TABLE spoolme AS SELECT g.i FROM generate_series(1, 5000) g(i); +UPDATE tr_etoomuch SET id = -id WHERE id = 5000; +DELETE FROM spoolme; +DROP TABLE spoolme; +COMMIT; + +SELECT data +FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') +WHERE data ~ 'UPDATE'; + /* * check whether we decode subtransactions correctly in relation with each * other @@ -138,7 +150,7 @@ INSERT INTO tr_sub(path) VALUES ('1-top-2-#1'); RELEASE SAVEPOINT b; COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- check that we handle xlog assignments correctly BEGIN; @@ -167,7 +179,7 @@ RELEASE SAVEPOINT subtop; INSERT INTO tr_sub(path) VALUES ('2-top-#1'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- make sure rollbacked subtransactions aren't decoded BEGIN; @@ -180,7 +192,7 @@ ROLLBACK TO SAVEPOINT b; INSERT INTO tr_sub(path) VALUES ('3-top-2-#2'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- test whether a known, but not yet logged toplevel xact, followed by a -- subxact commit is handled correctly @@ -199,7 +211,7 @@ INSERT INTO tr_sub(path) VALUES ('5-top-1-#1'); COMMIT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); /* @@ -239,7 +251,7 @@ ALTER TABLE replication_metadata SET (user_catalog_table = false); INSERT INTO replication_metadata(relation, options) VALUES ('zaphod', NULL); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); /* * check whether we handle 
updates/deletes correct with & without a pkey @@ -258,6 +270,10 @@ ALTER TABLE table_without_key REPLICA IDENTITY FULL; UPDATE table_without_key SET data = 3 WHERE data = 2; UPDATE table_without_key SET id = -id; UPDATE table_without_key SET id = -id; +-- ensure that FULL correctly deals with new columns +ALTER TABLE table_without_key ADD COLUMN new_column text; +UPDATE table_without_key SET id = -id; +UPDATE table_without_key SET id = -id, new_column = 'someval'; DELETE FROM table_without_key WHERE data = 3; CREATE TABLE table_with_pkey(id serial primary key, data int); @@ -315,7 +331,7 @@ UPDATE toasttable SET toasted_col1 = (SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i)) WHERE id = 1; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); INSERT INTO toasttable(toasted_col1) SELECT string_agg(g.i::text, '') FROM generate_series(1, 2000) g(i); @@ -327,10 +343,10 @@ WHERE id = 1; -- make sure we decode correctly even if the toast table is gone DROP TABLE toasttable; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); -- done, free logical replication slot -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/decoding_in_xact.sql b/contrib/test_decoding/sql/decoding_in_xact.sql index 2771afee7a4c6..990f61885e7f0 100644 --- a/contrib/test_decoding/sql/decoding_in_xact.sql +++ b/contrib/test_decoding/sql/decoding_in_xact.sql @@ -32,10 +32,10 @@ BEGIN; SELECT txid_current() = 0; -- don't show yet, haven't committed INSERT INTO nobarf(data) VALUES('2'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); COMMIT; INSERT INTO nobarf(data) VALUES('3'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT 'stop' FROM pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/decoding_into_rel.sql b/contrib/test_decoding/sql/decoding_into_rel.sql new file mode 100644 index 0000000000000..54670fd39e76c --- /dev/null +++ b/contrib/test_decoding/sql/decoding_into_rel.sql @@ -0,0 +1,31 @@ +-- test that we can insert the result of a get_changes call into a +-- logged relation. That's really not a good idea in practical terms, +-- but provides a nice test. 
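+-- (Each peek/get call below also has its result inserted back into
+-- changeresult, so the next call decodes those inserts in turn; that
+-- is why the expected output quotes earlier change records inside
+-- later ones.)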
+ +-- predictability +SET synchronous_commit = on; + +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + +-- slot works +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +-- create some changes +CREATE TABLE somechange(id serial primary key); +INSERT INTO somechange DEFAULT VALUES; + +CREATE TABLE changeresult AS + SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +SELECT * FROM changeresult; + +INSERT INTO changeresult + SELECT data FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +INSERT INTO changeresult + SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +SELECT * FROM changeresult; +DROP TABLE changeresult; +DROP TABLE somechange; +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); +SELECT 'stop' FROM pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/permissions.sql b/contrib/test_decoding/sql/permissions.sql index 39d70b56b000c..8680c55771d81 100644 --- a/contrib/test_decoding/sql/permissions.sql +++ b/contrib/test_decoding/sql/permissions.sql @@ -11,7 +11,7 @@ CREATE TABLE lr_test(data text); SET ROLE lr_superuser; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); INSERT INTO lr_test VALUES('lr_superuser_init'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); RESET ROLE; @@ -19,7 +19,7 @@ RESET ROLE; SET ROLE lr_replication; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); INSERT INTO lr_test VALUES('lr_superuser_init'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); RESET ROLE; @@ -27,7 +27,7 @@ RESET ROLE; SET ROLE lr_normal; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); INSERT INTO lr_test VALUES('lr_superuser_init'); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); RESET ROLE; diff --git a/contrib/test_decoding/sql/prepared.sql b/contrib/test_decoding/sql/prepared.sql index 652f3d3f4476f..e72639767ee34 100644 --- a/contrib/test_decoding/sql/prepared.sql +++ b/contrib/test_decoding/sql/prepared.sql @@ -45,6 +45,6 @@ DROP TABLE test_prepared1; DROP TABLE test_prepared2; -- show results -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/rewrite.sql b/contrib/test_decoding/sql/rewrite.sql index 
9a3dcbf85791b..8a7329423ded8 100644 --- a/contrib/test_decoding/sql/rewrite.sql +++ b/contrib/test_decoding/sql/rewrite.sql @@ -6,7 +6,7 @@ DROP TABLE IF EXISTS replication_example; SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); CREATE TABLE replication_example(id SERIAL PRIMARY KEY, somedata int, text varchar(120)); INSERT INTO replication_example(somedata) VALUES (1); -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); BEGIN; INSERT INTO replication_example(somedata) VALUES (2); @@ -56,7 +56,7 @@ COMMIT; -- make old files go away CHECKPOINT; -SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); SELECT pg_drop_replication_slot('regression_slot'); DROP TABLE IF EXISTS replication_example; diff --git a/contrib/test_decoding/sql/toast.sql b/contrib/test_decoding/sql/toast.sql index 943db9d2eedcc..a333d99abcefd 100644 --- a/contrib/test_decoding/sql/toast.sql +++ b/contrib/test_decoding/sql/toast.sql @@ -47,5 +47,254 @@ UPDATE toasted_key SET toasted_key = toasted_key || '1'; DELETE FROM toasted_key; -SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0'); +-- Test that HEAP2_MULTI_INSERT insertions with and without toasted +-- columns are handled correctly +CREATE TABLE toasted_copy ( + id int primary key, -- no default, copy didn't use to handle that with multi inserts + data text +); +ALTER TABLE toasted_copy ALTER COLUMN data SET STORAGE EXTERNAL; +\copy toasted_copy FROM STDIN +1 untoasted1 +2 
toasted1-12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 +3 untoasted2 +4 
toasted2-12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 +5 untoasted3 +6 untoasted4 +7 untoasted5 +8 untoasted6 +9 untoasted7 +10 untoasted8 +11 untoasted9 +12 untoasted10 +13 untoasted11 +14 untoasted12 +15 untoasted13 +16 untoasted14 +17 untoasted15 +18 untoasted16 +19 untoasted17 +20 untoasted18 +21 untoasted19 +22 untoasted20 +23 untoasted21 +24 untoasted22 +25 untoasted23 +26 untoasted24 +27 untoasted25 +28 untoasted26 +29 untoasted27 +30 untoasted28 +31 untoasted29 +32 untoasted30 +33 untoasted31 +34 untoasted32 +35 untoasted33 +36 untoasted34 +37 untoasted35 +38 untoasted36 +39 untoasted37 +40 untoasted38 +41 untoasted39 +42 untoasted40 +43 untoasted41 +44 untoasted42 +45 untoasted43 +46 untoasted44 +47 untoasted45 +48 untoasted46 +49 untoasted47 +50 untoasted48 +51 untoasted49 +52 untoasted50 +53 untoasted51 +54 untoasted52 +55 untoasted53 +56 untoasted54 +57 untoasted55 +58 untoasted56 +59 untoasted57 +60 untoasted58 +61 untoasted59 +62 untoasted60 +63 untoasted61 +64 untoasted62 +65 untoasted63 +66 untoasted64 +67 untoasted65 +68 untoasted66 +69 untoasted67 +70 untoasted68 +71 untoasted69 +72 untoasted70 +73 untoasted71 +74 untoasted72 +75 untoasted73 +76 untoasted74 +77 untoasted75 +78 untoasted76 +79 untoasted77 +80 untoasted78 +81 untoasted79 +82 untoasted80 +83 untoasted81 +84 untoasted82 +85 untoasted83 +86 untoasted84 +87 untoasted85 +88 untoasted86 +89 untoasted87 +90 untoasted88 +91 untoasted89 +92 untoasted90 +93 untoasted91 +94 untoasted92 +95 untoasted93 +96 untoasted94 +97 untoasted95 +98 untoasted96 +99 untoasted97 +100 untoasted98 +101 untoasted99 
+102 untoasted100 +103 untoasted101 +104 untoasted102 +105 untoasted103 +106 untoasted104 +107 untoasted105 +108 untoasted106 +109 untoasted107 +110 untoasted108 +111 untoasted109 +112 untoasted110 +113 untoasted111 +114 untoasted112 +115 untoasted113 +116 untoasted114 +117 untoasted115 +118 untoasted116 +119 untoasted117 +120 untoasted118 +121 untoasted119 +122 untoasted120 +123 untoasted121 +124 untoasted122 +125 untoasted123 +126 untoasted124 +127 untoasted125 +128 untoasted126 +129 untoasted127 +130 untoasted128 +131 untoasted129 +132 untoasted130 +133 untoasted131 +134 untoasted132 +135 untoasted133 +136 untoasted134 +137 untoasted135 +138 untoasted136 +139 untoasted137 +140 untoasted138 +141 untoasted139 +142 untoasted140 +143 untoasted141 +144 untoasted142 +145 untoasted143 +146 untoasted144 +147 untoasted145 +148 untoasted146 +149 untoasted147 +150 untoasted148 +151 untoasted149 +152 untoasted150 +153 untoasted151 +154 untoasted152 +155 untoasted153 +156 untoasted154 +157 untoasted155 +158 untoasted156 +159 untoasted157 +160 untoasted158 +161 untoasted159 +162 untoasted160 +163 untoasted161 +164 untoasted162 +165 untoasted163 +166 untoasted164 +167 untoasted165 +168 untoasted166 +169 untoasted167 +170 untoasted168 +171 untoasted169 +172 untoasted170 +173 untoasted171 +174 untoasted172 +175 untoasted173 +176 untoasted174 +177 untoasted175 +178 untoasted176 +179 untoasted177 +180 untoasted178 +181 untoasted179 +182 untoasted180 +183 untoasted181 +184 untoasted182 +185 untoasted183 +186 untoasted184 +187 untoasted185 +188 untoasted186 +189 untoasted187 +190 untoasted188 +191 untoasted189 +192 untoasted190 +193 untoasted191 +194 untoasted192 +195 untoasted193 +196 untoasted194 +197 untoasted195 +198 untoasted196 +199 untoasted197 +200 untoasted198 +201 
toasted3-12345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890 +202 untoasted199 +203 untoasted200 +\. 
+SELECT substr(data, 1, 200) FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +-- test we can decode "old" tuples bigger than the max heap tuple size correctly +DROP TABLE IF EXISTS toasted_several; +CREATE TABLE toasted_several ( + id serial unique not null, + toasted_key text primary key, + toasted_col1 text, + toasted_col2 text +); +ALTER TABLE toasted_several REPLICA IDENTITY FULL; +ALTER TABLE toasted_several ALTER COLUMN toasted_key SET STORAGE EXTERNAL; +ALTER TABLE toasted_several ALTER COLUMN toasted_col1 SET STORAGE EXTERNAL; +ALTER TABLE toasted_several ALTER COLUMN toasted_col2 SET STORAGE EXTERNAL; + +INSERT INTO toasted_several(toasted_key) VALUES(repeat('9876543210', 2000)); + +SELECT regexp_replace(data, '^(.{100}).*(.{100})$', '\1..\2') FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +-- test update of a toasted key without changing it +UPDATE toasted_several SET toasted_col1 = toasted_key; +UPDATE toasted_several SET toasted_col2 = toasted_col1; + +SELECT regexp_replace(data, '^(.{100}).*(.{100})$', '\1..\2') FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +/* + * update with large tuplebuf, in a transaction large enough to force to spool to disk + */ +BEGIN; +INSERT INTO toasted_several(toasted_key) SELECT * FROM generate_series(1, 10234); +UPDATE toasted_several SET toasted_col1 = toasted_col2 WHERE id = 1; +DELETE FROM toasted_several WHERE id = 1; +COMMIT; + +DROP TABLE toasted_several; + +SELECT regexp_replace(data, '^(.{100}).*(.{100})$', '\1..\2') FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1') +WHERE data NOT LIKE '%INSERT: %'; SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/sql/xact.sql b/contrib/test_decoding/sql/xact.sql new file mode 100644 index 0000000000000..9ce238f62dfc0 --- /dev/null +++ b/contrib/test_decoding/sql/xact.sql @@ -0,0 +1,22 @@ +-- predictability +SET synchronous_commit = on; + +SELECT 'init' FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); + +-- bug #13844, xids in non-decoded records need to be inspected +CREATE TABLE xact_test(data text); +INSERT INTO xact_test VALUES ('before-test'); + +BEGIN; +-- perform operation in xact that creates and logs xid, but isn't decoded +SELECT * FROM xact_test FOR UPDATE; +SAVEPOINT foo; +-- and now actually insert in subxact, xid is expected to be known +INSERT INTO xact_test VALUES ('after-assignment'); +COMMIT; +-- and now show those changes +SELECT data FROM pg_logical_slot_get_changes('regression_slot', NULL, NULL, 'include-xids', '0', 'skip-empty-xacts', '1'); + +DROP TABLE xact_test; + +SELECT pg_drop_replication_slot('regression_slot'); diff --git a/contrib/test_decoding/test_decoding.c b/contrib/test_decoding/test_decoding.c index 5ce052b5c6140..1028eaa4c3283 100644 --- a/contrib/test_decoding/test_decoding.c +++ b/contrib/test_decoding/test_decoding.c @@ -42,6 +42,8 @@ typedef struct MemoryContext context; bool include_xids; bool include_timestamp; + bool skip_empty_xacts; + bool xact_wrote_changes; } TestDecodingData; static void pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, @@ -49,6 +51,10 @@ static void pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions * static void pg_decode_shutdown(LogicalDecodingContext *ctx); static void 
pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn); +static void pg_output_begin(LogicalDecodingContext *ctx, + TestDecodingData *data, + ReorderBufferTXN *txn, + bool last_write); static void pg_decode_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, XLogRecPtr commit_lsn); static void pg_decode_change(LogicalDecodingContext *ctx, @@ -83,7 +89,7 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, ListCell *option; TestDecodingData *data; - data = palloc(sizeof(TestDecodingData)); + data = palloc0(sizeof(TestDecodingData)); data->context = AllocSetContextCreate(ctx->context, "text conversion context", ALLOCSET_DEFAULT_MINSIZE, @@ -91,6 +97,7 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, ALLOCSET_DEFAULT_MAXSIZE); data->include_xids = true; data->include_timestamp = false; + data->skip_empty_xacts = false; ctx->output_plugin_private = data; @@ -138,6 +145,17 @@ pg_decode_startup(LogicalDecodingContext *ctx, OutputPluginOptions *opt, if (force_binary) opt->output_type = OUTPUT_PLUGIN_BINARY_OUTPUT; } + else if (strcmp(elem->defname, "skip-empty-xacts") == 0) + { + + if (elem->arg == NULL) + data->skip_empty_xacts = true; + else if (!parse_bool(strVal(elem->arg), &data->skip_empty_xacts)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("could not parse value \"%s\" for parameter \"%s\"", + strVal(elem->arg), elem->defname))); + } else { ereport(ERROR, @@ -165,12 +183,22 @@ pg_decode_begin_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn) { TestDecodingData *data = ctx->output_plugin_private; - OutputPluginPrepareWrite(ctx, true); + data->xact_wrote_changes = false; + if (data->skip_empty_xacts) + return; + + pg_output_begin(ctx, data, txn, true); +} + +static void +pg_output_begin(LogicalDecodingContext *ctx, TestDecodingData *data, ReorderBufferTXN *txn, bool last_write) +{ + OutputPluginPrepareWrite(ctx, last_write); if (data->include_xids) appendStringInfo(ctx->out, "BEGIN %u", txn->xid); else appendStringInfoString(ctx->out, "BEGIN"); - OutputPluginWrite(ctx, true); + OutputPluginWrite(ctx, last_write); } /* COMMIT callback */ @@ -180,6 +208,9 @@ pg_decode_commit_txn(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, { TestDecodingData *data = ctx->output_plugin_private; + if (data->skip_empty_xacts && !data->xact_wrote_changes) + return; + OutputPluginPrepareWrite(ctx, true); if (data->include_xids) appendStringInfo(ctx->out, "COMMIT %u", txn->xid); @@ -287,7 +318,7 @@ tuple_to_stringinfo(StringInfo s, TupleDesc tupdesc, HeapTuple tuple, bool skip_ typid = attr->atttypid; /* get Datum from tuple */ - origval = fastgetattr(tuple, natt + 1, tupdesc, &isnull); + origval = heap_getattr(tuple, natt + 1, tupdesc, &isnull); if (isnull && skip_nulls) continue; @@ -339,6 +370,14 @@ pg_decode_change(LogicalDecodingContext *ctx, ReorderBufferTXN *txn, MemoryContext old; data = ctx->output_plugin_private; + + /* output BEGIN if we haven't yet */ + if (data->skip_empty_xacts && !data->xact_wrote_changes) + { + pg_output_begin(ctx, data, txn, false); + } + data->xact_wrote_changes = true; + class_form = RelationGetForm(relation); tupdesc = RelationGetDescr(relation); diff --git a/contrib/test_parser/test_parser--unpackaged--1.0.sql b/contrib/test_parser/test_parser--unpackaged--1.0.sql index 34120f2346bc7..62458bd2c6878 100644 --- a/contrib/test_parser/test_parser--unpackaged--1.0.sql +++ b/contrib/test_parser/test_parser--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* 
contrib/test_parser/test_parser--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION test_parser" to load this file. \quit +\echo Use "CREATE EXTENSION test_parser FROM unpackaged" to load this file. \quit ALTER EXTENSION test_parser ADD function testprs_start(internal,integer); ALTER EXTENSION test_parser ADD function testprs_getlexeme(internal,internal,internal); diff --git a/contrib/tsearch2/tsearch2--unpackaged--1.0.sql b/contrib/tsearch2/tsearch2--unpackaged--1.0.sql index af970a4862ff0..e123297132501 100644 --- a/contrib/tsearch2/tsearch2--unpackaged--1.0.sql +++ b/contrib/tsearch2/tsearch2--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/tsearch2/tsearch2--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION tsearch2" to load this file. \quit +\echo Use "CREATE EXTENSION tsearch2 FROM unpackaged" to load this file. \quit ALTER EXTENSION tsearch2 ADD type @extschema@.tsvector; ALTER EXTENSION tsearch2 ADD type @extschema@.tsquery; diff --git a/contrib/unaccent/unaccent--unpackaged--1.0.sql b/contrib/unaccent/unaccent--unpackaged--1.0.sql index abd06983ac4ca..f3fb5d87600a5 100644 --- a/contrib/unaccent/unaccent--unpackaged--1.0.sql +++ b/contrib/unaccent/unaccent--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/unaccent/unaccent--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION unaccent" to load this file. \quit +\echo Use "CREATE EXTENSION unaccent FROM unpackaged" to load this file. \quit ALTER EXTENSION unaccent ADD function unaccent(regdictionary,text); ALTER EXTENSION unaccent ADD function unaccent(text); diff --git a/contrib/unaccent/unaccent.c b/contrib/unaccent/unaccent.c index a337df61af4f5..f2dc76bf16906 100644 --- a/contrib/unaccent/unaccent.c +++ b/contrib/unaccent/unaccent.c @@ -15,6 +15,7 @@ #include "catalog/namespace.h" #include "commands/defrem.h" +#include "lib/stringinfo.h" #include "tsearch/ts_cache.h" #include "tsearch/ts_locale.h" #include "tsearch/ts_public.h" @@ -263,46 +264,48 @@ unaccent_lexize(PG_FUNCTION_ARGS) TrieChar *rootTrie = (TrieChar *) PG_GETARG_POINTER(0); char *srcchar = (char *) PG_GETARG_POINTER(1); int32 len = PG_GETARG_INT32(2); - char *srcstart, - *trgchar = NULL; - int charlen; - TSLexeme *res = NULL; - TrieChar *node; + char *srcstart = srcchar; + TSLexeme *res; + StringInfoData buf; + + /* we allocate storage for the buffer only if needed */ + buf.data = NULL; - srcstart = srcchar; while (srcchar - srcstart < len) { + TrieChar *node; + int charlen; + charlen = pg_mblen(srcchar); node = findReplaceTo(rootTrie, (unsigned char *) srcchar, charlen); if (node && node->replaceTo) { - if (!res) + if (buf.data == NULL) { - /* allocate res only if it's needed */ - res = palloc0(sizeof(TSLexeme) * 2); - res->lexeme = trgchar = palloc(len * pg_database_encoding_max_length() + 1 /* \0 */ ); - res->flags = TSL_FILTER; + /* initialize buffer */ + initStringInfo(&buf); + /* insert any data we already skipped over */ if (srcchar != srcstart) - { - memcpy(trgchar, srcstart, srcchar - srcstart); - trgchar += (srcchar - srcstart); - } + appendBinaryStringInfo(&buf, srcstart, srcchar - srcstart); } - memcpy(trgchar, node->replaceTo, node->replacelen); - trgchar += node->replacelen; - } - else if (res) - { - memcpy(trgchar, srcchar, charlen); - trgchar += charlen; + appendBinaryStringInfo(&buf, node->replaceTo, node->replacelen); } + else 
if (buf.data != NULL) + appendBinaryStringInfo(&buf, srcchar, charlen); srcchar += charlen; } - if (res) - *trgchar = '\0'; + /* return a result only if we made at least one substitution */ + if (buf.data != NULL) + { + res = (TSLexeme *) palloc0(sizeof(TSLexeme) * 2); + res->lexeme = buf.data; + res->flags = TSL_FILTER; + } + else + res = NULL; PG_RETURN_POINTER(res); } diff --git a/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql b/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql index 5776b6f93001e..444c5c7ceffc7 100644 --- a/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql +++ b/contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/uuid-ossp/uuid-ossp--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use '''CREATE EXTENSION "uuid-ossp"''' to load this file. \quit +\echo Use '''CREATE EXTENSION "uuid-ossp" FROM unpackaged''' to load this file. \quit ALTER EXTENSION "uuid-ossp" ADD function uuid_nil(); ALTER EXTENSION "uuid-ossp" ADD function uuid_ns_dns(); diff --git a/contrib/xml2/xml2--unpackaged--1.0.sql b/contrib/xml2/xml2--unpackaged--1.0.sql index b02dabffc2a03..8badef3079bbf 100644 --- a/contrib/xml2/xml2--unpackaged--1.0.sql +++ b/contrib/xml2/xml2--unpackaged--1.0.sql @@ -1,7 +1,7 @@ /* contrib/xml2/xml2--unpackaged--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION -\echo Use "CREATE EXTENSION xml2" to load this file. \quit +\echo Use "CREATE EXTENSION xml2 FROM unpackaged" to load this file. \quit ALTER EXTENSION xml2 ADD function xslt_process(text,text); ALTER EXTENSION xml2 ADD function xslt_process(text,text,text); diff --git a/contrib/xml2/xslt_proc.c b/contrib/xml2/xslt_proc.c index 9f1378733228f..343924e99177e 100644 --- a/contrib/xml2/xslt_proc.c +++ b/contrib/xml2/xslt_proc.c @@ -146,16 +146,16 @@ xslt_process(PG_FUNCTION_ARGS) } PG_CATCH(); { - if (stylesheet != NULL) - xsltFreeStylesheet(stylesheet); if (restree != NULL) xmlFreeDoc(restree); - if (doctree != NULL) - xmlFreeDoc(doctree); - if (xslt_sec_prefs != NULL) - xsltFreeSecurityPrefs(xslt_sec_prefs); if (xslt_ctxt != NULL) xsltFreeTransformContext(xslt_ctxt); + if (xslt_sec_prefs != NULL) + xsltFreeSecurityPrefs(xslt_sec_prefs); + if (stylesheet != NULL) + xsltFreeStylesheet(stylesheet); + if (doctree != NULL) + xmlFreeDoc(doctree); xsltCleanupGlobals(); pg_xml_done(xmlerrcxt, true); @@ -164,11 +164,11 @@ xslt_process(PG_FUNCTION_ARGS) } PG_END_TRY(); - xsltFreeStylesheet(stylesheet); xmlFreeDoc(restree); - xmlFreeDoc(doctree); - xsltFreeSecurityPrefs(xslt_sec_prefs); xsltFreeTransformContext(xslt_ctxt); + xsltFreeSecurityPrefs(xslt_sec_prefs); + xsltFreeStylesheet(stylesheet); + xmlFreeDoc(doctree); xsltCleanupGlobals(); pg_xml_done(xmlerrcxt, false); diff --git a/doc/bug.template b/doc/bug.template index 4e29e6e3295d9..73c931787a3e7 100644 --- a/doc/bug.template +++ b/doc/bug.template @@ -27,7 +27,7 @@ System Configuration: Operating System (example: Linux 2.4.18) : - PostgreSQL version (example: PostgreSQL 9.4beta1): PostgreSQL 9.4beta1 + PostgreSQL version (example: PostgreSQL 9.4.6): PostgreSQL 9.4.6 Compiler used (example: gcc 3.3.5) : diff --git a/doc/src/sgml/array.sgml b/doc/src/sgml/array.sgml index 9ea10682a5670..5236b882aebc7 100644 --- a/doc/src/sgml/array.sgml +++ b/doc/src/sgml/array.sgml @@ -346,7 +346,7 @@ SELECT array_length(schedule, 1) FROM sal_emp WHERE name = 'Carol'; SELECT cardinality(schedule) FROM sal_emp WHERE name = 'Carol'; - cardinality + cardinality ------------- 4 (1 
row) @@ -494,11 +494,7 @@ SELECT array_dims(ARRAY[1,2] || ARRAY[[3,4],[5,6]]); array_prepend, array_append, or array_cat. The first two only support one-dimensional arrays, but array_cat supports multidimensional arrays. - - Note that the concatenation operator discussed above is preferred over - direct use of these functions. In fact, these functions primarily exist for use - in implementing the concatenation operator. However, they might be directly - useful in the creation of user-defined aggregates. Some examples: + Some examples: SELECT array_prepend(1, ARRAY[2,3]); @@ -531,6 +527,45 @@ SELECT array_cat(ARRAY[5,6], ARRAY[[1,2],[3,4]]); {{5,6},{1,2},{3,4}} + + + In simple cases, the concatenation operator discussed above is preferred + over direct use of these functions. However, because the concatenation + operator is overloaded to serve all three cases, there are situations where + use of one of the functions is helpful to avoid ambiguity. For example + consider: + + +SELECT ARRAY[1, 2] || '{3, 4}'; -- the untyped literal is taken as an array + ?column? +----------- + {1,2,3,4} + +SELECT ARRAY[1, 2] || '7'; -- so is this one +ERROR: malformed array literal: "7" + +SELECT ARRAY[1, 2] || NULL; -- so is an undecorated NULL + ?column? +---------- + {1,2} +(1 row) + +SELECT array_append(ARRAY[1, 2], NULL); -- this might have been meant + array_append +-------------- + {1,2,NULL} + + + In the examples above, the parser sees an integer array on one side of the + concatenation operator, and a constant of undetermined type on the other. + The heuristic it uses to resolve the constant's type is to assume it's of + the same type as the operator's other input — in this case, + integer array. So the concatenation operator is presumed to + represent array_cat, not array_append. When + that's the wrong choice, it could be fixed by casting the constant to the + array's element type; but explicit use of array_append might + be a preferable solution. + diff --git a/doc/src/sgml/auto-explain.sgml b/doc/src/sgml/auto-explain.sgml index 334996b0e668b..d527208271f00 100644 --- a/doc/src/sgml/auto-explain.sgml +++ b/doc/src/sgml/auto-explain.sgml @@ -81,6 +81,8 @@ LOAD 'auto_explain'; When this parameter is on, per-plan-node timing occurs for all statements executed, whether or not they run long enough to actually get logged. This can have an extremely negative impact on performance. + Turning off auto_explain.log_timing ameliorates the + performance cost, at the price of obtaining less information. @@ -88,16 +90,19 @@ LOAD 'auto_explain'; - auto_explain.log_verbose (boolean) + auto_explain.log_buffers (boolean) - auto_explain.log_verbose configuration parameter + auto_explain.log_buffers configuration parameter - auto_explain.log_verbose causes EXPLAIN VERBOSE - output, rather than just EXPLAIN output, to be printed - when an execution plan is logged. This parameter is off by default. + auto_explain.log_buffers controls whether buffer + usage statistics are printed when an execution plan is logged; it's + equivalent to the BUFFERS option of EXPLAIN. + This parameter has no effect + unless auto_explain.log_analyze is enabled. + This parameter is off by default. Only superusers can change this setting. 
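The reorganized auto_explain entries above spell out that log_buffers and log_timing only take effect when log_analyze is enabled, and that superusers may change them per session. A minimal psql sketch of that interaction, assuming a session where the contrib module is available; the sample query and chosen values are illustrative, only the parameter names come from the documentation:

    LOAD 'auto_explain';
    SET auto_explain.log_min_duration = 0;   -- log the plan of every completed statement
    SET auto_explain.log_analyze = on;       -- prerequisite for the two options below
    SET auto_explain.log_buffers = on;       -- like the BUFFERS option of EXPLAIN
    SET auto_explain.log_timing = off;       -- keep row counts, skip per-node clock reads
    SELECT count(*) FROM pg_class;           -- this statement's plan now goes to the server log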
@@ -105,19 +110,24 @@ LOAD 'auto_explain'; - auto_explain.log_buffers (boolean) + auto_explain.log_timing (boolean) - auto_explain.log_buffers configuration parameter + auto_explain.log_timing configuration parameter - auto_explain.log_buffers causes EXPLAIN - (ANALYZE, BUFFERS) output, rather than just EXPLAIN - output, to be printed when an execution plan is logged. This parameter is - off by default. Only superusers can change this setting. This - parameter has no effect unless auto_explain.log_analyze - parameter is set. + auto_explain.log_timing controls whether per-node + timing information is printed when an execution plan is logged; it's + equivalent to the TIMING option of EXPLAIN. + The overhead of repeatedly reading the system clock can slow down + queries significantly on some systems, so it may be useful to set this + parameter to off when only actual row counts, and not exact times, are + needed. + This parameter has no effect + unless auto_explain.log_analyze is enabled. + This parameter is on by default. + Only superusers can change this setting. @@ -133,26 +143,27 @@ LOAD 'auto_explain'; auto_explain.log_triggers causes trigger execution statistics to be included when an execution plan is logged. - This parameter is off by default. Only superusers can change this - setting. This parameter has no effect unless - auto_explain.log_analyze parameter is set. + This parameter has no effect + unless auto_explain.log_analyze is enabled. + This parameter is off by default. + Only superusers can change this setting. - auto_explain.log_format (enum) + auto_explain.log_verbose (boolean) - auto_explain.log_format configuration parameter + auto_explain.log_verbose configuration parameter - auto_explain.log_format selects the - EXPLAIN output format to be used. - The allowed values are text, xml, - json, and yaml. The default is text. + auto_explain.log_verbose controls whether verbose + details are printed when an execution plan is logged; it's + equivalent to the VERBOSE option of EXPLAIN. + This parameter is off by default. Only superusers can change this setting. @@ -160,25 +171,22 @@ LOAD 'auto_explain'; - auto_explain.log_timing (boolean) + auto_explain.log_format (enum) - auto_explain.log_timing configuration parameter + auto_explain.log_format configuration parameter - auto_explain.log_timing causes EXPLAIN - (ANALYZE, TIMING off) output, rather than just EXPLAIN (ANALYZE) - output. The overhead of repeatedly reading the system clock can slow down the - query significantly on some systems, so it may be useful to set this - parameter to off when only actual row counts, and not exact times, are needed. - This parameter is only effective when auto_explain.log_analyze - is also enabled. This parameter is on by default. + auto_explain.log_format selects the + EXPLAIN output format to be used. + The allowed values are text, xml, + json, and yaml. The default is text. Only superusers can change this setting. - + auto_explain.log_nested_statements (boolean) @@ -198,7 +206,9 @@ LOAD 'auto_explain'; - These parameters must be set in postgresql.conf. + In ordinary usage, these parameters are set + in postgresql.conf, although superusers can alter them + on-the-fly within their own sessions. 
Typical usage might be: @@ -216,6 +226,7 @@ auto_explain.log_min_duration = '3s' postgres=# LOAD 'auto_explain'; postgres=# SET auto_explain.log_min_duration = 0; +postgres=# SET auto_explain.log_analyze = true; postgres=# SELECT count(*) FROM pg_class, pg_index WHERE oid = indrelid AND indisunique; diff --git a/doc/src/sgml/backup.sgml b/doc/src/sgml/backup.sgml index 06f064e1a6f7d..07ca0dc62d6c0 100644 --- a/doc/src/sgml/backup.sgml +++ b/doc/src/sgml/backup.sgml @@ -28,7 +28,7 @@ <acronym>SQL</> Dump - The idea behind this dump method is to generate a text file with SQL + The idea behind this dump method is to generate a file with SQL commands that, when fed back to the server, will recreate the database in the same state as it was at the time of the dump. PostgreSQL provides the utility program @@ -39,6 +39,9 @@ pg_dump dbname > As you see, pg_dump writes its result to the standard output. We will see below how this can be useful. + While the above command creates a text file, pg_dump + can create files in other formats that allow for parallism and more + fine-grained control of object restoration. @@ -98,20 +101,11 @@ pg_dump dbname > ALTER TABLE.) - - - If your database schema relies on OIDs (for instance, as foreign - keys) you must instruct pg_dump to dump the OIDs - as well. To do this, use the command-line - option. - - - Restoring the Dump - The text files created by pg_dump are intended to + Text files created by pg_dump are intended to be read in by the psql program. The general command form to restore a dump is @@ -127,6 +121,8 @@ psql dbname < pg_dump for specifying the database server to connect to and the user name to use. See the reference page for more information. + Non-text file dumps are restored using the utility. @@ -225,7 +221,14 @@ psql -f infile postgres roles, tablespaces, and empty databases, then invoking pg_dump for each database. This means that while each database will be internally consistent, the snapshots of - different databases might not be exactly in-sync. + different databases are not sychronized. + + + + Cluster-wide data can be dumped alone using the + pg_dumpall diff --git a/doc/src/sgml/bgworker.sgml b/doc/src/sgml/bgworker.sgml index d3c8ddb382fbb..8e218ac0406a7 100644 --- a/doc/src/sgml/bgworker.sgml +++ b/doc/src/sgml/bgworker.sgml @@ -69,7 +69,7 @@ typedef struct BackgroundWorker - bgw_flags is a bitwise-or'd bitmask indicating the + bgw_flags is a bitwise-or'd bit mask indicating the capabilities that the module wants. Possible values are BGWORKER_SHMEM_ACCESS (requesting shared memory access) and BGWORKER_BACKEND_DATABASE_CONNECTION (requesting the @@ -114,14 +114,14 @@ typedef struct BackgroundWorker passed at registration time. bgw_main may be NULL; in that case, bgw_library_name and bgw_function_name will be used to determine - the entrypoint. This is useful for background workers launched after + the entry point. This is useful for background workers launched after postmaster startup, where the postmaster does not have the requisite library loaded. bgw_library_name is the name of a library in - which the initial entrypoint for the background worker should be sought. + which the initial entry point for the background worker should be sought. It is ignored unless bgw_main is NULL. 
But if bgw_main is NULL, then the named library will be dynamically loaded by the worker process and @@ -131,7 +131,7 @@ typedef struct BackgroundWorker bgw_function_name is the name of a function in - a dynamically loaded library which should be used as the initial entrypoint + a dynamically loaded library which should be used as the initial entry point for a new background worker. It is ignored unless bgw_main is NULL. @@ -192,7 +192,7 @@ typedef struct BackgroundWorker opaque handle that can subsequently be passed to GetBackgroundWorkerPid(BackgroundWorkerHandle *, pid_t *) or TerminateBackgroundWorker(BackgroundWorkerHandle *). - GetBackgroundWorker can be used to poll the status of the + GetBackgroundWorkerPid can be used to poll the status of the worker: a return value of BGWH_NOT_YET_STARTED indicates that the worker has not yet been started by the postmaster; BGWH_STOPPED indicates that it has been started but is diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml index b4a06e48348f4..7aeab6c34436a 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -408,13 +408,13 @@ aggfinalextra bool - True to pass extra dummy arguments to aggfinalfn + True to pass extra dummy arguments to aggfinalfn aggmfinalextra bool - True to pass extra dummy arguments to aggmfinalfn + True to pass extra dummy arguments to aggmfinalfn aggsortop @@ -838,7 +838,7 @@ amopsortfamily oid pg_opfamily.oid - The btree operator family this entry sorts according to, if an + The B-tree operator family this entry sorts according to, if an ordering operator; zero if a search operator @@ -853,7 +853,7 @@ indexed_column operator constant. - Obviously, such an operator must return boolean, and its left-hand input + Obviously, such an operator must return boolean, and its left-hand input type must match the index's column data type. @@ -868,13 +868,13 @@ its left-hand input type must match the index's column data type. The exact semantics of the ORDER BY are specified by the amopsortfamily column, which must reference - a btree operator family for the operator's result type. + a B-tree operator family for the operator's result type. At present, it's assumed that the sort order for an ordering operator - is the default for the referenced opfamily, i.e., ASC NULLS + is the default for the referenced operator family, i.e., ASC NULLS LAST. This might someday be relaxed by adding additional columns to specify sort options explicitly. @@ -974,7 +974,7 @@ these match the input data type(s) of the support procedure itself, for others not. There is a notion of default support procedures for an index, which are those with amproclefttype and - amprocrighttype both equal to the index opclass's + amprocrighttype both equal to the index operator class's opcintype. @@ -1959,7 +1959,7 @@ d = default (primary key, if any), n = nothing, f = all columns - i = index with indisreplident set, or default + i = index with indisreplident set, or default @@ -1981,11 +1981,11 @@ xid - All multitransaction IDs before this one have been replaced by a + All multixact IDs before this one have been replaced by a transaction ID in this table. This is used to track - whether the table needs to be vacuumed in order to prevent multitransaction ID - ID wraparound or to allow pg_clog to be shrunk. Zero - (InvalidTransactionId) if the relation is not a table. + whether the table needs to be vacuumed in order to prevent multixact ID + wraparound or to allow pg_multixact to be shrunk. 
Zero + (InvalidMultiXactId) if the relation is not a table. @@ -2649,10 +2649,10 @@ xid - All multitransaction IDs before this one have been replaced with a + All multixact IDs before this one have been replaced with a transaction ID in this database. This is used to track whether the database needs to be vacuumed in order to prevent - transaction ID wraparound or to allow pg_clog to be shrunk. + multixact ID wraparound or to allow pg_multixact to be shrunk. It is the minimum of the per-table pg_class.relminmxid values. @@ -5261,7 +5261,7 @@ plugin name - The basename of the shared object containing the output plugin this logical slot is using, or null for physical slots. + The base name of the shared object containing the output plugin this logical slot is using, or null for physical slots. @@ -5275,7 +5275,7 @@ datoid oid pg_database.oid - The oid of the database this slot is associated with, or + The OID of the database this slot is associated with, or null. Only logical slots have an associated database. diff --git a/doc/src/sgml/charset.sgml b/doc/src/sgml/charset.sgml index 1bbd2f4415bf6..f8c7ac3b1694c 100644 --- a/doc/src/sgml/charset.sgml +++ b/doc/src/sgml/charset.sgml @@ -694,7 +694,7 @@ SELECT a COLLATE "C" < b COLLATE "POSIX" FROM test1; National Standard Chinese No - 1-2 + 1-4 diff --git a/doc/src/sgml/citext.sgml b/doc/src/sgml/citext.sgml index 0c6855fea62b5..7fdf30252a587 100644 --- a/doc/src/sgml/citext.sgml +++ b/doc/src/sgml/citext.sgml @@ -124,6 +124,11 @@ SELECT * FROM users WHERE nick = 'Larry'; + + + regexp_matches() + + regexp_replace() diff --git a/doc/src/sgml/client-auth.sgml b/doc/src/sgml/client-auth.sgml index 0064302a8bb3a..e7c80b912b777 100644 --- a/doc/src/sgml/client-auth.sgml +++ b/doc/src/sgml/client-auth.sgml @@ -229,14 +229,15 @@ hostnossl database user address - Specifies the client machine addresses that this record + Specifies the client machine address(es) that this record matches. This field can contain either a host name, an IP address range, or one of the special key words mentioned below. - An IP address is specified in standard dotted decimal - notation with a CIDR mask length. The mask + An IP address range is specified using standard numeric notation + for the range's starting address, then a slash (/) + and a CIDR mask length. The mask length indicates the number of high-order bits of the client IP address that must match. Bits to the right of this should be zero in the given IP address. @@ -245,25 +246,27 @@ hostnossl database user - Typical examples of an IP address range specified this way are + Typical examples of an IPv4 address range specified this way are 172.20.143.89/32 for a single host, or 172.20.143.0/24 for a small network, or 10.6.0.0/16 for a larger one. + An IPv6 address range might look like ::1/128 + for a single host (in this case the IPv6 loopback address) or + fe80::7a31:c1ff:0000:0000/96 for a small + network. 0.0.0.0/0 represents all - IPv4 addresses, and ::/0 represents + IPv4 addresses, and ::0/0 represents all IPv6 addresses. - To specify a single host, use a CIDR mask of 32 for IPv4 or + To specify a single host, use a mask length of 32 for IPv4 or 128 for IPv6. In a network address, do not omit trailing zeroes. - An IP address given in IPv4 format will match IPv6 connections that - have the corresponding address, for example 127.0.0.1 - will match the IPv6 address ::ffff:127.0.0.1. An entry - given in IPv6 format will match only IPv6 connections, even if the - represented address is in the IPv4-in-IPv6 range. 
Note that entries - in IPv6 format will be rejected if the system's C library does not have - support for IPv6 addresses. + An entry given in IPv4 format will match only IPv4 connections, + and an entry given in IPv6 format will match only IPv6 connections, + even if the represented address is in the IPv4-in-IPv6 range. + Note that entries in IPv6 format will be rejected if the system's + C library does not have support for IPv6 addresses. @@ -275,7 +278,7 @@ hostnossl database user If a host name is specified (anything that is not an IP address - or a special key word is treated as a host name), + range or a special key word is treated as a host name), that name is compared with the result of a reverse name resolution of the client's IP address (e.g., reverse DNS lookup, if DNS is used). Host name comparisons are case @@ -354,8 +357,9 @@ hostnossl database user IP-mask - These fields can be used as an alternative to the - CIDR-address notation. Instead of + These two fields can be used as an alternative to the + IP-address/mask-length + notation. Instead of specifying the mask length, the actual mask is specified in a separate column. For example, 255.0.0.0 represents an IPv4 CIDR mask length of 8, and 255.255.255.255 represents a @@ -996,7 +1000,12 @@ omicron bryanh guest1 If set to 1, the realm name from the authenticated user principal is included in the system user name that's passed through user name mapping (). This is - useful for handling users from multiple realms. + the recommended configuration as, otherwise, it is impossible to + differentiate users with the same username who are from different + realms. The default for this parameter is 0 (meaning to not include + the realm in the system user name) but may change to 1 in a future + version of PostgreSQL. Users can set it + explicitly to avoid any issues when upgrading. @@ -1006,12 +1015,16 @@ omicron bryanh guest1 Allows for mapping between system and database user names. See - for details. For a Kerberos - principal username/hostbased@EXAMPLE.COM, the - user name used for mapping is username/hostbased - if include_realm is disabled, and - username/hostbased@EXAMPLE.COM if - include_realm is enabled. + for details. For a GSSAPI/Kerberos + principal, such as username@EXAMPLE.COM (or, less + commonly, username/hostbased@EXAMPLE.COM), the + default user name used for mapping is + username (or username/hostbased, + respectively), unless include_realm has been set to + 1 (as recommended, see above), in which case + username@EXAMPLE.COM (or + username/hostbased@EXAMPLE.COM) + is what is seen as the system username when mapping. @@ -1069,7 +1082,12 @@ omicron bryanh guest1 If set to 1, the realm name from the authenticated user principal is included in the system user name that's passed through user name mapping (). This is - useful for handling users from multiple realms. + the recommended configuration as, otherwise, it is impossible to + differentiate users with the same username who are from different + realms. The default for this parameter is 0 (meaning to not include + the realm in the system user name) but may change to 1 in a future + version of PostgreSQL. Users can set it + explicitly to avoid any issues when upgrading. @@ -1079,7 +1097,16 @@ omicron bryanh guest1 Allows for mapping between system and database user names. See - for details. + for details. 
For a SSPI/Kerberos + principal, such as username@EXAMPLE.COM (or, less + commonly, username/hostbased@EXAMPLE.COM), the + default user name used for mapping is + username (or username/hostbased, + respectively), unless include_realm has been set to + 1 (as recommended, see above), in which case + username@EXAMPLE.COM (or + username/hostbased@EXAMPLE.COM) + is what is seen as the system username when mapping. @@ -1215,7 +1242,7 @@ omicron bryanh guest1 socket parameter, or similar mechanisms. Currently that includes Linux, most flavors of BSD including - Mac OS X, + OS X, and Solaris. @@ -1434,7 +1461,7 @@ host ... ldap ldapserver=ldap.example.net ldapbasedn="dc=example, dc=net" ldapse Here is the same search+bind configuration written as a URL: -host ... ldap lapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" +host ... ldap ldapurl="ldap://ldap.example.net/dc=example,dc=net?uid?sub" Some other software that supports authentication against LDAP uses the same URL format, so it will be easier to share the configuration. diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 697cf99de54e1..871b04a94b0db 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -10,8 +10,8 @@ There are many configuration parameters that affect the behavior of - the database system. In the first section of this chapter, we - describe how to set configuration parameters. The subsequent sections + the database system. In the first section of this chapter we + describe how to interact with configuration parameters. The subsequent sections discuss each parameter in detail. @@ -23,47 +23,104 @@ All parameter names are case-insensitive. Every parameter takes a - value of one of five types: Boolean, integer, floating point, - string or enum. Boolean values can be written as on, - off, true, - false, yes, - no, 1, 0 - (all case-insensitive) or any unambiguous prefix of these. + value of one of five types: boolean, string, integer, floating point, + or enumerated (enum). The type determines the syntax for setting the + parameter: - - Some settings specify a memory or time value. Each of these has an - implicit unit, which is either kilobytes, blocks (typically eight - kilobytes), milliseconds, seconds, or minutes. Default units can be - found by referencing pg_settings.unit. - For convenience, - a different unit can also be specified explicitly. Valid memory units - are kB (kilobytes), MB - (megabytes), GB (gigabytes), and TB (terabytes); valid time units - are ms (milliseconds), s - (seconds), min (minutes), h - (hours), and d (days). Note that the multiplier - for memory units is 1024, not 1000. - + + + + Boolean: + Values can be written as + on, + off, + true, + false, + yes, + no, + 1, + 0 + (all case-insensitive) or any unambiguous prefix of one of these. + + - - Parameters of type enum are specified in the same way as string - parameters, but are restricted to a limited set of values. The allowed - values can be found - from pg_settings.enumvals. - Enum parameter values are case-insensitive. - + + + String: + In general, enclose the value in single quotes, doubling any single + quotes within the value. Quotes can usually be omitted if the value + is a simple number or identifier, however. + + + + + + Numeric (integer and floating point): + A decimal point is permitted only for floating-point parameters. + Do not use thousands separators. Quotes are not required. 
+ + + + + + Numeric with Unit: + Some numeric parameters have an implicit unit, because they describe + quantities of memory or time. The unit might be kilobytes, blocks + (typically eight kilobytes), milliseconds, seconds, or minutes. + An unadorned numeric value for one of these settings will use the + setting's default unit, which can be learned from + pg_settings.unit. + For convenience, settings can be given with a unit specified explicitly, + for example '120 ms' for a time value, and they will be + converted to whatever the parameter's actual unit is. Note that the + value must be written as a string (with quotes) to use this feature. + The unit name is case-sensitive, and there can be whitespace between + the numeric value and the unit. + + + + + Valid memory units are kB (kilobytes), + MB (megabytes), GB + (gigabytes), and TB (terabytes). + The multiplier for memory units is 1024, not 1000. + + + + + + Valid time units are ms (milliseconds), + s (seconds), min (minutes), + h (hours), and d (days). + + + + + + + + + Enumerated: + Enumerated-type parameters are written in the same way as string + parameters, but are restricted to have one of a limited set of + values. The values allowable for such a parameter can be found from + pg_settings.enumvals. + Enum parameter values are case-insensitive. + + + - Setting Parameters via the Configuration File + Parameter Interaction via the Configuration File - One way to set these parameters is to edit the file + The most fundamental way to set these parameters is to edit the file postgresql.confpostgresql.conf, - which is normally kept in the data directory. (A default copy is - installed there when the database cluster directory is - initialized.) An example of what this file might look like is: + which is normally kept in the data directory. A default copy is + installed when the database cluster directory is initialized. + An example of what this file might look like is: # This is a comment log_connections = yes @@ -72,11 +129,20 @@ search_path = '"$user", public' shared_buffers = 128MB One parameter is specified per line. The equal sign between name and - value is optional. Whitespace is insignificant and blank lines are - ignored. Hash marks (#) designate the remainder of the - line as a comment. Parameter values that are not simple identifiers or - numbers must be single-quoted. To embed a single quote in a parameter - value, write either two quotes (preferred) or backslash-quote. + value is optional. Whitespace is insignificant (except within a quoted + parameter value) and blank lines are + ignored. Hash marks (#) designate the remainder + of the line as a comment. Parameter values that are not simple + identifiers or numbers must be single-quoted. To embed a single + quote in a parameter value, write either two quotes (preferred) + or backslash-quote. + + + + Parameters set in this way provide default values for the cluster. + The settings seen by active sessions will be these values unless they + are overridden. The following sections describe ways in which the + administrator or user can override these defaults. @@ -84,137 +150,209 @@ shared_buffers = 128MB SIGHUP The configuration file is reread whenever the main server process - receives a SIGHUP signal; this is most easily done by - running pg_ctl reload from the command-line or by calling - the SQL function pg_reload_conf(). The main - server process - also propagates this signal to all currently running server - processes so that existing sessions also get the new - value. 
Alternatively, you can send the signal to a single server - process directly. Some parameters can only be set at server start; - any changes to their entries in the configuration file will be ignored - until the server is restarted. Invalid parameter settings in the - configuration file are likewise ignored (but logged) during - SIGHUP processing. + receives a SIGHUP signal; this signal is most easily + sent by running pg_ctl reload from the command line or by + calling the SQL function pg_reload_conf(). The main + server process also propagates this signal to all currently running + server processes, so that existing sessions also adopt the new values + (this will happen after they complete any currently-executing client + command). Alternatively, you can + send the signal to a single server process directly. Some parameters + can only be set at server start; any changes to their entries in the + configuration file will be ignored until the server is restarted. + Invalid parameter settings in the configuration file are likewise + ignored (but logged) during SIGHUP processing. - - - - Other Ways to Set Parameters - A second way to set these configuration parameters is to give them - as a command-line option to the postgres command, - such as: - -postgres -c log_connections=yes -c log_destination='syslog' - - Command-line options override any conflicting settings in - postgresql.conf. Note that this means you won't - be able to change the value on-the-fly by editing - postgresql.conf, so while the command-line - method might be convenient, it can cost you flexibility later. + In addition to postgresql.conf, + a PostgreSQL data directory contains a file + postgresql.auto.confpostgresql.auto.conf, + which has the same format as postgresql.conf but should + never be edited manually. This file holds settings provided through + the command. This file is automatically + read whenever postgresql.conf is, and its settings take + effect in the same way. Settings in postgresql.auto.conf + override those in postgresql.conf. + - - Occasionally it is useful to give a command line option to - one particular session only. The environment variable - PGOPTIONS can be used for this purpose on the - client side: - -env PGOPTIONS='-c geqo=off' psql - - (This works for any libpq-based client application, not - just psql.) Note that this won't work for - parameters that are fixed when the server is started or that must be - specified in postgresql.conf. - + + Parameter Interaction via SQL - - Furthermore, it is possible to assign a set of parameter settings to - a user or a database. Whenever a session is started, the default - settings for the user and database involved are loaded. The - commands - and , - respectively, are used to configure these settings. Per-database - settings override anything received from the - postgres command-line or the configuration - file, and in turn are overridden by per-user settings; both are - overridden by per-session settings. + + PostgreSQL provides three SQL + commands to establish configuration defaults. + The already-mentioned command + provides a SQL-accessible means of changing global defaults; it is + functionally equivalent to editing postgresql.conf. + In addition, there are two commands that allow setting of defaults + on a per-database or per-role basis: + + + + + + The command allows global + settings to be overridden on a per-database basis. + + + + + + The command allows both global and + per-database settings to be overridden with user-specific values. 
+ + + + + + Values set with ALTER DATABASE and ALTER ROLE + are applied only when starting a fresh database session. They + override values obtained from the configuration files or server + command line, and constitute defaults for the rest of the session. + Note that some settings cannot be changed after server start, and + so cannot be set with these commands (or the ones listed below). - - Some parameters can be changed in individual SQL - sessions with the - command, for example: - -SET ENABLE_SEQSCAN TO OFF; - - If SET is allowed, it overrides all other sources of - values for the parameter. Some parameters cannot be changed via - SET: for example, if they control behavior that - cannot be changed without restarting the entire - PostgreSQL server. Also, some parameters - require superuser permission to change via SET or - ALTER. + + Once a client is connected to the database, PostgreSQL + provides two additional SQL commands (and equivalent functions) to + interact with session-local configuration settings: + + + + The command allows inspection of the + current value of all parameters. The corresponding function is + current_setting(setting_name text). + + + + + + The command allows modification of the + current value of those parameters that can be set locally to a + session; it has no effect on other sessions. + The corresponding function is + set_config(setting_name, new_value, is_local). + + + + - Another way to change configuration parameters persistently is by - use of - command, for example: - -ALTER SYSTEM SET checkpoint_timeout TO 600; - - This command will allow users to change values persistently - through SQL command. The values will be effective after reload of server configuration - (SIGHUP) or server startup. The effect of this command is similar to when - user manually changes values in postgresql.conf. + In addition, the system view pg_settings can be + used to view and change session-local values: + + + + + Querying this view is similar to using SHOW ALL but + provides more detail. It is also more flexible, since it's possible + to specify filter conditions or join against other relations. + + + + + + Using on this view, specifically + updating the setting column, is the equivalent + of issuing SET commands. For example, the equivalent of + +SET configuration_parameter TO DEFAULT; + + is: + +UPDATE pg_settings SET setting = reset_val WHERE name = 'configuration_parameter'; + + + + + - - Examining Parameter Settings + + Parameter Interaction via the Shell - - The - command allows inspection of the current values of all parameters. - + + In addition to setting global defaults or attaching + overrides at the database or role level, you can pass settings to + PostgreSQL via shell facilities. + Both the server and libpq client library + accept parameter values via the shell. + - - The virtual table pg_settings also allows - displaying and updating session run-time parameters; see for details and a description of the - different variable types and when they can be changed. - pg_settings is equivalent to SHOW - and SET, but can be more convenient - to use because it can be joined with other tables, or selected from using - any desired selection condition. It also contains more information about - each parameter than is available from SHOW. - + + + + During server startup, parameter settings can be + passed to the postgres command via the + + + + + + When starting a client session via libpq, + parameter settings can be + specified using the PGOPTIONS environment variable. 
+ Settings established in this way constitute defaults for the life + of the session, but do not affect other sessions. + For historical reasons, the format of PGOPTIONS is + similar to that used when launching the postgres + command; specifically, the + + + Other clients and libraries might provide their own mechanisms, + via the shell or otherwise, that allow the user to alter session + settings without direct use of SQL commands. + + + - Configuration File Includes + Managing Configuration File Contents + + + PostgreSQL provides several features for breaking + down complex postgresql.conf files into sub-files. + These features are especially useful when managing multiple servers + with related, but not identical, configurations. + include in configuration file - - In addition to parameter settings, the postgresql.conf - file can contain include directives, which specify - another file to read and process as if it were inserted into the - configuration file at this point. This feature allows a configuration - file to be divided into physically separate parts. - Include directives simply look like: + + In addition to individual parameter settings, + the postgresql.conf file can contain include + directives, which specify another file to read and process as if + it were inserted into the configuration file at this point. This + feature allows a configuration file to be divided into physically + separate parts. Include directives simply look like: include 'filename' - If the file name is not an absolute path, it is taken as relative to - the directory containing the referencing configuration file. - Inclusions can be nested. + If the file name is not an absolute path, it is taken as relative to + the directory containing the referencing configuration file. + Inclusions can be nested. @@ -222,12 +360,12 @@ include 'filename' include_if_exists in configuration file - There is also an include_if_exists directive, which acts - the same as the include directive, except for the behavior - when the referenced file does not exist or cannot be read. A regular - include will consider this an error condition, but - include_if_exists merely logs a message and continues - processing the referencing configuration file. + There is also an include_if_exists directive, which acts + the same as the include directive, except + when the referenced file does not exist or cannot be read. A regular + include will consider this an error condition, but + include_if_exists merely logs a message and continues + processing the referencing configuration file. @@ -235,79 +373,83 @@ include 'filename' include_dir in configuration file - The postgresql.conf file can also contain - include_dir directives, which specify an entire directory - of configuration files to include. It is used similarly: - - include_dir 'directory' - - Non-absolute directory names follow the same rules as single file include - directives: they are relative to the directory containing the referencing - configuration file. Within that directory, only non-directory files whose - names end with the suffix .conf will be included. File - names that start with the . character are also excluded, - to prevent mistakes as they are hidden on some platforms. Multiple files - within an include directory are processed in file name order. The file names - are ordered by C locale rules, i.e. numbers before letters, and uppercase - letters before lowercase ones. 
+ The postgresql.conf file can also contain + include_dir directives, which specify an entire + directory of configuration files to include. These look like + +include_dir 'directory' + + Non-absolute directory names are taken as relative to the directory + containing the referencing configuration file. Within the specified + directory, only non-directory files whose names end with the + suffix .conf will be included. File names that + start with the . character are also ignored, to + prevent mistakes since such files are hidden on some platforms. Multiple + files within an include directory are processed in file name order + (according to C locale rules, i.e. numbers before letters, and + uppercase letters before lowercase ones). - Include files or directories can be used to logically separate portions - of the database configuration, rather than having a single large - postgresql.conf file. Consider a company that has two - database servers, each with a different amount of memory. There are likely - elements of the configuration both will share, for things such as logging. - But memory-related parameters on the server will vary between the two. And - there might be server specific customizations, too. One way to manage this - situation is to break the custom configuration changes for your site into - three files. You could add this to the end of your - postgresql.conf file to include them: - - include 'shared.conf' - include 'memory.conf' - include 'server.conf' - - All systems would have the same shared.conf. Each server - with a particular amount of memory could share the same - memory.conf; you might have one for all servers with 8GB of RAM, - another for those having 16GB. And finally server.conf could - have truly server-specific configuration information in it. + Include files or directories can be used to logically separate portions + of the database configuration, rather than having a single large + postgresql.conf file. Consider a company that has two + database servers, each with a different amount of memory. There are + likely elements of the configuration both will share, for things such + as logging. But memory-related parameters on the server will vary + between the two. And there might be server specific customizations, + too. One way to manage this situation is to break the custom + configuration changes for your site into three files. You could add + this to the end of your postgresql.conf file to include + them: + +include 'shared.conf' +include 'memory.conf' +include 'server.conf' + + All systems would have the same shared.conf. Each + server with a particular amount of memory could share the + same memory.conf; you might have one for all servers + with 8GB of RAM, another for those having 16GB. And + finally server.conf could have truly server-specific + configuration information in it. - Another possibility is to create a configuration file directory and - put this information into files there. For example, a conf.d - directory could be referenced at the end ofpostgresql.conf: - - include_dir 'conf.d' - - Then you could name the files in the conf.d directory like this: - - 00shared.conf - 01memory.conf - 02server.conf - - This shows a clear order in which these files will be loaded. This is - important because only the last setting encountered when the server is - reading its configuration will be used. Something set in - conf.d/02server.conf in this example would override a value - set in conf.d/01memory.conf. 
+ Another possibility is to create a configuration file directory and + put this information into files there. For example, a conf.d + directory could be referenced at the end of postgresql.conf: + +include_dir 'conf.d' + + Then you could name the files in the conf.d directory + like this: + +00shared.conf +01memory.conf +02server.conf + + This naming convention establishes a clear order in which these + files will be loaded. This is important because only the last + setting encountered for a particular parameter while the server is + reading configuration files will be used. In this example, + something set in conf.d/02server.conf would override a + value set in conf.d/01memory.conf. - You might instead use this configuration directory approach while naming - these files more descriptively: - - 00shared.conf - 01memory-8GB.conf - 02server-foo.conf - - This sort of arrangement gives a unique name for each configuration file - variation. This can help eliminate ambiguity when several servers have - their configurations all stored in one place, such as in a version - control repository. (Storing database configuration files under version - control is another good practice to consider). + You might instead use this approach to naming the files + descriptively: + +00shared.conf +01memory-8GB.conf +02server-foo.conf + + This sort of arrangement gives a unique name for each configuration file + variation. This can help eliminate ambiguity when several servers have + their configurations all stored in one place, such as in a version + control repository. (Storing database configuration files under version + control is another good practice to consider.) @@ -898,7 +1040,7 @@ include 'filename' cryptanalysis when large amounts of traffic can be examined, but it also carries a large performance penalty. The sum of sent and received traffic is used to check the limit. If this parameter is set to 0, - renegotiation is disabled. The default is 512MB. + renegotiation is disabled. The default is 0. @@ -910,6 +1052,14 @@ include 'filename' disabled. + + + + Due to bugs in OpenSSL enabling ssl renegotiation, by + configuring a non-zero ssl_renegotiation_limit, is likely + to lead to problems like long-lived connections breaking. + + @@ -2489,7 +2639,7 @@ include 'filename' Specifies the maximum number of replication slots - (see that the server + (see ) that the server can support. The default is zero. This parameter can only be set at server start. wal_level must be set @@ -2603,7 +2753,7 @@ include 'filename' The name of a standby server for this purpose is the application_name setting of the standby, as set in the - primary_conninfo of the standby's walreceiver. There is + primary_conninfo of the standby's WAL receiver. There is no mechanism to enforce uniqueness. In case of duplicates one of the matching standbys will be chosen to be the synchronous standby, though exactly which one is indeterminate. @@ -2778,7 +2928,7 @@ include 'filename' - + hot_standby_feedback (boolean) hot_standby_feedback configuration parameter @@ -3667,6 +3817,7 @@ local0.* /var/log/postgresql cluster data directory. This parameter can only be set in the postgresql.conf file or on the server command line. + The default is pg_log. @@ -3693,6 +3844,7 @@ local0.* /var/log/postgresql specification. Note that the system's strftime is not used directly, so platform-specific (nonstandard) extensions do not work. + The default is postgresql-%Y-%m-%d_%H%M%S.log. 
If you specify a file name without escapes, you should plan to @@ -3709,8 +3861,6 @@ local0.* /var/log/postgresql log file name to create the file name for CSV-format output. (If log_filename ends in .log, the suffix is replaced instead.) - In the case of the example above, the CSV - file name will be server_log.1093827753.csv. This parameter can only be set in the postgresql.conf @@ -5807,9 +5957,9 @@ SET XML OPTION { DOCUMENT | CONTENT }; Sets the collection of time zone abbreviations that will be accepted by the server for datetime input. The default is 'Default', which is a collection that works in most of the world; there are - also 'Australia' and 'India', and other collections can be defined - for a particular installation. See for more information. + also 'Australia' and 'India', + and other collections can be defined for a particular installation. + See for more information. diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 828ed9e0927b0..2b043b0d6388c 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -1073,14 +1073,15 @@ SELECT '52093.89'::money::numeric::float8; stored and displayed that way. However, trailing spaces are treated as semantically insignificant and disregarded when comparing two values of type character. In collations where whitespace - is significant, this behavior can produce unexpected results, - e.g. SELECT 'a '::CHAR(2) collate "C" < - 'a\n'::CHAR(2) returns true. + is significant, this behavior can produce unexpected results; + for example SELECT 'a '::CHAR(2) collate "C" < + E'a\n'::CHAR(2) returns true, even though C + locale would consider a space to be greater than a newline. Trailing spaces are removed when converting a character value to one of the other string types. Note that trailing spaces are semantically significant in character varying and text values, and - when using pattern matching, e.g. LIKE, + when using pattern matching, that is LIKE and regular expressions. @@ -2321,11 +2322,11 @@ January 8 04:05:06 1999 PST Time zones, and time-zone conventions, are influenced by political decisions, not just earth geometry. Time zones around the - world became somewhat standardized during the 1900's, + world became somewhat standardized during the 1900s, but continue to be prone to arbitrary changes, particularly with respect to daylight-savings rules. PostgreSQL uses the widely-used - zoneinfo (Olson) time zone database for information about + IANA (Olson) time zone database for information about historical time zone rules. For times in the future, the assumption is that the latest known rules for a given time zone will continue to be observed indefinitely far into the future. @@ -2390,8 +2391,8 @@ January 8 04:05:06 1999 PST The recognized time zone names are listed in the pg_timezone_names view (see ). - PostgreSQL uses the widely-used - zoneinfo time zone data for this purpose, so the same + PostgreSQL uses the widely-used IANA + time zone data for this purpose, so the same time zone names are also recognized by much other software. @@ -2427,7 +2428,7 @@ January 8 04:05:06 1999 PST When a daylight-savings zone abbreviation is present, it is assumed to be used according to the same daylight-savings transition rules used in the - zoneinfo time zone database's posixrules entry. + IANA time zone database's posixrules entry. 
In a standard PostgreSQL installation, posixrules is the same as US/Eastern, so that POSIX-style time zone specifications follow USA daylight-savings @@ -2438,9 +2439,25 @@ January 8 04:05:06 1999 PST In short, this is the difference between abbreviations - and full names: abbreviations always represent a fixed offset from - UTC, whereas most of the full names imply a local daylight-savings time - rule, and so have two possible UTC offsets. + and full names: abbreviations represent a specific offset from UTC, + whereas many of the full names imply a local daylight-savings time + rule, and so have two possible UTC offsets. As an example, + 2014-06-04 12:00 America/New_York represents noon local + time in New York, which for this particular date was Eastern Daylight + Time (UTC-4). So 2014-06-04 12:00 EDT specifies that + same time instant. But 2014-06-04 12:00 EST specifies + noon Eastern Standard Time (UTC-5), regardless of whether daylight + savings was nominally in effect on that date. + + + + To complicate matters, some jurisdictions have used the same timezone + abbreviation to mean different UTC offsets at different times; for + example, in Moscow MSK has meant UTC+3 in some years and + UTC+4 in others. PostgreSQL interprets such + abbreviations according to whatever they meant (or had most recently + meant) on the specified date; but, as with the EST example + above, this is not necessarily the same as local civil time on that date. @@ -2457,13 +2474,14 @@ January 8 04:05:06 1999 PST - In all cases, timezone names are recognized case-insensitively. - (This is a change from PostgreSQL versions - prior to 8.2, which were case-sensitive in some contexts but not others.) + In all cases, timezone names and abbreviations are recognized + case-insensitively. (This is a change from PostgreSQL + versions prior to 8.2, which were case-sensitive in some contexts but + not others.) - Neither full names nor abbreviations are hard-wired into the server; + Neither timezone names nor abbreviations are hard-wired into the server; they are obtained from configuration files stored under .../share/timezone/ and .../share/timezonesets/ of the installation directory @@ -3163,9 +3181,10 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - Lines (line) are represented by the linear equation Ax + By - + C = 0, where A and B are not both zero. Values of - type line is input and output in the following form: + Lines are represented by the linear + equation Ax + By + C = 0, + where A and B are not both zero. Values + of type line are input and output in the following form: { A, B, C } @@ -3183,7 +3202,7 @@ SELECT person.name, holidays.num_weeks FROM person, holidays (x1,y1) and (x2,y2) - are two (different) points on the line. + are two different points on the line. @@ -3199,9 +3218,9 @@ SELECT person.name, holidays.num_weeks FROM person, holidays - Line segments (lseg) are represented by pairs of points. - Values of type lseg are specified using any of the following - syntaxes: + Line segments are represented by pairs of points that are the endpoints + of the segment. Values of type lseg are specified using any + of the following syntaxes: [ ( x1 , y1 ) , ( x2 , y2 ) ] @@ -4487,7 +4506,7 @@ SELECT * FROM pg_attribute The pg_lsn data type can be used to store LSN (Log Sequence Number) data which is a pointer to a location in the XLOG. This type is a - representation of XLogRecPtr and an internal system type of + representation of XLogRecPtr and an internal system type of PostgreSQL. 
@@ -4496,7 +4515,7 @@ SELECT * FROM pg_attribute the write-ahead log stream. It is printed as two hexadecimal numbers of up to 8 digits each, separated by a slash; for example, 16/B374D848. The pg_lsn type supports the - standard comparison operators, like = and + standard comparison operators, like = and >. Two LSNs can be subtracted using the - operator; the result is the number of bytes separating those write-ahead log positions. @@ -4542,6 +4561,10 @@ SELECT * FROM pg_attribute trigger + + event_trigger + + language_handler @@ -4646,7 +4669,7 @@ SELECT * FROM pg_attribute record - Identifies a function returning an unspecified row type. + Identifies a function taking or returning an unspecified row type. @@ -4654,6 +4677,11 @@ SELECT * FROM pg_attribute A trigger function is declared to return trigger. + + event_trigger + An event trigger function is declared to return event_trigger. + + void Indicates that a function returns no value. @@ -4676,10 +4704,11 @@ SELECT * FROM pg_attribute Functions coded in procedural languages can use pseudo-types only as - allowed by their implementation languages. At present the procedural - languages all forbid use of a pseudo-type as argument type, and allow + allowed by their implementation languages. At present most procedural + languages forbid use of a pseudo-type as an argument type, and allow only void and record as a result type (plus - trigger when the function is used as a trigger). Some also + trigger or event_trigger when the function is used + as a trigger or event trigger). Some also support polymorphic functions using the types anyelement, anyarray, anynonarray, anyenum, and anyrange. diff --git a/doc/src/sgml/datetime.sgml b/doc/src/sgml/datetime.sgml index 444b0ec2b93a9..ffd0715128255 100644 --- a/doc/src/sgml/datetime.sgml +++ b/doc/src/sgml/datetime.sgml @@ -374,22 +374,27 @@ these formats: -time_zone_name offset -time_zone_name offset D +zone_abbreviation offset +zone_abbreviation offset D +zone_abbreviation time_zone_name @INCLUDE file_name @OVERRIDE - A time_zone_name is just the abbreviation - being defined. The offset is the zone's + A zone_abbreviation is just the abbreviation + being defined. The offset is the equivalent offset in seconds from UTC, positive being east from Greenwich and negative being west. For example, -18000 would be five hours west of Greenwich, or North American east coast standard time. D - indicates that the zone name represents local daylight-savings time - rather than standard time. Since all known time zone offsets are on - 15 minute boundaries, the number of seconds has to be a multiple of 900. + indicates that the zone name represents local daylight-savings time rather + than standard time. Alternatively, a time_zone_name can + be given, in which case that time zone definition is consulted, and the + abbreviation's meaning in that zone is used. This alternative is + recommended only for abbreviations whose meaning has historically varied, + as looking up the meaning is noticeably more expensive than just using + a fixed integer value. @@ -400,9 +405,9 @@ The @OVERRIDE syntax indicates that subsequent entries in the - file can override previous entries (i.e., entries obtained from included - files). Without this, conflicting definitions of the same timezone - abbreviation are considered an error. + file can override previous entries (typically, entries obtained from + included files). Without this, conflicting definitions of the same + timezone abbreviation are considered an error. 
@@ -410,14 +415,14 @@ all the non-conflicting time zone abbreviations for most of the world. Additional files Australia and India are provided for those regions: these files first include the - Default file and then add or modify timezones as needed. + Default file and then add or modify abbreviations as needed. For reference purposes, a standard installation also contains files Africa.txt, America.txt, etc, containing information about every time zone abbreviation known to be in use - according to the zoneinfo timezone database. The zone name + according to the IANA timezone database. The zone name definitions found in these files can be copied and pasted into a custom configuration file as needed. Note that these files cannot be directly referenced as timezone_abbreviations settings, because of @@ -426,9 +431,9 @@ - If an error occurs while reading the time zone data sets, no new value is - applied but the old set is kept. If the error occurs while starting the - database, startup fails. + If an error occurs while reading the time zone abbreviation set, no new + value is applied and the old set is kept. If the error occurs while + starting the database, startup fails. diff --git a/doc/src/sgml/dblink.sgml b/doc/src/sgml/dblink.sgml index b07ac48c005b4..a33a4f7cf85bf 100644 --- a/doc/src/sgml/dblink.sgml +++ b/doc/src/sgml/dblink.sgml @@ -69,7 +69,7 @@ dblink_connect(text connname, text connstr) returns text - conname + connname The name to use for this connection; if omitted, an unnamed @@ -276,7 +276,7 @@ dblink_disconnect(text connname) returns text - conname + connname The name of a named connection to be closed. @@ -359,7 +359,7 @@ dblink(text sql [, bool fail_on_error]) returns setof record - conname + connname Name of the connection to use; omit this parameter to use the @@ -577,7 +577,7 @@ dblink_exec(text sql [, bool fail_on_error]) returns text - conname + connname Name of the connection to use; omit this parameter to use the @@ -706,7 +706,7 @@ dblink_open(text connname, text cursorname, text sql [, bool fail_on_error]) ret - conname + connname Name of the connection to use; omit this parameter to use the @@ -829,7 +829,7 @@ dblink_fetch(text connname, text cursorname, int howmany [, bool fail_on_error]) - conname + connname Name of the connection to use; omit this parameter to use the @@ -982,7 +982,7 @@ dblink_close(text connname, text cursorname [, bool fail_on_error]) returns text - conname + connname Name of the connection to use; omit this parameter to use the @@ -1137,7 +1137,7 @@ dblink_error_message(text connname) returns text - conname + connname Name of the connection to use. @@ -1210,7 +1210,7 @@ dblink_send_query(text connname, text sql) returns int - conname + connname Name of the connection to use. @@ -1281,7 +1281,7 @@ dblink_is_busy(text connname) returns int - conname + connname Name of the connection to check. @@ -1350,7 +1350,7 @@ dblink_get_notify(text connname) returns setof (notify_name text, be_pid int, ex - conname + connname The name of a named connection to get notifications on. @@ -1429,7 +1429,7 @@ dblink_get_result(text connname [, bool fail_on_error]) returns setof record - conname + connname Name of the connection to use. @@ -1596,7 +1596,7 @@ dblink_cancel_query(text connname) returns text - conname + connname Name of the connection to use. 
diff --git a/doc/src/sgml/ddl.sgml b/doc/src/sgml/ddl.sgml index 8ace8bd3a2537..3123192c70b3c 100644 --- a/doc/src/sgml/ddl.sgml +++ b/doc/src/sgml/ddl.sgml @@ -495,8 +495,8 @@ CREATE TABLE products ( - Unique constraints ensure that the data contained in a column or a - group of columns is unique with respect to all the rows in the + Unique constraints ensure that the data contained in a column, or a + group of columns, is unique among all the rows in the table. The syntax is: CREATE TABLE products ( @@ -518,8 +518,8 @@ CREATE TABLE products ( - If a unique constraint refers to a group of columns, the columns - are listed separated by commas: + To define a unique constraint for a group of columns, write it as a + table constraint with the column names separated by commas: CREATE TABLE example ( a integer, @@ -545,8 +545,11 @@ CREATE TABLE products ( - Adding a unique constraint will automatically create a unique btree - index on the column or group of columns used in the constraint. + Adding a unique constraint will automatically create a unique B-tree + index on the column or group of columns listed in the constraint. + A uniqueness restriction covering only some rows cannot be written as + a unique constraint, but it is possible to enforce such a restriction by + creating a unique partial index. @@ -555,10 +558,10 @@ CREATE TABLE products ( - In general, a unique constraint is violated when there is more than + In general, a unique constraint is violated if there is more than one row in the table where the values of all of the columns included in the constraint are equal. - However, two null values are not considered equal in this + However, two null values are never considered equal in this comparison. That means even in the presence of a unique constraint it is possible to store duplicate rows that contain a null value in at least one of the constrained @@ -582,8 +585,9 @@ CREATE TABLE products ( - Technically, a primary key constraint is simply a combination of a - unique constraint and a not-null constraint. So, the following + A primary key constraint indicates that a column, or group of columns, + can be used as a unique identifier for rows in the table. This + requires that the values be both unique and not null. So, the following two table definitions accept the same data: CREATE TABLE products ( @@ -603,7 +607,7 @@ CREATE TABLE products ( - Primary keys can also constrain more than one column; the syntax + Primary keys can span more than one column; the syntax is similar to unique constraints: CREATE TABLE example ( @@ -616,31 +620,31 @@ CREATE TABLE example ( - A primary key indicates that a column or group of columns can be - used as a unique identifier for rows in the table. (This is a - direct consequence of the definition of a primary key. Note that - a unique constraint does not, by itself, provide a unique identifier - because it does not exclude null values.) This is useful both for - documentation purposes and for client applications. For example, - a GUI application that allows modifying row values probably needs - to know the primary key of a table to be able to identify rows - uniquely. - - - - Adding a primary key will automatically create a unique btree index - on the column or group of columns used in the primary key. + Adding a primary key will automatically create a unique B-tree index + on the column or group of columns listed in the primary key, and will + force the column(s) to be marked NOT NULL. A table can have at most one primary key. 
(There can be any number - of unique and not-null constraints, which are functionally the same - thing, but only one can be identified as the primary key.) + of unique and not-null constraints, which are functionally almost the + same thing, but only one can be identified as the primary key.) Relational database theory dictates that every table must have a primary key. This rule is not enforced by PostgreSQL, but it is usually best to follow it. + + + Primary keys are useful both for + documentation purposes and for client applications. For example, + a GUI application that allows modifying row values probably needs + to know the primary key of a table to be able to identify rows + uniquely. There are also various ways in which the database system + makes use of a primary key if one has been declared; for example, + the primary key defines the default target column(s) for foreign keys + referencing its table. + diff --git a/doc/src/sgml/dfunc.sgml b/doc/src/sgml/dfunc.sgml index 3d90a36e5278b..71aa55b2747c2 100644 --- a/doc/src/sgml/dfunc.sgml +++ b/doc/src/sgml/dfunc.sgml @@ -127,8 +127,8 @@ cc -shared -o foo.so foo.o - Mac OS X - Mac OS Xshared library + OS X + OS Xshared library diff --git a/doc/src/sgml/docguide.sgml b/doc/src/sgml/docguide.sgml index 816375f3e3e59..943ab5a0ef220 100644 --- a/doc/src/sgml/docguide.sgml +++ b/doc/src/sgml/docguide.sgml @@ -279,7 +279,7 @@ apt-get install docbook docbook-dsssl docbook-xsl openjade1.3 opensp xsltproc - Mac OS X + OS X If you use MacPorts, the following will get you set up: diff --git a/doc/src/sgml/earthdistance.sgml b/doc/src/sgml/earthdistance.sgml index ef869c5bc32d4..6dedc4a5f4993 100644 --- a/doc/src/sgml/earthdistance.sgml +++ b/doc/src/sgml/earthdistance.sgml @@ -19,7 +19,7 @@ In this module, the Earth is assumed to be perfectly spherical. (If that's too inaccurate for you, you might want to look at the - PostGIS + PostGIS project.) diff --git a/doc/src/sgml/ecpg.sgml b/doc/src/sgml/ecpg.sgml index f9536ee1e49fe..47857890e9c33 100644 --- a/doc/src/sgml/ecpg.sgml +++ b/doc/src/sgml/ecpg.sgml @@ -1377,10 +1377,13 @@ EXEC SQL END DECLARE SECTION; Arrays - SQL-level arrays are not directly supported in ECPG. It is not - possible to simply map an SQL array into a C array host variable. - This will result in undefined behavior. Some workarounds exist, - however. + Multi-dimensional SQL-level arrays are not directly supported in ECPG. + One-dimensional SQL-level arrays can be mapped into C array host + variables and vice-versa. However, when creating a statement ecpg does + not know the types of the columns, so that it cannot check if a C array + is input into a corresponding SQL-level array. When processing the + output of a SQL statement, ecpg has the necessary information and thus + checks if both are arrays. diff --git a/doc/src/sgml/extend.sgml b/doc/src/sgml/extend.sgml index 3ee6ba2f67948..4a883814d65df 100644 --- a/doc/src/sgml/extend.sgml +++ b/doc/src/sgml/extend.sgml @@ -721,6 +721,17 @@ SELECT pg_catalog.pg_extension_config_dump('my_config', 'WHERE NOT standard_entr a table as no longer a configuration table is to dissociate it from the extension with ALTER EXTENSION ... DROP TABLE. + + + Note that foreign key relationships between these tables will dictate the + order in which the tables are dumped out by pg_dump. Specifically, pg_dump + will attempt to dump the referenced-by table before the referencing table. 
+ As the foreign key relationships are set up at CREATE EXTENSION time (prior + to data being loaded into the tables) circular dependencies are not + supported. When circular dependencies exist, the data will still be dumped + out but the dump will not be able to be restored directly and user + intervention will be required. + @@ -1167,12 +1178,12 @@ include $(PGXS) This procedure is also called a VPATHVPATH build. Here's how: - - mkdir build_dir - cd build_dir - make -f /path/to/extension/source/tree/Makefile - make -f /path/to/extension/source/tree/Makefile install - + +mkdir build_dir +cd build_dir +make -f /path/to/extension/source/tree/Makefile +make -f /path/to/extension/source/tree/Makefile install + @@ -1180,11 +1191,11 @@ include $(PGXS) way to how it is done for the core code. One way to do this is using the core script config/prep_buildtree. Once this has been done you can build by setting the make variable - USE_VPATH like this: - - make USE_VPATH=/path/to/extension/source/tree - make USE_VPATH=/path/to/extension/source/tree install - + VPATH like this: + +make VPATH=/path/to/extension/source/tree +make VPATH=/path/to/extension/source/tree install + This procedure can work with a greater variety of directory layouts. diff --git a/doc/src/sgml/external-projects.sgml b/doc/src/sgml/external-projects.sgml index b616eb06515ca..7d26477123dac 100644 --- a/doc/src/sgml/external-projects.sgml +++ b/doc/src/sgml/external-projects.sgml @@ -86,7 +86,7 @@ Npgsql .NET .NET data provider - http://npgsql.projects.postgresql.org/ + http://www.npgsql.org/ @@ -100,7 +100,7 @@ psqlODBC ODBC ODBC driver - http://psqlodbc.projects.postgresql.org/ + https://odbc.postgresql.org/ @@ -172,7 +172,7 @@ PL/Java Java - http://pljava.projects.postgresql.org/ + https://github.com/tada/pljava @@ -208,7 +208,7 @@ PL/sh Unix shell - http://plsh.projects.postgresql.org/ + https://github.com/petere/plsh @@ -231,7 +231,7 @@ contains several extensions, which are described in . Other extensions are developed independently, like PostGIS. Even + url="http://postgis.net/">PostGIS. Even PostgreSQL replication solutions can be developed externally. For example, Slony-I is a popular diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml index 6b5c8b7e97b3a..9069cebb9003b 100644 --- a/doc/src/sgml/fdwhandler.sgml +++ b/doc/src/sgml/fdwhandler.sgml @@ -353,7 +353,7 @@ PlanForeignModify (PlannerInfo *root, plan is the ModifyTable plan node, which is complete except for the fdwPrivLists field. resultRelation identifies the target foreign table by its - rangetable index. subplan_index identifies which target of + range table index. subplan_index identifies which target of the ModifyTable plan node this is, counting from zero; use this if you want to index into plan->plans or other substructure of the plan node. @@ -430,7 +430,7 @@ ExecForeignInsert (EState *estate, rinfo is the ResultRelInfo struct describing the target foreign table. slot contains the tuple to be inserted; it will match the - rowtype definition of the foreign table. + row-type definition of the foreign table. planSlot contains the tuple that was generated by the ModifyTable plan node's subplan; it differs from slot in possibly containing additional junk @@ -476,7 +476,7 @@ ExecForeignUpdate (EState *estate, rinfo is the ResultRelInfo struct describing the target foreign table. slot contains the new data for the tuple; it will match the - rowtype definition of the foreign table. + row-type definition of the foreign table. 
planSlot contains the tuple that was generated by the ModifyTable plan node's subplan; it differs from slot in possibly containing additional junk @@ -576,7 +576,7 @@ IsForeignRelUpdatable (Relation rel); Report which update operations the specified foreign table supports. - The return value should be a bitmask of rule event numbers indicating + The return value should be a bit mask of rule event numbers indicating which operations are supported by the foreign table, using the CmdType enumeration; that is, (1 << CMD_UPDATE) = 4 for UPDATE, diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 5c906f36732ce..38280231999b7 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -505,7 +505,7 @@ ^ - exponentiation + exponentiation (associates left to right) 2.0 ^ 3.0 8 @@ -3584,6 +3584,28 @@ cast(-44 as bit(12)) 111111010100 + + + While most regular-expression searches can be executed very quickly, + regular expressions can be contrived that take arbitrary amounts of + time and memory to process. Be wary of accepting regular-expression + search patterns from hostile sources. If you must do so, it is + advisable to impose a statement timeout. + + + + Searches using SIMILAR TO patterns have the same + security hazards, since SIMILAR TO provides many + of the same capabilities as POSIX-style regular + expressions. + + + + LIKE searches, being much simpler than the other + two options, are safer to use with possibly-hostile pattern sources. + + + <function>LIKE</function> @@ -4653,7 +4675,7 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; \e the character whose collating-sequence name is ESC, - or failing that, the character with octal value 033 + or failing that, the character with octal value 033 @@ -4679,15 +4701,17 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; \uwxyz (where wxyz is exactly four hexadecimal digits) - the UTF16 (Unicode, 16-bit) character U+wxyz - in the local byte ordering + the character whose hexadecimal value is + 0xwxyz + \Ustuvwxyz (where stuvwxyz is exactly eight hexadecimal digits) - reserved for a hypothetical Unicode extension to 32 bits + the character whose hexadecimal value is + 0xstuvwxyz @@ -4736,6 +4760,17 @@ SELECT foo FROM regexp_split_to_table('the quick brown fox', E'\\s*') AS foo; Octal digits are 0-7. + + Numeric character-entry escapes specifying values outside the ASCII range + (0-127) have meanings dependent on the database encoding. When the + encoding is UTF-8, escape values are equivalent to Unicode code points, + for example \u1234 means the character U+1234. + For other multibyte encodings, character-entry escapes usually just + specify the concatenation of the byte values for the character. If the + escape value does not correspond to any legal character in the database + encoding, no error will be raised, but it will never match any data. + + The character-entry escapes are always taken as ordinary characters. For example, \135 is ] in ASCII, but @@ -5187,10 +5222,37 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); The quantifiers {1,1} and {1,1}? can be used to force greediness or non-greediness, respectively, on a subexpression or a whole RE. + This is useful when you need the whole RE to have a greediness attribute + different from what's deduced from its elements. As an example, + suppose that we are trying to separate a string containing some digits + into the digits and the parts before and after them. 
We might try to + do that like this: + +SELECT regexp_matches('abc01234xyz', '(.*)(\d+)(.*)'); +Result: {abc0123,4,xyz} + + That didn't work: the first .* is greedy so + it eats as much as it can, leaving the \d+ to + match at the last possible place, the last digit. We might try to fix + that by making it non-greedy: + +SELECT regexp_matches('abc01234xyz', '(.*?)(\d+)(.*)'); +Result: {abc,0,""} + + That didn't work either, because now the RE as a whole is non-greedy + and so it ends the overall match as soon as possible. We can get what + we want by forcing the RE as a whole to be greedy: + +SELECT regexp_matches('abc01234xyz', '(?:(.*?)(\d+)(.*)){1,1}'); +Result: {abc,01234,xyz} + + Controlling the RE's overall greediness separately from its components' + greediness allows great flexibility in handling variable-length patterns. - Match lengths are measured in characters, not collating elements. + When deciding what is a longer or shorter match, + match lengths are measured in characters, not collating elements. An empty string is considered longer than no match at all. For example: bb* @@ -5529,11 +5591,11 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); Y,YYY - year (4 and more digits) with comma + year (4 or more digits) with comma YYYY - year (4 and more digits) + year (4 or more digits) YYY @@ -5549,19 +5611,19 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); IYYY - ISO year (4 and more digits) + ISO 8601 week-numbering year (4 or more digits) IYY - last 3 digits of ISO year + last 3 digits of ISO 8601 week-numbering year IY - last 2 digits of ISO year + last 2 digits of ISO 8601 week-numbering year I - last digit of ISO year + last digit of ISO 8601 week-numbering year BC, bc, @@ -5631,7 +5693,7 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); IDDD - ISO day of year (001-371; day 1 of the year is Monday of the first ISO week.) + day of ISO 8601 week-numbering year (001-371; day 1 of the year is Monday of the first ISO week) DD @@ -5639,27 +5701,27 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); D - day of the week, Sunday(1) to Saturday(7) + day of the week, Sunday (1) to Saturday (7) ID - ISO day of the week, Monday(1) to Sunday(7) + ISO 8601 day of the week, Monday (1) to Sunday (7) W - week of month (1-5) (The first week starts on the first day of the month.) + week of month (1-5) (the first week starts on the first day of the month) WW - week number of year (1-53) (The first week starts on the first day of the year.) + week number of year (1-53) (the first week starts on the first day of the year) IW - ISO week number of year (01 - 53; the first Thursday of the new year is in week 1.) + week number of ISO 8601 week-numbering year (01-53; the first Thursday of the year is in week 1) CC - century (2 digits) (The twenty-first century starts on 2001-01-01.) + century (2 digits) (the twenty-first century starts on 2001-01-01) J @@ -5715,7 +5777,7 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); FM prefix - fill mode (suppress padding blanks and trailing zeroes) + fill mode (suppress leading zeroes and padding blanks) FMMonth @@ -5796,7 +5858,7 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); Casting does not have this behavior. 
- + Ordinary text is allowed in to_char @@ -5859,16 +5921,16 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); - An ISO week date (as distinct from a Gregorian date) can be - specified to to_timestamp and + An ISO 8601 week-numbering date (as distinct from a Gregorian date) + can be specified to to_timestamp and to_date in one of two ways: - Year, week, and weekday: for example to_date('2006-42-4', - 'IYYY-IW-ID') returns the date - 2006-10-19. If you omit the weekday it - is assumed to be 1 (Monday). + Year, week number, and weekday: for + example to_date('2006-42-4', 'IYYY-IW-ID') + returns the date 2006-10-19. + If you omit the weekday it is assumed to be 1 (Monday). @@ -5880,13 +5942,25 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); - Attempting to construct a date using a mixture of ISO week and - Gregorian date fields is nonsensical, and will cause an error. In the - context of an ISO year, the concept of a month or day - of month has no meaning. In the context of a Gregorian year, the - ISO week has no meaning. Users should avoid mixing Gregorian and - ISO date specifications. + Attempting to enter a date using a mixture of ISO 8601 week-numbering + fields and Gregorian date fields is nonsensical, and will cause an + error. In the context of an ISO 8601 week-numbering year, the + concept of a month or day of month has no + meaning. In the context of a Gregorian year, the ISO week has no + meaning. + + + While to_date will reject a mixture of + Gregorian and ISO week-numbering date + fields, to_char will not, since output format + specifications like YYYY-MM-DD (IYYY-IDDD) can be + useful. But avoid writing something like IYYY-MM-DD; + that would yield surprising results near the start of the year. + (See for more + information.) + + @@ -6104,7 +6178,7 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); FM prefix - fill mode (suppress padding blanks and trailing zeroes) + fill mode (suppress leading zeroes and padding blanks) FM9999 @@ -6242,7 +6316,7 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); to_char(485, 'L999') - 'DM 485 + 'DM 485' to_char(485, 'RN') @@ -6801,7 +6875,7 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); Create timestamp from year, month, day, hour, minute and seconds fields - make_timestamp(1-23, 7, 15, 8, 15, 23.5) + make_timestamp(2013, 7, 15, 8, 15, 23.5) 2013-07-15 08:15:23.5 @@ -6828,7 +6902,7 @@ SELECT SUBSTRING('XY1234Z', 'Y*?([0-9]{1,3})'); and seconds fields. When timezone is not specified, then current time zone is used. - make_timestamp(1-23, 7, 15, 8, 15, 23.5) + make_timestamptz(2013, 7, 15, 8, 15, 23.5) 2013-07-15 08:15:23.5+01 @@ -7090,8 +7164,8 @@ SELECT EXTRACT(DECADE FROM TIMESTAMP '2001-02-16 20:38:40'); dow - The day of the week as Sunday(0) to - Saturday(6) + The day of the week as Sunday (0) to + Saturday (6) @@ -7173,8 +7247,8 @@ SELECT EXTRACT(HOUR FROM TIMESTAMP '2001-02-16 20:38:40'); isodow - The day of the week as Monday(1) to - Sunday(7) + The day of the week as Monday (1) to + Sunday (7) @@ -7193,7 +7267,8 @@ SELECT EXTRACT(ISODOW FROM TIMESTAMP '2001-02-18 20:38:40'); isoyear - The ISO 8601 year that the date falls in (not applicable to intervals) + The ISO 8601 week-numbering year that the date + falls in (not applicable to intervals) @@ -7204,7 +7279,11 @@ SELECT EXTRACT(ISOYEAR FROM DATE '2006-01-02'); - Each ISO year begins with the Monday of the week containing the 4th of January, so in early January or late December the ISO year may be different from the Gregorian year. See the week field for more information. 
+ Each ISO 8601 week-numbering year begins with the + Monday of the week containing the 4th of January, so in early + January or late December the ISO year may be + different from the Gregorian year. See the week + field for more information. This field is not available in PostgreSQL releases prior to 8.3. @@ -7364,14 +7443,14 @@ SELECT EXTRACT(SECOND FROM TIME '17:12:28.5'); week - The number of the week of the year that the day is in. By definition - (ISO 8601), weeks start on Mondays and the first + The number of the ISO 8601 week-numbering week of + the year. By definition, ISO weeks start on Mondays and the first week of a year contains January 4 of that year. In other words, the first Thursday of a year is in week 1 of that year. - In the ISO definition, it is possible for early-January dates to be - part of the 52nd or 53rd week of the previous year, and for + In the ISO week-numbering system, it is possible for early-January + dates to be part of the 52nd or 53rd week of the previous year, and for late-December dates to be part of the first week of the next year. For example, 2005-01-01 is part of the 53rd week of year 2004, and 2006-01-01 is part of the 52nd week of year @@ -10086,7 +10165,7 @@ table2-mapping shows the operators that - are available for use with the two JSON datatypes (see ). @@ -10152,15 +10231,25 @@ table2-mapping There are parallel variants of these operators for both the - json and jsonb types. The operators + json and jsonb types. + The field/element/path extraction operators return the same type as their left-hand input (either json or jsonb), except for those specified as returning text, which coerce the value to text. + The field/element/path extraction operators return NULL, rather than + failing, if the JSON input does not have the right structure to match + the request; for example if no such element exists. - In addition to those operators common to both types, some additional - operators exist only for jsonb, as shown + The standard comparison operators shown in are available for + jsonb, but not for json. They follow the + ordering rules for B-tree operations outlined at . + + + Some further operators also exist only for jsonb, as shown in . Many of these operators can be indexed by jsonb operator classes. For a full description of @@ -10181,12 +10270,6 @@ table2-mapping - - = - jsonb - Are the two JSON values equal? - '[1,2,3]'::jsonb = '[1,2,3]'::jsonb - @> jsonb @@ -10269,7 +10352,7 @@ table2-mapping (recursively) to arrays and objects; otherwise, if there is a cast from the type to json, the cast function will be used to perform the conversion; otherwise, a JSON scalar value is produced. - For any scalar type other than a number, a boolean, or a null value, + For any scalar type other than a number, a Boolean, or a null value, the text representation will be used, properly quoted and escaped so that it is a valid JSON string. @@ -10317,7 +10400,7 @@ table2-mapping Builds a JSON object out of a variadic argument list. By convention, the argument list consists of alternating - names and values. + keys and values. json_build_object('foo',1,'bar',2) {"foo": 1, "bar": 2} @@ -10329,9 +10412,9 @@ table2-mapping Builds a JSON object out of a text array. 
The array must have either exactly one dimension with an even number of members, in which case - they are taken as alternating name/value pairs, or two dimensions + they are taken as alternating key/value pairs, or two dimensions such that each inner array has exactly two elements, which - are taken as a name/value pair. + are taken as a key/value pair. json_object('{a, 1, b, "def", c, 3.5}') json_object('{{a, 1},{b, "def"},{c, 3.5}}') @@ -10495,7 +10578,7 @@ table2-mapping -----+------- a | "foo" b | "bar" - + @@ -10514,7 +10597,7 @@ table2-mapping -----+------- a | foo b | bar - + @@ -10524,7 +10607,8 @@ table2-mapping jsonjsonb - Returns JSON value pointed to by path_elems. + Returns JSON value pointed to by path_elems + (equivalent to #> operator). json_extract_path('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}','f4') {"f5":99,"f6":"foo"} @@ -10536,7 +10620,8 @@ table2-mapping text Returns JSON value pointed to by path_elems - as text. + as text + (equivalent to #>> operator). json_extract_path_text('{"f2":{"f3":1},"f4":{"f5":99,"f6":"foo"}}','f4', 'f6') foo @@ -10560,8 +10645,8 @@ table2-mapping - json_populate_record(base anyelement, from_json json, [, use_json_as_text bool=false]) - jsonb_populate_record(base anyelement, from_json jsonb, [, use_json_as_text bool=false]) + json_populate_record(base anyelement, from_json json) + jsonb_populate_record(base anyelement, from_json jsonb) anyelement @@ -10579,8 +10664,8 @@ table2-mapping - json_populate_recordset(base anyelement, from_json json, [, use_json_as_text bool=false]) - jsonb_populate_recordset(base anyelement, from_json jsonb, [, use_json_as_text bool=false]) + json_populate_recordset(base anyelement, from_json json) + jsonb_populate_recordset(base anyelement, from_json jsonb) setof anyelement @@ -10596,7 +10681,7 @@ table2-mapping ---+--- 1 | 2 3 | 4 - + @@ -10653,46 +10738,44 @@ table2-mapping number - json_to_record(json [, nested_as_text bool=false]) - jsonb_to_record(jsonb [, nested_as_text bool=false]) + json_to_record(json) + jsonb_to_record(jsonb) record Builds an arbitrary record from a JSON object (see note below). As with all functions returning record, the caller must explicitly define the structure of the record with an AS - clause. If nested_as_text is true, the function - coerces nested complex elements to text. + clause. - select * from json_to_record('{"a":1,"b":[1,2,3],"c":"bar"}',true) as x(a int, b text, d text) + select * from json_to_record('{"a":1,"b":[1,2,3],"c":"bar"}') as x(a int, b text, d text) - a | b | d + a | b | d ---+---------+--- - 1 | [1,2,3] | - + 1 | [1,2,3] | + - json_to_recordset(json [, nested_as_text bool=false]) - jsonb_to_recordset(jsonb [, nested_as_text bool=false]) + json_to_recordset(json) + jsonb_to_recordset(jsonb) setof record Builds an arbitrary set of records from a JSON array of objects (see note below). As with all functions returning record, the caller must explicitly define the structure of the record with - an AS clause. nested_as_text works as - with json_to_record. + an AS clause. - select * from json_to_recordset('[{"a":1,"b":"foo"},{"a":"2","c":"bar"}]',true) as x(a int, b text); + select * from json_to_recordset('[{"a":1,"b":"foo"},{"a":"2","c":"bar"}]') as x(a int, b text); a | b ---+----- 1 | foo 2 | - + @@ -10715,9 +10798,9 @@ table2-mapping json_to_record and json_to_recordset, type coercion from the JSON is best effort and may not result in desired values for some types. 
JSON keys are matched to - identical field names in the target row type, and fields that do - not exist in the JSON will simply be NULL. JSON keys that do not - appear in the target row type will be omitted from the output. + identical column names in the target row type. JSON fields that do not + appear in the target row type will be omitted from the output, and + target columns that do not match any JSON field will simply be NULL. @@ -11120,11 +11203,13 @@ SELECT ... WHERE CASE WHEN x <> 0 THEN y/x > 1.5 ELSE false END; - As described in , functions and - operators marked IMMUTABLE can be evaluated when - the query is planned rather than when it is executed. This means - that constant parts of a subexpression that is not evaluated during - query execution might still be evaluated during query planning. + As described in , there are various + situations in which subexpressions of an expression are evaluated at + different times, so that the principle that CASE + evaluates only necessary subexpressions is not ironclad. For + example a constant 1/0 subexpression will usually result in + a division-by-zero failure at planning time, even if it's within + a CASE arm that would never be entered at run time. @@ -12154,15 +12239,15 @@ NULL baz(3 rows) json_agg - json_agg(record) + json_agg(expression) - record + any json - aggregates records as a JSON array of objects + aggregates values as a JSON array @@ -12173,7 +12258,7 @@ NULL baz(3 rows) json_object_agg(name, value) - ("any", "any") + (any, any) json @@ -12242,14 +12327,13 @@ NULL baz(3 rows) smallint, int, bigint, real, double - precision, numeric, or - interval + precision, numeric, + interval, or money bigint for smallint or int arguments, numeric for - bigint arguments, double precision - for floating-point arguments, otherwise the same as the + bigint arguments, otherwise the same as the argument data type sum of expression across all input values @@ -12901,6 +12985,11 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; simply produces a null result. + + hypothetical-set aggregate + built-in + + Each of the aggregates listed in is associated with a @@ -13044,7 +13133,8 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; Window functions provide the ability to perform calculations across sets of rows that are related to the current query row. See for an introduction to this - feature. + feature, and for syntax + details. @@ -13161,9 +13251,9 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; lag - lag(value any + lag(value anyelement [, offset integer - [, default any ]]) + [, default anyelement ]]) @@ -13173,7 +13263,9 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; returns value evaluated at the row that is offset rows before the current row within the partition; if there is no such - row, instead return default. + row, instead return default + (which must be of the same type as + value). Both offset and default are evaluated with respect to the current row. If omitted, @@ -13188,9 +13280,9 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; lead - lead(value any + lead(value anyelement [, offset integer - [, default any ]]) + [, default anyelement ]]) @@ -13200,7 +13292,9 @@ SELECT xmlagg(x) FROM (SELECT x FROM test ORDER BY y DESC) AS tab; returns value evaluated at the row that is offset rows after the current row within the partition; if there is no such - row, instead return default. 
+ row, instead return default + (which must be of the same type as + value). Both offset and default are evaluated with respect to the current row. If omitted, @@ -14006,8 +14100,8 @@ AND *>=. These operators compare the internal binary representation of the two rows. Two rows might have a different binary representation even - though comparisons of the two rows with the equality operator is true. - The ordering of rows under these comparision operators is deterministic + though comparisons of the two rows with the equality operator is true. + The ordering of rows under these comparison operators is deterministic but not otherwise meaningful. These operators are used internally for materialized views and might be useful for other specialized purposes such as replication but are not intended to be generally useful for @@ -15461,32 +15555,32 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); to_regclass(rel_name) regclass - get the oid of the named relation + get the OID of the named relation to_regproc(func_name) regproc - get the oid of the named function + get the OID of the named function to_regprocedure(func_name) regprocedure - get the oid of the named function + get the OID of the named function to_regoper(operator_name) regoper - get the oid of the named operator + get the OID of the named operator to_regoperator(operator_name) regoperator - get the oid of the named operator + get the OID of the named operator to_regtype(type_name) regtype - get the oid of the named type + get the OID of the named type @@ -15602,7 +15696,7 @@ SELECT pg_type_is_visible('myschema.widget'::regtype); schema is the schema name that the object belongs in, or NULL for object types that do not belong to schemas; name is the name of the object, quoted if necessary, only - present if it can be used (alongside schema name, if pertinent) as an unique + present if it can be used (alongside schema name, if pertinent) as a unique identifier of the object, otherwise NULL; identity is the complete object identity, with the precise format depending on object type, and each part within the format being @@ -16465,7 +16559,7 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); pg_xlog_replay_pause() void - Pauses recovery immediately. + Pauses recovery immediately (restricted to superusers). @@ -16473,7 +16567,7 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); pg_xlog_replay_resume() void - Restarts recovery if it was paused. + Restarts recovery if it was paused (restricted to superusers). @@ -16577,9 +16671,11 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); Replication Functions - PostgreSQL exposes a number of functions for controlling and interacting - with replication features. See - and . + The functions shown in are + for controlling and interacting with replication features. + See + and for information about the + underlying features. Use of these functions is restricted to superusers. @@ -16588,8 +16684,8 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); - The sections , and , , and are also relevant for replication. @@ -16617,8 +16713,8 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); Creates a new physical replication slot named slot_name. Streaming changes from a physical slot - is only possible with the walsender protocol - see . Corresponds to the walsender protocol + is only possible with the streaming-replication protocol - see . 
Corresponds to the replication protocol command CREATE_REPLICATION_SLOT ... PHYSICAL. @@ -16634,7 +16730,7 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); Drops the physical or logical replication slot - named slot_name. Same as walsender protocol + named slot_name. Same as replication protocol command DROP_REPLICATION_SLOT. @@ -16654,7 +16750,7 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); slot_name using the output plugin plugin. A call to this function has the same effect as the replication protocol command - CREATE REPLICATION SLOT ... LOGICAL. + CREATE_REPLICATION_SLOT ... LOGICAL. @@ -16926,20 +17022,40 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); - pg_relation_size accepts the OID or name of a table, index or - toast table, and returns the on-disk size in bytes. - Specifying 'main' or leaving out the second argument - returns the size of the main data fork of the relation. - Specifying 'fsm' returns the size of the Free Space - Map (see ) associated with the relation. - Specifying 'vm' returns the size of the Visibility - Map (see ) associated with the relation. - Specifying 'init' returns the size of the - initialization fork, if any, associated with the relation. - Note that this function shows the size of only one fork; - for most purposes it is more convenient to use the higher-level - functions pg_total_relation_size or - pg_table_size. + pg_relation_size accepts the OID or name of a table, index + or toast table, and returns the on-disk size in bytes of one fork of + that relation. (Note that for most purposes it is more convenient to + use the higher-level functions pg_total_relation_size + or pg_table_size, which sum the sizes of all forks.) + With one argument, it returns the size of the main data fork of the + relation. The second argument can be provided to specify which fork + to examine: + + + + 'main' returns the size of the main + data fork of the relation. + + + + + 'fsm' returns the size of the Free Space Map + (see ) associated with the relation. + + + + + 'vm' returns the size of the Visibility Map + (see ) associated with the relation. + + + + + 'init' returns the size of the initialization + fork, if any, associated with the relation. + + + @@ -17555,7 +17671,7 @@ FOR EACH ROW EXECUTE PROCEDURE suppress_redundant_updates_trigger(); text Name of the object, if the combination of schema and name can be - used as an unique identifier for the object; otherwise NULL. + used as a unique identifier for the object; otherwise NULL. No quoting is applied, and name is never schema-qualified. diff --git a/doc/src/sgml/gin.sgml b/doc/src/sgml/gin.sgml index 1cbc73c70cfed..8443c01f6f739 100644 --- a/doc/src/sgml/gin.sgml +++ b/doc/src/sgml/gin.sgml @@ -559,7 +559,7 @@ triConsistent alone is sufficient. However, if the boolean variant is significantly cheaper to calculate, it can be advantageous to provide both. If only the boolean variant is provided, some optimizations that depend on - refuting index items before fetching all the keys are disabled. + refuting index items before fetching all the keys are disabled. 
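As a minimal sketch of the per-fork size reporting described in the rewritten pg_relation_size entry above (the table name here is hypothetical; the higher-level functions behave as the hunk states):

-- size of the main data fork only
SELECT pg_relation_size('mytable');
SELECT pg_relation_size('mytable', 'main');
-- free space map and visibility map forks
SELECT pg_relation_size('mytable', 'fsm');
SELECT pg_relation_size('mytable', 'vm');
-- sums over all forks (and, for pg_total_relation_size, indexes too)
SELECT pg_table_size('mytable');
SELECT pg_total_relation_size('mytable');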
@@ -614,7 +614,7 @@ - GinLogicValue triConsistent(GinLogicValue check[], StrategyNumber n, Datum query, + GinTernaryValue triConsistent(GinTernaryValue check[], StrategyNumber n, Datum query, int32 nkeys, Pointer extra_data[], Datum queryKeys[], bool nullFlags[]) @@ -628,7 +628,7 @@ When GIN_MAYBE values are present, the function should only return GIN_TRUE if the item matches whether or not the index item contains the corresponding query keys. Likewise, the function must - return GIN_FALSE only if the item does not match, whether or not it + return GIN_FALSE only if the item does not match, whether or not it contains the GIN_MAYBE keys. If the result depends on the GIN_MAYBE entries, i.e. the match cannot be confirmed or refuted based on the known query keys, the function must return GIN_MAYBE. diff --git a/doc/src/sgml/high-availability.sgml b/doc/src/sgml/high-availability.sgml index 657097d6460a5..65281f7f97668 100644 --- a/doc/src/sgml/high-availability.sgml +++ b/doc/src/sgml/high-availability.sgml @@ -1894,7 +1894,7 @@ if (!triggered) might want to make adjustments to handle the period when hot_standby_feedback feedback is not being provided. For example, consider increasing max_standby_archive_delay - so that queries are not rapidly cancelled by conflicts in WAL archive + so that queries are not rapidly canceled by conflicts in WAL archive files during disconnected periods. You should also consider increasing max_standby_streaming_delay to avoid rapid cancellations by newly-arrived streaming WAL entries after reconnection. @@ -1993,6 +1993,11 @@ LOG: database system is ready to accept read only connections max_locks_per_transaction + + + max_worker_processes + + diff --git a/doc/src/sgml/hstore.sgml b/doc/src/sgml/hstore.sgml index fbe9543dfea1b..9618eb8e97859 100644 --- a/doc/src/sgml/hstore.sgml +++ b/doc/src/sgml/hstore.sgml @@ -325,11 +325,21 @@ b hstore_to_json(hstore)hstore_to_json json - get hstore as a json value + get hstore as a json value, converting + all non-null values to JSON strings hstore_to_json('"a key"=>1, b=>t, c=>null, d=>12345, e=>012345, f=>1.234, g=>2.345e+4') {"a key": "1", "b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4"} + + hstore_to_jsonb(hstore)hstore_to_jsonb + jsonb + get hstore as a jsonb value, converting + all non-null values to JSON strings + hstore_to_jsonb('"a key"=>1, b=>t, c=>null, d=>12345, e=>012345, f=>1.234, g=>2.345e+4') + {"a key": "1", "b": "t", "c": null, "d": "12345", "e": "012345", "f": "1.234", "g": "2.345e+4"} + + hstore_to_json_loose(hstore)hstore_to_json_loose json @@ -338,6 +348,14 @@ b {"a key": 1, "b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 2.345e+4} + + hstore_to_jsonb_loose(hstore)hstore_to_jsonb_loose + jsonb + get hstore as a jsonb value, but attempt to distinguish numerical and Boolean values so they are unquoted in the JSON + hstore_to_jsonb_loose('"a key"=>1, b=>t, c=>null, d=>12345, e=>012345, f=>1.234, g=>2.345e+4') + {"a key": 1, "b": true, "c": null, "d": 12345, "e": "012345", "f": 1.234, "g": 2.345e+4} + + slice(hstore, text[])slice hstore @@ -414,8 +432,10 @@ b - The function hstore_to_json is used when an hstore - value is cast to json. + The function hstore_to_json is used when + an hstore value is cast to json. + Likewise, hstore_to_jsonb is used when + an hstore value is cast to jsonb. 
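A minimal usage sketch of the hstore_to_jsonb additions documented in the hstore hunk above, assuming the hstore extension has been installed with CREATE EXTENSION hstore:

-- explicit function call
SELECT hstore_to_jsonb('a=>1, b=>t, c=>NULL');
-- the cast form mentioned in the added paragraph
SELECT 'a=>1, b=>t, c=>NULL'::hstore::jsonb;
-- the "loose" variant leaves numeric and Boolean values unquoted
SELECT hstore_to_jsonb_loose('a=>1, b=>t, c=>NULL');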
diff --git a/doc/src/sgml/indexam.sgml b/doc/src/sgml/indexam.sgml index fd1168991adcf..157047a23ab15 100644 --- a/doc/src/sgml/indexam.sgml +++ b/doc/src/sgml/indexam.sgml @@ -168,7 +168,7 @@ ambuild (Relation heapRelation, void ambuildempty (Relation indexRelation); - Build an empty index, and write it to the initialization fork (INIT_FORKNUM) + Build an empty index, and write it to the initialization fork (INIT_FORKNUM) of the given relation. This method is called only for unlogged tables; the empty index written to the initialization fork will be copied over the main relation fork on each server restart. @@ -278,7 +278,7 @@ amcanreturn (Relation indexRelation); Check whether the index can support index-only scans by returning the indexed column values for an index entry in the form of an - IndexTuple. Return TRUE if so, else FALSE. If the index AM can never + IndexTuple. Return TRUE if so, else FALSE. If the index AM can never support index-only scans (an example is hash, which stores only the hash values not the original data), it is sufficient to set its amcanreturn field to zero in pg_am. diff --git a/doc/src/sgml/install-windows.sgml b/doc/src/sgml/install-windows.sgml index 71a5c2e96307a..4b191c4e6238b 100644 --- a/doc/src/sgml/install-windows.sgml +++ b/doc/src/sgml/install-windows.sgml @@ -436,6 +436,10 @@ $ENV{CONFIG}="Debug"; vcregress installcheck vcregress plcheck vcregress contribcheck +vcregress ecpgcheck +vcregress isolationcheck +vcregress bincheck +vcregress upgradecheck To change the schedule used (default is parallel), append it to the @@ -447,6 +451,29 @@ $ENV{CONFIG}="Debug"; For more information about the regression tests, see . + + + Running the regression tests on client programs, with "vcregress bincheck", + requires an additional Perl module to be installed: + + + IPC::Run + + As of this writing, IPC::Run is not included in the + ActiveState Perl installation, nor in the ActiveState Perl Package + Manager (PPM) library. To install, download the + IPC-Run-<version>.tar.gz source archive from CPAN, + at , and + uncompress. Edit the buildenv.pl file, and add a PERL5LIB + variable to point to the lib subdirectory from the + extracted archive. For example: + +$ENV{PERL5LIB}=$ENV{PERL5LIB} . ';c:\IPC-Run-0.94\lib'; + + + + + diff --git a/doc/src/sgml/installation.sgml b/doc/src/sgml/installation.sgml index 7353c612b1f67..de13e3b6252cf 100644 --- a/doc/src/sgml/installation.sgml +++ b/doc/src/sgml/installation.sgml @@ -864,7 +864,7 @@ su - postgres Build with Bonjour support. This requires Bonjour support - in your operating system. Recommended on Mac OS X. + in your operating system. Recommended on OS X. @@ -890,7 +890,7 @@ su - postgres @@ -1108,7 +1108,7 @@ su - postgres PostgreSQL includes its own time zone database, which it requires for date and time operations. This time zone - database is in fact compatible with the zoneinfo time zone + database is in fact compatible with the IANA time zone database provided by many operating systems such as FreeBSD, Linux, and Solaris, so it would be redundant to install it again. When this option is used, the system-supplied time zone database @@ -1271,6 +1271,16 @@ su - postgres + + + + + Enable tests using the Perl TAP tools. This requires a Perl + installation and the Perl module IPC::Run. 
+ for more information.]]> + + + @@ -1783,7 +1793,7 @@ set path = ( /usr/local/pgsql/bin $path ) shell start-up file unless you installed into a location that is searched by default: -MANPATH=/usr/local/pgsql/man:$MANPATH +MANPATH=/usr/local/pgsql/share/man:$MANPATH export MANPATH @@ -1991,7 +2001,7 @@ kill `cat /usr/local/pgsql/data/postmaster.pid` PostgreSQL can be expected to work on these operating systems: Linux (all recent distributions), Windows (Win2000 SP4 and later), - FreeBSD, OpenBSD, NetBSD, Mac OS X, AIX, HP/UX, Solaris, Tru64 Unix, + FreeBSD, OpenBSD, NetBSD, OS X, AIX, HP/UX, Solaris, Tru64 Unix, and UnixWare. Other Unix-like systems may also work but are not currently being tested. In most cases, all CPU architectures supported by a given operating system will work. Look in @@ -2593,7 +2603,7 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch from , put its bin directory in the PATH, and run configure with the - --host=x86_64-w64-mingw option. + --host=x86_64-w64-mingw32 option. @@ -2728,13 +2738,13 @@ PHSS_30849 s700_800 u2comp/be/plugin library Patch By default, the PostgreSQL man pages are installed into - /usr/local/pgsql/man. By default, UnixWare + /usr/local/pgsql/share/man. By default, UnixWare does not look there for man pages. To be able to read them you need to modify the MANPATH variable in /etc/default/man, for example: -MANPATH=/usr/lib/scohelp/%L/man:/usr/dt/man:/usr/man:/usr/share/man:scohelp:/usr/local/man:/usr/local/pgsql/man +MANPATH=/usr/lib/scohelp/%L/man:/usr/dt/man:/usr/man:/usr/share/man:scohelp:/usr/local/man:/usr/local/pgsql/share/man diff --git a/doc/src/sgml/isn.sgml b/doc/src/sgml/isn.sgml index 4a2e7da76300f..c1da702df63ca 100644 --- a/doc/src/sgml/isn.sgml +++ b/doc/src/sgml/isn.sgml @@ -364,7 +364,9 @@ SELECT isbn13(id) FROM test; The prefixes used for hyphenation were also compiled from: - + + + diff --git a/doc/src/sgml/jadetex.cfg b/doc/src/sgml/jadetex.cfg index 25b79312ebc35..875598f1514b5 100644 --- a/doc/src/sgml/jadetex.cfg +++ b/doc/src/sgml/jadetex.cfg @@ -1,14 +1,37 @@ % doc/src/sgml/jadetex.cfg % -% This file redefines FlowObjectSetup to eliminate one of the two control -% sequences it normally creates, thereby substantially reducing string usage -% and permitting the complete Postgres documentation to be built without -% overflowing a hard-to-expand TeX limit. The only known penalty is an -% increased number of TeX warnings about ignoring duplicate definitions. +% This file redefines \FlowObjectSetup and some related macros to greatly +% reduce the number of control sequence names created, and also to avoid +% creation of many useless hyperlink anchors (bookmarks) in PDF files. % -% Curiously, we only see the failure when building PDF output --- plain PS -% output does not come anywhere close to overflowing the string table. -% There may be another solution hidden in that observation. +% The original coding of \FlowObjectSetup defined a control sequence x@LABEL +% for pretty nearly every flow object in the file, whether that object was +% cross-referenced or not. Worse yet, it created a hyperlink anchor for +% every such object, which not only bloated the output PDF with useless +% anchors but consumed an additional control sequence name per anchor. +% This results in overrunning TeX's limited-size string pool. +% +% To fix, extend \PageLabel's already-existing mechanism whereby a p@LABEL +% control sequence is filled in only for labels that are referenced by at +% least one \Pageref call. 
We now also fill in p@LABEL for labels that are +% referenced by a \Link. Then, we can drop x@LABEL entirely, and use p@LABEL +% to control emission of both a hyperlink anchor and a page-number label. +% Now, both of those things are emitted for all and only the flow objects +% that have either a hyperlink reference or a page-number reference. +% We consume about one control sequence name per flow object plus one per +% referenced object, which is a lot better than three per flow object. +% +% (With a more invasive patch, we could track the need for an anchor and a +% page-number label separately, but that would probably require two control +% sequences for every flow object. Besides, many objects that have one kind +% of reference will have the other one too; that's certainly true for objects +% referenced in either the TOC or the index, for example.) +% +% +% In addition to checking p@LABEL not x@LABEL, this version of \FlowObjectSetup +% is fixed to clear \Label and \Element whether or not it emits an anchor +% and page label. Failure to do that seems to explain some pre-existing bugs +% in which certain SGML constructs weren't correctly cross-referenced. % \def\FlowObjectSetup#1{% \ifDoFOBSet @@ -16,6 +39,8 @@ \ifx\Label\@empty\let\Label\Element\fi \fi \ifx\Label\@empty\else + \expandafter\ifx\csname p@\Label\endcsname\relax + \else \bgroup \ifNestedLink \else @@ -23,8 +48,42 @@ \PageLabel{\Label}% \fi \egroup - \let\Label\@empty - \let\Element\@empty + \fi + \let\Label\@empty + \let\Element\@empty \fi \fi } +% +% Adjust \PageLabel so that the p@NAME control sequence acquires a correct +% value immediately; this seems to be needed to avoid scenarios wherein +% additional TeX runs are needed to reach a stable state of the .aux file. +% +\def\PageLabel#1{% + \@bsphack + \expandafter\ifx\csname p@#1\endcsname\relax + \else + \protected@write\@auxout{}% + {\string\pagelabel{#1}{\thepage}}% + % Ensure the p@NAME control sequence acquires correct value immediately + \expandafter\xdef\csname p@#1\endcsname{\thepage}% + \fi + \@esphack} +% +% In \Link, add code to emit an aux-file entry if the p@NAME sequence isn't +% defined. Much as in \@Setref, this ensures we'll process the referenced +% item correctly on the next TeX run. +% +\def\Link#1{% + \begingroup + \SetupICs{#1}% + \ifx\Label\@empty\let\Label\Element\fi +% \typeout{Made a Link at \the\inputlineno, to \Label}% + \hyper@linkstart{\LinkType}{\Label}% + \NestedLinktrue + % If p@NAME control sequence isn't defined, emit dummy def to aux file + % so it will get defined properly on next run, much as in \@Setref + \expandafter\ifx\csname p@\Label\endcsname\relax + \immediate\write\@mainaux{\string\pagelabel{\Label}{qqq}}% + \fi +} diff --git a/doc/src/sgml/json.sgml b/doc/src/sgml/json.sgml index 66426189ca53f..f4546f02229c2 100644 --- a/doc/src/sgml/json.sgml +++ b/doc/src/sgml/json.sgml @@ -69,12 +69,14 @@ regardless of the database encoding, and are checked only for syntactic correctness (that is, that four hex digits follow \u). However, the input function for jsonb is stricter: it disallows - Unicode escapes for non-ASCII characters (those - above U+007F) unless the database encoding is UTF8. It also - insists that any use of Unicode surrogate pairs to designate characters - outside the Unicode Basic Multilingual Plane be correct. Valid Unicode - escapes, except for \u0000, are then converted to the - equivalent ASCII or UTF8 character for storage. 
+ Unicode escapes for non-ASCII characters (those above U+007F) + unless the database encoding is UTF8. The jsonb type also + rejects \u0000 (because that cannot be represented in + PostgreSQL's text type), and it insists + that any use of Unicode surrogate pairs to designate characters outside + the Unicode Basic Multilingual Plane be correct. Valid Unicode escapes + are converted to the equivalent ASCII or UTF8 character for storage; + this includes folding surrogate pairs into a single character. @@ -101,7 +103,7 @@ constitutes valid jsonb data that do not apply to the json type, nor to JSON in the abstract, corresponding to limits on what can be represented by the underlying data type. - Specifically, jsonb will reject numbers that are outside the + Notably, jsonb will reject numbers that are outside the range of the PostgreSQL numeric data type, while json will not. Such implementation-defined restrictions are permitted by RFC 7159. However, in @@ -134,7 +136,8 @@ string text - See notes above concerning encoding restrictions + \u0000 is disallowed, as are non-ASCII Unicode + escapes if database encoding is not UTF8 number @@ -163,7 +166,7 @@ The following are all valid json (or jsonb) expressions: - + -- Simple scalar/primitive value -- Primitive values can be numbers, quoted strings, true, false, or null SELECT '5'::json; @@ -177,7 +180,7 @@ SELECT '{"bar": "baz", "balance": 7.77, "active": false}'::json; -- Arrays and objects can be nested arbitrarily SELECT '{"foo": [true, "bar"], "tags": {"a": 1, "b": null}}'::json; - + @@ -262,13 +265,19 @@ SELECT '{"reading": 1.230e-5}'::json, '{"reading": 1.230e-5}'::jsonb; one jsonb document has contained within it another one. These examples return true except as noted: - + -- Simple scalar/primitive values contain only the identical value: SELECT '"foo"'::jsonb @> '"foo"'::jsonb; -- The array on the right side is contained within the one on the left: SELECT '[1, 2, 3]'::jsonb @> '[1, 3]'::jsonb; +-- Order of array elements is not significant, so this is also true: +SELECT '[1, 2, 3]'::jsonb @> '[3, 1]'::jsonb; + +-- Duplicate array elements don't matter either: +SELECT '[1, 2, 3]'::jsonb @> '[1, 2, 2]'::jsonb; + -- The object with a single pair on the right side is contained -- within the object on the left side: SELECT '{"product": "PostgreSQL", "version": 9.4, "jsonb":true}'::jsonb @> '{"version":9.4}'::jsonb; @@ -282,27 +291,29 @@ SELECT '[1, 2, [1, 3]]'::jsonb @> '[[1, 3]]'::jsonb; -- Similarly, containment is not reported here: SELECT '{"foo": {"bar": "baz"}}'::jsonb @> '{"bar": "baz"}'::jsonb; -- yields false - + The general principle is that the contained object must match the containing object as to structure and data contents, possibly after discarding some non-matching array elements or object key/value pairs - from the containing object. However, the order of array elements is - not significant when doing a containment match. + from the containing object. + But remember that the order of array elements is not significant when + doing a containment match, and duplicate array elements are effectively + considered only once. 
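One further consequence of the discard-what-does-not-match rule restated above, not spelled out in the hunk itself but following directly from it: an empty array or object on the right-hand side is contained in any array or object of the same kind.

-- Both of these yield true
SELECT '[1, 2, 3]'::jsonb @> '[]'::jsonb;
SELECT '{"product": "PostgreSQL", "version": 9.4}'::jsonb @> '{}'::jsonb;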
As a special exception to the general principle that the structures must match, an array may contain a primitive value: - + -- This array contains the primitive string value: SELECT '["foo", "bar"]'::jsonb @> '"bar"'::jsonb; -- This exception is not reciprocal -- non-containment is reported here: SELECT '"bar"'::jsonb @> '["bar"]'::jsonb; -- yields false - + jsonb also has an existence operator, which is @@ -335,6 +346,34 @@ SELECT '"foo"'::jsonb ? 'foo'; need to be searched linearly. + + + Because JSON containment is nested, an appropriate query can skip + explicit selection of sub-objects. As an example, suppose that we have + a doc column containing objects at the top level, with + most objects containing tags fields that contain arrays of + sub-objects. This query finds entries in which sub-objects containing + both "term":"paris" and "term":"food" appear, + while ignoring any such keys outside the tags array: + +SELECT doc->'site_name' FROM websites + WHERE doc @> '{"tags":[{"term":"paris"}, {"term":"food"}]}'; + + One could accomplish the same thing with, say, + +SELECT doc->'site_name' FROM websites + WHERE doc->'tags' @> '[{"term":"paris"}, {"term":"food"}]'; + + but that approach is less flexible, and often less efficient as well. + + + + On the other hand, the JSON existence operator is not nested: it will + only look for the specified key or array element at top level of the + JSON value. + + + The various containment and existence operators, along with all other JSON operators and functions are documented @@ -354,7 +393,7 @@ SELECT '"foo"'::jsonb ? 'foo'; keys or key/value pairs occurring within a large number of jsonb documents (datums). Two GIN operator classes are provided, offering different - performance and flexibility tradeoffs. + performance and flexibility trade-offs. The default GIN operator class for jsonb supports queries with @@ -363,22 +402,22 @@ SELECT '"foo"'::jsonb ? 'foo'; (For details of the semantics that these operators implement, see .) An example of creating an index with this operator class is: - + CREATE INDEX idxgin ON api USING gin (jdoc); - + The non-default GIN operator class jsonb_path_ops supports indexing the @> operator only. An example of creating an index with this operator class is: - + CREATE INDEX idxginp ON api USING gin (jdoc jsonb_path_ops); - + Consider the example of a table that stores JSON documents retrieved from a third-party web service, with a documented schema definition. A typical document is: - + { "guid": "9c36adc1-7fb5-4d5b-83b4-90356a46061a", "name": "Angela Barton", @@ -394,32 +433,29 @@ CREATE INDEX idxginp ON api USING gin (jdoc jsonb_path_ops); "qui" ] } - + We store these documents in a table named api, in a jsonb column named jdoc. If a GIN index is created on this column, queries like the following can make use of the index: - + -- Find documents in which the key "company" has value "Magnafone" SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"company": "Magnafone"}'; - + However, the index could not be used for queries like the following, because though the operator ? is indexable, it is not applied directly to the indexed column jdoc: - + -- Find documents in which the key "tags" contains key or array element "qui" SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc -> 'tags' ? 'qui'; - + Still, with appropriate use of expression indexes, the above query can use an index. 
If querying for particular items within the "tags" key is common, defining an index like this may be worthwhile: - --- Note that the "jsonb -> text" operator can only be called on a JSON --- object, so as a consequence of creating this index the root of each --- "jdoc" value must be an object. This is enforced during insertion. + CREATE INDEX idxgintags ON api USING gin ((jdoc -> 'tags')); - + Now, the WHERE clause jdoc -> 'tags' ? 'qui' will be recognized as an application of the indexable operator ? to the indexed @@ -429,10 +465,10 @@ CREATE INDEX idxgintags ON api USING gin ((jdoc -> 'tags')); Another approach to querying is to exploit containment, for example: - + -- Find documents in which the key "tags" contains array element "qui" SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qui"]}'; - + A simple GIN index on the jdoc column can support this query. But note that such an index will store copies of every key and value in the jdoc column, whereas the expression index @@ -460,11 +496,16 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu and a jsonb_path_ops GIN index is that the former creates independent index items for each key and value in the data, while the latter creates index items only for each value in the - data.For this purpose, the term value - includes array elements, though JSON terminology sometimes considers - array elements distinct from values within objects. - But in jsonb_path_ops, each index item is a hash - of both the value and the key(s) leading to it; for example to index + data. + + + For this purpose, the term value includes array elements, + though JSON terminology sometimes considers array elements distinct + from values within objects. + + + Basically, each jsonb_path_ops index item is + a hash of the value and the key(s) leading to it; for example to index {"foo": {"bar": "baz"}}, a single index item would be created incorporating all three of foo, bar, and baz into the hash value. Thus a containment query @@ -496,17 +537,17 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu equality of complete JSON documents. The btree ordering for jsonb datums is seldom of great interest, but for completeness it is: - - Object > Array > Boolean > Number > String > Null + +Object > Array > Boolean > Number > String > Null - Object with n pairs > object with n - 1 pairs +Object with n pairs > object with n - 1 pairs - Array with n elements > array with n - 1 elements - +Array with n elements > array with n - 1 elements + Objects with equal numbers of pairs are compared in the order: - - key-1, value-1, key-2 ... - + +key-1, value-1, key-2 ... + Note that object keys are compared in their storage order; in particular, since shorter keys are stored before longer keys, this can lead to results that might be unintuitive, such as: @@ -515,9 +556,9 @@ SELECT jdoc->'guid', jdoc->'name' FROM api WHERE jdoc @> '{"tags": ["qu Similarly, arrays with equal numbers of elements are compared in the order: - - element-1, element-2 ... - + +element-1, element-2 ... + Primitive JSON values are compared using the same comparison rules as for the underlying PostgreSQL data type. 
Strings are diff --git a/doc/src/sgml/keywords.sgml b/doc/src/sgml/keywords.sgml index ecfde993da3ba..1c93b7c148dcc 100644 --- a/doc/src/sgml/keywords.sgml +++ b/doc/src/sgml/keywords.sgml @@ -3165,7 +3165,7 @@ ORDINALITY - + non-reserved non-reserved non-reserved @@ -5046,6 +5046,13 @@ non-reserved reserved + + VIEWS + non-reserved + + + + VOLATILE non-reserved @@ -5104,7 +5111,7 @@ WITHIN - + non-reserved reserved reserved diff --git a/doc/src/sgml/legal.sgml b/doc/src/sgml/legal.sgml index 1bd42fa6b8d91..84bc7beb5adf4 100644 --- a/doc/src/sgml/legal.sgml +++ b/doc/src/sgml/legal.sgml @@ -1,9 +1,9 @@ -2014 +2016 - 1996-2014 + 1996-2016 The PostgreSQL Global Development Group @@ -11,7 +11,7 @@ Legal Notice - PostgreSQL is Copyright © 1996-2014 + PostgreSQL is Copyright © 1996-2016 by the PostgreSQL Global Development Group. diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index e10ccec9f13c9..7f60e3dc20e60 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -124,7 +124,10 @@ PGconn *PQconnectdbParams(const char * const *keywords, When expand_dbname is non-zero, the dbname key word value is allowed to be recognized - as a connection string. More details on the possible formats appear in + as a connection string. Only the first occurrence of + dbname is expanded this way, any subsequent + dbname value is processed as plain database name. More + details on the possible connection string formats appear in . @@ -136,7 +139,7 @@ PGconn *PQconnectdbParams(const char * const *keywords, - If any parameter is NULL or an emptry string, the corresponding + If any parameter is NULL or an emptry string, the corresponding environment variable (see ) is checked. If the environment variable is not set either, then the indicated built-in defaults are used. @@ -1464,24 +1467,6 @@ char *PQhost(const PGconn *conn); - - - PQhostaddr - - PQhostaddr - - - - - - Returns the server numeric IP address of the connection. - -char *PQhostaddr(const PGconn *conn); - - - - - PQport @@ -4395,7 +4380,14 @@ int PQflush(PGconn *conn); After sending any command or data on a nonblocking connection, call PQflush. If it returns 1, wait for the socket - to be write-ready and call it again; repeat until it returns 0. Once + to become read- or write-ready. If it becomes write-ready, call + PQflush again. If it becomes read-ready, call + PQconsumeInput, then call + PQflush again. Repeat until + PQflush returns 0. (It is necessary to check for + read-ready and drain the input with PQconsumeInput, + because the server can block trying to send us data, e.g. NOTICE + messages, and won't read our data until we read its.) Once PQflush returns 0, wait for the socket to be read-ready and then read the response as described above. @@ -4695,22 +4687,29 @@ typedef struct parameters to be passed to the function; they must match the declared function argument list. When the isint field of a parameter structure is true, the u.integer value is sent - to the server as an integer of the indicated length (this must be 1, - 2, or 4 bytes); proper byte-swapping occurs. When isint + to the server as an integer of the indicated length (this must be + 2 or 4 bytes); proper byte-swapping occurs. When isint is false, the indicated number of bytes at *u.ptr are sent with no processing; the data must be in the format expected by the server for binary transmission of the function's argument data - type. result_buf is the buffer in which to - place the return value. The caller must have allocated sufficient + type. 
(The declaration of u.ptr as being of + type int * is historical; it would be better to consider + it void *.) + result_buf points to the buffer in which to place + the function's return value. The caller must have allocated sufficient space to store the return value. (There is no check!) The actual result - length will be returned in the integer pointed to by - result_len. If a 1, 2, or 4-byte integer result + length in bytes will be returned in the integer pointed to by + result_len. If a 2- or 4-byte integer result is expected, set result_is_int to 1, otherwise set it to 0. Setting result_is_int to 1 causes libpq to byte-swap the value if necessary, so that it - is delivered as a proper int value for the client machine. + is delivered as a proper int value for the client machine; + note that a 4-byte integer is delivered into *result_buf + for either allowed result size. When result_is_int is 0, the binary-format byte string - sent by the server is returned unmodified. + sent by the server is returned unmodified. (In this case it's better + to consider result_buf as being of + type void *.) @@ -5666,7 +5665,7 @@ int PQfireResultCreateEvents(PGconn *conn, PGresult *res); The main reason that this function is separate from - PQmakeEmptyPGResult is that it is often appropriate + PQmakeEmptyPGresult is that it is often appropriate to create a PGresult and fill it with data before invoking the event procedures. @@ -6967,7 +6966,7 @@ myEventProc(PGEventId evtId, void *evtInfo, void *passThrough) at ~/.pg_service.conf or the location specified by the environment variable PGSERVICEFILE, or it can be a system-wide file - at etc/pg_service.conf or in the directory + at `pg_config --sysconfdir`/pg_service.conf or in the directory specified by the environment variable PGSYSCONFDIR. If service definitions with the same name exist in the user and the system file, the user file takes @@ -7043,17 +7042,17 @@ version:1 dn:cn=mydatabase,dc=mycompany,dc=com changetype:add objectclass:top -objectclass:groupOfUniqueNames +objectclass:device cn:mydatabase -uniqueMember:host=dbserver.mycompany.com -uniqueMember:port=5439 -uniqueMember:dbname=mydb -uniqueMember:user=mydb_user -uniqueMember:sslmode=require +description:host=dbserver.mycompany.com +description:port=5439 +description:dbname=mydb +description:user=mydb_user +description:sslmode=require might be queried with the following LDAP URL: -ldap://ldap.mycompany.com/dc=mycompany,dc=com?uniqueMember?one?(cn=mydatabase) +ldap://ldap.mycompany.com/dc=mycompany,dc=com?description?one?(cn=mydatabase) @@ -7158,7 +7157,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) For backwards compatibility with earlier versions of PostgreSQL, if a root CA file exists, the behavior of sslmode=require will be the same - as that of verify-ca, meaning the sever certificate + as that of verify-ca, meaning the server certificate is validated against the CA. Relying on this behavior is discouraged, and applications that need certificate validation should always use verify-ca or verify-full. @@ -7200,7 +7199,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) - Note that the client's ~/.postgresql/root.crt lists the top-level CAs + Note that the client's ~/.postgresql/root.crt lists the top-level CAs that are considered trusted for signing server certificates. In principle it need not list the CA that signed the client's certificate, though in most cases that CA would also be trusted for server certificates. 
@@ -7442,7 +7441,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) libpq will not also initialize those libraries. See + url="http://h71000.www7.hp.com/doc/83final/ba554_90007/ch04.html"> for details on the SSL API. diff --git a/doc/src/sgml/lobj.sgml b/doc/src/sgml/lobj.sgml index d403586a0540a..c0174b71bee76 100644 --- a/doc/src/sgml/lobj.sgml +++ b/doc/src/sgml/lobj.sgml @@ -547,16 +547,16 @@ int lo_unlink(PGconn *conn, Oid lobjId); - lo_create + lo_from_bytea - lo_create(loid oid, string bytea) + lo_from_bytea(loid oid, string bytea) oid Create a large object and store data there, returning its OID. Pass 0 to have the system choose an OID. - lo_create(0, E'\\xffffff00') + lo_from_bytea(0, E'\\xffffff00') 24528 diff --git a/doc/src/sgml/logicaldecoding.sgml b/doc/src/sgml/logicaldecoding.sgml index a2108d68e2d39..70195f7f4235c 100644 --- a/doc/src/sgml/logicaldecoding.sgml +++ b/doc/src/sgml/logicaldecoding.sgml @@ -17,16 +17,15 @@ The format in which those changes are streamed is determined by the output - plugin used. An example plugin is provided, and additional plugins can be + plugin used. An example plugin is provided in the PostgreSQL distribution. + Additional plugins can be written to extend the choice of available formats without modifying any core code. Every output plugin has access to each individual new row produced by INSERT and the new row version created by UPDATE. Availability of old row versions for UPDATE and DELETE depends on - the configured - REPLICA - IDENTITY. + the configured replica identity (see ). @@ -40,18 +39,22 @@ - Logical Decoding Example + Logical Decoding Examples + - The following example demonstrates the SQL interface. + The following example demonstrates controlling logical decoding using the + SQL interface. + Before you can use logical decoding, you must set to logical and - to at least 1. - Then, you should connect to the target database (in the example + to at least 1. Then, you + should connect to the target database (in the example below, postgres) as a superuser. - + + postgres=# -- Create a slot named 'regression_slot' using the output plugin 'test_decoding' postgres=# SELECT * FROM pg_create_logical_replication_slot('regression_slot', 'test_decoding'); slot_name | xlog_position @@ -114,7 +117,7 @@ postgres=# SELECT * FROM pg_logical_slot_peek_changes('regression_slot', NULL, N 0/16E0B90 | 690 | COMMIT 690 (3 rows) -postgres=# -- You can also peek ahead in the change stream without consuming changes +postgres=# -- The next call to pg_logical_slot_peek_changes() returns the same changes again postgres=# SELECT * FROM pg_logical_slot_peek_changes('regression_slot', NULL, NULL); location | xid | data -----------+-----+----------------------------------------------- @@ -139,41 +142,48 @@ postgres=# SELECT pg_drop_replication_slot('regression_slot'); ----------------------- (1 row) - + + - The following example shows usage of the walsender interface using - the pg_recvlogical - shell command. It requires the replication configurations to be allowed - (see ) - and max_wal_senders to be set sufficiently high for - another connection. + The following example shows how logical decoding is controlled over the + streaming replication protocol, using the + program included in the PostgreSQL + distribution. This requires that client authentication is set up to allow + replication connections + (see ) and + that max_wal_senders is set sufficiently high to allow + an additional connection. 
- -# pg_recvlogical -d postgres --slot test --create -# pg_recvlogical -d postgres --slot test --start -f - -CTRL-Z -# psql -d postgres -c "INSERT INTO data(data) VALUES('4');" -# fg + +$ pg_recvlogical -d postgres --slot test --create-slot +$ pg_recvlogical -d postgres --slot test --start -f - +ControlZ +$ psql -d postgres -c "INSERT INTO data(data) VALUES('4');" +$ fg BEGIN 693 table public.data: INSERT: id[integer]:4 data[text]:'4' COMMIT 693 -CTRL-C -# pg_recvlogical -d postgres --slot test --drop - +ControlC +$ pg_recvlogical -d postgres --slot test --drop-slot + + Logical Decoding Concepts Logical Decoding + Logical Decoding + Logical decoding is the process of extracting all persistent changes to a database's tables into a coherent, easy to understand format which can be interpreted without detailed knowledge of the database's internal state. + In PostgreSQL, logical decoding is implemented by decoding the contents of the write-ahead @@ -184,34 +194,40 @@ CTRL-C Replication Slots + replication slot logical replication + In the context of logical replication, a slot represents a stream of - changes which can be replayed to a client in the order they were made on + changes that can be replayed to a client in the order they were made on the origin server. Each slot streams a sequence of changes from a single database, sending each change exactly once (except when peeking forward in the stream). + PostgreSQL also has streaming replication slots (see ), but they are used somewhat differently there. + - Replication slots have an identifier which is unique across all databases + A replication slot has an identifier that is unique across all databases in a PostgreSQL cluster. Slots persist independently of the connection using them and are crash-safe. + Multiple independent slots may exist for a single database. Each slot has its own state, allowing different consumers to receive changes from different points in the database change stream. For most applications, a separate slot will be required for each consumer. + A logical replication slot knows nothing about the state of the receiver(s). It's even possible to have multiple different receivers using @@ -219,17 +235,19 @@ CTRL-C on from when the last receiver stopped consuming them. Only one receiver may consume changes from a slot at any given time. + Replication slots persist across crashes and know nothing about the state of their consumer(s). They will prevent removal of required resources even when there is no connection using them. This consumes storage because neither required WAL nor required rows from the system catalogs - can be removed by VACUUM as long as they are required by a replication - slot, so if a slot is no longer required it should be dropped. + can be removed by VACUUM as long as they are required by a replication + slot. So if a slot is no longer required it should be dropped. + Output Plugins @@ -237,63 +255,84 @@ CTRL-C representation into the format the consumer of a replication slot desires. + Exported Snapshots - When a new replication slot is created using the walsender interface a - snapshot is exported - (see ) which will show + When a new replication slot is created using the streaming replication interface, + a snapshot is exported + (see ), which will show exactly the state of the database after which all changes will be included in the change stream. This can be used to create a new replica by using SET TRANSACTION SNAPSHOT to read the state of the database at the moment the slot was created. 
This transaction can then be used to dump the - database's state at that point in time which afterwards can be updated + database's state at that point in time, which afterwards can be updated using the slot's contents without losing any changes. + Streaming Replication Protocol Interface + - The CREATE_REPLICATION_SLOT slot_name LOGICAL - options, DROP_REPLICATION_SLOT slot_name - and START_REPLICATION SLOT slot_name LOGICAL options - commands can be used to create, drop and stream changes from a replication - slot respectively. These commands are only available over a replication + The commands + + + CREATE_REPLICATION_SLOT slot_name LOGICAL output_plugin + + + + DROP_REPLICATION_SLOT slot_name + + + + START_REPLICATION SLOT slot_name LOGICAL ... + + + are used to create, drop, and stream changes from a replication + slot, respectively. These commands are only available over a replication connection; they cannot be used via SQL. - See . + See for details on these commands. + - The pg_recvlogical command - (see ) can be used to control logical - decoding over a walsender connection. + The command can be used to control + logical decoding over a streaming replication connection. (It uses + these commands internally.) + Logical Decoding <acronym>SQL</acronym> Interface + See for detailed documentation on the SQL-level API for interacting with logical decoding. + Synchronous replication (see ) is - only supported on replication slots used over the walsender interface. The + only supported on replication slots used over the streaming replication interface. The function interface and additional, non-core interfaces do not support synchronous replication. + - System catalogs related to logical decoding + System Catalogs Related to Logical Decoding + The pg_replication_slots view and the pg_stat_replication view provide information about the current state of replication slots and - walsender connections respectively. These views apply to both physical and + streaming replication connections respectively. These views apply to both physical and logical replication. + Logical Decoding Output Plugins @@ -310,14 +349,14 @@ CTRL-C An output plugin is loaded by dynamically loading a shared library with - the output plugin's name as the library basename. The normal library + the output plugin's name as the library base name. The normal library search path is used to locate the library. To provide the required output plugin callbacks and to indicate that the library is actually an output plugin it needs to provide a function named _PG_output_plugin_init. This function is passed a struct that needs to be filled with the callback function pointers for individual actions. - + typedef struct OutputPluginCallbacks { LogicalDecodeStartupCB startup_cb; @@ -326,8 +365,9 @@ typedef struct OutputPluginCallbacks LogicalDecodeCommitCB commit_cb; LogicalDecodeShutdownCB shutdown_cb; } OutputPluginCallbacks; + typedef void (*LogicalOutputPluginInit)(struct OutputPluginCallbacks *cb); - + The begin_cb, change_cb and commit_cb callbacks are required, while startup_cb @@ -337,6 +377,7 @@ typedef void (*LogicalOutputPluginInit)(struct OutputPluginCallbacks *cb); Capabilities + To decode, format and output changes, output plugins can use most of the backend's normal infrastructure, including calling output functions. 
Read @@ -344,68 +385,93 @@ typedef void (*LogicalOutputPluginInit)(struct OutputPluginCallbacks *cb); accessed that either have been created by initdb in the pg_catalog schema, or have been marked as user provided catalog tables using - + ALTER TABLE user_catalog_table SET (user_catalog_table = true); CREATE TABLE another_catalog_table(data text) WITH (user_catalog_table = true); - - Any actions leading to xid assignment are prohibited. That, among others, - includes writing to tables, performing DDL changes and + + Any actions leading to transaction ID assignment are prohibited. That, among others, + includes writing to tables, performing DDL changes, and calling txid_current(). + + Output Modes + + + Output plugin callbacks can pass data to the consumer in nearly arbitrary + formats. For some use cases, like viewing the changes via SQL, returning + data in a data type that can contain arbitrary data (e.g., bytea) is + cumbersome. If the output plugin only outputs textual data in the + server's encoding, it can declare that by + setting OutputPluginOptions.output_mode + to OUTPUT_PLUGIN_TEXTUAL_OUTPUT instead + of OUTPUT_PLUGIN_BINARY_OUTPUT in + the startup + callback. In that case, all the data has to be in the server's encoding + so that a text datum can contain it. This is checked in assertion-enabled + builds. + + + Output Plugin Callbacks + An output plugin gets notified about changes that are happening via various callbacks it needs to provide. + - Concurrent transactions are decoded in commit order and only changes - belonging to a specific transaction are decoded inbetween + Concurrent transactions are decoded in commit order, and only changes + belonging to a specific transaction are decoded between the begin and commit callbacks. Transactions that were rolled back explicitly or implicitly never get - decoded. Successful SAVEPOINTs are + decoded. Successful savepoints are folded into the transaction containing them in the order they were executed within that transaction. + Only transactions that have already safely been flushed to disk will be - decoded. That can lead to a COMMIT not immediately being decoded in a + decoded. That can lead to a COMMIT not immediately being decoded in a directly following pg_logical_slot_get_changes() when synchronous_commit is set to off. + Startup Callback The optional startup_cb callback is called whenever a replication slot is created or asked to stream changes, independent of the number of changes that are ready to be put out. - + typedef void (*LogicalDecodeStartupCB) ( struct LogicalDecodingContext *ctx, OutputPluginOptions *options, bool is_init ); - + The is_init parameter will be true when the replication slot is being created and false otherwise. options points to a struct of options that output plugins can set: - + typedef struct OutputPluginOptions { OutputPluginOutputType output_type; } OutputPluginOptions; - + output_type has to either be set to OUTPUT_PLUGIN_TEXTUAL_OUTPUT - or OUTPUT_PLUGIN_BINARY_OUTPUT. + or OUTPUT_PLUGIN_BINARY_OUTPUT. See also + . + The startup callback should validate the options present in ctx->output_plugin_options. If the output plugin @@ -413,71 +479,78 @@ typedef struct OutputPluginOptions use ctx->output_plugin_private to store it. + Shutdown Callback + The optional shutdown_cb callback is called whenever a formerly active replication slot is not used anymore and can be used to deallocate resources private to the output plugin. The slot isn't necessarily being dropped, streaming is just being stopped. 
- + typedef void (*LogicalDecodeShutdownCB) ( struct LogicalDecodingContext *ctx ); - + - + + Transaction Begin Callback + The required begin_cb callback is called whenever a - start of a commited transaction has been decoded. Aborted transactions + start of a committed transaction has been decoded. Aborted transactions and their contents never get decoded. - + typedef void (*LogicalDecodeBeginCB) ( struct LogicalDecodingContext *, ReorderBufferTXN *txn ); - + The txn parameter contains meta information about - the transaction, like the timestamp at which it has been committed and + the transaction, like the time stamp at which it has been committed and its XID. - + + Transaction End Callback + The required commit_cb callback is called whenever a transaction commit has been decoded. The change_cb callbacks for all modified rows will have been called before this, if there have been any modified rows. - + typedef void (*LogicalDecodeCommitCB) ( struct LogicalDecodingContext *, ReorderBufferTXN *txn ); - + + - Callback called for each individual change in a - transaction + Change Callback + The required change_cb callback is called for every individual row modification inside a transaction, may it be - an INSERT, UPDATE + an INSERT, UPDATE, or DELETE. Even if the original command modified - several rows at once the callback will be called indvidually for each + several rows at once the callback will be called individually for each row. - + typedef void (*LogicalDecodeChangeCB) ( struct LogicalDecodingContext *ctx, ReorderBufferTXN *txn, Relation relation, ReorderBufferChange *change ); - + The ctx and txn parameters have the same contents as for the begin_cb and commit_cb callbacks, but additionally the @@ -486,6 +559,7 @@ typedef void (*LogicalDecodeChangeCB) ( change describing the row modification are passed in. + Only changes in user defined tables that are not unlogged @@ -496,33 +570,38 @@ typedef void (*LogicalDecodeChangeCB) ( + - Functions for producing output from an output plugin + Functions for Producing Output + To actually produce output, output plugins can write data to the StringInfo output buffer in ctx->out when inside - the begin_cb, commit_cb + the begin_cb, commit_cb, or change_cb callbacks. Before writing to the output - buffer OutputPluginPrepareWrite(ctx, last_write) has + buffer, OutputPluginPrepareWrite(ctx, last_write) has to be called, and after finishing writing to the - buffer OutputPluginWrite(ctx, last_write) has to be + buffer, OutputPluginWrite(ctx, last_write) has to be called to perform the write. The last_write indicates whether a particular write was the callback's last write. + The following example shows how to output data to the consumer of an output plugin: - + OutputPluginPrepareWrite(ctx, true); appendStringInfo(ctx->out, "BEGIN %u", txn->xid); OutputPluginWrite(ctx, true); - + + Logical Decoding Output Writers + It is possible to add more output methods for logical decoding. For details, see @@ -532,19 +611,22 @@ OutputPluginWrite(ctx, true); (see ). + - Synchronous replication support for Logical Decoding + Synchronous Replication Support for Logical Decoding + - Logical decoding may be used to to build + Logical decoding can be used to to build synchronous replication solutions with the same user interface as synchronous replication for streaming - replication. To do this, the walsender interface + replication. To do this, the streaming replication interface (see ) must be used to stream out data. 
Clients have to send Standby status update (F) (see ) messages, just like streaming replication clients do. + A synchronous replica receiving changes via logical decoding will work in diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml index d3fcb82ef751b..1babd0ec4a93f 100644 --- a/doc/src/sgml/maintenance.sgml +++ b/doc/src/sgml/maintenance.sgml @@ -528,7 +528,7 @@ the relfrozenxid column of a table's pg_class row contains the freeze cutoff XID that was used by the last whole-table VACUUM for that table. All rows - inserted by transactions with XIDs XIDs older than this cutoff XID are + inserted by transactions with XIDs older than this cutoff XID are guaranteed to have been frozen. Similarly, the datfrozenxid column of a database's pg_database row is a lower bound on the unfrozen XIDs @@ -628,6 +628,9 @@ HINT: Stop the postmaster and vacuum that database in single-user mode. Like transaction IDs, multixact IDs are implemented as a 32-bit counter and corresponding storage, all of which requires careful aging management, storage cleanup, and wraparound handling. + There is a separate storage area which holds the list of members in + each multixact, which also uses a 32-bit counter and which must also + be managed. @@ -650,8 +653,12 @@ HINT: Stop the postmaster and vacuum that database in single-user mode. As a safety device, a whole-table vacuum scan will occur for any table whose multixact-age is greater than - . - This will occur even if autovacuum is nominally disabled. + . Whole-table + vacuum scans will also occur progressively for all tables, starting with + those that have the oldest multixact-age, if the amount of used member + storage space exceeds the amount 50% of the addressible storage space. + Both of these kinds of whole-table scans will occur even if autovacuum is + nominally disabled. @@ -787,10 +794,13 @@ analyze threshold = analyze base threshold + analyze scale factor * number of tu - When multiple workers are running, the cost limit is + When multiple workers are running, the cost delay parameters are balanced among all the running workers, so that the - total impact on the system is the same, regardless of the number - of workers actually running. + total I/O impact on the system is the same regardless of the number + of workers actually running. However, any workers processing tables whose + autovacuum_vacuum_cost_delay or + autovacuum_vacuum_cost_limit have been set are not considered + in the balancing algorithm. diff --git a/doc/src/sgml/monitoring.sgml b/doc/src/sgml/monitoring.sgml index 69d99e7c75ff5..1fa3e10c11bf9 100644 --- a/doc/src/sgml/monitoring.sgml +++ b/doc/src/sgml/monitoring.sgml @@ -631,7 +631,7 @@ postgres: user database host backend_xid xid - Toplevel transaction identifier of this backend, if any. + Top-level transaction identifier of this backend, if any. backend_xmin @@ -1907,7 +1907,7 @@ SELECT pg_stat_get_backend_pid(s.backendid) AS pid, Currently, the DTrace utility is supported, which, at the time of this writing, is available - on Solaris, Mac OS X, FreeBSD, NetBSD, and Oracle Linux. The + on Solaris, OS X, FreeBSD, NetBSD, and Oracle Linux. The SystemTap project for Linux provides a DTrace equivalent and can also be used. 
Supporting other dynamic tracing utilities is theoretically possible by changing the definitions for diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml index 12b7814bfd987..78a6a15ea737e 100644 --- a/doc/src/sgml/mvcc.sgml +++ b/doc/src/sgml/mvcc.sgml @@ -708,20 +708,6 @@ ERROR: could not serialize access due to read/write dependencies among transact - - - - Support for the Serializable transaction isolation level has not yet - been added to Hot Standby replication targets (described in - ). The strictest isolation level currently - supported in hot standby mode is Repeatable Read. While performing all - permanent database writes within Serializable transactions on the - master will ensure that all standbys will eventually reach a consistent - state, a Repeatable Read transaction run on the standby can sometimes - see a transient state which is inconsistent with any serial execution - of serializable transactions on the master. - - @@ -955,7 +941,9 @@ ERROR: could not serialize access due to read/write dependencies among transact Acquired by the DROP TABLE, TRUNCATE, REINDEX, - CLUSTER, and VACUUM FULL + CLUSTER, VACUUM FULL, + and REFRESH MATERIALIZED VIEW (without + ) commands. Many forms of ALTER TABLE also acquire a lock at this level (see ). This is also the default lock mode for LOCK TABLE @@ -1106,30 +1094,108 @@ ERROR: could not serialize access due to read/write dependencies among transact In addition to table-level locks, there are row-level locks, which - can be exclusive or shared locks. An exclusive row-level lock on a - specific row is automatically acquired when the row is updated or - deleted. The lock is held until the transaction commits or rolls - back, just like table-level locks. Row-level locks do - not affect data querying; they block only writers to the same - row. + are listed as below with the contexts in which they are used + automatically by PostgreSQL. See + for a complete table of + row-level lock conflicts. Note that a transaction can hold + conflicting locks on the same row, even in different subtransactions; + but other than that, two transactions can never hold conflicting locks + on the same row. Row-level locks do not affect data querying; they + block only writers and lockers to the same row. - - To acquire an exclusive row-level lock on a row without actually - modifying the row, select the row with SELECT FOR - UPDATE. Note that once the row-level lock is acquired, - the transaction can update the row multiple times without - fear of conflicts. - + + Row-level Lock Modes + + + FOR UPDATE + + + + FOR UPDATE causes the rows retrieved by the + SELECT statement to be locked as though for + update. This prevents them from being locked, modified or deleted by + other transactions until the current transaction ends. That is, + other transactions that attempt UPDATE, + DELETE, + SELECT FOR UPDATE, + SELECT FOR NO KEY UPDATE, + SELECT FOR SHARE or + SELECT FOR KEY SHARE + of these rows will be blocked until the current transaction ends; + conversely, SELECT FOR UPDATE will wait for a + concurrent transaction that has run any of those commands on the + same row, + and will then lock and return the updated row (or no row, if the + row was deleted). Within a REPEATABLE READ or + SERIALIZABLE transaction, + however, an error will be thrown if a row to be locked has changed + since the transaction started. For further discussion see + . 
+ + + The FOR UPDATE lock mode + is also acquired by any DELETE on a row, and also by an + UPDATE that modifies the values on certain columns. Currently, + the set of columns considered for the UPDATE case are those that + have a unique index on them that can be used in a foreign key (so partial + indexes and expressional indexes are not considered), but this may change + in the future. + + + - - To acquire a shared row-level lock on a row, select the row with - SELECT FOR SHARE. A shared lock does not prevent - other transactions from acquiring the same shared lock. However, - no transaction is allowed to update, delete, or exclusively lock a - row on which any other transaction holds a shared lock. Any attempt - to do so will block until the shared lock(s) have been released. - + + + FOR NO KEY UPDATE + + + + Behaves similarly to FOR UPDATE, except that the lock + acquired is weaker: this lock will not block + SELECT FOR KEY SHARE commands that attempt to acquire + a lock on the same rows. This lock mode is also acquired by any + UPDATE that does not acquire a FOR UPDATE lock. + + + + + + + FOR SHARE + + + + Behaves similarly to FOR NO KEY UPDATE, except that it + acquires a shared lock rather than exclusive lock on each retrieved + row. A shared lock blocks other transactions from performing + UPDATE, DELETE, + SELECT FOR UPDATE or + SELECT FOR NO KEY UPDATE on these rows, but it does not + prevent them from performing SELECT FOR SHARE or + SELECT FOR KEY SHARE. + + + + + + + FOR KEY SHARE + + + + Behaves similarly to FOR SHARE, except that the + lock is weaker: SELECT FOR UPDATE is blocked, but not + SELECT FOR NO KEY UPDATE. A key-shared lock blocks + other transactions from performing DELETE or + any UPDATE that changes the key values, but not + other UPDATE, and neither does it prevent + SELECT FOR NO KEY UPDATE, SELECT FOR SHARE, + or SELECT FOR KEY SHARE. + + + + PostgreSQL doesn't remember any @@ -1140,6 +1206,61 @@ ERROR: could not serialize access due to read/write dependencies among transact will result in disk writes. + + Conflicting Row-level Locks + + + + + + + Requested Lock Mode + Current Lock Mode + + + FOR KEY SHARE + FOR SHARE + FOR NO KEY UPDATE + FOR UPDATE + + + + + FOR KEY SHARE + + + + X + + + FOR SHARE + + + X + X + + + FOR NO KEY UPDATE + + X + X + X + + + FOR UPDATE + X + X + X + X + + + +
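To make the conflict table above concrete, here is a hedged three-session sketch, again borrowing the accounts table used elsewhere in this chapter, showing which of the row-level modes block each other.

    -- Session 1: the lock taken by an UPDATE of a non-key column.
    BEGIN;
    SELECT * FROM accounts WHERE acctnum = 22222 FOR NO KEY UPDATE;

    -- Session 2: a foreign-key-style check; FOR KEY SHARE does not conflict
    -- with FOR NO KEY UPDATE, so this returns immediately.
    BEGIN;
    SELECT * FROM accounts WHERE acctnum = 22222 FOR KEY SHARE;

    -- Session 3: FOR UPDATE conflicts with every other row-level mode, so
    -- this waits until both transactions above commit or roll back.
    BEGIN;
    SELECT * FROM accounts WHERE acctnum = 22222 FOR UPDATE;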
+ + + + Page-level Locks + In addition to table and row locks, page-level share/exclusive locks are used to control read/write access to table pages in the shared buffer @@ -1254,7 +1375,7 @@ UPDATE accounts SET balance = balance - 100.00 WHERE acctnum = 22222; correctly. Advisory locks can be useful for locking strategies that are an awkward fit for the MVCC model. For example, a common use of advisory locks is to emulate pessimistic - locking strategies typical of so called flat file data + locking strategies typical of so-called flat file data management systems. While a flag stored in a table could be used for the same purpose, advisory locks are faster, avoid table bloat, and are automatically @@ -1408,7 +1529,7 @@ SELECT pg_advisory_lock(q.id) FROM This level of integrity protection using Serializable transactions does not yet extend to hot standby mode (). Because of that, those using hot standby may want to use Repeatable - Read and explicit locking.on the master. + Read and explicit locking on the master. @@ -1485,6 +1606,38 @@ SELECT pg_advisory_lock(q.id) FROM + + Caveats + + + Some DDL commands, currently only and the + table-rewriting forms of , are not + MVCC-safe. This means that after the truncation or rewrite commits, the + table will appear empty to concurrent transactions, if they are using a + snapshot taken before the DDL command committed. This will only be an + issue for a transaction that did not access the table in question + before the DDL command started — any transaction that has done so + would hold at least an ACCESS SHARE table lock, + which would block the DDL command until that transaction completes. + So these commands will not cause any apparent inconsistency in the + table contents for successive queries on the target table, but they + could cause visible inconsistency between the contents of the target + table and other tables in the database. + + + + Support for the Serializable transaction isolation level has not yet + been added to Hot Standby replication targets (described in + ). The strictest isolation level currently + supported in hot standby mode is Repeatable Read. While performing all + permanent database writes within Serializable transactions on the + master will ensure that all standbys will eventually reach a consistent + state, a Repeatable Read transaction run on the standby can sometimes + see a transient state that is inconsistent with any serial execution + of the transactions on the master. + + + Locking and Indexes diff --git a/doc/src/sgml/pageinspect.sgml b/doc/src/sgml/pageinspect.sgml index 9232183896327..191fb156c13bb 100644 --- a/doc/src/sgml/pageinspect.sgml +++ b/doc/src/sgml/pageinspect.sgml @@ -28,11 +28,12 @@ get_raw_page reads the specified block of the named - table and returns a copy as a bytea value. This allows a + relation and returns a copy as a bytea value. This allows a single time-consistent copy of the block to be obtained. fork should be 'main' for - the main data fork, or 'fsm' for the free space map, - or 'vm' for the visibility map. + the main data fork, 'fsm' for the free space map, + 'vm' for the visibility map, or 'init' + for the initialization fork. 
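A small usage sketch for the expanded get_raw_page() description above; pgbench_accounts is an assumed table name, and the extension installation and superuser privileges are prerequisites not covered by this hunk.

    CREATE EXTENSION IF NOT EXISTS pageinspect;

    -- Block 0 of the main fork, decoded with heap_page_items().
    SELECT lp, t_xmin, t_xmax, t_ctid
    FROM heap_page_items(get_raw_page('pgbench_accounts', 'main', 0));

    -- The same block of the free space map fork, returned as a raw bytea
    -- (assumes the table has been vacuumed so the fork exists).
    SELECT get_raw_page('pgbench_accounts', 'fsm', 0);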
diff --git a/doc/src/sgml/perform.sgml b/doc/src/sgml/perform.sgml index 657575c07ad6f..5a087fbe6a098 100644 --- a/doc/src/sgml/perform.sgml +++ b/doc/src/sgml/perform.sgml @@ -89,7 +89,6 @@ EXPLAIN SELECT * FROM tenk1; QUERY PLAN ------------------------------------------------------------- Seq Scan on tenk1 (cost=0.00..458.00 rows=10000 width=244) - Planning time: 0.113 ms
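Since the examples in this section no longer show a Planning time line, and the note added later in this patch ties that figure to EXPLAIN ANALYZE, here is a hedged illustration of where to find it; the timings are machine-dependent, so only placeholders are shown.

    EXPLAIN ANALYZE SELECT * FROM tenk1 WHERE unique1 < 100;
    -- ...plan nodes with actual row counts and times...
    -- Planning time: <elapsed planning time> ms
    -- Execution time: <elapsed execution time> ms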
@@ -162,12 +161,6 @@ EXPLAIN SELECT * FROM tenk1; actually returned, updated, or deleted by the query. - - The Planning time shown is the time it took to generate - the query plan from the parsed query and optimize it. It does not include - rewriting and parsing. - - Returning to our example: @@ -177,7 +170,6 @@ EXPLAIN SELECT * FROM tenk1; QUERY PLAN ------------------------------------------------------------- Seq Scan on tenk1 (cost=0.00..458.00 rows=10000 width=244) - Planning time: 0.113 ms @@ -206,7 +198,6 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 7000; ------------------------------------------------------------ Seq Scan on tenk1 (cost=0.00..483.00 rows=7001 width=244) Filter: (unique1 < 7000) - Planning time: 0.104 ms Notice that the EXPLAIN output shows the WHERE @@ -243,7 +234,6 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100; Recheck Cond: (unique1 < 100) -> Bitmap Index Scan on tenk1_unique1 (cost=0.00..5.04 rows=101 width=0) Index Cond: (unique1 < 100) - Planning time: 0.093 ms Here the planner has decided to use a two-step plan: the child plan @@ -272,7 +262,6 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100 AND stringu1 = 'xxx'; Filter: (stringu1 = 'xxx'::name) -> Bitmap Index Scan on tenk1_unique1 (cost=0.00..5.04 rows=101 width=0) Index Cond: (unique1 < 100) - Planning time: 0.089 ms The added condition stringu1 = 'xxx' reduces the @@ -294,7 +283,6 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 = 42; ----------------------------------------------------------------------------- Index Scan using tenk1_unique1 on tenk1 (cost=0.29..8.30 rows=1 width=244) Index Cond: (unique1 = 42) - Planning time: 0.076 ms In this type of plan the table rows are fetched in index order, which @@ -323,7 +311,6 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000; Index Cond: (unique1 < 100) -> Bitmap Index Scan on tenk1_unique2 (cost=0.00..19.78 rows=999 width=0) Index Cond: (unique2 > 9000) - Planning time: 0.094 ms But this requires visiting both indexes, so it's not necessarily a win @@ -344,7 +331,6 @@ EXPLAIN SELECT * FROM tenk1 WHERE unique1 < 100 AND unique2 > 9000 LIMIT 2 -> Index Scan using tenk1_unique2 on tenk1 (cost=0.29..71.27 rows=10 width=244) Index Cond: (unique2 > 9000) Filter: (unique1 < 100) - Planning time: 0.087 ms @@ -378,7 +364,6 @@ WHERE t1.unique1 < 10 AND t1.unique2 = t2.unique2; Index Cond: (unique1 < 10) -> Index Scan using tenk2_unique2 on tenk2 t2 (cost=0.29..7.91 rows=1 width=244) Index Cond: (unique2 = t1.unique2) - Planning time: 0.117 ms @@ -430,7 +415,6 @@ WHERE t1.unique1 < 10 AND t2.unique2 < 10 AND t1.hundred < t2.hundred; -> Materialize (cost=0.29..8.51 rows=10 width=244) -> Index Scan using tenk2_unique2 on tenk2 t2 (cost=0.29..8.46 rows=10 width=244) Index Cond: (unique2 < 10) - Planning time: 0.119 ms The condition t1.hundred < t2.hundred can't be @@ -442,7 +426,7 @@ WHERE t1.unique1 < 10 AND t2.unique2 < 10 AND t1.hundred < t2.hundred; Notice that here the planner has chosen to materialize the inner relation of the join, by putting a Materialize plan node atop it. This - means that the t2 indexscan will be done just once, even + means that the t2 index scan will be done just once, even though the nested-loop join node needs to read that data ten times, once for each row from the outer relation. 
The Materialize node saves the data in memory as it's read, and then returns the data from memory on each @@ -478,7 +462,6 @@ WHERE t1.unique1 < 100 AND t1.unique2 = t2.unique2; Recheck Cond: (unique1 < 100) -> Bitmap Index Scan on tenk1_unique1 (cost=0.00..5.04 rows=101 width=0) Index Cond: (unique1 < 100) - Planning time: 0.182 ms @@ -509,7 +492,6 @@ WHERE t1.unique1 < 100 AND t1.unique2 = t2.unique2; -> Sort (cost=197.83..200.33 rows=1000 width=244) Sort Key: t2.unique2 -> Seq Scan on onek t2 (cost=0.00..148.00 rows=1000 width=244) - Planning time: 0.195 ms @@ -546,7 +528,6 @@ WHERE t1.unique1 < 100 AND t1.unique2 = t2.unique2; -> Index Scan using tenk1_unique2 on tenk1 t1 (cost=0.29..656.28 rows=101 width=244) Filter: (unique1 < 100) -> Index Scan using onek_unique2 on onek t2 (cost=0.28..224.79 rows=1000 width=244) - Planning time: 0.176 ms which shows that the planner thinks that sorting onek by @@ -780,6 +761,12 @@ ROLLBACK; decisions. + + The Planning time shown by EXPLAIN + ANALYZE is the time it took to generate the query plan from the + parsed query and optimize it. It does not include parsing or rewriting. + + The Execution time shown by EXPLAIN ANALYZE includes executor start-up and shut-down time, as well diff --git a/doc/src/sgml/pgarchivecleanup.sgml b/doc/src/sgml/pgarchivecleanup.sgml index fdf0cbb9d1be5..314132a7613a9 100644 --- a/doc/src/sgml/pgarchivecleanup.sgml +++ b/doc/src/sgml/pgarchivecleanup.sgml @@ -128,11 +128,6 @@ pg_archivecleanup: removing file "archive/00000001000000370000000E" .gz. - - Note that the - .backup file name passed to the program should not - include the extension. - diff --git a/doc/src/sgml/pgbench.sgml b/doc/src/sgml/pgbench.sgml index 4367563a3796f..4187fc1a0e3d7 100644 --- a/doc/src/sgml/pgbench.sgml +++ b/doc/src/sgml/pgbench.sgml @@ -396,27 +396,29 @@ pgbench options dbname - - + sec + sec - Report the average per-statement latency (execution time from the - perspective of the client) of each command after the benchmark - finishes. See below for details. + Show progress report every sec seconds. The report + includes the time since the beginning of the run, the tps since the + last report, and the transaction latency average and standard + deviation since the last report. Under throttling ( - sec - sec + + - Show progress report every sec seconds. The report - includes the time since the beginning of the run, the tps since the - last report, and the transaction latency average and standard - deviation since the last report. Under throttling ( @@ -440,29 +442,25 @@ pgbench options dbname possible for later ones to catch up again. - When throttling is active, the average and maximum transaction - schedule lag time are reported in ms. This is the delay between - the original scheduled transaction time and the actual transaction - start times. The schedule lag shows whether a transaction was - started on time or late. Once a client starts running behind its - schedule, every following transaction can continue to be penalized - for schedule lag. If faster transactions are able to catch up, it's - possible for them to get back on schedule again. The lag measurement - of every transaction is shown when pgbench is run with debugging - output. + When throttling is active, the transaction latency reported at the + end of the run is calculated from the scheduled start times, so it + includes the time each transaction had to wait for the previous + transaction to finish. 
The wait time is called the schedule lag time, + and its average and maximum are also reported separately. The + transaction latency with respect to the actual transaction start time, + i.e. the time spent executing the transaction in the database, can be + computed by subtracting the schedule lag time from the reported + latency. + - High rate limit schedule lag values, that is lag values that are large - compared to the actual transaction latency, indicate that something is - amiss in the throttling process. High schedule lag can highlight a subtle - problem there even if the target rate limit is met in the end. One - possible cause of schedule lag is insufficient pgbench threads to - handle all of the clients. To improve that, consider reducing the - number of clients, increasing the number of threads in pgbench, or - running pgbench on a separate host. Another possibility is that the - database is not keeping up with the load at some point. When that - happens, you will have to reduce the expected transaction rate to - lower schedule lag. + A high schedule lag time is an indication that the system cannot + process transactions at the specified rate, with the chosen number of + clients and threads. When the average transaction execution time is + longer than the scheduled interval between each transaction, each + successive transaction will fall further behind, and the schedule lag + time will keep increasing the longer the test run is. When that + happens, you will have to reduce the specified transaction rate. @@ -888,7 +886,7 @@ END; The format of the log is: -client_id transaction_no time file_no time_epoch time_us +client_id transaction_no time file_no time_epoch time_us [schedule_lag] where time is the total elapsed transaction time in microseconds, @@ -896,9 +894,12 @@ END; (useful when multiple scripts were specified with