From 31380b900ee427e86ffff2d873c3fe0f4417f8c9 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 30 Jun 2015 18:47:32 -0400 Subject: [PATCH 001/442] Fix broken link in documentation. HP's web server has apparently become case-sensitive sometime recently. Per bug #13479 from Daniel Abraham. Corrected link identified by Alvaro. --- doc/src/sgml/runtime.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml index dacd3e1dfefff..547567e9ca458 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -2105,7 +2105,7 @@ pg_dumpall -p 5432 | psql -d postgres -p 5433 are also checked if the parameter is set. (See + url="http://h71000.www7.hp.com/doc/83final/ba554_90007/ch04s02.html"> for diagrams showing SSL certificate usage.) From 6cfb6d987419ce1e7bec0cf3ad22830ed3c2dc08 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Wed, 1 Jul 2015 10:54:47 +0900 Subject: [PATCH 002/442] Make XLogFileCopy() look the same as in 9.4. XLogFileCopy() was changed heavily in commit de76884. However it was partially reverted in commit 7abc685 and most of those changes to XLogFileCopy() were no longer needed. Then commit 7cbee7c removed those unnecessary code, but XLogFileCopy() looked different in master and 9.4 though the contents are almost the same. This patch makes XLogFileCopy() look the same in master and back-branches, which makes back-patching easier, per discussion on pgsql-hackers. Back-patch to 9.5. Discussion: 55760844.7090703@iki.fi Michael Paquier --- src/backend/access/transam/xlog.c | 60 ++++++++++++++++--------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 0def47d6ed5bb..bb7cd9f775c9c 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -808,7 +808,7 @@ static bool XLogCheckpointNeeded(XLogSegNo new_segno); static void XLogWrite(XLogwrtRqst WriteRqst, bool flexible); static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath, bool find_free, XLogSegNo max_segno, - bool use_lock, int elevel); + bool use_lock); static int XLogFileRead(XLogSegNo segno, int emode, TimeLineID tli, int source, bool notexistOk); static int XLogFileReadAnyTLI(XLogSegNo segno, int emode, int source); @@ -3013,7 +3013,7 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) max_segno = logsegno + CheckPointSegments; if (!InstallXLogFileSegment(&installed_segno, tmppath, *use_existent, max_segno, - use_lock, LOG)) + use_lock)) { /* * No need for any more future segments, or InstallXLogFileSegment() @@ -3040,20 +3040,25 @@ XLogFileInit(XLogSegNo logsegno, bool *use_existent, bool use_lock) } /* - * Copy a WAL segment file in pg_xlog directory. + * Create a new XLOG file segment by copying a pre-existing one. * - * srcfname source filename - * upto how much of the source file to copy? (the rest is filled with - * zeros) - * segno identify segment to install. + * destsegno: identify segment to be created. * - * The file is first copied with a temporary filename, and then installed as - * a newly-created segment. + * srcTLI, srclog, srcseg: identify segment to be copied (could be from + * a different timeline) + * + * upto: how much of the source file to copy (the rest is filled with + * zeros) + * + * Currently this is only used during recovery, and so there are no locking + * considerations. But we should be just as tense as XLogFileInit to avoid + * emplacing a bogus file. 
*/ static void -XLogFileCopy(char *srcfname, int upto, XLogSegNo segno) +XLogFileCopy(XLogSegNo destsegno, TimeLineID srcTLI, XLogSegNo srcsegno, + int upto) { - char srcpath[MAXPGPATH]; + char path[MAXPGPATH]; char tmppath[MAXPGPATH]; char buffer[XLOG_BLCKSZ]; int srcfd; @@ -3063,12 +3068,12 @@ XLogFileCopy(char *srcfname, int upto, XLogSegNo segno) /* * Open the source file */ - snprintf(srcpath, MAXPGPATH, XLOGDIR "/%s", srcfname); - srcfd = OpenTransientFile(srcpath, O_RDONLY | PG_BINARY, 0); + XLogFilePath(path, srcTLI, srcsegno); + srcfd = OpenTransientFile(path, O_RDONLY | PG_BINARY, 0); if (srcfd < 0) ereport(ERROR, (errcode_for_file_access(), - errmsg("could not open file \"%s\": %m", srcpath))); + errmsg("could not open file \"%s\": %m", path))); /* * Copy into a temp file name. @@ -3112,11 +3117,11 @@ XLogFileCopy(char *srcfname, int upto, XLogSegNo segno) ereport(ERROR, (errcode_for_file_access(), errmsg("could not read file \"%s\": %m", - srcpath))); + path))); else ereport(ERROR, (errmsg("not enough data in file \"%s\"", - srcpath))); + path))); } } errno = 0; @@ -3149,9 +3154,11 @@ XLogFileCopy(char *srcfname, int upto, XLogSegNo segno) CloseTransientFile(srcfd); - /* install the new file */ - (void) InstallXLogFileSegment(&segno, tmppath, false, - 0, false, ERROR); + /* + * Now move the segment into place with its final name. + */ + if (!InstallXLogFileSegment(&destsegno, tmppath, false, 0, false)) + elog(ERROR, "InstallXLogFileSegment should not have failed"); } /* @@ -3178,8 +3185,6 @@ XLogFileCopy(char *srcfname, int upto, XLogSegNo segno) * place. This should be TRUE except during bootstrap log creation. The * caller must *not* hold the lock at call. * - * elevel: log level used by this routine. - * * Returns TRUE if the file was installed successfully. FALSE indicates that * max_segno limit was exceeded, or an error occurred while renaming the * file into place. @@ -3187,7 +3192,7 @@ XLogFileCopy(char *srcfname, int upto, XLogSegNo segno) static bool InstallXLogFileSegment(XLogSegNo *segno, char *tmppath, bool find_free, XLogSegNo max_segno, - bool use_lock, int elevel) + bool use_lock) { char path[MAXPGPATH]; struct stat stat_buf; @@ -3232,7 +3237,7 @@ InstallXLogFileSegment(XLogSegNo *segno, char *tmppath, { if (use_lock) LWLockRelease(ControlFileLock); - ereport(elevel, + ereport(LOG, (errcode_for_file_access(), errmsg("could not link file \"%s\" to \"%s\" (initialization of log file): %m", tmppath, path))); @@ -3244,7 +3249,7 @@ InstallXLogFileSegment(XLogSegNo *segno, char *tmppath, { if (use_lock) LWLockRelease(ControlFileLock); - ereport(elevel, + ereport(LOG, (errcode_for_file_access(), errmsg("could not rename file \"%s\" to \"%s\" (initialization of log file): %m", tmppath, path))); @@ -3733,7 +3738,7 @@ RemoveXlogFile(const char *segname, XLogRecPtr PriorRedoPtr, XLogRecPtr endptr) if (endlogSegNo <= recycleSegNo && lstat(path, &statbuf) == 0 && S_ISREG(statbuf.st_mode) && InstallXLogFileSegment(&endlogSegNo, path, - true, recycleSegNo, true, LOG)) + true, recycleSegNo, true)) { ereport(DEBUG2, (errmsg("recycled transaction log file \"%s\"", @@ -5212,8 +5217,6 @@ exitArchiveRecovery(TimeLineID endTLI, XLogRecPtr endOfLog) */ if (endLogSegNo == startLogSegNo) { - XLogFileName(xlogfname, endTLI, endLogSegNo); - /* * Make a copy of the file on the new timeline. * @@ -5221,7 +5224,8 @@ exitArchiveRecovery(TimeLineID endTLI, XLogRecPtr endOfLog) * considerations. But we should be just as tense as XLogFileInit to * avoid emplacing a bogus file. 
*/ - XLogFileCopy(xlogfname, endOfLog % XLOG_SEG_SIZE, endLogSegNo); + XLogFileCopy(endLogSegNo, endTLI, endLogSegNo, + endOfLog % XLOG_SEG_SIZE); } else { From cd7030ff085f5c378e837b392cb719cf23df9d0b Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 1 Jul 2015 18:07:48 -0400 Subject: [PATCH 003/442] Make sampler_random_fract() actually obey its API contract. This function is documented to return a value in the range (0,1), which is what its predecessor anl_random_fract() did. However, the new version depends on pg_erand48() which returns a value in [0,1). The possibility of returning zero creates hazards of division by zero or trying to compute log(0) at some call sites, and it might well break third-party modules using anl_random_fract() too. So let's change it to never return zero. Spotted by Coverity. Michael Paquier, cosmetically adjusted by me --- src/backend/utils/misc/sampling.c | 9 ++++++++- 1 file changed, 8 insertions(+), 1 deletion(-) diff --git a/src/backend/utils/misc/sampling.c b/src/backend/utils/misc/sampling.c index aaf1d6c4108b4..6191f7973441b 100644 --- a/src/backend/utils/misc/sampling.c +++ b/src/backend/utils/misc/sampling.c @@ -237,7 +237,14 @@ sampler_random_init_state(long seed, SamplerRandomState randstate) double sampler_random_fract(SamplerRandomState randstate) { - return pg_erand48(randstate); + double res; + + /* pg_erand48 returns a value in [0.0 - 1.0), so we must reject 0 */ + do + { + res = pg_erand48(randstate); + } while (res == 0.0); + return res; } From 163e29dc380137127cf7e9c23b1596b78ad0ce81 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Thu, 2 Jul 2015 10:35:38 +0900 Subject: [PATCH 004/442] Make use of xlog_internal.h's macros in WAL-related utilities. Commit 179cdd09 added macros to check if a filename is a WAL segment or other such file. However there were still some instances of the strlen + strspn combination to check for that in WAL-related utilities like pg_archivecleanup. Those checks can be replaced with the macros. This patch makes use of the macros in those utilities and which would make the code a bit easier to read. Back-patch to 9.5. Michael Paquier --- contrib/pg_standby/pg_standby.c | 24 ++++++------------- src/bin/pg_archivecleanup/pg_archivecleanup.c | 22 +++++++---------- src/bin/pg_resetxlog/pg_resetxlog.c | 11 ++++----- src/include/access/xlog_internal.h | 19 ++++++++++----- 4 files changed, 33 insertions(+), 43 deletions(-) diff --git a/contrib/pg_standby/pg_standby.c b/contrib/pg_standby/pg_standby.c index 2f9f2b4d2e920..861caea348106 100644 --- a/contrib/pg_standby/pg_standby.c +++ b/contrib/pg_standby/pg_standby.c @@ -32,6 +32,8 @@ #include "pg_getopt.h" +#include "access/xlog_internal.h" + const char *progname; /* Options and defaults */ @@ -57,7 +59,7 @@ char *restartWALFileName; /* the file from which we can restart restore */ char *priorWALFileName; /* the file we need to get from archive */ char WALFilePath[MAXPGPATH]; /* the file path including archive */ char restoreCommand[MAXPGPATH]; /* run this to restore */ -char exclusiveCleanupFileName[MAXPGPATH]; /* the file we need to +char exclusiveCleanupFileName[MAXFNAMELEN]; /* the file we need to * get from archive */ /* @@ -113,11 +115,6 @@ struct stat stat_buf; * folded in to later versions of this program. 
*/ -#define XLOG_DATA_FNAME_LEN 24 -/* Reworked from access/xlog_internal.h */ -#define XLogFileName(fname, tli, log, seg) \ - snprintf(fname, XLOG_DATA_FNAME_LEN + 1, "%08X%08X%08X", tli, log, seg) - /* * Initialize allows customized commands into the warm standby program. * @@ -182,10 +179,7 @@ CustomizableNextWALFileReady() * If it's a backup file, return immediately. If it's a regular file * return only if it's the right size already. */ - if (strlen(nextWALFileName) > 24 && - strspn(nextWALFileName, "0123456789ABCDEF") == 24 && - strcmp(nextWALFileName + strlen(nextWALFileName) - strlen(".backup"), - ".backup") == 0) + if (IsBackupHistoryFileName(nextWALFileName)) { nextWALFileType = XLOG_BACKUP_LABEL; return true; @@ -261,8 +255,7 @@ CustomizableCleanupPriorWALFiles(void) * are not removed in the order they were originally written, * in case this worries you. */ - if (strlen(xlde->d_name) == XLOG_DATA_FNAME_LEN && - strspn(xlde->d_name, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN && + if (IsXLogFileName(xlde->d_name) && strcmp(xlde->d_name + 8, exclusiveCleanupFileName + 8) < 0) { #ifdef WIN32 @@ -366,7 +359,7 @@ SetWALFileNameForCleanup(void) } } - XLogFileName(exclusiveCleanupFileName, tli, log, seg); + XLogFileNameById(exclusiveCleanupFileName, tli, log, seg); return cleanup; } @@ -740,10 +733,7 @@ main(int argc, char **argv) * Check for initial history file: always the first file to be requested * It's OK if the file isn't there - all other files need to wait */ - if (strlen(nextWALFileName) > 8 && - strspn(nextWALFileName, "0123456789ABCDEF") == 8 && - strcmp(nextWALFileName + strlen(nextWALFileName) - strlen(".history"), - ".history") == 0) + if (IsTLHistoryFileName(nextWALFileName)) { nextWALFileType = XLOG_HISTORY; if (RestoreWALFileForRecovery()) diff --git a/src/bin/pg_archivecleanup/pg_archivecleanup.c b/src/bin/pg_archivecleanup/pg_archivecleanup.c index ba6e242f15569..579a9bb84307e 100644 --- a/src/bin/pg_archivecleanup/pg_archivecleanup.c +++ b/src/bin/pg_archivecleanup/pg_archivecleanup.c @@ -21,6 +21,8 @@ #include "pg_getopt.h" +#include "access/xlog_internal.h" + const char *progname; /* Options and defaults */ @@ -31,7 +33,7 @@ char *additional_ext = NULL; /* Extension to remove from filenames */ char *archiveLocation; /* where to find the archive? */ char *restartWALFileName; /* the file from which we can restart restore */ char WALFilePath[MAXPGPATH]; /* the file path including archive */ -char exclusiveCleanupFileName[MAXPGPATH]; /* the oldest file we +char exclusiveCleanupFileName[MAXFNAMELEN]; /* the oldest file we * want to remain in * archive */ @@ -51,12 +53,6 @@ char exclusiveCleanupFileName[MAXPGPATH]; /* the oldest file we * folded in to later versions of this program. */ -#define XLOG_DATA_FNAME_LEN 24 -/* Reworked from access/xlog_internal.h */ -#define XLogFileName(fname, tli, log, seg) \ - snprintf(fname, XLOG_DATA_FNAME_LEN + 1, "%08X%08X%08X", tli, log, seg) -#define XLOG_BACKUP_FNAME_LEN 40 - /* * Initialize allows customized commands into the archive cleanup program. * @@ -110,7 +106,7 @@ CleanupPriorWALFiles(void) { /* * Truncation is essentially harmless, because we skip names of - * length other than XLOG_DATA_FNAME_LEN. (In principle, one + * length other than XLOG_FNAME_LEN. (In principle, one * could use a 1000-character additional_ext and get trouble.) */ strlcpy(walfile, xlde->d_name, MAXPGPATH); @@ -129,8 +125,7 @@ CleanupPriorWALFiles(void) * file. 
Note that this means files are not removed in the order * they were originally written, in case this worries you. */ - if (strlen(walfile) == XLOG_DATA_FNAME_LEN && - strspn(walfile, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN && + if (IsXLogFileName(walfile) && strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0) { /* @@ -202,13 +197,12 @@ SetWALFileNameForCleanup(void) * 000000010000000000000010.00000020.backup is after * 000000010000000000000010. */ - if (strlen(restartWALFileName) == XLOG_DATA_FNAME_LEN && - strspn(restartWALFileName, "0123456789ABCDEF") == XLOG_DATA_FNAME_LEN) + if (IsXLogFileName(restartWALFileName)) { strcpy(exclusiveCleanupFileName, restartWALFileName); fnameOK = true; } - else if (strlen(restartWALFileName) == XLOG_BACKUP_FNAME_LEN) + else if (IsBackupHistoryFileName(restartWALFileName)) { int args; uint32 tli = 1, @@ -225,7 +219,7 @@ SetWALFileNameForCleanup(void) * Use just the prefix of the filename, ignore everything after * first period */ - XLogFileName(exclusiveCleanupFileName, tli, log, seg); + XLogFileNameById(exclusiveCleanupFileName, tli, log, seg); } } diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c index 6ffe795348d1e..e19a72b4c1640 100644 --- a/src/bin/pg_resetxlog/pg_resetxlog.c +++ b/src/bin/pg_resetxlog/pg_resetxlog.c @@ -261,7 +261,7 @@ main(int argc, char *argv[]) break; case 'l': - if (strspn(optarg, "01234567890ABCDEFabcdef") != 24) + if (strspn(optarg, "01234567890ABCDEFabcdef") != XLOG_FNAME_LEN) { fprintf(stderr, _("%s: invalid argument for option %s\n"), progname, "-l"); fprintf(stderr, _("Try \"%s --help\" for more information.\n"), progname); @@ -976,8 +976,7 @@ KillExistingXLOG(void) while (errno = 0, (xlde = readdir(xldir)) != NULL) { - if (strlen(xlde->d_name) == 24 && - strspn(xlde->d_name, "0123456789ABCDEF") == 24) + if (IsXLogFileName(xlde->d_name)) { snprintf(path, MAXPGPATH, "%s/%s", XLOGDIR, xlde->d_name); if (unlink(path) < 0) @@ -1027,9 +1026,9 @@ KillExistingArchiveStatus(void) while (errno = 0, (xlde = readdir(xldir)) != NULL) { - if (strspn(xlde->d_name, "0123456789ABCDEF") == 24 && - (strcmp(xlde->d_name + 24, ".ready") == 0 || - strcmp(xlde->d_name + 24, ".done") == 0)) + if (strspn(xlde->d_name, "0123456789ABCDEF") == XLOG_FNAME_LEN && + (strcmp(xlde->d_name + XLOG_FNAME_LEN, ".ready") == 0 || + strcmp(xlde->d_name + XLOG_FNAME_LEN, ".done") == 0)) { snprintf(path, MAXPGPATH, "%s/%s", ARCHSTATDIR, xlde->d_name); if (unlink(path) < 0) diff --git a/src/include/access/xlog_internal.h b/src/include/access/xlog_internal.h index fbf9324ba4306..5ebaa5f69c6aa 100644 --- a/src/include/access/xlog_internal.h +++ b/src/include/access/xlog_internal.h @@ -137,13 +137,20 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; */ #define MAXFNAMELEN 64 +/* Length of XLog file name */ +#define XLOG_FNAME_LEN 24 + #define XLogFileName(fname, tli, logSegNo) \ snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, \ (uint32) ((logSegNo) / XLogSegmentsPerXLogId), \ (uint32) ((logSegNo) % XLogSegmentsPerXLogId)) +#define XLogFileNameById(fname, tli, log, seg) \ + snprintf(fname, MAXFNAMELEN, "%08X%08X%08X", tli, log, seg) + #define IsXLogFileName(fname) \ - (strlen(fname) == 24 && strspn(fname, "0123456789ABCDEF") == 24) + (strlen(fname) == XLOG_FNAME_LEN && \ + strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN) /* * XLOG segment with .partial suffix. Used by pg_receivexlog and at end of @@ -151,9 +158,9 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; * be complete yet. 
*/ #define IsPartialXLogFileName(fname) \ - (strlen(fname) == 24 + strlen(".partial") && \ - strspn(fname, "0123456789ABCDEF") == 24 && \ - strcmp((fname) + 24, ".partial") == 0) + (strlen(fname) == XLOG_FNAME_LEN + strlen(".partial") && \ + strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ + strcmp((fname) + XLOG_FNAME_LEN, ".partial") == 0) #define XLogFromFileName(fname, tli, logSegNo) \ do { \ @@ -188,8 +195,8 @@ typedef XLogLongPageHeaderData *XLogLongPageHeader; (uint32) ((logSegNo) % XLogSegmentsPerXLogId), offset) #define IsBackupHistoryFileName(fname) \ - (strlen(fname) > 24 && \ - strspn(fname, "0123456789ABCDEF") == 24 && \ + (strlen(fname) > XLOG_FNAME_LEN && \ + strspn(fname, "0123456789ABCDEF") == XLOG_FNAME_LEN && \ strcmp((fname) + strlen(fname) - strlen(".backup"), ".backup") == 0) #define BackupHistoryFilePath(path, tli, logSegNo, offset) \ From e1d273efde7828947e52bb531851f67f91c628c3 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Wed, 1 Jul 2015 23:28:41 -0400 Subject: [PATCH 005/442] Allow MSVC's contribcheck and modulescheck to run independently. These require a temp install to have been done, so we now make sure it is done before proceeding. Michael Paquier. --- src/tools/msvc/vcregress.pl | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl index ddb628d154509..619638361575f 100644 --- a/src/tools/msvc/vcregress.pl +++ b/src/tools/msvc/vcregress.pl @@ -290,6 +290,7 @@ sub subdircheck sub contribcheck { + InstallTemp(); chdir "$topdir/contrib"; foreach my $module (glob("*")) { @@ -309,6 +310,7 @@ sub contribcheck sub modulescheck { + InstallTemp(); chdir "$topdir/src/test/modules"; foreach my $module (glob("*")) { From 6c29ef48811d33fece01962b3be72511f1b1014e Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 2 Jul 2015 12:11:32 +0300 Subject: [PATCH 006/442] Use American spelling for "behavior". For consistency with the rest of the docs. Michael Paquier --- doc/src/sgml/func.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index d49cd4342822f..99923f46bcaf2 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -17851,7 +17851,7 @@ postgres=# SELECT * FROM pg_xlogfile_name_offset(pg_stop_backup()); All of these functions take an optional missing_ok parameter, - which specifies the behaviour when the file or directory does not exist. + which specifies the behavior when the file or directory does not exist. If true, the function returns NULL (except pg_ls_dir, which returns an empty result set). If false, an error is raised. The default is false. From 00ccea9e9dcee7b4f103674d274fadc8b09075f7 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 2 Jul 2015 12:12:05 +0300 Subject: [PATCH 007/442] Fix name of argument to pg_stat_file. It's called "missing_ok" in the docs and in the C code. I refrained from doing a catversion bump for this, because the name of an input argument is just documentation, it has no effect on any callers. 
Michael Paquier --- src/include/catalog/pg_proc.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index 3a40fa69c0aa7..be3a8fba1bed4 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -3183,7 +3183,7 @@ DESCR("rotate log file"); DATA(insert OID = 2623 ( pg_stat_file PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2249 "25" "{25,20,1184,1184,1184,1184,16}" "{i,o,o,o,o,o,o}" "{filename,size,access,modification,change,creation,isdir}" _null_ _null_ pg_stat_file_1arg _null_ _null_ _null_ )); DESCR("get information about file"); -DATA(insert OID = 3307 ( pg_stat_file PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2249 "25 16" "{25,16,20,1184,1184,1184,1184,16}" "{i,i,o,o,o,o,o,o}" "{filename,if_not_exists,size,access,modification,change,creation,isdir}" _null_ _null_ pg_stat_file _null_ _null_ _null_ )); +DATA(insert OID = 3307 ( pg_stat_file PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 2249 "25 16" "{25,16,20,1184,1184,1184,1184,16}" "{i,i,o,o,o,o,o,o}" "{filename,missing_ok,size,access,modification,change,creation,isdir}" _null_ _null_ pg_stat_file _null_ _null_ _null_ )); DESCR("get information about file"); DATA(insert OID = 2624 ( pg_read_file PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 25 "25 20 20" _null_ _null_ _null_ _null_ _null_ pg_read_file_off_len _null_ _null_ _null_ )); DESCR("read text from a file"); From 02ec4cd179099fc409288bb55c40fea308a51204 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 2 Jul 2015 12:32:48 +0300 Subject: [PATCH 008/442] Use appendStringInfoString/Char et al where appropriate. Patch by David Rowley. Backpatch to 9.5, as some of the calls were new in 9.5, and keeping the code in sync with master makes future backpatching easier. 
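For context on the conversions below: appendStringInfo() runs its argument through format-string processing, while appendStringInfoString() and appendStringInfoChar() copy the text directly, making them the cheaper choice for fixed literals and single characters. A minimal sketch of the three variants side by side (illustrative only, not code from this patch; the helper name is made up):

#include "postgres.h"
#include "lib/stringinfo.h"

/* Hypothetical helper contrasting the append variants used in this patch. */
static void
append_example(StringInfo buf)
{
	appendStringInfo(buf, " ON CONFLICT");			/* parses a format string */
	appendStringInfoString(buf, " ON CONFLICT");	/* straight copy of the literal */
	appendStringInfoChar(buf, '\n');				/* cheapest for a single character */
}
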
--- contrib/postgres_fdw/postgres_fdw.c | 2 +- src/backend/access/rmgrdesc/gindesc.c | 4 ++-- src/backend/access/rmgrdesc/spgdesc.c | 10 +++++----- src/backend/access/rmgrdesc/xactdesc.c | 2 +- src/backend/access/transam/xlog.c | 4 ++-- src/backend/lib/pairingheap.c | 2 +- src/backend/utils/adt/ruleutils.c | 2 +- src/backend/utils/adt/xml.c | 6 +++--- src/bin/pg_basebackup/pg_basebackup.c | 2 +- src/bin/pg_dump/pg_backup_archiver.c | 2 +- src/bin/pg_dump/pg_dump.c | 6 +++--- src/bin/psql/describe.c | 8 ++++---- src/bin/scripts/clusterdb.c | 2 +- src/bin/scripts/createdb.c | 4 ++-- src/bin/scripts/createuser.c | 2 +- src/bin/scripts/reindexdb.c | 2 +- src/bin/scripts/vacuumdb.c | 4 ++-- 17 files changed, 32 insertions(+), 32 deletions(-) diff --git a/contrib/postgres_fdw/postgres_fdw.c b/contrib/postgres_fdw/postgres_fdw.c index 6da01e1d6f35f..e4d799cecd541 100644 --- a/contrib/postgres_fdw/postgres_fdw.c +++ b/contrib/postgres_fdw/postgres_fdw.c @@ -2738,7 +2738,7 @@ postgresImportForeignSchema(ImportForeignSchemaStmt *stmt, Oid serverOid) } /* Append ORDER BY at the end of query to ensure output ordering */ - appendStringInfo(&buf, " ORDER BY c.relname, a.attnum"); + appendStringInfoString(&buf, " ORDER BY c.relname, a.attnum"); /* Fetch the data */ res = PQexec(conn, buf.data); diff --git a/src/backend/access/rmgrdesc/gindesc.c b/src/backend/access/rmgrdesc/gindesc.c index 324efa3769906..09e928fb7c6da 100644 --- a/src/backend/access/rmgrdesc/gindesc.c +++ b/src/backend/access/rmgrdesc/gindesc.c @@ -113,7 +113,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) (ginxlogRecompressDataLeaf *) payload; if (XLogRecHasBlockImage(record, 0)) - appendStringInfo(buf, " (full page image)"); + appendStringInfoString(buf, " (full page image)"); else desc_recompress_leaf(buf, insertData); } @@ -147,7 +147,7 @@ gin_desc(StringInfo buf, XLogReaderState *record) ginxlogVacuumDataLeafPage *xlrec = (ginxlogVacuumDataLeafPage *) rec; if (XLogRecHasBlockImage(record, 0)) - appendStringInfo(buf, " (full page image)"); + appendStringInfoString(buf, " (full page image)"); else desc_recompress_leaf(buf, &xlrec->data); } diff --git a/src/backend/access/rmgrdesc/spgdesc.c b/src/backend/access/rmgrdesc/spgdesc.c index 6e426d7b8c721..478f50c7a0d20 100644 --- a/src/backend/access/rmgrdesc/spgdesc.c +++ b/src/backend/access/rmgrdesc/spgdesc.c @@ -30,14 +30,14 @@ spg_desc(StringInfo buf, XLogReaderState *record) { spgxlogAddLeaf *xlrec = (spgxlogAddLeaf *) rec; - appendStringInfo(buf, "add leaf to page"); + appendStringInfoString(buf, "add leaf to page"); appendStringInfo(buf, "; off %u; headoff %u; parentoff %u", xlrec->offnumLeaf, xlrec->offnumHeadLeaf, xlrec->offnumParent); if (xlrec->newPage) - appendStringInfo(buf, " (newpage)"); + appendStringInfoString(buf, " (newpage)"); if (xlrec->storesNulls) - appendStringInfo(buf, " (nulls)"); + appendStringInfoString(buf, " (nulls)"); } break; case XLOG_SPGIST_MOVE_LEAFS: @@ -63,9 +63,9 @@ spg_desc(StringInfo buf, XLogReaderState *record) appendStringInfo(buf, "ndel %u; nins %u", xlrec->nDelete, xlrec->nInsert); if (xlrec->innerIsParent) - appendStringInfo(buf, " (innerIsParent)"); + appendStringInfoString(buf, " (innerIsParent)"); if (xlrec->isRootSplit) - appendStringInfo(buf, " (isRootSplit)"); + appendStringInfoString(buf, " (isRootSplit)"); } break; case XLOG_SPGIST_VACUUM_LEAF: diff --git a/src/backend/access/rmgrdesc/xactdesc.c b/src/backend/access/rmgrdesc/xactdesc.c index 7b5f98305070c..e811c0a61ee46 100644 --- a/src/backend/access/rmgrdesc/xactdesc.c +++ 
b/src/backend/access/rmgrdesc/xactdesc.c @@ -232,7 +232,7 @@ xact_desc_commit(StringInfo buf, uint8 info, xl_xact_commit *xlrec, RepOriginId } if (XactCompletionForceSyncCommit(parsed.xinfo)) - appendStringInfo(buf, "; sync"); + appendStringInfoString(buf, "; sync"); if (parsed.xinfo & XACT_XINFO_HAS_ORIGIN) { diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index bb7cd9f775c9c..1dd31b37ffe06 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -1097,7 +1097,7 @@ XLogInsertRecord(XLogRecData *rdata, XLogRecPtr fpw_lsn) if (!debug_reader) { - appendStringInfo(&buf, "error decoding record: out of memory"); + appendStringInfoString(&buf, "error decoding record: out of memory"); } else if (!DecodeXLogRecord(debug_reader, (XLogRecord *) recordBuf.data, &errormsg)) @@ -9528,7 +9528,7 @@ xlog_outrec(StringInfo buf, XLogReaderState *record) rnode.spcNode, rnode.dbNode, rnode.relNode, blk); if (XLogRecHasBlockImage(record, block_id)) - appendStringInfo(buf, " FPW"); + appendStringInfoString(buf, " FPW"); } } #endif /* WAL_DEBUG */ diff --git a/src/backend/lib/pairingheap.c b/src/backend/lib/pairingheap.c index 3d8a5ea56189c..7ca35452ded66 100644 --- a/src/backend/lib/pairingheap.c +++ b/src/backend/lib/pairingheap.c @@ -306,7 +306,7 @@ pairingheap_dump_recurse(StringInfo buf, appendStringInfoSpaces(buf, depth * 4); dumpfunc(node, buf, opaque); - appendStringInfoString(buf, "\n"); + appendStringInfoChar(buf, '\n'); if (node->first_child) pairingheap_dump_recurse(buf, node->first_child, dumpfunc, opaque, depth + 1, node); prev_or_parent = node; diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 2cd4b62701f6f..5112cac901735 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -5487,7 +5487,7 @@ get_insert_query_def(Query *query, deparse_context *context) { OnConflictExpr *confl = query->onConflict; - appendStringInfo(buf, " ON CONFLICT"); + appendStringInfoString(buf, " ON CONFLICT"); if (confl->arbiterElems) { diff --git a/src/backend/utils/adt/xml.c b/src/backend/utils/adt/xml.c index 99bc832ab826b..31dfc4d2a7a91 100644 --- a/src/backend/utils/adt/xml.c +++ b/src/backend/utils/adt/xml.c @@ -2473,7 +2473,7 @@ query_to_xml_internal(const char *query, char *tablename, { xmldata_root_element_start(result, xmltn, xmlschema, targetns, top_level); - appendStringInfoString(result, "\n"); + appendStringInfoChar(result, '\n'); } if (xmlschema) @@ -2637,7 +2637,7 @@ schema_to_xml_internal(Oid nspid, const char *xmlschema, bool nulls, result = makeStringInfo(); xmldata_root_element_start(result, xmlsn, xmlschema, targetns, top_level); - appendStringInfoString(result, "\n"); + appendStringInfoChar(result, '\n'); if (xmlschema) appendStringInfo(result, "%s\n\n", xmlschema); @@ -2815,7 +2815,7 @@ database_to_xml_internal(const char *xmlschema, bool nulls, result = makeStringInfo(); xmldata_root_element_start(result, xmlcn, xmlschema, targetns, true); - appendStringInfoString(result, "\n"); + appendStringInfoChar(result, '\n'); if (xmlschema) appendStringInfo(result, "%s\n\n", xmlschema); diff --git a/src/bin/pg_basebackup/pg_basebackup.c b/src/bin/pg_basebackup/pg_basebackup.c index 5dd2887d12e9b..536368020b004 100644 --- a/src/bin/pg_basebackup/pg_basebackup.c +++ b/src/bin/pg_basebackup/pg_basebackup.c @@ -1516,7 +1516,7 @@ GenerateRecoveryConf(PGconn *conn) /* Separate key-value pairs with spaces */ if (conninfo_buf.len != 0) - appendPQExpBufferStr(&conninfo_buf, " 
"); + appendPQExpBufferChar(&conninfo_buf, ' '); /* * Write "keyword=value" pieces, the value string is escaped and/or diff --git a/src/bin/pg_dump/pg_backup_archiver.c b/src/bin/pg_dump/pg_backup_archiver.c index f9b564eee41a0..0d52babc4f179 100644 --- a/src/bin/pg_dump/pg_backup_archiver.c +++ b/src/bin/pg_dump/pg_backup_archiver.c @@ -533,7 +533,7 @@ RestoreArchive(Archive *AHX) * search for hardcoded "DROP CONSTRAINT" instead. */ if (strcmp(te->desc, "DEFAULT") == 0) - appendPQExpBuffer(ftStmt, "%s", dropStmt); + appendPQExpBufferStr(ftStmt, dropStmt); else { if (strcmp(te->desc, "CONSTRAINT") == 0 || diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index a72dfe93da939..0a8129020bdda 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -1659,7 +1659,7 @@ dumpTableData_insert(Archive *fout, DumpOptions *dopt, void *dcontext) /* append the list of column names if required */ if (dopt->column_inserts) { - appendPQExpBufferStr(insertStmt, "("); + appendPQExpBufferChar(insertStmt, '('); for (field = 0; field < nfields; field++) { if (field > 0) @@ -11332,7 +11332,7 @@ dumpOpclass(Archive *fout, DumpOptions *dopt, OpclassInfo *opcinfo) appendPQExpBufferStr(q, " FAMILY "); if (strcmp(opcfamilynsp, opcinfo->dobj.namespace->dobj.name) != 0) appendPQExpBuffer(q, "%s.", fmtId(opcfamilynsp)); - appendPQExpBuffer(q, "%s", fmtId(opcfamilyname)); + appendPQExpBufferStr(q, fmtId(opcfamilyname)); } appendPQExpBufferStr(q, " AS\n "); @@ -13844,7 +13844,7 @@ dumpTableSchema(Archive *fout, DumpOptions *dopt, TableInfo *tbinfo) if (actual_atts == 0) appendPQExpBufferStr(q, " ("); else - appendPQExpBufferStr(q, ","); + appendPQExpBufferChar(q, ','); appendPQExpBufferStr(q, "\n "); actual_atts++; diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index db568096dcef9..f63c7e90d3c0a 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -1611,7 +1611,7 @@ describeOneTableDetails(const char *schemaname, if (!PQgetisnull(res, i, 5)) { if (tmpbuf.len > 0) - appendPQExpBufferStr(&tmpbuf, " "); + appendPQExpBufferChar(&tmpbuf, ' '); appendPQExpBuffer(&tmpbuf, _("collate %s"), PQgetvalue(res, i, 5)); } @@ -1619,7 +1619,7 @@ describeOneTableDetails(const char *schemaname, if (strcmp(PQgetvalue(res, i, 3), "t") == 0) { if (tmpbuf.len > 0) - appendPQExpBufferStr(&tmpbuf, " "); + appendPQExpBufferChar(&tmpbuf, ' '); appendPQExpBufferStr(&tmpbuf, _("not null")); } @@ -1628,7 +1628,7 @@ describeOneTableDetails(const char *schemaname, if (strlen(PQgetvalue(res, i, 2)) != 0) { if (tmpbuf.len > 0) - appendPQExpBufferStr(&tmpbuf, " "); + appendPQExpBufferChar(&tmpbuf, ' '); /* translator: default values of column definitions */ appendPQExpBuffer(&tmpbuf, _("default %s"), PQgetvalue(res, i, 2)); @@ -2440,7 +2440,7 @@ describeOneTableDetails(const char *schemaname, printfPQExpBuffer(&buf, "%*s %s", sw, "", PQgetvalue(result, i, 0)); if (i < tuples - 1) - appendPQExpBufferStr(&buf, ","); + appendPQExpBufferChar(&buf, ','); printTableAddFooter(&cont, buf.data); } diff --git a/src/bin/scripts/clusterdb.c b/src/bin/scripts/clusterdb.c index 85087af795671..8c0e7cfab28dd 100644 --- a/src/bin/scripts/clusterdb.c +++ b/src/bin/scripts/clusterdb.c @@ -201,7 +201,7 @@ cluster_one_database(const char *dbname, bool verbose, const char *table, appendPQExpBufferStr(&sql, " VERBOSE"); if (table) appendPQExpBuffer(&sql, " %s", table); - appendPQExpBufferStr(&sql, ";"); + appendPQExpBufferChar(&sql, ';'); conn = connectDatabase(dbname, host, port, username, prompt_password, 
progname, false); diff --git a/src/bin/scripts/createdb.c b/src/bin/scripts/createdb.c index a958bb86f014c..4d3fb22622aa6 100644 --- a/src/bin/scripts/createdb.c +++ b/src/bin/scripts/createdb.c @@ -195,7 +195,7 @@ main(int argc, char *argv[]) if (lc_ctype) appendPQExpBuffer(&sql, " LC_CTYPE '%s'", lc_ctype); - appendPQExpBufferStr(&sql, ";"); + appendPQExpBufferChar(&sql, ';'); /* No point in trying to use postgres db when creating postgres db. */ if (maintenance_db == NULL && strcmp(dbname, "postgres") == 0) @@ -222,7 +222,7 @@ main(int argc, char *argv[]) { printfPQExpBuffer(&sql, "COMMENT ON DATABASE %s IS ", fmtId(dbname)); appendStringLiteralConn(&sql, comment, conn); - appendPQExpBufferStr(&sql, ";"); + appendPQExpBufferChar(&sql, ';'); if (echo) printf("%s\n", sql.data); diff --git a/src/bin/scripts/createuser.c b/src/bin/scripts/createuser.c index fba21a1c65997..c8bcf0d0b2e65 100644 --- a/src/bin/scripts/createuser.c +++ b/src/bin/scripts/createuser.c @@ -321,7 +321,7 @@ main(int argc, char *argv[]) appendPQExpBuffer(&sql, "%s", fmtId(cell->val)); } } - appendPQExpBufferStr(&sql, ";"); + appendPQExpBufferChar(&sql, ';'); if (echo) printf("%s\n", sql.data); diff --git a/src/bin/scripts/reindexdb.c b/src/bin/scripts/reindexdb.c index 941729da2e787..80c78860bee5c 100644 --- a/src/bin/scripts/reindexdb.c +++ b/src/bin/scripts/reindexdb.c @@ -295,7 +295,7 @@ reindex_one_database(const char *name, const char *dbname, const char *type, appendPQExpBuffer(&sql, " SCHEMA %s", name); else if (strcmp(type, "DATABASE") == 0) appendPQExpBuffer(&sql, " DATABASE %s", fmtId(name)); - appendPQExpBufferStr(&sql, ";"); + appendPQExpBufferChar(&sql, ';'); conn = connectDatabase(dbname, host, port, username, prompt_password, progname, false); diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c index f600b0514a8d8..ca6d00368325f 100644 --- a/src/bin/scripts/vacuumdb.c +++ b/src/bin/scripts/vacuumdb.c @@ -392,7 +392,7 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, ntups = PQntuples(res); for (i = 0; i < ntups; i++) { - appendPQExpBuffer(&buf, "%s", + appendPQExpBufferStr(&buf, fmtQualifiedId(PQserverVersion(conn), PQgetvalue(res, i, 1), PQgetvalue(res, i, 0))); @@ -643,7 +643,7 @@ prepare_vacuum_command(PQExpBuffer sql, PGconn *conn, vacuumingOptions *vacopts, sep = comma; } if (sep != paren) - appendPQExpBufferStr(sql, ")"); + appendPQExpBufferChar(sql, ')'); } else { From bcac470d5b8762629132428ddf8fc8f1baa701f3 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 2 Jul 2015 12:50:29 +0300 Subject: [PATCH 009/442] Don't emit a spurious space at end of line in pg_dump of event triggers. Backpatch to 9.3 and above, where event triggers were added. 
--- src/bin/pg_dump/pg_dump.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 0a8129020bdda..32ac26f1db476 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -15277,13 +15277,12 @@ dumpEventTrigger(Archive *fout, DumpOptions *dopt, EventTriggerInfo *evtinfo) appendPQExpBufferStr(query, fmtId(evtinfo->dobj.name)); appendPQExpBufferStr(query, " ON "); appendPQExpBufferStr(query, fmtId(evtinfo->evtevent)); - appendPQExpBufferStr(query, " "); if (strcmp("", evtinfo->evttags) != 0) { appendPQExpBufferStr(query, "\n WHEN TAG IN ("); appendPQExpBufferStr(query, evtinfo->evttags); - appendPQExpBufferStr(query, ") "); + appendPQExpBufferChar(query, ')'); } appendPQExpBufferStr(query, "\n EXECUTE PROCEDURE "); From cf2b5f9b33fda1cbeb8efdfd3989b5e88af74167 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Thu, 2 Jul 2015 09:46:34 -0700 Subject: [PATCH 010/442] Whitespace fix - replace tab with spaces in CREATE TABLE command. --- src/test/regress/expected/rowsecurity.out | 2 +- src/test/regress/sql/rowsecurity.sql | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index 0ae555783bdb2..7a293f30b537c 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -2122,7 +2122,7 @@ EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak('abc'); (2 rows) CREATE TABLE test_qual_pushdown ( - abc text + abc text ); INSERT INTO test_qual_pushdown VALUES ('abc'),('def'); SELECT * FROM y2 JOIN test_qual_pushdown ON (b = abc) WHERE f_leak(abc); diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index fdadf99fd6216..fdd9b892ce6a8 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -802,7 +802,7 @@ SELECT * FROM y2 WHERE f_leak('abc'); EXPLAIN (COSTS OFF) SELECT * FROM y2 WHERE f_leak('abc'); CREATE TABLE test_qual_pushdown ( - abc text + abc text ); INSERT INTO test_qual_pushdown VALUES ('abc'),('def'); From 69e9f9639d5c569a71c82f99550e7bf2912664f1 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 2 Jul 2015 17:02:08 -0400 Subject: [PATCH 011/442] Fix misuse of TextDatumGetCString(). "TextDatumGetCString(PG_GETARG_TEXT_P(x))" is formally wrong: a text* is not a Datum. Although this coding will accidentally fail to fail on all known platforms, it risks leaking memory if a detoast step is needed, unlike "TextDatumGetCString(PG_GETARG_DATUM(x))" which is what's used elsewhere. Make pg_get_object_address() fall in line with other uses. Noted while reviewing two-arg current_setting() patch. 
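To make the hazard concrete, here is a minimal sketch of the calling convention involved (a hypothetical function, not code from this patch): PG_GETARG_TEXT_P() yields a text *, whereas TextDatumGetCString() expects a Datum, so the corrected form passes the raw Datum through.

#include "postgres.h"
#include "fmgr.h"
#include "utils/builtins.h"

PG_FUNCTION_INFO_V1(my_text_arg_func);

/* Hypothetical SQL-callable function illustrating the pattern. */
Datum
my_text_arg_func(PG_FUNCTION_ARGS)
{
	/*
	 * TextDatumGetCString(PG_GETARG_TEXT_P(0)) would be formally wrong here:
	 * a text * is not a Datum, and that coding risks leaking memory if a
	 * detoast step is needed.  Passing the raw Datum is the correct form.
	 */
	char	   *str = TextDatumGetCString(PG_GETARG_DATUM(0));

	PG_RETURN_TEXT_P(cstring_to_text(str));
}
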
--- src/backend/catalog/objectaddress.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/catalog/objectaddress.c b/src/backend/catalog/objectaddress.c index 83390f6bab007..052aab1003de2 100644 --- a/src/backend/catalog/objectaddress.c +++ b/src/backend/catalog/objectaddress.c @@ -1831,7 +1831,7 @@ textarray_to_strvaluelist(ArrayType *arr) Datum pg_get_object_address(PG_FUNCTION_ARGS) { - char *ttype = TextDatumGetCString(PG_GETARG_TEXT_P(0)); + char *ttype = TextDatumGetCString(PG_GETARG_DATUM(0)); ArrayType *namearr = PG_GETARG_ARRAYTYPE_P(1); ArrayType *argsarr = PG_GETARG_ARRAYTYPE_P(2); int itype; From eeaf1b6afacba0fc0a0e1878c2ed23f4fceef039 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Fri, 3 Jul 2015 11:53:58 +0900 Subject: [PATCH 012/442] Make WAL-related utilities handle .partial WAL files properly. Commit de76884 changed an archive recovery so that the last WAL segment with old timeline was renamed with suffix .partial. It should have updated WAL-related utilities so that they can handle such .paritial WAL files, but we forgot that. This patch changes pg_archivecleanup so that it can clean up even archived WAL files with .partial suffix. Also it allows us to specify .partial WAL file name as the command-line argument "oldestkeptwalfile". This patch also changes pg_resetxlog so that it can remove .partial WAL files in pg_xlog directory. pg_xlogdump cannot handle .partial WAL files. Per discussion, we decided only to document that limitation instead of adding the fix. Because a user can easily work around the limitation (i.e., just remove .partial suffix from the file name) and the fix seems complicated for very narrow use case. Back-patch to 9.5 where the problem existed. Review by Michael Paquier. Discussion: http://www.postgresql.org/message-id/CAHGQGwGxMKnVHGgTfiig2Bt_2djec0in3-DLJmtg7+nEiidFdQ@mail.gmail.com --- doc/src/sgml/ref/pg_xlogdump.sgml | 6 ++++ doc/src/sgml/ref/pgarchivecleanup.sgml | 6 ++-- src/bin/pg_archivecleanup/pg_archivecleanup.c | 31 ++++++++++++++++--- src/bin/pg_resetxlog/pg_resetxlog.c | 10 ++++-- 4 files changed, 43 insertions(+), 10 deletions(-) diff --git a/doc/src/sgml/ref/pg_xlogdump.sgml b/doc/src/sgml/ref/pg_xlogdump.sgml index d9f4a6a499ccf..1d78cf1a758a1 100644 --- a/doc/src/sgml/ref/pg_xlogdump.sgml +++ b/doc/src/sgml/ref/pg_xlogdump.sgml @@ -215,6 +215,12 @@ PostgreSQL documentation Only the specified timeline is displayed (or the default, if none is specified). Records in other timelines are ignored. + + + pg_xlogdump cannot read WAL files with suffix + .partial. If those files need to be read, .partial + suffix needs to be removed from the filename. + diff --git a/doc/src/sgml/ref/pgarchivecleanup.sgml b/doc/src/sgml/ref/pgarchivecleanup.sgml index 779159d7fc2c5..db39deaca16fe 100644 --- a/doc/src/sgml/ref/pgarchivecleanup.sgml +++ b/doc/src/sgml/ref/pgarchivecleanup.sgml @@ -60,8 +60,10 @@ archive_cleanup_command = 'pg_archivecleanup archivelocation %r' When used as a standalone program all WAL files logically preceding the oldestkeptwalfile will be removed from archivelocation. - In this mode, if you specify a .backup file name, then only the file prefix - will be used as the oldestkeptwalfile. This allows you to remove + In this mode, if you specify a .partial or .backup + file name, then only the file prefix will be used as the + oldestkeptwalfile. This treatment of .backup + file name allows you to remove all WAL files archived prior to a specific base backup without error. 
For example, the following example will remove all files older than WAL file name 000000010000003700000010: diff --git a/src/bin/pg_archivecleanup/pg_archivecleanup.c b/src/bin/pg_archivecleanup/pg_archivecleanup.c index 579a9bb84307e..c5569f32a336b 100644 --- a/src/bin/pg_archivecleanup/pg_archivecleanup.c +++ b/src/bin/pg_archivecleanup/pg_archivecleanup.c @@ -125,7 +125,7 @@ CleanupPriorWALFiles(void) * file. Note that this means files are not removed in the order * they were originally written, in case this worries you. */ - if (IsXLogFileName(walfile) && + if ((IsXLogFileName(walfile) || IsPartialXLogFileName(walfile)) && strcmp(walfile + 8, exclusiveCleanupFileName + 8) < 0) { /* @@ -181,7 +181,7 @@ CleanupPriorWALFiles(void) * SetWALFileNameForCleanup() * * Set the earliest WAL filename that we want to keep on the archive - * and decide whether we need_cleanup + * and decide whether we need cleanup */ static void SetWALFileNameForCleanup(void) @@ -192,9 +192,10 @@ SetWALFileNameForCleanup(void) /* * If restartWALFileName is a WAL file name then just use it directly. If - * restartWALFileName is a .backup filename, make sure we use the prefix - * of the filename, otherwise we will remove wrong files since - * 000000010000000000000010.00000020.backup is after + * restartWALFileName is a .partial or .backup filename, make sure we use + * the prefix of the filename, otherwise we will remove wrong files since + * 000000010000000000000010.partial and + * 000000010000000000000010.00000020.backup are after * 000000010000000000000010. */ if (IsXLogFileName(restartWALFileName)) @@ -202,6 +203,26 @@ SetWALFileNameForCleanup(void) strcpy(exclusiveCleanupFileName, restartWALFileName); fnameOK = true; } + else if (IsPartialXLogFileName(restartWALFileName)) + { + int args; + uint32 tli = 1, + log = 0, + seg = 0; + + args = sscanf(restartWALFileName, "%08X%08X%08X.partial", + &tli, &log, &seg); + if (args == 3) + { + fnameOK = true; + + /* + * Use just the prefix of the filename, ignore everything after + * first period + */ + XLogFileNameById(exclusiveCleanupFileName, tli, log, seg); + } + } else if (IsBackupHistoryFileName(restartWALFileName)) { int args; diff --git a/src/bin/pg_resetxlog/pg_resetxlog.c b/src/bin/pg_resetxlog/pg_resetxlog.c index e19a72b4c1640..e7e8059a38df1 100644 --- a/src/bin/pg_resetxlog/pg_resetxlog.c +++ b/src/bin/pg_resetxlog/pg_resetxlog.c @@ -906,7 +906,8 @@ FindEndOfXLOG(void) while (errno = 0, (xlde = readdir(xldir)) != NULL) { - if (IsXLogFileName(xlde->d_name)) + if (IsXLogFileName(xlde->d_name) || + IsPartialXLogFileName(xlde->d_name)) { unsigned int tli, log, @@ -976,7 +977,8 @@ KillExistingXLOG(void) while (errno = 0, (xlde = readdir(xldir)) != NULL) { - if (IsXLogFileName(xlde->d_name)) + if (IsXLogFileName(xlde->d_name) || + IsPartialXLogFileName(xlde->d_name)) { snprintf(path, MAXPGPATH, "%s/%s", XLOGDIR, xlde->d_name); if (unlink(path) < 0) @@ -1028,7 +1030,9 @@ KillExistingArchiveStatus(void) { if (strspn(xlde->d_name, "0123456789ABCDEF") == XLOG_FNAME_LEN && (strcmp(xlde->d_name + XLOG_FNAME_LEN, ".ready") == 0 || - strcmp(xlde->d_name + XLOG_FNAME_LEN, ".done") == 0)) + strcmp(xlde->d_name + XLOG_FNAME_LEN, ".done") == 0 || + strcmp(xlde->d_name + XLOG_FNAME_LEN, ".partial.ready") == 0 || + strcmp(xlde->d_name + XLOG_FNAME_LEN, ".partial.done") == 0)) { snprintf(path, MAXPGPATH, "%s/%s", ARCHSTATDIR, xlde->d_name); if (unlink(path) < 0) From 5174ca17a2479e9f9844d72cc6e777f473f83566 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Fri, 3 Jul 2015 
11:04:57 +0300 Subject: [PATCH 013/442] Fix pgbench progress report behaviour when pgbench or a query gets stuck. There were two issues here. First, if a query got stuck so that it took e.g. 5 seconds, and progress interval was 1 second, no progress reports were printed until the query returned. Fix so that we wake up specifically to print the progress report. Secondly, if pgbench got stuck so that it would nevertheless not print a progress report on time, and enough time passes that it's already time to print the next progress report, just skip the one that was missed. Before this patch, it would print the missed one with 0 TPS immediately after the previous one. Fabien Coelho. Backpatch to 9.4, where progress reports were added. --- src/bin/pgbench/pgbench.c | 47 +++++++++++++++++++++++++++++++++++++-- 1 file changed, 45 insertions(+), 2 deletions(-) diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index 2c3e3650c8a30..a6673e42a73b0 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -3638,6 +3638,33 @@ threadRun(void *arg) maxsock = sock; } + /* also wake up to print the next progress report on time */ + if (progress && min_usec > 0 +#if !defined(PTHREAD_FORK_EMULATION) + && thread->tid == 0 +#endif /* !PTHREAD_FORK_EMULATION */ + ) + { + /* get current time if needed */ + if (now_usec == 0) + { + instr_time now; + + INSTR_TIME_SET_CURRENT(now); + now_usec = INSTR_TIME_GET_MICROSEC(now); + } + + if (now_usec >= next_report) + min_usec = 0; + else if ((next_report - now_usec) < min_usec) + min_usec = next_report - now_usec; + } + + /* + * Sleep until we receive data from the server, or a nap-time + * specified in the script ends, or it's time to print a progress + * report. + */ if (min_usec > 0 && maxsock != -1) { int nsocks; /* return from select(2) */ @@ -3743,7 +3770,15 @@ threadRun(void *arg) last_lags = lags; last_report = now; last_skipped = thread->throttle_latency_skipped; - next_report += (int64) progress *1000000; + + /* + * Ensure that the next report is in the future, in case + * pgbench/postgres got stuck somewhere. + */ + do + { + next_report += (int64) progress *1000000; + } while (now >= next_report); } } #else @@ -3807,7 +3842,15 @@ threadRun(void *arg) last_lags = lags; last_report = now; last_skipped = thread->throttle_latency_skipped; - next_report += (int64) progress *1000000; + + /* + * Ensure that the next report is in the future, in case + * pgbench/postgres got stuck somewhere. + */ + do + { + next_report += (int64) progress *1000000; + } while (now >= next_report); } } #endif /* PTHREAD_FORK_EMULATION */ From d1fec374f716ffbfb9f9a758c9b5b23c00f01fcb Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 5 Jul 2015 12:01:01 -0400 Subject: [PATCH 014/442] Make numeric form of PG version number readily available in Makefiles. Expose PG_VERSION_NUM (e.g., "90600") as a Make variable; but for consistency with the other Make variables holding similar info, call the variable just VERSION_NUM not PG_VERSION_NUM. There was some discussion of making this value available as a pg_config value as well. However, that would entail substantially more work than this two-line patch. Given that there was not exactly universal consensus that we need this at all, let's just do a minimal amount of work for now. Back-patch of commit a5d489ccb7e613c7ca3be6141092b8c1d2c13fa7, so that this variable is actually useful for its intended purpose sometime before 2020. 
Michael Paquier, reviewed by Pavel Stehule --- configure | 2 ++ configure.in | 1 + src/Makefile.global.in | 1 + 3 files changed, 4 insertions(+) diff --git a/configure b/configure index d8f21563f6de5..38cec0fe70c11 100755 --- a/configure +++ b/configure @@ -627,6 +627,7 @@ ac_includes_default="\ ac_subst_vars='LTLIBOBJS vpath_build +PG_VERSION_NUM PROVE OSX XSLTPROC @@ -15549,6 +15550,7 @@ _ACEOF + # Begin output steps { $as_echo "$as_me:${as_lineno-$LINENO}: using compiler=$cc_string" >&5 diff --git a/configure.in b/configure.in index 21ba3a52f9f3a..143e667ce27eb 100644 --- a/configure.in +++ b/configure.in @@ -2128,6 +2128,7 @@ AC_DEFINE_UNQUOTED(PG_VERSION_STR, tr '.' ' ' | $AWK '{printf "%d%02d%02d", $1, $2, (NF >= 3) ? $3 : 0}'`"] AC_DEFINE_UNQUOTED(PG_VERSION_NUM, $PG_VERSION_NUM, [PostgreSQL version as a number]) +AC_SUBST(PG_VERSION_NUM) # Begin output steps diff --git a/src/Makefile.global.in b/src/Makefile.global.in index c583b44dbf356..8eab178ebd382 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -38,6 +38,7 @@ all: # PostgreSQL version number VERSION = @PACKAGE_VERSION@ MAJORVERSION = @PG_MAJORVERSION@ +VERSION_NUM = @PG_VERSION_NUM@ # Support for VPATH builds # (PGXS VPATH support is handled separately in pgxs.mk) From 9a92ad4b9eec0051296d6475feb9c9955c860a9d Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 5 Jul 2015 12:08:15 -0400 Subject: [PATCH 015/442] Fix bad grammar in brin.sgml. Christoph Berg --- doc/src/sgml/brin.sgml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml index e25f09c6800cf..cdfe5dec565b8 100644 --- a/doc/src/sgml/brin.sgml +++ b/doc/src/sgml/brin.sgml @@ -531,8 +531,8 @@ typedef struct BrinOpcInfo To implement these methods in a generic way, the operator class defines its own internal support functions. - (For instance, min/max operator classes implements - support functions for the four inequality operators for the data type.) + (For instance, the min/max operator classes implement + support functions for the four inequality operators for their data type.) Additionally, the operator class must supply appropriate operator entries, to enable the optimizer to use the index when those operators are From 486d3a2bb4b9e4c1cc64241f4b36643a22da8693 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 5 Jul 2015 13:14:38 -0400 Subject: [PATCH 016/442] Fix some typos in regression test comments. Back-patch to avoid unnecessary cross-branch differences. CharSyam --- src/test/regress/expected/alter_generic.out | 2 +- src/test/regress/expected/jsonb.out | 4 ++-- src/test/regress/expected/jsonb_1.out | 4 ++-- src/test/regress/sql/alter_generic.sql | 2 +- src/test/regress/sql/jsonb.sql | 4 ++-- 5 files changed, 8 insertions(+), 8 deletions(-) diff --git a/src/test/regress/expected/alter_generic.out b/src/test/regress/expected/alter_generic.out index 7845b8af591f4..4c3c8826b755c 100644 --- a/src/test/regress/expected/alter_generic.out +++ b/src/test/regress/expected/alter_generic.out @@ -450,7 +450,7 @@ ERROR: associated data types must be specified for index support procedure DROP OPERATOR FAMILY alt_opf16 USING gist; -- Should fail. duplicate operator number / function number in ALTER OPERATOR FAMILY ... 
ADD FUNCTION CREATE OPERATOR FAMILY alt_opf17 USING btree; -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4), OPERATOR 1 < (int4, int4); -- operator # appears twice in same statment +ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4), OPERATOR 1 < (int4, int4); -- operator # appears twice in same statement ERROR: operator number 1 for (integer,integer) appears more than once ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested first-time ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested again in separate statement diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out index 171520275d7aa..4416d52611f08 100644 --- a/src/test/regress/expected/jsonb.out +++ b/src/test/regress/expected/jsonb.out @@ -2162,7 +2162,7 @@ SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}'; 3 (1 row) --- excercise GIN_SEARCH_MODE_ALL +-- exercise GIN_SEARCH_MODE_ALL SELECT count(*) FROM testjsonb WHERE j @> '{}'; count ------- @@ -2336,7 +2336,7 @@ SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; 2 (1 row) --- excercise GIN_SEARCH_MODE_ALL +-- exercise GIN_SEARCH_MODE_ALL SELECT count(*) FROM testjsonb WHERE j @> '{}'; count ------- diff --git a/src/test/regress/expected/jsonb_1.out b/src/test/regress/expected/jsonb_1.out index 864d85c6050e2..6d67655cf6aad 100644 --- a/src/test/regress/expected/jsonb_1.out +++ b/src/test/regress/expected/jsonb_1.out @@ -2162,7 +2162,7 @@ SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}'; 3 (1 row) --- excercise GIN_SEARCH_MODE_ALL +-- exercise GIN_SEARCH_MODE_ALL SELECT count(*) FROM testjsonb WHERE j @> '{}'; count ------- @@ -2336,7 +2336,7 @@ SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; 2 (1 row) --- excercise GIN_SEARCH_MODE_ALL +-- exercise GIN_SEARCH_MODE_ALL SELECT count(*) FROM testjsonb WHERE j @> '{}'; count ------- diff --git a/src/test/regress/sql/alter_generic.sql b/src/test/regress/sql/alter_generic.sql index f46cbc828a679..ed4398b30a0ae 100644 --- a/src/test/regress/sql/alter_generic.sql +++ b/src/test/regress/sql/alter_generic.sql @@ -390,7 +390,7 @@ DROP OPERATOR FAMILY alt_opf16 USING gist; -- Should fail. duplicate operator number / function number in ALTER OPERATOR FAMILY ... 
ADD FUNCTION CREATE OPERATOR FAMILY alt_opf17 USING btree; -ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4), OPERATOR 1 < (int4, int4); -- operator # appears twice in same statment +ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4), OPERATOR 1 < (int4, int4); -- operator # appears twice in same statement ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested first-time ALTER OPERATOR FAMILY alt_opf17 USING btree ADD OPERATOR 1 < (int4, int4); -- operator 1 requested again in separate statement ALTER OPERATOR FAMILY alt_opf17 USING btree ADD diff --git a/src/test/regress/sql/jsonb.sql b/src/test/regress/sql/jsonb.sql index a25a19d7f0163..febdeeb7978a0 100644 --- a/src/test/regress/sql/jsonb.sql +++ b/src/test/regress/sql/jsonb.sql @@ -529,7 +529,7 @@ SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; SELECT count(*) FROM testjsonb WHERE j @> '{"array":["foo"]}'; SELECT count(*) FROM testjsonb WHERE j @> '{"array":["bar"]}'; --- excercise GIN_SEARCH_MODE_ALL +-- exercise GIN_SEARCH_MODE_ALL SELECT count(*) FROM testjsonb WHERE j @> '{}'; SELECT count(*) FROM testjsonb WHERE j ? 'public'; SELECT count(*) FROM testjsonb WHERE j ? 'bar'; @@ -582,7 +582,7 @@ SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC"}'; SELECT count(*) FROM testjsonb WHERE j @> '{"wait":"CC", "public":true}'; SELECT count(*) FROM testjsonb WHERE j @> '{"age":25}'; SELECT count(*) FROM testjsonb WHERE j @> '{"age":25.0}'; --- excercise GIN_SEARCH_MODE_ALL +-- exercise GIN_SEARCH_MODE_ALL SELECT count(*) FROM testjsonb WHERE j @> '{}'; RESET enable_seqscan; From c7673d2b1fd54caa82c9870927d0bef6518bb461 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 5 Jul 2015 19:36:57 -0400 Subject: [PATCH 017/442] Make a editorial pass over pgbench's error messages. The lack of consistency, and lack of attention to our message style guidelines, was a bit striking. Try to make 'em better. 
--- src/bin/pgbench/pgbench.c | 165 +++++++++++++++++++++----------------- 1 file changed, 90 insertions(+), 75 deletions(-) diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index a6673e42a73b0..ceaf14cde16e2 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -667,7 +667,7 @@ doConnect(void) if (!conn) { - fprintf(stderr, "Connection to database \"%s\" failed\n", + fprintf(stderr, "connection to database \"%s\" failed\n", dbName); return NULL; } @@ -685,7 +685,7 @@ doConnect(void) /* check to see that the backend connection was successfully made */ if (PQstatus(conn) == CONNECTION_BAD) { - fprintf(stderr, "Connection to database \"%s\" failed:\n%s", + fprintf(stderr, "connection to database \"%s\" failed:\n%s", dbName, PQerrorMessage(conn)); PQfinish(conn); return NULL; @@ -779,7 +779,8 @@ putVariable(CState *st, const char *context, char *name, char *value) */ if (!isLegalVariableName(name)) { - fprintf(stderr, "%s: invalid variable name '%s'\n", context, name); + fprintf(stderr, "%s: invalid variable name: \"%s\"\n", + context, name); return false; } @@ -924,7 +925,7 @@ evaluateExpr(CState *st, PgBenchExpr *expr, int64 *retval) if ((var = getVariable(st, expr->u.variable.varname)) == NULL) { - fprintf(stderr, "undefined variable %s\n", + fprintf(stderr, "undefined variable \"%s\"\n", expr->u.variable.varname); return false; } @@ -1024,14 +1025,15 @@ runShellCommand(CState *st, char *variable, char **argv, int argc) } else if ((arg = getVariable(st, argv[i] + 1)) == NULL) { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[i]); + fprintf(stderr, "%s: undefined variable \"%s\"\n", + argv[0], argv[i]); return false; } arglen = strlen(arg); if (len + arglen + (i > 0 ? 1 : 0) >= SHELL_COMMAND_SIZE - 1) { - fprintf(stderr, "%s: too long shell command\n", argv[0]); + fprintf(stderr, "%s: shell command is too long\n", argv[0]); return false; } @@ -1049,7 +1051,7 @@ runShellCommand(CState *st, char *variable, char **argv, int argc) if (system(command)) { if (!timer_exceeded) - fprintf(stderr, "%s: cannot launch shell command\n", argv[0]); + fprintf(stderr, "%s: could not launch shell command\n", argv[0]); return false; } return true; @@ -1058,19 +1060,19 @@ runShellCommand(CState *st, char *variable, char **argv, int argc) /* Execute the command with pipe and read the standard output. 
*/ if ((fp = popen(command, "r")) == NULL) { - fprintf(stderr, "%s: cannot launch shell command\n", argv[0]); + fprintf(stderr, "%s: could not launch shell command\n", argv[0]); return false; } if (fgets(res, sizeof(res), fp) == NULL) { if (!timer_exceeded) - fprintf(stderr, "%s: cannot read the result\n", argv[0]); + fprintf(stderr, "%s: could not read result of shell command\n", argv[0]); (void) pclose(fp); return false; } if (pclose(fp) < 0) { - fprintf(stderr, "%s: cannot close shell command\n", argv[0]); + fprintf(stderr, "%s: could not close shell command\n", argv[0]); return false; } @@ -1080,7 +1082,8 @@ runShellCommand(CState *st, char *variable, char **argv, int argc) endptr++; if (*res == '\0' || *endptr != '\0') { - fprintf(stderr, "%s: must return an integer ('%s' returned)\n", argv[0], res); + fprintf(stderr, "%s: shell command must return an integer (not \"%s\")\n", + argv[0], res); return false; } snprintf(res, sizeof(res), "%d", retval); @@ -1088,7 +1091,7 @@ runShellCommand(CState *st, char *variable, char **argv, int argc) return false; #ifdef DEBUG - printf("shell parameter name: %s, value: %s\n", argv[1], res); + printf("shell parameter name: \"%s\", value: \"%s\"\n", argv[1], res); #endif return true; } @@ -1244,7 +1247,7 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa fprintf(stderr, "client %d receiving\n", st->id); if (!PQconsumeInput(st->con)) { /* there's something wrong */ - fprintf(stderr, "Client %d aborted in state %d. Probably the backend died while processing.\n", st->id, st->state); + fprintf(stderr, "client %d aborted in state %d; perhaps the backend died while processing\n", st->id, st->state); return clientDone(st, false); } if (PQisBusy(st->con)) @@ -1313,7 +1316,7 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa case PGRES_TUPLES_OK: break; /* OK */ default: - fprintf(stderr, "Client %d aborted in state %d: %s", + fprintf(stderr, "client %d aborted in state %d: %s", st->id, st->state, PQerrorMessage(st->con)); PQclear(res); return clientDone(st, false); @@ -1364,7 +1367,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa INSTR_TIME_SET_CURRENT(start); if ((st->con = doConnect()) == NULL) { - fprintf(stderr, "Client %d aborted in establishing connection.\n", st->id); + fprintf(stderr, "client %d aborted while establishing connection\n", + st->id); return clientDone(st, false); } INSTR_TIME_SET_CURRENT(end); @@ -1468,7 +1472,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa if (r == 0) { if (debug) - fprintf(stderr, "client %d cannot send %s\n", st->id, command->argv[0]); + fprintf(stderr, "client %d could not send %s\n", + st->id, command->argv[0]); st->ecnt++; } else @@ -1500,7 +1505,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa { if ((var = getVariable(st, argv[2] + 1)) == NULL) { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[2]); + fprintf(stderr, "%s: undefined variable \"%s\"\n", + argv[0], argv[2]); st->ecnt++; return true; } @@ -1509,20 +1515,12 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa else min = strtoint64(argv[2]); -#ifdef NOT_USED - if (min < 0) - { - fprintf(stderr, "%s: invalid minimum number %d\n", argv[0], min); - st->ecnt++; - return; - } -#endif - if (*argv[3] == ':') { if ((var = getVariable(st, argv[3] + 1)) == NULL) { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[3]); + 
fprintf(stderr, "%s: undefined variable \"%s\"\n", + argv[0], argv[3]); st->ecnt++; return true; } @@ -1533,7 +1531,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa if (max < min) { - fprintf(stderr, "%s: maximum is less than minimum\n", argv[0]); + fprintf(stderr, "%s: \\setrandom maximum is less than minimum\n", + argv[0]); st->ecnt++; return true; } @@ -1548,7 +1547,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa */ if (max - min < 0 || (max - min) + 1 < 0) { - fprintf(stderr, "%s: range too large\n", argv[0]); + fprintf(stderr, "%s: \\setrandom range is too large\n", + argv[0]); st->ecnt++; return true; } @@ -1569,7 +1569,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa { if ((var = getVariable(st, argv[5] + 1)) == NULL) { - fprintf(stderr, "%s: invalid threshold number %s\n", argv[0], argv[5]); + fprintf(stderr, "%s: invalid threshold number: \"%s\"\n", + argv[0], argv[5]); st->ecnt++; return true; } @@ -1582,7 +1583,7 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa { if (threshold < MIN_GAUSSIAN_THRESHOLD) { - fprintf(stderr, "%s: gaussian threshold must be at least %f\n,", argv[5], MIN_GAUSSIAN_THRESHOLD); + fprintf(stderr, "gaussian threshold must be at least %f (not \"%s\")\n", MIN_GAUSSIAN_THRESHOLD, argv[5]); st->ecnt++; return true; } @@ -1595,7 +1596,7 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa { if (threshold <= 0.0) { - fprintf(stderr, "%s: exponential threshold must be strictly positive\n,", argv[5]); + fprintf(stderr, "exponential threshold must be greater than zero (not \"%s\")\n", argv[5]); st->ecnt++; return true; } @@ -1607,7 +1608,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa } else /* this means an error somewhere in the parsing phase... 
*/ { - fprintf(stderr, "%s: unexpected arguments\n", argv[0]); + fprintf(stderr, "%s: invalid arguments for \\setrandom\n", + argv[0]); st->ecnt++; return true; } @@ -1651,7 +1653,8 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa { if ((var = getVariable(st, argv[1] + 1)) == NULL) { - fprintf(stderr, "%s: undefined variable %s\n", argv[0], argv[1]); + fprintf(stderr, "%s: undefined variable \"%s\"\n", + argv[0], argv[1]); st->ecnt++; return true; } @@ -2505,7 +2508,7 @@ process_file(char *filename) if (num_files >= MAX_FILES) { - fprintf(stderr, "Up to only %d SQL files are allowed\n", MAX_FILES); + fprintf(stderr, "at most %d SQL files are allowed\n", MAX_FILES); exit(1); } @@ -2516,7 +2519,8 @@ process_file(char *filename) fd = stdin; else if ((fd = fopen(filename, "r")) == NULL) { - fprintf(stderr, "%s: %s\n", filename, strerror(errno)); + fprintf(stderr, "could not open file \"%s\": %s\n", + filename, strerror(errno)); pg_free(my_commands); return false; } @@ -2896,7 +2900,8 @@ main(int argc, char **argv) nclients = atoi(optarg); if (nclients <= 0 || nclients > MAXCLIENTS) { - fprintf(stderr, "invalid number of clients: %d\n", nclients); + fprintf(stderr, "invalid number of clients: \"%s\"\n", + optarg); exit(1); } #ifdef HAVE_GETRLIMIT @@ -2909,10 +2914,11 @@ main(int argc, char **argv) fprintf(stderr, "getrlimit failed: %s\n", strerror(errno)); exit(1); } - if (rlim.rlim_cur <= (nclients + 2)) + if (rlim.rlim_cur < nclients + 3) { - fprintf(stderr, "You need at least %d open files but you are only allowed to use %ld.\n", nclients + 2, (long) rlim.rlim_cur); - fprintf(stderr, "Use limit/ulimit to increase the limit before using pgbench.\n"); + fprintf(stderr, "need at least %d open files, but system limit is %ld\n", + nclients + 3, (long) rlim.rlim_cur); + fprintf(stderr, "Reduce number of clients, or use limit/ulimit to increase the system limit.\n"); exit(1); } #endif /* HAVE_GETRLIMIT */ @@ -2922,7 +2928,8 @@ main(int argc, char **argv) nthreads = atoi(optarg); if (nthreads <= 0) { - fprintf(stderr, "invalid number of threads: %d\n", nthreads); + fprintf(stderr, "invalid number of threads: \"%s\"\n", + optarg); exit(1); } break; @@ -2939,7 +2946,7 @@ main(int argc, char **argv) scale = atoi(optarg); if (scale <= 0) { - fprintf(stderr, "invalid scaling factor: %d\n", scale); + fprintf(stderr, "invalid scaling factor: \"%s\"\n", optarg); exit(1); } break; @@ -2947,13 +2954,14 @@ main(int argc, char **argv) benchmarking_option_set = true; if (duration > 0) { - fprintf(stderr, "specify either a number of transactions (-t) or a duration (-T), not both.\n"); + fprintf(stderr, "specify either a number of transactions (-t) or a duration (-T), not both\n"); exit(1); } nxacts = atoi(optarg); if (nxacts <= 0) { - fprintf(stderr, "invalid number of transactions: %d\n", nxacts); + fprintf(stderr, "invalid number of transactions: \"%s\"\n", + optarg); exit(1); } break; @@ -2961,13 +2969,13 @@ main(int argc, char **argv) benchmarking_option_set = true; if (nxacts > 0) { - fprintf(stderr, "specify either a number of transactions (-t) or a duration (-T), not both.\n"); + fprintf(stderr, "specify either a number of transactions (-t) or a duration (-T), not both\n"); exit(1); } duration = atoi(optarg); if (duration <= 0) { - fprintf(stderr, "invalid duration: %d\n", duration); + fprintf(stderr, "invalid duration: \"%s\"\n", optarg); exit(1); } break; @@ -2997,7 +3005,8 @@ main(int argc, char **argv) if ((p = strchr(optarg, '=')) == NULL || p == optarg || *(p + 1) 
== '\0') { - fprintf(stderr, "invalid variable definition: %s\n", optarg); + fprintf(stderr, "invalid variable definition: \"%s\"\n", + optarg); exit(1); } @@ -3009,9 +3018,9 @@ main(int argc, char **argv) case 'F': initialization_option_set = true; fillfactor = atoi(optarg); - if ((fillfactor < 10) || (fillfactor > 100)) + if (fillfactor < 10 || fillfactor > 100) { - fprintf(stderr, "invalid fillfactor: %d\n", fillfactor); + fprintf(stderr, "invalid fillfactor: \"%s\"\n", optarg); exit(1); } break; @@ -3019,7 +3028,7 @@ main(int argc, char **argv) benchmarking_option_set = true; if (num_files > 0) { - fprintf(stderr, "query mode (-M) should be specified before transaction scripts (-f)\n"); + fprintf(stderr, "query mode (-M) should be specified before any transaction scripts (-f)\n"); exit(1); } for (querymode = 0; querymode < NUM_QUERYMODE; querymode++) @@ -3027,7 +3036,8 @@ main(int argc, char **argv) break; if (querymode >= NUM_QUERYMODE) { - fprintf(stderr, "invalid query mode (-M): %s\n", optarg); + fprintf(stderr, "invalid query mode (-M): \"%s\"\n", + optarg); exit(1); } break; @@ -3036,8 +3046,7 @@ main(int argc, char **argv) progress = atoi(optarg); if (progress <= 0) { - fprintf(stderr, - "thread progress delay (-P) must be positive (%s)\n", + fprintf(stderr, "invalid thread progress delay: \"%s\"\n", optarg); exit(1); } @@ -3051,7 +3060,7 @@ main(int argc, char **argv) if (throttle_value <= 0.0) { - fprintf(stderr, "invalid rate limit: %s\n", optarg); + fprintf(stderr, "invalid rate limit: \"%s\"\n", optarg); exit(1); } /* Invert rate limit into a time offset */ @@ -3064,7 +3073,8 @@ main(int argc, char **argv) if (limit_ms <= 0.0) { - fprintf(stderr, "invalid latency limit: %s\n", optarg); + fprintf(stderr, "invalid latency limit: \"%s\"\n", + optarg); exit(1); } benchmarking_option_set = true; @@ -3089,20 +3099,21 @@ main(int argc, char **argv) sample_rate = atof(optarg); if (sample_rate <= 0.0 || sample_rate > 1.0) { - fprintf(stderr, "invalid sampling rate: %f\n", sample_rate); + fprintf(stderr, "invalid sampling rate: \"%s\"\n", optarg); exit(1); } break; case 5: #ifdef WIN32 - fprintf(stderr, "--aggregate-interval is not currently supported on Windows"); + fprintf(stderr, "--aggregate-interval is not currently supported on Windows\n"); exit(1); #else benchmarking_option_set = true; agg_interval = atoi(optarg); if (agg_interval <= 0) { - fprintf(stderr, "invalid number of seconds for aggregation: %d\n", agg_interval); + fprintf(stderr, "invalid number of seconds for aggregation: \"%s\"\n", + optarg); exit(1); } #endif @@ -3133,7 +3144,7 @@ main(int argc, char **argv) { if (benchmarking_option_set) { - fprintf(stderr, "some options cannot be used in initialization (-i) mode\n"); + fprintf(stderr, "some of the specified options cannot be used in initialization (-i) mode\n"); exit(1); } @@ -3144,7 +3155,7 @@ main(int argc, char **argv) { if (initialization_option_set) { - fprintf(stderr, "some options cannot be used in benchmarking mode\n"); + fprintf(stderr, "some of the specified options cannot be used in benchmarking mode\n"); exit(1); } } @@ -3162,30 +3173,30 @@ main(int argc, char **argv) /* --sampling-rate may be used only with -l */ if (sample_rate > 0.0 && !use_log) { - fprintf(stderr, "log sampling rate is allowed only when logging transactions (-l) \n"); + fprintf(stderr, "log sampling (--sampling-rate) is allowed only when logging transactions (-l)\n"); exit(1); } /* --sampling-rate may must not be used with --aggregate-interval */ if (sample_rate > 0.0 && 
agg_interval > 0) { - fprintf(stderr, "log sampling (--sampling-rate) and aggregation (--aggregate-interval) can't be used at the same time\n"); + fprintf(stderr, "log sampling (--sampling-rate) and aggregation (--aggregate-interval) cannot be used at the same time\n"); exit(1); } - if (agg_interval > 0 && (!use_log)) + if (agg_interval > 0 && !use_log) { fprintf(stderr, "log aggregation is allowed only when actually logging transactions\n"); exit(1); } - if ((duration > 0) && (agg_interval > duration)) + if (duration > 0 && agg_interval > duration) { - fprintf(stderr, "number of seconds for aggregation (%d) must not be higher that test duration (%d)\n", agg_interval, duration); + fprintf(stderr, "number of seconds for aggregation (%d) must not be higher than test duration (%d)\n", agg_interval, duration); exit(1); } - if ((duration > 0) && (agg_interval > 0) && (duration % agg_interval != 0)) + if (duration > 0 && agg_interval > 0 && duration % agg_interval != 0) { fprintf(stderr, "duration (%d) must be a multiple of aggregation interval (%d)\n", duration, agg_interval); exit(1); @@ -3251,7 +3262,7 @@ main(int argc, char **argv) if (PQstatus(con) == CONNECTION_BAD) { - fprintf(stderr, "Connection to database '%s' failed.\n", dbName); + fprintf(stderr, "connection to database \"%s\" failed\n", dbName); fprintf(stderr, "%s", PQerrorMessage(con)); exit(1); } @@ -3271,7 +3282,8 @@ main(int argc, char **argv) scale = atoi(PQgetvalue(res, 0, 0)); if (scale < 0) { - fprintf(stderr, "count(*) from pgbench_branches invalid (%d)\n", scale); + fprintf(stderr, "invalid count(*) from pgbench_branches: \"%s\"\n", + PQgetvalue(res, 0, 0)); exit(1); } PQclear(res); @@ -3279,7 +3291,7 @@ main(int argc, char **argv) /* warn if we override user-given -s switch */ if (scale_given) fprintf(stderr, - "Scale option ignored, using pgbench_branches table count = %d\n", + "scale option ignored, using count from pgbench_branches table (%d)\n", scale); } @@ -3416,7 +3428,7 @@ main(int argc, char **argv) if (err != 0 || thread->thread == INVALID_THREAD) { - fprintf(stderr, "cannot create thread: %s\n", strerror(err)); + fprintf(stderr, "could not create thread: %s\n", strerror(err)); exit(1); } } @@ -3528,7 +3540,8 @@ threadRun(void *arg) if (logfile == NULL) { - fprintf(stderr, "Couldn't open logfile \"%s\": %s", logpath, strerror(errno)); + fprintf(stderr, "could not open logfile \"%s\": %s\n", + logpath, strerror(errno)); goto done; } } @@ -3562,7 +3575,8 @@ threadRun(void *arg) if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND) { - fprintf(stderr, "Client %d aborted in state %d. Execution meta-command failed.\n", i, st->state); + fprintf(stderr, "client %d aborted in state %d; execution of meta-command failed\n", + i, st->state); remains--; /* I've aborted */ PQfinish(st->con); st->con = NULL; @@ -3684,7 +3698,7 @@ threadRun(void *arg) if (errno == EINTR) continue; /* must be something wrong */ - fprintf(stderr, "select failed: %s\n", strerror(errno)); + fprintf(stderr, "select() failed: %s\n", strerror(errno)); goto done; } } @@ -3705,7 +3719,8 @@ threadRun(void *arg) if (st->ecnt > prev_ecnt && commands[st->state]->type == META_COMMAND) { - fprintf(stderr, "Client %d aborted in state %d. 
Execution of meta-command failed.\n", i, st->state); + fprintf(stderr, "client %d aborted in state %d; execution of meta-command failed\n", + i, st->state); remains--; /* I've aborted */ PQfinish(st->con); st->con = NULL; @@ -4004,7 +4019,7 @@ setalarm(int seconds) win32_timer_callback, NULL, seconds * 1000, 0, WT_EXECUTEINTIMERTHREAD | WT_EXECUTEONLYONCE)) { - fprintf(stderr, "Failed to set timer\n"); + fprintf(stderr, "failed to set timer\n"); exit(1); } } From a830c83c9b71b78c65c5ddd71db2ecd68601ce73 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Mon, 6 Jul 2015 20:58:58 +0900 Subject: [PATCH 018/442] Remove incorrect warning from pg_archivecleanup document. The .backup file name can be passed to pg_archivecleanup even if it includes the extension which is specified in -x option. However, previously the document incorrectly warned a user not to do that. Back-patch to 9.2 where pg_archivecleanup's -x option and the warning were added. --- doc/src/sgml/ref/pgarchivecleanup.sgml | 5 ----- 1 file changed, 5 deletions(-) diff --git a/doc/src/sgml/ref/pgarchivecleanup.sgml b/doc/src/sgml/ref/pgarchivecleanup.sgml index db39deaca16fe..60a7fc4e6b986 100644 --- a/doc/src/sgml/ref/pgarchivecleanup.sgml +++ b/doc/src/sgml/ref/pgarchivecleanup.sgml @@ -130,11 +130,6 @@ pg_archivecleanup: removing file "archive/00000001000000370000000E" .gz. - - Note that the - .backup file name passed to the program should not - include the extension. - From 2867f26fecafc6d9930eb751abdd7b80359a6f51 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Mon, 6 Jul 2015 19:17:57 -0700 Subject: [PATCH 019/442] Make RLS related error messages more consistent and compliant. Also updated regression expected output to match. Noted and patch by Daniele Varrazzo. --- src/backend/commands/policy.c | 6 +++--- src/backend/commands/user.c | 2 +- src/test/regress/expected/rowsecurity.out | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 6e95ba28b9db1..11efc9f30f144 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -563,7 +563,7 @@ CreatePolicy(CreatePolicyStmt *stmt) if (HeapTupleIsValid(policy_tuple)) ereport(ERROR, (errcode(ERRCODE_DUPLICATE_OBJECT), - errmsg("policy \"%s\" for relation \"%s\" already exists", + errmsg("policy \"%s\" for table \"%s\" already exists", stmt->policy_name, RelationGetRelationName(target_table)))); values[Anum_pg_policy_polrelid - 1] = ObjectIdGetDatum(table_id); @@ -735,7 +735,7 @@ AlterPolicy(AlterPolicyStmt *stmt) if (!HeapTupleIsValid(policy_tuple)) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("policy \"%s\" on table \"%s\" does not exist", + errmsg("policy \"%s\" for table \"%s\" does not exist", stmt->policy_name, RelationGetRelationName(target_table)))); @@ -977,7 +977,7 @@ get_relation_policy_oid(Oid relid, const char *policy_name, bool missing_ok) if (!missing_ok) ereport(ERROR, (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("policy \"%s\" for table \"%s\" does not exist", + errmsg("policy \"%s\" for table \"%s\" does not exist", policy_name, get_rel_name(relid)))); policy_oid = InvalidOid; diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 3b381c58353fc..5b20994028db9 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -301,7 +301,7 @@ CreateRole(CreateRoleStmt *stmt) if (!superuser()) ereport(ERROR, (errcode(ERRCODE_INSUFFICIENT_PRIVILEGE), - errmsg("must be superuser to change bypassrls attribute."))); + 
errmsg("must be superuser to change bypassrls attribute"))); } else { diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index 7a293f30b537c..4073c1beea511 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -1988,7 +1988,7 @@ GRANT ALL ON y1, y2 TO rls_regress_user1; CREATE POLICY p1 ON y1 FOR ALL USING (a % 2 = 0); CREATE POLICY p2 ON y1 FOR SELECT USING (a > 2); CREATE POLICY p1 ON y1 FOR SELECT USING (a % 2 = 1); --fail -ERROR: policy "p1" for relation "y1" already exists +ERROR: policy "p1" for table "y1" already exists CREATE POLICY p1 ON y2 FOR ALL USING (a % 2 = 0); --OK ALTER TABLE y1 ENABLE ROW LEVEL SECURITY; ALTER TABLE y2 ENABLE ROW LEVEL SECURITY; From 8022b0a35f7c4e71908a878c8c412b5c2ae8536c Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Tue, 7 Jul 2015 12:47:44 +0200 Subject: [PATCH 020/442] Fix pg_recvlogical not to fsync output when it's a tty or pipe. The previous coding tried to handle possible failures when fsyncing a tty or pipe fd by accepting EINVAL - but apparently some platforms (windows, OSX) don't reliably return that. So instead check whether the output fd refers to a pipe or a tty when opening it. Reported-By: Olivier Gosseaume, Marko Tiikkaja Discussion: 559AF98B.3050901@joh.to Backpatch to 9.4, where pg_recvlogical was added. --- src/bin/pg_basebackup/pg_recvlogical.c | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c index 0bc141dd79bc0..50844e700d960 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.c +++ b/src/bin/pg_basebackup/pg_recvlogical.c @@ -50,6 +50,7 @@ static const char *plugin = "test_decoding"; static int outfd = -1; static volatile sig_atomic_t time_to_abort = false; static volatile sig_atomic_t output_reopen = false; +static bool output_isfile; static int64 output_last_fsync = -1; static bool output_needs_fsync = false; static XLogRecPtr output_written_lsn = InvalidXLogRecPtr; @@ -177,8 +178,11 @@ OutputFsync(int64 now) output_needs_fsync = false; - /* Accept EINVAL, in case output is writing to a pipe or similar. */ - if (fsync(outfd) != 0 && errno != EINVAL) + /* can only fsync if it's a regular file */ + if (!output_isfile) + return true; + + if (fsync(outfd) != 0) { fprintf(stderr, _("%s: could not fsync log file \"%s\": %s\n"), @@ -317,6 +321,8 @@ StreamLogicalLog(void) /* open the output file, if not open yet */ if (outfd == -1) { + struct stat statbuf; + if (strcmp(outfile, "-") == 0) outfd = fileno(stdout); else @@ -329,6 +335,13 @@ StreamLogicalLog(void) progname, outfile, strerror(errno)); goto error; } + + if (fstat(outfd, &statbuf) != 0) + fprintf(stderr, + _("%s: could not stat file \"%s\": %s\n"), + progname, outfile, strerror(errno)); + + output_isfile = S_ISREG(statbuf.st_mode) && !isatty(outfd); } r = PQgetCopyData(conn, ©buf, 1); From cf051c4f9d4e990e5fce0a00bacb47b4e64261d6 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Tue, 7 Jul 2015 13:13:15 +0200 Subject: [PATCH 021/442] Fix logical decoding bug leading to inefficient reopening of files. When spilling transaction data to disk a simple typo caused the output file to be closed and reopened for every serialized change. That happens to not have a huge impact on linux, which is why it probably wasn't noticed so far, but on windows that appears to trigger actual disk writes after every change. Not fun. 
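The intent of the test touched below is to keep the current spill file open and switch files only when a change's LSN falls outside the segment that file covers; the missing negation inverted that, forcing a close and reopen for nearly every change. A simplified, self-contained sketch of the intended pattern (the segment math and helper names are stand-ins, not the reorderbuffer code itself):

    #include <stdio.h>
    #include <stdint.h>

    #define SEG_SIZE  (16 * 1024 * 1024)                        /* pretend 16MB segments */
    #define IN_SEG(lsn, seg)  (((lsn) / SEG_SIZE) == (seg))     /* stand-in for XLByteInSeg */

    int main(void)
    {
        uint64_t lsns[] = {100, 200, SEG_SIZE + 50, SEG_SIZE + 60};
        int      fd = -1;                       /* -1 means "no spill file open yet" */
        uint64_t cur_seg = 0;

        for (int i = 0; i < 4; i++)
        {
            /* reopen only when nothing is open, or the change belongs to another segment */
            if (fd == -1 || !IN_SEG(lsns[i], cur_seg))
            {
                cur_seg = lsns[i] / SEG_SIZE;
                fd = 42;                        /* pretend we opened the right file */
                printf("open segment %llu\n", (unsigned long long) cur_seg);
            }
            printf("append change at lsn %llu\n", (unsigned long long) lsns[i]);
        }
        return 0;
    }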
The bug fortunately does not have any impact besides speed. A change could end up being in the wrong segment (last instead of next), but since we read all files to the end, that's just ugly, not really problematic. It's not a problem to upgrade, since transaction spill files do not persist across restarts. Bug: #13484 Reported-By: Olivier Gosseaume Discussion: 20150703090217.1190.63940@wrigleys.postgresql.org Backpatch to 9.4, where logical decoding was added. --- src/backend/replication/logical/reorderbuffer.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/replication/logical/reorderbuffer.c b/src/backend/replication/logical/reorderbuffer.c index fa98580302afb..478c3e874af3f 100644 --- a/src/backend/replication/logical/reorderbuffer.c +++ b/src/backend/replication/logical/reorderbuffer.c @@ -2009,7 +2009,7 @@ ReorderBufferSerializeTXN(ReorderBuffer *rb, ReorderBufferTXN *txn) * store in segment in which it belongs by start lsn, don't split over * multiple segments tho */ - if (fd == -1 || XLByteInSeg(change->lsn, curOpenSegNo)) + if (fd == -1 || !XLByteInSeg(change->lsn, curOpenSegNo)) { XLogRecPtr recptr; From e5460aa02fd0e3e51b57aac1d15b3d2b494aac57 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 7 Jul 2015 16:31:52 +0300 Subject: [PATCH 022/442] Turn install.bat into a pure one line wrapper fort he perl script. Build.bat and vcregress.bat got similar treatment years ago. I'm not sure why install.bat wasn't treated at the same time, but it seems like a good idea anyway. The immediate problem with the old install.bat was that it had quoting issues, and wouldn't work if the target directory's name contained spaces. This fixes that problem. I committed this to master yesterday, this is a backpatch of the same for all supported versions. --- src/tools/msvc/install.bat | 29 ++++------------------------- src/tools/msvc/install.pl | 13 +++++++++++++ 2 files changed, 17 insertions(+), 25 deletions(-) diff --git a/src/tools/msvc/install.bat b/src/tools/msvc/install.bat index bed08f1e125dd..d03277eff2b74 100644 --- a/src/tools/msvc/install.bat +++ b/src/tools/msvc/install.bat @@ -1,27 +1,6 @@ @echo off REM src/tools/msvc/install.bat - -if NOT "%1"=="" GOTO RUN_INSTALL - -echo Invalid command line options. -echo Usage: "install.bat " -echo. -REM exit fix for pre-2003 shell especially if used on buildfarm -if "%XP_EXIT_FIX%" == "yes" exit 1 -exit /b 1 - -:RUN_INSTALL - -SETLOCAL - -IF NOT EXIST buildenv.pl goto nobuildenv -perl -e "require 'buildenv.pl'; while(($k,$v) = each %%ENV) { print qq[\@SET $k=$v\n]; }" > bldenv.bat -CALL bldenv.bat -del bldenv.bat -:nobuildenv - -perl install.pl "%1" %2 - -REM exit fix for pre-2003 shell especially if used on buildfarm -if "%XP_EXIT_FIX%" == "yes" exit %ERRORLEVEL% -exit /b %ERRORLEVEL% +REM all the logic for this now belongs in install.pl. This file really +REM only exists so you don't have to type "perl install.pl" +REM Resist any temptation to add any logic here. 
+@perl install.pl %* diff --git a/src/tools/msvc/install.pl b/src/tools/msvc/install.pl index 97e297e1765a4..bde5b7c793a28 100755 --- a/src/tools/msvc/install.pl +++ b/src/tools/msvc/install.pl @@ -8,6 +8,19 @@ use Install qw(Install); +# buildenv.pl is for specifying the build environment settings +# it should contain lines like: +# $ENV{PATH} = "c:/path/to/bison/bin;$ENV{PATH}"; + +if (-e "src/tools/msvc/buildenv.pl") +{ + require "src/tools/msvc/buildenv.pl"; +} +elsif (-e "./buildenv.pl") +{ + require "./buildenv.pl"; +} + my $target = shift || Usage(); my $insttype = shift; Install($target, $insttype); From 162ae5b9bb6542809453fbe9cfbb6468cb8eb06e Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Tue, 7 Jul 2015 23:24:02 +0900 Subject: [PATCH 023/442] Add tab-completion for psql meta-commands. Based on the original code from David Christensen, modified by me. --- src/bin/psql/tab-complete.c | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index 5b32fdeb6ee15..4fd1dba6515fc 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -897,14 +897,16 @@ psql_completion(const char *text, int start, int end) static const char *const backslash_commands[] = { "\\a", "\\connect", "\\conninfo", "\\C", "\\cd", "\\copy", "\\copyright", - "\\d", "\\da", "\\db", "\\dc", "\\dC", "\\dd", "\\dD", "\\des", "\\det", "\\deu", "\\dew", "\\df", + "\\d", "\\da", "\\db", "\\dc", "\\dC", "\\dd", "\\ddp", "\\dD", + "\\des", "\\det", "\\deu", "\\dew", "\\dE", "\\df", "\\dF", "\\dFd", "\\dFp", "\\dFt", "\\dg", "\\di", "\\dl", "\\dL", - "\\dn", "\\do", "\\dp", "\\drds", "\\ds", "\\dS", "\\dt", "\\dT", "\\dv", "\\du", "\\dx", + "\\dm", "\\dn", "\\do", "\\dO", "\\dp", "\\drds", "\\ds", "\\dS", + "\\dt", "\\dT", "\\dv", "\\du", "\\dx", "\\dy", "\\e", "\\echo", "\\ef", "\\encoding", "\\f", "\\g", "\\gset", "\\h", "\\help", "\\H", "\\i", "\\ir", "\\l", "\\lo_import", "\\lo_export", "\\lo_list", "\\lo_unlink", "\\o", "\\p", "\\password", "\\prompt", "\\pset", "\\q", "\\qecho", "\\r", - "\\set", "\\sf", "\\t", "\\T", + "\\s", "\\set", "\\setenv", "\\sf", "\\t", "\\T", "\\timing", "\\unset", "\\x", "\\w", "\\watch", "\\z", "\\!", NULL }; @@ -3791,6 +3793,10 @@ psql_completion(const char *text, int start, int end) COMPLETE_WITH_QUERY(Query_for_list_of_extensions); else if (strncmp(prev_wd, "\\dm", strlen("\\dm")) == 0) COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_matviews, NULL); + else if (strncmp(prev_wd, "\\dE", strlen("\\dE")) == 0) + COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_foreign_tables, NULL); + else if (strncmp(prev_wd, "\\dy", strlen("\\dy")) == 0) + COMPLETE_WITH_QUERY(Query_for_list_of_event_triggers); /* must be at end of \d list */ else if (strncmp(prev_wd, "\\d", strlen("\\d")) == 0) From 28c38396eda2d923974b99013b27e89a9093c766 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 7 Jul 2015 18:37:45 +0300 Subject: [PATCH 024/442] Improve handling of out-of-memory in libpq. If an allocation fails in the main message handling loop, pqParseInput3 or pqParseInput2, it should not be treated as "not enough data available yet". Otherwise libpq will wait indefinitely for more data to arrive from the server, and gets stuck forever. This isn't a complete fix - getParamDescriptions and getCopyStart still have the same issue, but it's a step in the right direction. Michael Paquier and me. Backpatch to all supported versions. 
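The key idea in the hunks below is that, inside a protocol-parsing loop, a failed allocation has to be reported as a connection-level error rather than treated like an incomplete message; otherwise the caller keeps waiting for data that will never arrive. A stripped-down sketch of that distinction, with generic names that are not the real libpq internals:

    #include <stdio.h>
    #include <stdlib.h>
    #include <string.h>

    typedef enum { PARSE_OK, PARSE_NEED_MORE, PARSE_FATAL } ParseStatus;

    typedef struct
    {
        char   *result;         /* last parsed command status, if any */
        char    errmsg[64];     /* connection-level error message */
    } FakeConn;

    /* consume one message; an allocation failure is a hard error, not "try again later" */
    static ParseStatus
    parse_one(FakeConn *conn, const char *buf, size_t len)
    {
        if (len == 0)
            return PARSE_NEED_MORE;     /* genuinely incomplete input */

        conn->result = malloc(len + 1);
        if (conn->result == NULL)
        {
            /* do NOT return PARSE_NEED_MORE here: record the error and fail */
            snprintf(conn->errmsg, sizeof(conn->errmsg), "out of memory");
            return PARSE_FATAL;
        }
        memcpy(conn->result, buf, len);
        conn->result[len] = '\0';
        return PARSE_OK;
    }

    int main(void)
    {
        FakeConn conn = {0};

        if (parse_one(&conn, "SELECT 1", 8) == PARSE_OK)
            printf("parsed: %s\n", conn.result);
        free(conn.result);
        return 0;
    }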
--- src/interfaces/libpq/fe-protocol2.c | 51 +++++++++++++++++------ src/interfaces/libpq/fe-protocol3.c | 63 ++++++++++++++++++++--------- 2 files changed, 83 insertions(+), 31 deletions(-) diff --git a/src/interfaces/libpq/fe-protocol2.c b/src/interfaces/libpq/fe-protocol2.c index eeba7f3504708..9ff73dbd58b22 100644 --- a/src/interfaces/libpq/fe-protocol2.c +++ b/src/interfaces/libpq/fe-protocol2.c @@ -498,10 +498,17 @@ pqParseInput2(PGconn *conn) conn->result = PQmakeEmptyPGresult(conn, PGRES_COMMAND_OK); if (!conn->result) - return; + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory")); + pqSaveErrorResult(conn); + } + } + if (conn->result) + { + strlcpy(conn->result->cmdStatus, conn->workBuffer.data, + CMDSTATUS_LEN); } - strlcpy(conn->result->cmdStatus, conn->workBuffer.data, - CMDSTATUS_LEN); checkXactStatus(conn, conn->workBuffer.data); conn->asyncStatus = PGASYNC_READY; break; @@ -522,8 +529,16 @@ pqParseInput2(PGconn *conn) "unexpected character %c following empty query response (\"I\" message)", id); if (conn->result == NULL) + { conn->result = PQmakeEmptyPGresult(conn, PGRES_EMPTY_QUERY); + if (!conn->result) + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory")); + pqSaveErrorResult(conn); + } + } conn->asyncStatus = PGASYNC_READY; break; case 'K': /* secret key data from the backend */ @@ -965,14 +980,17 @@ pqGetErrorNotice2(PGconn *conn, bool isError) * Make a PGresult to hold the message. We temporarily lie about the * result status, so that PQmakeEmptyPGresult doesn't uselessly copy * conn->errorMessage. + * + * NB: This allocation can fail, if you run out of memory. The rest of the + * function handles that gracefully, and we still try to set the error + * message as the connection's error message. */ res = PQmakeEmptyPGresult(conn, PGRES_EMPTY_QUERY); - if (!res) - goto failure; - res->resultStatus = isError ? PGRES_FATAL_ERROR : PGRES_NONFATAL_ERROR; - res->errMsg = pqResultStrdup(res, workBuf.data); - if (!res->errMsg) - goto failure; + if (res) + { + res->resultStatus = isError ? PGRES_FATAL_ERROR : PGRES_NONFATAL_ERROR; + res->errMsg = pqResultStrdup(res, workBuf.data); + } /* * Break the message into fields. 
We can't do very much here, but we can @@ -1024,15 +1042,22 @@ pqGetErrorNotice2(PGconn *conn, bool isError) pqClearAsyncResult(conn); conn->result = res; resetPQExpBuffer(&conn->errorMessage); - appendPQExpBufferStr(&conn->errorMessage, res->errMsg); + if (res && !PQExpBufferDataBroken(workBuf) && res->errMsg) + appendPQExpBufferStr(&conn->errorMessage, res->errMsg); + else + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory")); if (conn->xactStatus == PQTRANS_INTRANS) conn->xactStatus = PQTRANS_INERROR; } else { - if (res->noticeHooks.noticeRec != NULL) - (*res->noticeHooks.noticeRec) (res->noticeHooks.noticeRecArg, res); - PQclear(res); + if (res) + { + if (res->noticeHooks.noticeRec != NULL) + (*res->noticeHooks.noticeRec) (res->noticeHooks.noticeRecArg, res); + PQclear(res); + } } termPQExpBuffer(&workBuf); diff --git a/src/interfaces/libpq/fe-protocol3.c b/src/interfaces/libpq/fe-protocol3.c index a847f084fa0e8..dbc0d89a4ed04 100644 --- a/src/interfaces/libpq/fe-protocol3.c +++ b/src/interfaces/libpq/fe-protocol3.c @@ -204,10 +204,15 @@ pqParseInput3(PGconn *conn) conn->result = PQmakeEmptyPGresult(conn, PGRES_COMMAND_OK); if (!conn->result) - return; + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory")); + pqSaveErrorResult(conn); + } } - strlcpy(conn->result->cmdStatus, conn->workBuffer.data, - CMDSTATUS_LEN); + if (conn->result) + strlcpy(conn->result->cmdStatus, conn->workBuffer.data, + CMDSTATUS_LEN); conn->asyncStatus = PGASYNC_READY; break; case 'E': /* error return */ @@ -226,7 +231,11 @@ pqParseInput3(PGconn *conn) conn->result = PQmakeEmptyPGresult(conn, PGRES_EMPTY_QUERY); if (!conn->result) - return; + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory")); + pqSaveErrorResult(conn); + } } conn->asyncStatus = PGASYNC_READY; break; @@ -239,7 +248,11 @@ pqParseInput3(PGconn *conn) conn->result = PQmakeEmptyPGresult(conn, PGRES_COMMAND_OK); if (!conn->result) - return; + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory")); + pqSaveErrorResult(conn); + } } conn->asyncStatus = PGASYNC_READY; } @@ -306,7 +319,11 @@ pqParseInput3(PGconn *conn) conn->result = PQmakeEmptyPGresult(conn, PGRES_COMMAND_OK); if (!conn->result) - return; + { + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory")); + pqSaveErrorResult(conn); + } } conn->asyncStatus = PGASYNC_READY; } @@ -822,11 +839,14 @@ pqGetErrorNotice3(PGconn *conn, bool isError) * Make a PGresult to hold the accumulated fields. We temporarily lie * about the result status, so that PQmakeEmptyPGresult doesn't uselessly * copy conn->errorMessage. + * + * NB: This allocation can fail, if you run out of memory. The rest of the + * function handles that gracefully, and we still try to set the error + * message as the connection's error message. */ res = PQmakeEmptyPGresult(conn, PGRES_EMPTY_QUERY); - if (!res) - goto fail; - res->resultStatus = isError ? PGRES_FATAL_ERROR : PGRES_NONFATAL_ERROR; + if (res) + res->resultStatus = isError ? PGRES_FATAL_ERROR : PGRES_NONFATAL_ERROR; /* * Read the fields and save into res. 
@@ -966,20 +986,27 @@ pqGetErrorNotice3(PGconn *conn, bool isError) */ if (isError) { - res->errMsg = pqResultStrdup(res, workBuf.data); - if (!res->errMsg) - goto fail; + if (res) + res->errMsg = pqResultStrdup(res, workBuf.data); pqClearAsyncResult(conn); conn->result = res; - appendPQExpBufferStr(&conn->errorMessage, workBuf.data); + if (PQExpBufferDataBroken(workBuf)) + printfPQExpBuffer(&conn->errorMessage, + libpq_gettext("out of memory")); + else + appendPQExpBufferStr(&conn->errorMessage, workBuf.data); } else { - /* We can cheat a little here and not copy the message. */ - res->errMsg = workBuf.data; - if (res->noticeHooks.noticeRec != NULL) - (*res->noticeHooks.noticeRec) (res->noticeHooks.noticeRecArg, res); - PQclear(res); + /* if we couldn't allocate the result set, just discard the NOTICE */ + if (res) + { + /* We can cheat a little here and not copy the message. */ + res->errMsg = workBuf.data; + if (res->noticeHooks.noticeRec != NULL) + (*res->noticeHooks.noticeRec) (res->noticeHooks.noticeRecArg, res); + PQclear(res); + } } termPQExpBuffer(&workBuf); From bb67e357b2607eea3e7c929520945a61b8cff546 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 7 Jul 2015 12:49:18 -0400 Subject: [PATCH 025/442] Fix portability issue in pg_upgrade test script: avoid $PWD. SUSv2-era shells don't set the PWD variable, though anything more modern does. In the buildfarm environment this could lead to test.sh executing with PWD pointing to $HOME or another high-level directory, so that there were conflicts between concurrent executions of the test in different branch subdirectories. This appears to be the explanation for recent intermittent failures on buildfarm members binturong and dingo (and might well have something to do with the buildfarm script's failure to capture log files from pg_upgrade tests, too). To fix, just use `pwd` in place of $PWD. AFAICS test.sh is the only place in our source tree that depended on $PWD. Back-patch to all versions containing this script. Per buildfarm. Thanks to Oskari Saarenmaa for diagnosing the problem. --- src/bin/pg_upgrade/test.sh | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/bin/pg_upgrade/test.sh b/src/bin/pg_upgrade/test.sh index fef64dfb5f87a..07002f6a1659a 100644 --- a/src/bin/pg_upgrade/test.sh +++ b/src/bin/pg_upgrade/test.sh @@ -62,7 +62,8 @@ esac POSTMASTER_OPTS="-F -c listen_addresses=$LISTEN_ADDRESSES -k \"$PGHOST\"" export PGHOST -temp_root=$PWD/tmp_check +# don't rely on $PWD here, as old shells don't set it +temp_root=`pwd`/tmp_check if [ "$1" = '--install' ]; then temp_install=$temp_root/install @@ -104,7 +105,7 @@ PGDATA="$BASE_PGDATA.old" export PGDATA rm -rf "$BASE_PGDATA" "$PGDATA" -logdir=$PWD/log +logdir=`pwd`/log rm -rf "$logdir" mkdir "$logdir" From de184e57e894b13f51c4f30788d20f385b547048 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Wed, 8 Jul 2015 01:54:17 +0900 Subject: [PATCH 026/442] Fix incorrect path in pg_regress log messages. Back-patch to 9.5 where the bug was introduced. 
David Christensen --- src/test/regress/pg_regress.c | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index a267894751e45..ed8c369e5cb57 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -2223,7 +2223,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc temp_instance); if (system(buf)) { - fprintf(stderr, _("\n%s: initdb failed\nExamine %s/log/initdb.log for the reason.\nCommand was: %s\n"), progname, outputdir, buf); + fprintf(stderr, _("\n%s: initdb failed\nExamine %s/log/initdb.log for the reason.\nCommand was: %s\n"), progname, temp_instance, buf); exit(2); } @@ -2353,7 +2353,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc if (WaitForSingleObject(postmaster_pid, 0) == WAIT_OBJECT_0) #endif { - fprintf(stderr, _("\n%s: postmaster failed\nExamine %s/log/postmaster.log for the reason\n"), progname, outputdir); + fprintf(stderr, _("\n%s: postmaster failed\nExamine %s/log/postmaster.log for the reason\n"), progname, temp_instance); exit(2); } @@ -2361,7 +2361,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc } if (i >= 60) { - fprintf(stderr, _("\n%s: postmaster did not respond within 60 seconds\nExamine %s/log/postmaster.log for the reason\n"), progname, outputdir); + fprintf(stderr, _("\n%s: postmaster did not respond within 60 seconds\nExamine %s/log/postmaster.log for the reason\n"), progname, temp_instance); /* * If we get here, the postmaster is probably wedged somewhere in From d5f551abcf78cb4e3f6c5d195bd260893443414b Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Tue, 7 Jul 2015 14:36:03 -0700 Subject: [PATCH 027/442] Improve regression test coverage of table lock modes vs permissions. Test the interactions with permissions and LOCK TABLE. Specifically ROW EXCLUSIVE, ACCESS SHARE, and ACCESS EXCLUSIVE modes against SELECT, INSERT, UPDATE, DELETE, and TRUNCATE permissions. Discussed by Stephen Frost and Michael Paquier, patch by the latter. Backpatch to 9.5 where matching behavior was first committed. 
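As a quick reference, the combinations exercised below reduce to a small matrix of which privilege suffices for which LOCK TABLE mode. The sketch keeps the matrix as data so it is easy to check against the expected output; the summary is read off these tests, not taken from other documentation:

    #include <stdio.h>

    /* which single privilege is enough for which LOCK TABLE mode, per the new tests */
    int main(void)
    {
        static const struct
        {
            const char *mode;
            const char *sufficient_privileges;
        } matrix[] = {
            {"ACCESS SHARE",     "SELECT"},
            {"ROW EXCLUSIVE",    "INSERT, UPDATE, DELETE, or TRUNCATE"},
            {"ACCESS EXCLUSIVE", "UPDATE, DELETE, or TRUNCATE"},
        };

        for (int i = 0; i < 3; i++)
            printf("%-17s requires %s\n",
                   matrix[i].mode, matrix[i].sufficient_privileges);
        return 0;
    }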
--- src/test/regress/expected/privileges.out | 83 +++++++++++++++++++++++ src/test/regress/sql/privileges.sql | 84 ++++++++++++++++++++++++ 2 files changed, 167 insertions(+) diff --git a/src/test/regress/expected/privileges.out b/src/test/regress/expected/privileges.out index 64a93309ebcea..c0cd9fac46896 100644 --- a/src/test/regress/expected/privileges.out +++ b/src/test/regress/expected/privileges.out @@ -1569,3 +1569,86 @@ DROP USER regressuser4; DROP USER regressuser5; DROP USER regressuser6; ERROR: role "regressuser6" does not exist +-- permissions with LOCK TABLE +CREATE USER locktable_user; +CREATE TABLE lock_table (a int); +-- LOCK TABLE and SELECT permission +GRANT SELECT ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should fail +ERROR: permission denied for relation lock_table +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail +ERROR: permission denied for relation lock_table +ROLLBACK; +\c +REVOKE SELECT ON lock_table FROM locktable_user; +-- LOCK TABLE and INSERT permission +GRANT INSERT ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail +ERROR: permission denied for relation lock_table +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail +ERROR: permission denied for relation lock_table +ROLLBACK; +\c +REVOKE INSERT ON lock_table FROM locktable_user; +-- LOCK TABLE and UPDATE permission +GRANT UPDATE ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail +ERROR: permission denied for relation lock_table +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass +COMMIT; +\c +REVOKE UPDATE ON lock_table FROM locktable_user; +-- LOCK TABLE and DELETE permission +GRANT DELETE ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail +ERROR: permission denied for relation lock_table +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass +COMMIT; +\c +REVOKE DELETE ON lock_table FROM locktable_user; +-- LOCK TABLE and TRUNCATE permission +GRANT TRUNCATE ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail +ERROR: permission denied for relation lock_table +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass +COMMIT; +\c +REVOKE TRUNCATE ON lock_table FROM locktable_user; +-- clean up +DROP TABLE lock_table; +DROP USER locktable_user; diff --git a/src/test/regress/sql/privileges.sql b/src/test/regress/sql/privileges.sql index 22b54a28c46cf..c1837c497af59 100644 --- a/src/test/regress/sql/privileges.sql +++ b/src/test/regress/sql/privileges.sql @@ -975,3 +975,87 @@ DROP USER regressuser3; DROP USER regressuser4; DROP USER regressuser5; DROP USER regressuser6; + + +-- permissions with LOCK TABLE +CREATE USER locktable_user; 
+CREATE TABLE lock_table (a int); + +-- LOCK TABLE and SELECT permission +GRANT SELECT ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should fail +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail +ROLLBACK; +\c +REVOKE SELECT ON lock_table FROM locktable_user; + +-- LOCK TABLE and INSERT permission +GRANT INSERT ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should fail +ROLLBACK; +\c +REVOKE INSERT ON lock_table FROM locktable_user; + +-- LOCK TABLE and UPDATE permission +GRANT UPDATE ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass +COMMIT; +\c +REVOKE UPDATE ON lock_table FROM locktable_user; + +-- LOCK TABLE and DELETE permission +GRANT DELETE ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass +COMMIT; +\c +REVOKE DELETE ON lock_table FROM locktable_user; + +-- LOCK TABLE and TRUNCATE permission +GRANT TRUNCATE ON lock_table TO locktable_user; +SET SESSION AUTHORIZATION locktable_user; +BEGIN; +LOCK TABLE lock_table IN ROW EXCLUSIVE MODE; -- should pass +COMMIT; +BEGIN; +LOCK TABLE lock_table IN ACCESS SHARE MODE; -- should fail +ROLLBACK; +BEGIN; +LOCK TABLE lock_table IN ACCESS EXCLUSIVE MODE; -- should pass +COMMIT; +\c +REVOKE TRUNCATE ON lock_table FROM locktable_user; + +-- clean up +DROP TABLE lock_table; +DROP USER locktable_user; From ce0da6261004ac15f01c21d8b94f11af7a098243 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 8 Jul 2015 20:36:06 +0300 Subject: [PATCH 028/442] Replace our hacked version of ax_pthread.m4 with latest upstream version. Our version was different from the upstream version in that we tried to use all possible pthread-related flags that the compiler accepts, rather than just the first one that works. That change was made in commit e48322a6d6cfce1ec52ab303441df329ddbc04d1, to work-around a bug affecting GCC versions 3.2 and below (https://gcc.gnu.org/bugzilla/show_bug.cgi?id=8888), although we didn't realize that it was a GCC bug at the time. We hardly care about that old GCC versions anymore, so we no longer need that workaround. This fixes the macro for compilers that print warnings with the chosen flags. That's pretty annoying on its own right, but it also inconspicuously disabled thread-safety, because we refused to use any pthread-related flags if the compiler produced warnings. Max Filippov reported that problem when linking with uClibc and OpenSSL. The warnings-check was added because the workaround for the GCC bug caused warnings otherwise, so it's no longer needed either. We can just use the upstream version as is. 
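For reference, the link test that decides whether a candidate flag "works" (visible further down in the new ax_pthread.m4) is roughly equivalent to compiling and linking a standalone program like the following with that flag, e.g. cc -pthread; this is an approximation of the probe for illustration, not the macro itself:

    #include <pthread.h>
    #include <stdio.h>

    static void cleanup(void *arg) { (void) arg; }
    static void *start_routine(void *arg) { return arg; }

    int main(void)
    {
        pthread_t       th;
        pthread_attr_t  attr;

        /* exercise the handful of functions the macro links against */
        pthread_attr_init(&attr);
        pthread_create(&th, NULL, start_routine, NULL);
        pthread_join(th, NULL);

        pthread_cleanup_push(cleanup, NULL);
        pthread_cleanup_pop(0);

        printf("pthread flags accepted\n");
        return 0;
    }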
If you really want to compile with GCC version 3.2 or older, you can still work-around it manually by setting PTHREAD_CFLAGS="-pthread -lpthread" manually on the configure command line. Backpatch to 9.5. I don't want to unnecessarily rock the boat on stable branches, but 9.5 seems like fair game. --- aclocal.m4 | 2 +- config/acx_pthread.m4 | 171 ---------------------- config/ax_pthread.m4 | 332 ++++++++++++++++++++++++++++++++++++++++++ configure | 310 +++++++++++++++++++++++++++++++-------- configure.in | 2 +- 5 files changed, 583 insertions(+), 234 deletions(-) delete mode 100644 config/acx_pthread.m4 create mode 100644 config/ax_pthread.m4 diff --git a/aclocal.m4 b/aclocal.m4 index eaf98007e5b19..6f930b6fc1be2 100644 --- a/aclocal.m4 +++ b/aclocal.m4 @@ -1,6 +1,6 @@ dnl aclocal.m4 m4_include([config/ac_func_accept_argtypes.m4]) -m4_include([config/acx_pthread.m4]) +m4_include([config/ax_pthread.m4]) m4_include([config/c-compiler.m4]) m4_include([config/c-library.m4]) m4_include([config/docbook.m4]) diff --git a/config/acx_pthread.m4 b/config/acx_pthread.m4 deleted file mode 100644 index 581164b1e559e..0000000000000 --- a/config/acx_pthread.m4 +++ /dev/null @@ -1,171 +0,0 @@ -dnl This is based on an old macro from the GNU Autoconf Macro Archive at: -dnl http://www.gnu.org/software/ac-archive/htmldoc/acx_pthread.html -dnl but it's been rather heavily hacked --- beware of blindly dropping in -dnl upstream changes! -dnl -AC_DEFUN([ACX_PTHREAD], [ -AC_REQUIRE([AC_CANONICAL_HOST]) -AC_LANG_SAVE -AC_LANG_C -acx_pthread_ok=no - -# We used to check for pthread.h first, but this fails if pthread.h -# requires special compiler flags (e.g. on True64 or Sequent). -# It gets checked for in the link test anyway. - -# First of all, check if the user has set any of the PTHREAD_LIBS, -# etcetera environment variables, and if threads linking works using -# them: -if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) - AC_TRY_LINK_FUNC(pthread_join, acx_pthread_ok=yes) - AC_MSG_RESULT($acx_pthread_ok) - if test x"$acx_pthread_ok" = xno; then - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" - fi - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" -fi - -# We must check for the threads library under a number of different -# names; the ordering is very important because some systems -# (e.g. DEC) have both -lpthread and -lpthreads, where one of the -# libraries is broken (non-POSIX). - -# Create a list of thread flags to try. Items starting with a "-" are -# C compiler flags, and other items are library names, except for "none" -# which indicates that we try without any flags at all, and "pthread-config" -# which is a program returning the flags for the Pth emulation library. - -acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config pthreadGC2" - -# The ordering *is* (sometimes) important. 
Some notes on the -# individual items follow: - -# pthreads: AIX (must check this before -lpthread) -# none: in case threads are in libc; should be tried before -Kthread and -# other compiler flags to prevent continual compiler warnings -# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) -# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) -# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) -# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) -# -pthreads: Solaris/gcc -# -mthreads: Mingw32/gcc, Lynx/gcc -# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it -# doesn't hurt to check since this sometimes defines pthreads too; -# also defines -D_REENTRANT) -# pthread: Linux, etcetera -# --thread-safe: KAI C++ -# pthread-config: use pthread-config program (for GNU Pth library) - -case "${host_cpu}-${host_os}" in - *solaris*) - - # On Solaris (at least, for some versions), libc contains stubbed - # (non-functional) versions of the pthreads routines, so link-based - # tests will erroneously succeed. (We need to link with -pthread or - # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather - # a function called by this macro, so we could check for that, but - # who knows whether they'll stub that too in a future libc.) So, - # we'll just look for -pthreads and -lpthread first: - - acx_pthread_flags="-pthread -pthreads pthread -mt $acx_pthread_flags" - ;; -esac - -if test x"$acx_pthread_ok" = xno; then -for flag in $acx_pthread_flags; do - - tryPTHREAD_CFLAGS="" - tryPTHREAD_LIBS="" - case $flag in - none) - AC_MSG_CHECKING([whether pthreads work without any flags]) - ;; - - -*) - AC_MSG_CHECKING([whether pthreads work with $flag]) - tryPTHREAD_CFLAGS="$flag" - ;; - - pthread-config) - # skip this if we already have flags defined, for PostgreSQL - if test x"$PTHREAD_CFLAGS" != x -o x"$PTHREAD_LIBS" != x; then continue; fi - AC_CHECK_PROG(acx_pthread_config, pthread-config, yes, no) - if test x"$acx_pthread_config" = xno; then continue; fi - tryPTHREAD_CFLAGS="`pthread-config --cflags`" - tryPTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" - ;; - - *) - AC_MSG_CHECKING([for the pthreads library -l$flag]) - tryPTHREAD_LIBS="-l$flag" - ;; - esac - - save_LIBS="$LIBS" - save_CFLAGS="$CFLAGS" - LIBS="$tryPTHREAD_LIBS $PTHREAD_LIBS $LIBS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS $tryPTHREAD_CFLAGS" - - # Check for various functions. We must include pthread.h, - # since some functions may be macros. (On the Sequent, we - # need a special flag -Kthread to make this header compile.) - # We check for pthread_join because it is in -lpthread on IRIX - # while pthread_create is in libc. We check for pthread_attr_init - # due to DEC craziness with -lpthreads. We check for - # pthread_cleanup_push because it is one of the few pthread - # functions on Solaris that doesn't have a non-functional libc stub. - # We try pthread_create on general principles. - AC_TRY_LINK([#include ], - [pthread_t th; pthread_join(th, 0); - pthread_attr_init(0); pthread_cleanup_push(0, 0); - pthread_create(0,0,0,0); pthread_cleanup_pop(0); ], - [acx_pthread_ok=yes], [acx_pthread_ok=no]) - - if test "x$acx_pthread_ok" = xyes; then - # Don't use options that are ignored by the compiler. - # We find them by checking stderror. 
- cat >conftest.$ac_ext <<_ACEOF -int -main (int argc, char **argv) -{ - (void) argc; - (void) argv; - return 0; -} -_ACEOF - rm -f conftest.$ac_objext conftest$ac_exeext - # Check both linking and compiling, because they might tolerate different options. - if test "`(eval $ac_link 2>&1 1>&5)`" = "" && test "`(eval $ac_compile 2>&1 1>&5)`" = ""; then - # The original macro breaks out of the loop at this point, - # but we continue trying flags because Linux needs -lpthread - # too to build libpq successfully. The test above only - # tests for building binaries, not shared libraries. - PTHREAD_LIBS=" $tryPTHREAD_LIBS $PTHREAD_LIBS" - PTHREAD_CFLAGS="$PTHREAD_CFLAGS $tryPTHREAD_CFLAGS" - else acx_pthread_ok=no - fi - fi - - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - - AC_MSG_RESULT($acx_pthread_ok) -done -fi - -# The original macro has a bunch of other tests here, which we have removed -# because (a) Postgres doesn't need them, and (b) $acx_pthread_ok is not -# meaningful at this point. - -AC_SUBST(PTHREAD_LIBS) -AC_SUBST(PTHREAD_CFLAGS) - -AC_LANG_RESTORE -])dnl ACX_PTHREAD diff --git a/config/ax_pthread.m4 b/config/ax_pthread.m4 new file mode 100644 index 0000000000000..d383ad5c6d6a5 --- /dev/null +++ b/config/ax_pthread.m4 @@ -0,0 +1,332 @@ +# =========================================================================== +# http://www.gnu.org/software/autoconf-archive/ax_pthread.html +# =========================================================================== +# +# SYNOPSIS +# +# AX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) +# +# DESCRIPTION +# +# This macro figures out how to build C programs using POSIX threads. It +# sets the PTHREAD_LIBS output variable to the threads library and linker +# flags, and the PTHREAD_CFLAGS output variable to any special C compiler +# flags that are needed. (The user can also force certain compiler +# flags/libs to be tested by setting these environment variables.) +# +# Also sets PTHREAD_CC to any special C compiler that is needed for +# multi-threaded programs (defaults to the value of CC otherwise). (This +# is necessary on AIX to use the special cc_r compiler alias.) +# +# NOTE: You are assumed to not only compile your program with these flags, +# but also link it with them as well. e.g. you should link with +# $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS +# +# If you are only building threads programs, you may wish to use these +# variables in your default LIBS, CFLAGS, and CC: +# +# LIBS="$PTHREAD_LIBS $LIBS" +# CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +# CC="$PTHREAD_CC" +# +# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant +# has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to that name +# (e.g. PTHREAD_CREATE_UNDETACHED on AIX). +# +# Also HAVE_PTHREAD_PRIO_INHERIT is defined if pthread is found and the +# PTHREAD_PRIO_INHERIT symbol is defined when compiling with +# PTHREAD_CFLAGS. +# +# ACTION-IF-FOUND is a list of shell commands to run if a threads library +# is found, and ACTION-IF-NOT-FOUND is a list of commands to run it if it +# is not found. If ACTION-IF-FOUND is not specified, the default action +# will define HAVE_PTHREAD. +# +# Please let the authors know if this macro fails on any platform, or if +# you have any other suggestions or comments. This macro was based on work +# by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) (with help +# from M. Frigo), as well as ac_pthread and hb_pthread macros posted by +# Alejandro Forero Cuervo to the autoconf macro repository. 
We are also +# grateful for the helpful feedback of numerous users. +# +# Updated for Autoconf 2.68 by Daniel Richard G. +# +# LICENSE +# +# Copyright (c) 2008 Steven G. Johnson +# Copyright (c) 2011 Daniel Richard G. +# +# This program is free software: you can redistribute it and/or modify it +# under the terms of the GNU General Public License as published by the +# Free Software Foundation, either version 3 of the License, or (at your +# option) any later version. +# +# This program is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General +# Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program. If not, see . +# +# As a special exception, the respective Autoconf Macro's copyright owner +# gives unlimited permission to copy, distribute and modify the configure +# scripts that are the output of Autoconf when processing the Macro. You +# need not follow the terms of the GNU General Public License when using +# or distributing such scripts, even though portions of the text of the +# Macro appear in them. The GNU General Public License (GPL) does govern +# all other use of the material that constitutes the Autoconf Macro. +# +# This special exception to the GPL applies to versions of the Autoconf +# Macro released by the Autoconf Archive. When you make and distribute a +# modified version of the Autoconf Macro, you may extend this special +# exception to the GPL to apply to your modified version as well. + +#serial 21 + +AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD]) +AC_DEFUN([AX_PTHREAD], [ +AC_REQUIRE([AC_CANONICAL_HOST]) +AC_LANG_PUSH([C]) +ax_pthread_ok=no + +# We used to check for pthread.h first, but this fails if pthread.h +# requires special compiler flags (e.g. on True64 or Sequent). +# It gets checked for in the link test anyway. + +# First of all, check if the user has set any of the PTHREAD_LIBS, +# etcetera environment variables, and if threads linking works using +# them: +if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) + AC_TRY_LINK_FUNC([pthread_join], [ax_pthread_ok=yes]) + AC_MSG_RESULT([$ax_pthread_ok]) + if test x"$ax_pthread_ok" = xno; then + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" + fi + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" +fi + +# We must check for the threads library under a number of different +# names; the ordering is very important because some systems +# (e.g. DEC) have both -lpthread and -lpthreads, where one of the +# libraries is broken (non-POSIX). + +# Create a list of thread flags to try. Items starting with a "-" are +# C compiler flags, and other items are library names, except for "none" +# which indicates that we try without any flags at all, and "pthread-config" +# which is a program returning the flags for the Pth emulation library. + +ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" + +# The ordering *is* (sometimes) important. 
Some notes on the +# individual items follow: + +# pthreads: AIX (must check this before -lpthread) +# none: in case threads are in libc; should be tried before -Kthread and +# other compiler flags to prevent continual compiler warnings +# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) +# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) +# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) +# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) +# -pthreads: Solaris/gcc +# -mthreads: Mingw32/gcc, Lynx/gcc +# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it +# doesn't hurt to check since this sometimes defines pthreads too; +# also defines -D_REENTRANT) +# ... -mt is also the pthreads flag for HP/aCC +# pthread: Linux, etcetera +# --thread-safe: KAI C++ +# pthread-config: use pthread-config program (for GNU Pth library) + +case ${host_os} in + solaris*) + + # On Solaris (at least, for some versions), libc contains stubbed + # (non-functional) versions of the pthreads routines, so link-based + # tests will erroneously succeed. (We need to link with -pthreads/-mt/ + # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather + # a function called by this macro, so we could check for that, but + # who knows whether they'll stub that too in a future libc.) So, + # we'll just look for -pthreads and -lpthread first: + + ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" + ;; + + darwin*) + ax_pthread_flags="-pthread $ax_pthread_flags" + ;; +esac + +# Clang doesn't consider unrecognized options an error unless we specify +# -Werror. We throw in some extra Clang-specific options to ensure that +# this doesn't happen for GCC, which also accepts -Werror. + +AC_MSG_CHECKING([if compiler needs -Werror to reject unknown flags]) +save_CFLAGS="$CFLAGS" +ax_pthread_extra_flags="-Werror" +CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" +AC_COMPILE_IFELSE([AC_LANG_PROGRAM([int foo(void);],[foo()])], + [AC_MSG_RESULT([yes])], + [ax_pthread_extra_flags= + AC_MSG_RESULT([no])]) +CFLAGS="$save_CFLAGS" + +if test x"$ax_pthread_ok" = xno; then +for flag in $ax_pthread_flags; do + + case $flag in + none) + AC_MSG_CHECKING([whether pthreads work without any flags]) + ;; + + -*) + AC_MSG_CHECKING([whether pthreads work with $flag]) + PTHREAD_CFLAGS="$flag" + ;; + + pthread-config) + AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no]) + if test x"$ax_pthread_config" = xno; then continue; fi + PTHREAD_CFLAGS="`pthread-config --cflags`" + PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" + ;; + + *) + AC_MSG_CHECKING([for the pthreads library -l$flag]) + PTHREAD_LIBS="-l$flag" + ;; + esac + + save_LIBS="$LIBS" + save_CFLAGS="$CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" + + # Check for various functions. We must include pthread.h, + # since some functions may be macros. (On the Sequent, we + # need a special flag -Kthread to make this header compile.) + # We check for pthread_join because it is in -lpthread on IRIX + # while pthread_create is in libc. We check for pthread_attr_init + # due to DEC craziness with -lpthreads. We check for + # pthread_cleanup_push because it is one of the few pthread + # functions on Solaris that doesn't have a non-functional libc stub. + # We try pthread_create on general principles. 
+ AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h> + static void routine(void *a) { a = 0; } + static void *start_routine(void *a) { return a; }], + [pthread_t th; pthread_attr_t attr; + pthread_create(&th, 0, start_routine, 0); + pthread_join(th, 0); + pthread_attr_init(&attr); + pthread_cleanup_push(routine, 0); + pthread_cleanup_pop(0) /* ; */])], + [ax_pthread_ok=yes], + []) + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + + AC_MSG_RESULT([$ax_pthread_ok]) + if test "x$ax_pthread_ok" = xyes; then + break; + fi + + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" +done +fi + +# Various other checks: +if test "x$ax_pthread_ok" = xyes; then + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + + # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. + AC_MSG_CHECKING([for joinable pthread attribute]) + attr_name=unknown + for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do + AC_LINK_IFELSE([AC_LANG_PROGRAM([#include <pthread.h>], + [int attr = $attr; return attr /* ; */])], + [attr_name=$attr; break], + []) + done + AC_MSG_RESULT([$attr_name]) + if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then + AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE], [$attr_name], + [Define to necessary symbol if this constant + uses a non-standard name on your system.]) + fi + + AC_MSG_CHECKING([if more special flags are required for pthreads]) + flag=no + case ${host_os} in + aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; + osf* | hpux*) flag="-D_REENTRANT";; + solaris*) + if test "$GCC" = "yes"; then + flag="-D_REENTRANT" + else + # TODO: What about Clang on Solaris? + flag="-mt -D_REENTRANT" + fi + ;; + esac + AC_MSG_RESULT([$flag]) + if test "x$flag" != xno; then + PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" + fi + + AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT], + [ax_cv_PTHREAD_PRIO_INHERIT], [ + AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include <pthread.h>]], + [[int i = PTHREAD_PRIO_INHERIT;]])], + [ax_cv_PTHREAD_PRIO_INHERIT=yes], + [ax_cv_PTHREAD_PRIO_INHERIT=no]) + ]) + AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"], + [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])]) + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + + # More AIX lossage: compile with *_r variant + if test "x$GCC" != xyes; then + case $host_os in + aix*) + AS_CASE(["x/$CC"], + [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6], + [#handle absolute path differently from PATH based program lookup + AS_CASE(["x$CC"], + [x/*], + [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])], + [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])]) + ;; + esac + fi +fi + +test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" + +AC_SUBST([PTHREAD_LIBS]) +AC_SUBST([PTHREAD_CFLAGS]) +AC_SUBST([PTHREAD_CC]) + +# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: +if test x"$ax_pthread_ok" = xyes; then + ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1]) + : +else + ax_pthread_ok=no + $2 +fi +AC_LANG_POP +])dnl AX_PTHREAD diff --git a/configure b/configure index 38cec0fe70c11..fae437946e57b 100755 --- a/configure +++ b/configure @@ -656,7 +656,8 @@ LDAP_LIBS_BE LDAP_LIBS_FE PTHREAD_CFLAGS PTHREAD_LIBS -acx_pthread_config +PTHREAD_CC +ax_pthread_config have_win32_dbghelp HAVE_IPV6 LIBOBJS @@ -12527,7 +12528,7 @@ ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' ac_compiler_gnu=$ac_cv_c_compiler_gnu
-acx_pthread_ok=no +ax_pthread_ok=no # We used to check for pthread.h first, but this fails if pthread.h # requires special compiler flags (e.g. on True64 or Sequent). @@ -12562,13 +12563,13 @@ return pthread_join (); } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - acx_pthread_ok=yes + ax_pthread_ok=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_pthread_ok" >&5 -$as_echo "$acx_pthread_ok" >&6; } - if test x"$acx_pthread_ok" = xno; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 +$as_echo "$ax_pthread_ok" >&6; } + if test x"$ax_pthread_ok" = xno; then PTHREAD_LIBS="" PTHREAD_CFLAGS="" fi @@ -12586,7 +12587,7 @@ fi # which indicates that we try without any flags at all, and "pthread-config" # which is a program returning the flags for the Pth emulation library. -acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config pthreadGC2" +ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" # The ordering *is* (sometimes) important. Some notes on the # individual items follow: @@ -12603,30 +12604,64 @@ acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -m # -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it # doesn't hurt to check since this sometimes defines pthreads too; # also defines -D_REENTRANT) +# ... -mt is also the pthreads flag for HP/aCC # pthread: Linux, etcetera # --thread-safe: KAI C++ # pthread-config: use pthread-config program (for GNU Pth library) -case "${host_cpu}-${host_os}" in - *solaris*) +case ${host_os} in + solaris*) # On Solaris (at least, for some versions), libc contains stubbed # (non-functional) versions of the pthreads routines, so link-based - # tests will erroneously succeed. (We need to link with -pthread or + # tests will erroneously succeed. (We need to link with -pthreads/-mt/ # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather # a function called by this macro, so we could check for that, but # who knows whether they'll stub that too in a future libc.) So, # we'll just look for -pthreads and -lpthread first: - acx_pthread_flags="-pthread -pthreads pthread -mt $acx_pthread_flags" + ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" + ;; + + darwin*) + ax_pthread_flags="-pthread $ax_pthread_flags" ;; esac -if test x"$acx_pthread_ok" = xno; then -for flag in $acx_pthread_flags; do +# Clang doesn't consider unrecognized options an error unless we specify +# -Werror. We throw in some extra Clang-specific options to ensure that +# this doesn't happen for GCC, which also accepts -Werror. + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler needs -Werror to reject unknown flags" >&5 +$as_echo_n "checking if compiler needs -Werror to reject unknown flags... " >&6; } +save_CFLAGS="$CFLAGS" +ax_pthread_extra_flags="-Werror" +CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int foo(void); +int +main () +{ +foo() + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + ax_pthread_extra_flags= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +CFLAGS="$save_CFLAGS" + +if test x"$ax_pthread_ok" = xno; then +for flag in $ax_pthread_flags; do - tryPTHREAD_CFLAGS="" - tryPTHREAD_LIBS="" case $flag in none) { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5 @@ -12636,21 +12671,19 @@ $as_echo_n "checking whether pthreads work without any flags... " >&6; } -*) { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5 $as_echo_n "checking whether pthreads work with $flag... " >&6; } - tryPTHREAD_CFLAGS="$flag" + PTHREAD_CFLAGS="$flag" ;; pthread-config) - # skip this if we already have flags defined, for PostgreSQL - if test x"$PTHREAD_CFLAGS" != x -o x"$PTHREAD_LIBS" != x; then continue; fi # Extract the first word of "pthread-config", so it can be a program name with args. set dummy pthread-config; ac_word=$2 { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 $as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_acx_pthread_config+:} false; then : +if ${ac_cv_prog_ax_pthread_config+:} false; then : $as_echo_n "(cached) " >&6 else - if test -n "$acx_pthread_config"; then - ac_cv_prog_acx_pthread_config="$acx_pthread_config" # Let the user override the test. + if test -n "$ax_pthread_config"; then + ac_cv_prog_ax_pthread_config="$ax_pthread_config" # Let the user override the test. else as_save_IFS=$IFS; IFS=$PATH_SEPARATOR for as_dir in $PATH @@ -12659,7 +12692,7 @@ do test -z "$as_dir" && as_dir=. for ac_exec_ext in '' $ac_executable_extensions; do if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_acx_pthread_config="yes" + ac_cv_prog_ax_pthread_config="yes" $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 break 2 fi @@ -12667,35 +12700,35 @@ done done IFS=$as_save_IFS - test -z "$ac_cv_prog_acx_pthread_config" && ac_cv_prog_acx_pthread_config="no" + test -z "$ac_cv_prog_ax_pthread_config" && ac_cv_prog_ax_pthread_config="no" fi fi -acx_pthread_config=$ac_cv_prog_acx_pthread_config -if test -n "$acx_pthread_config"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_pthread_config" >&5 -$as_echo "$acx_pthread_config" >&6; } +ax_pthread_config=$ac_cv_prog_ax_pthread_config +if test -n "$ax_pthread_config"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_config" >&5 +$as_echo "$ax_pthread_config" >&6; } else { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 $as_echo "no" >&6; } fi - if test x"$acx_pthread_config" = xno; then continue; fi - tryPTHREAD_CFLAGS="`pthread-config --cflags`" - tryPTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" + if test x"$ax_pthread_config" = xno; then continue; fi + PTHREAD_CFLAGS="`pthread-config --cflags`" + PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" ;; *) { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5 $as_echo_n "checking for the pthreads library -l$flag... 
" >&6; } - tryPTHREAD_LIBS="-l$flag" + PTHREAD_LIBS="-l$flag" ;; esac save_LIBS="$LIBS" save_CFLAGS="$CFLAGS" - LIBS="$tryPTHREAD_LIBS $PTHREAD_LIBS $LIBS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS $tryPTHREAD_CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" # Check for various functions. We must include pthread.h, # since some functions may be macros. (On the Sequent, we @@ -12709,64 +12742,219 @@ $as_echo_n "checking for the pthreads library -l$flag... " >&6; } cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ #include + static void routine(void *a) { a = 0; } + static void *start_routine(void *a) { return a; } int main () { -pthread_t th; pthread_join(th, 0); - pthread_attr_init(0); pthread_cleanup_push(0, 0); - pthread_create(0,0,0,0); pthread_cleanup_pop(0); +pthread_t th; pthread_attr_t attr; + pthread_create(&th, 0, start_routine, 0); + pthread_join(th, 0); + pthread_attr_init(&attr); + pthread_cleanup_push(routine, 0); + pthread_cleanup_pop(0) /* ; */ ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - acx_pthread_ok=yes -else - acx_pthread_ok=no + ax_pthread_ok=yes fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext - if test "x$acx_pthread_ok" = xyes; then - # Don't use options that are ignored by the compiler. - # We find them by checking stderror. - cat >conftest.$ac_ext <<_ACEOF + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 +$as_echo "$ax_pthread_ok" >&6; } + if test "x$ax_pthread_ok" = xyes; then + break; + fi + + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" +done +fi + +# Various other checks: +if test "x$ax_pthread_ok" = xyes; then + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + + # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for joinable pthread attribute" >&5 +$as_echo_n "checking for joinable pthread attribute... " >&6; } + attr_name=unknown + for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include int -main (int argc, char **argv) +main () { - (void) argc; - (void) argv; +int attr = $attr; return attr /* ; */ + ; return 0; } _ACEOF - rm -f conftest.$ac_objext conftest$ac_exeext - # Check both linking and compiling, because they might tolerate different options. - if test "`(eval $ac_link 2>&1 1>&5)`" = "" && test "`(eval $ac_compile 2>&1 1>&5)`" = ""; then - # The original macro breaks out of the loop at this point, - # but we continue trying flags because Linux needs -lpthread - # too to build libpq successfully. The test above only - # tests for building binaries, not shared libraries. - PTHREAD_LIBS=" $tryPTHREAD_LIBS $PTHREAD_LIBS" - PTHREAD_CFLAGS="$PTHREAD_CFLAGS $tryPTHREAD_CFLAGS" - else acx_pthread_ok=no +if ac_fn_c_try_link "$LINENO"; then : + attr_name=$attr; break +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + done + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $attr_name" >&5 +$as_echo "$attr_name" >&6; } + if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then + +cat >>confdefs.h <<_ACEOF +#define PTHREAD_CREATE_JOINABLE $attr_name +_ACEOF + + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if more special flags are required for pthreads" >&5 +$as_echo_n "checking if more special flags are required for pthreads... 
" >&6; } + flag=no + case ${host_os} in + aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; + osf* | hpux*) flag="-D_REENTRANT";; + solaris*) + if test "$GCC" = "yes"; then + flag="-D_REENTRANT" + else + # TODO: What about Clang on Solaris? + flag="-mt -D_REENTRANT" fi + ;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $flag" >&5 +$as_echo "$flag" >&6; } + if test "x$flag" != xno; then + PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PTHREAD_PRIO_INHERIT" >&5 +$as_echo_n "checking for PTHREAD_PRIO_INHERIT... " >&6; } +if ${ax_cv_PTHREAD_PRIO_INHERIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +int i = PTHREAD_PRIO_INHERIT; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_PTHREAD_PRIO_INHERIT=yes +else + ax_cv_PTHREAD_PRIO_INHERIT=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_PRIO_INHERIT" >&5 +$as_echo "$ax_cv_PTHREAD_PRIO_INHERIT" >&6; } + if test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"; then : + +$as_echo "#define HAVE_PTHREAD_PRIO_INHERIT 1" >>confdefs.h + +fi + LIBS="$save_LIBS" CFLAGS="$save_CFLAGS" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_pthread_ok" >&5 -$as_echo "$acx_pthread_ok" >&6; } + # More AIX lossage: compile with *_r variant + if test "x$GCC" != xyes; then + case $host_os in + aix*) + case "x/$CC" in #( + x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6) : + #handle absolute path differently from PATH based program lookup + case "x$CC" in #( + x/*) : + if as_fn_executable_p ${CC}_r; then : + PTHREAD_CC="${CC}_r" +fi ;; #( + *) : + for ac_prog in ${CC}_r +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_PTHREAD_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$PTHREAD_CC"; then + ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_PTHREAD_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +PTHREAD_CC=$ac_cv_prog_PTHREAD_CC +if test -n "$PTHREAD_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PTHREAD_CC" >&5 +$as_echo "$PTHREAD_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$PTHREAD_CC" && break done +test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" + ;; +esac ;; #( + *) : + ;; +esac + ;; + esac + fi fi -# The original macro has a bunch of other tests here, which we have removed -# because (a) Postgres doesn't need them, and (b) $acx_pthread_ok is not -# meaningful at this point. 
+test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" + + + +# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: +if test x"$ax_pthread_ok" = xyes; then +$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h + : +else + ax_pthread_ok=no + +fi ac_ext=c ac_cpp='$CPP $CPPFLAGS' ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' diff --git a/configure.in b/configure.in index 143e667ce27eb..9f2db8169b4fe 100644 --- a/configure.in +++ b/configure.in @@ -1571,7 +1571,7 @@ fi # See the comment at the top of src/port/thread.c for more information. # WIN32 doesn't need the pthread tests; it always uses threads if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then -ACX_PTHREAD # set thread flags +AX_PTHREAD # set thread flags # Some platforms use these, so just define them. They can't hurt if they # are not supported. For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS From 080c4dab3d9575449b81604051b160597cfd55c3 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 9 Jul 2015 00:05:45 +0300 Subject: [PATCH 029/442] Move pthread-tests earlier in the autoconf script. On some Linux systems, "-lrt" exposed pthread-functions, so that linking with -lrt was seemingly enough to make a program that uses pthreads to work. However, when linking libpq, the dependency to libpthread was not marked correctly, so that when an executable was linked with -lpq but without -pthread, you got errors about undefined pthread_* functions from libpq. To fix, test for the flags required to use pthreads earlier in the autoconf script, before checking any other libraries. This should fix the failure on buildfarm member shearwater. gharial is also failing; hopefully this fixes that too although the failure looks somewhat different. --- configure | 7648 +++++++++++++++++++++++++------------------------- configure.in | 147 +- 2 files changed, 3900 insertions(+), 3895 deletions(-) diff --git a/configure b/configure index fae437946e57b..2b973ae96be97 100755 --- a/configure +++ b/configure @@ -652,16 +652,16 @@ MSGFMT HAVE_POSIX_SIGNALS PG_CRC32C_OBJS CFLAGS_SSE42 +have_win32_dbghelp +HAVE_IPV6 +LIBOBJS +UUID_LIBS LDAP_LIBS_BE LDAP_LIBS_FE PTHREAD_CFLAGS PTHREAD_LIBS PTHREAD_CC ax_pthread_config -have_win32_dbghelp -HAVE_IPV6 -LIBOBJS -UUID_LIBS ZIC python_additional_libs python_libspec @@ -1748,73 +1748,6 @@ fi } # ac_fn_c_try_cpp -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Define $2 to an innocuous variant, in case declares $2. - For example, HP-UX 11i declares gettimeofday. */ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include -#else -# include -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_func - # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using @@ -1979,6 +1912,73 @@ $as_echo "$ac_res" >&6; } } # ac_fn_c_check_header_compile +# ac_fn_c_check_func LINENO FUNC VAR +# ---------------------------------- +# Tests whether FUNC exists, setting the cache variable VAR accordingly +ac_fn_c_check_func () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Define $2 to an innocuous variant, in case declares $2. + For example, HP-UX 11i declares gettimeofday. */ +#define $2 innocuous_$2 + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $2 (); below. + Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $2 + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $2 (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$2 || defined __stub___$2 +choke me +#endif + +int +main () +{ +return $2 (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_func + # ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES # ---------------------------------------------------- # Tries to find if the field MEMBER exists in type AGGR, after including @@ -7614,62 +7614,44 @@ program to use during the build." "$LINENO" 5 fi fi - -## -## Libraries -## -## Most libraries are included only if they demonstrably provide a function -## we need, but libm is an exception: always include it, because there are -## too many compilers that play cute optimization games that will break -## probes for standard functions such as pow(). -## - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lm" >&5 -$as_echo_n "checking for main in -lm... 
" >&6; } -if ${ac_cv_lib_m_main+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lm $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ +# +# Pthreads +# +# For each platform, we need to know about any special compile and link +# libraries, and whether the normal C function names are thread-safe. +# See the comment at the top of src/port/thread.c for more information. +# WIN32 doesn't need the pthread tests; it always uses threads +# +# These tests are run before the library-tests, because linking with the +# other libraries can pull in the pthread functions as a side-effect. We +# want to use the -pthread or similar flags directly, and not rely on +# the side-effects of linking with some other library. +if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then -int -main () -{ -return main (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_m_main=yes -else - ac_cv_lib_m_main=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_m_main" >&5 -$as_echo "$ac_cv_lib_m_main" >&6; } -if test "x$ac_cv_lib_m_main" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBM 1 -_ACEOF +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu - LIBS="-lm $LIBS" +ax_pthread_ok=no -fi +# We used to check for pthread.h first, but this fails if pthread.h +# requires special compiler flags (e.g. on True64 or Sequent). +# It gets checked for in the link test anyway. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing setproctitle" >&5 -$as_echo_n "checking for library containing setproctitle... " >&6; } -if ${ac_cv_search_setproctitle+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +# First of all, check if the user has set any of the PTHREAD_LIBS, +# etcetera environment variables, and if threads linking works using +# them: +if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5 +$as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
@@ -7678,561 +7660,683 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char setproctitle (); +char pthread_join (); int main () { -return setproctitle (); +return pthread_join (); ; return 0; } _ACEOF -for ac_lib in '' util; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_setproctitle=$ac_res +if ac_fn_c_try_link "$LINENO"; then : + ax_pthread_ok=yes fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_setproctitle+:} false; then : - break + conftest$ac_exeext conftest.$ac_ext + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 +$as_echo "$ax_pthread_ok" >&6; } + if test x"$ax_pthread_ok" = xno; then + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" + fi + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" fi -done -if ${ac_cv_search_setproctitle+:} false; then : -else - ac_cv_search_setproctitle=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_setproctitle" >&5 -$as_echo "$ac_cv_search_setproctitle" >&6; } -ac_res=$ac_cv_search_setproctitle -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +# We must check for the threads library under a number of different +# names; the ordering is very important because some systems +# (e.g. DEC) have both -lpthread and -lpthreads, where one of the +# libraries is broken (non-POSIX). -fi +# Create a list of thread flags to try. Items starting with a "-" are +# C compiler flags, and other items are library names, except for "none" +# which indicates that we try without any flags at all, and "pthread-config" +# which is a program returning the flags for the Pth emulation library. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 -$as_echo_n "checking for library containing dlopen... " >&6; } -if ${ac_cv_search_dlopen+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS +ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" + +# The ordering *is* (sometimes) important. Some notes on the +# individual items follow: + +# pthreads: AIX (must check this before -lpthread) +# none: in case threads are in libc; should be tried before -Kthread and +# other compiler flags to prevent continual compiler warnings +# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) +# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) +# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) +# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) +# -pthreads: Solaris/gcc +# -mthreads: Mingw32/gcc, Lynx/gcc +# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it +# doesn't hurt to check since this sometimes defines pthreads too; +# also defines -D_REENTRANT) +# ... -mt is also the pthreads flag for HP/aCC +# pthread: Linux, etcetera +# --thread-safe: KAI C++ +# pthread-config: use pthread-config program (for GNU Pth library) + +case ${host_os} in + solaris*) + + # On Solaris (at least, for some versions), libc contains stubbed + # (non-functional) versions of the pthreads routines, so link-based + # tests will erroneously succeed. (We need to link with -pthreads/-mt/ + # -lpthread.) 
(The stubs are missing pthread_cleanup_push, or rather + # a function called by this macro, so we could check for that, but + # who knows whether they'll stub that too in a future libc.) So, + # we'll just look for -pthreads and -lpthread first: + + ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" + ;; + + darwin*) + ax_pthread_flags="-pthread $ax_pthread_flags" + ;; +esac + +# Clang doesn't consider unrecognized options an error unless we specify +# -Werror. We throw in some extra Clang-specific options to ensure that +# this doesn't happen for GCC, which also accepts -Werror. + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler needs -Werror to reject unknown flags" >&5 +$as_echo_n "checking if compiler needs -Werror to reject unknown flags... " >&6; } +save_CFLAGS="$CFLAGS" +ax_pthread_extra_flags="-Werror" +CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char dlopen (); +int foo(void); int main () { -return dlopen (); +foo() ; return 0; } _ACEOF -for ac_lib in '' dl; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_dlopen=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_dlopen+:} false; then : - break +if ac_fn_c_try_compile "$LINENO"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + ax_pthread_extra_flags= + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } fi -done -if ${ac_cv_search_dlopen+:} false; then : +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +CFLAGS="$save_CFLAGS" + +if test x"$ax_pthread_ok" = xno; then +for flag in $ax_pthread_flags; do + + case $flag in + none) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5 +$as_echo_n "checking whether pthreads work without any flags... " >&6; } + ;; + + -*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5 +$as_echo_n "checking whether pthreads work with $flag... " >&6; } + PTHREAD_CFLAGS="$flag" + ;; + pthread-config) + # Extract the first word of "pthread-config", so it can be a program name with args. +set dummy pthread-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ax_pthread_config+:} false; then : + $as_echo_n "(cached) " >&6 else - ac_cv_search_dlopen=no + if test -n "$ax_pthread_config"; then + ac_cv_prog_ax_pthread_config="$ax_pthread_config" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_ax_pthread_config="yes" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_prog_ax_pthread_config" && ac_cv_prog_ax_pthread_config="no" fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 -$as_echo "$ac_cv_search_dlopen" >&6; } -ac_res=$ac_cv_search_dlopen -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - +ax_pthread_config=$ac_cv_prog_ax_pthread_config +if test -n "$ax_pthread_config"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_config" >&5 +$as_echo "$ax_pthread_config" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing socket" >&5 -$as_echo_n "checking for library containing socket... " >&6; } -if ${ac_cv_search_socket+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char socket (); + if test x"$ax_pthread_config" = xno; then continue; fi + PTHREAD_CFLAGS="`pthread-config --cflags`" + PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" + ;; + + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5 +$as_echo_n "checking for the pthreads library -l$flag... " >&6; } + PTHREAD_LIBS="-l$flag" + ;; + esac + + save_LIBS="$LIBS" + save_CFLAGS="$CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" + + # Check for various functions. We must include pthread.h, + # since some functions may be macros. (On the Sequent, we + # need a special flag -Kthread to make this header compile.) + # We check for pthread_join because it is in -lpthread on IRIX + # while pthread_create is in libc. We check for pthread_attr_init + # due to DEC craziness with -lpthreads. We check for + # pthread_cleanup_push because it is one of the few pthread + # functions on Solaris that doesn't have a non-functional libc stub. + # We try pthread_create on general principles. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include + static void routine(void *a) { a = 0; } + static void *start_routine(void *a) { return a; } int main () { -return socket (); +pthread_t th; pthread_attr_t attr; + pthread_create(&th, 0, start_routine, 0); + pthread_join(th, 0); + pthread_attr_init(&attr); + pthread_cleanup_push(routine, 0); + pthread_cleanup_pop(0) /* ; */ ; return 0; } _ACEOF -for ac_lib in '' socket ws2_32; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_socket=$ac_res +if ac_fn_c_try_link "$LINENO"; then : + ax_pthread_ok=yes fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_socket+:} false; then : - break -fi -done -if ${ac_cv_search_socket+:} false; then : + conftest$ac_exeext conftest.$ac_ext -else - ac_cv_search_socket=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_socket" >&5 -$as_echo "$ac_cv_search_socket" >&6; } -ac_res=$ac_cv_search_socket -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" -fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 +$as_echo "$ax_pthread_ok" >&6; } + if test "x$ax_pthread_ok" = xyes; then + break; + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shl_load" >&5 -$as_echo_n "checking for library containing shl_load... " >&6; } -if ${ac_cv_search_shl_load+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" +done +fi -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char shl_load (); +# Various other checks: +if test "x$ax_pthread_ok" = xyes; then + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + + # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for joinable pthread attribute" >&5 +$as_echo_n "checking for joinable pthread attribute... " >&6; } + attr_name=unknown + for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include int main () { -return shl_load (); +int attr = $attr; return attr /* ; */ ; return 0; } _ACEOF -for ac_lib in '' dld; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_shl_load=$ac_res +if ac_fn_c_try_link "$LINENO"; then : + attr_name=$attr; break fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_shl_load+:} false; then : - break -fi -done -if ${ac_cv_search_shl_load+:} false; then : + conftest$ac_exeext conftest.$ac_ext + done + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $attr_name" >&5 +$as_echo "$attr_name" >&6; } + if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then -else - ac_cv_search_shl_load=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shl_load" >&5 -$as_echo "$ac_cv_search_shl_load" >&6; } -ac_res=$ac_cv_search_shl_load -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +cat >>confdefs.h <<_ACEOF +#define PTHREAD_CREATE_JOINABLE $attr_name +_ACEOF -fi + fi -# We only use libld in port/dynloader/aix.c -case $host_os in - aix*) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing ldopen" >&5 -$as_echo_n "checking for library containing ldopen... " >&6; } -if ${ac_cv_search_ldopen+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if more special flags are required for pthreads" >&5 +$as_echo_n "checking if more special flags are required for pthreads... " >&6; } + flag=no + case ${host_os} in + aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; + osf* | hpux*) flag="-D_REENTRANT";; + solaris*) + if test "$GCC" = "yes"; then + flag="-D_REENTRANT" + else + # TODO: What about Clang on Solaris? + flag="-mt -D_REENTRANT" + fi + ;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $flag" >&5 +$as_echo "$flag" >&6; } + if test "x$flag" != xno; then + PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PTHREAD_PRIO_INHERIT" >&5 +$as_echo_n "checking for PTHREAD_PRIO_INHERIT... " >&6; } +if ${ax_cv_PTHREAD_PRIO_INHERIT+:} false; then : $as_echo_n "(cached) " >&6 else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char ldopen (); + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include int main () { -return ldopen (); +int i = PTHREAD_PRIO_INHERIT; ; return 0; } _ACEOF -for ac_lib in '' ld; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_ldopen=$ac_res +if ac_fn_c_try_link "$LINENO"; then : + ax_cv_PTHREAD_PRIO_INHERIT=yes +else + ax_cv_PTHREAD_PRIO_INHERIT=no fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_ldopen+:} false; then : - break -fi -done -if ${ac_cv_search_ldopen+:} false; then : + conftest$ac_exeext conftest.$ac_ext -else - ac_cv_search_ldopen=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_ldopen" >&5 -$as_echo "$ac_cv_search_ldopen" >&6; } -ac_res=$ac_cv_search_ldopen -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_PRIO_INHERIT" >&5 +$as_echo "$ax_cv_PTHREAD_PRIO_INHERIT" >&6; } + if test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"; then : + +$as_echo "#define HAVE_PTHREAD_PRIO_INHERIT 1" >>confdefs.h fi - ;; -esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing getopt_long" >&5 -$as_echo_n "checking for library containing getopt_long... " >&6; } -if ${ac_cv_search_getopt_long+:} false; then : + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + + # More AIX lossage: compile with *_r variant + if test "x$GCC" != xyes; then + case $host_os in + aix*) + case "x/$CC" in #( + x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6) : + #handle absolute path differently from PATH based program lookup + case "x$CC" in #( + x/*) : + if as_fn_executable_p ${CC}_r; then : + PTHREAD_CC="${CC}_r" +fi ;; #( + *) : + for ac_prog in ${CC}_r +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_PTHREAD_CC+:} false; then : $as_echo_n "(cached) " >&6 else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char getopt_long (); -int -main () -{ -return getopt_long (); - ; - return 0; -} -_ACEOF -for ac_lib in '' getopt gnugetopt; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" + if test -n "$PTHREAD_CC"; then + ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_PTHREAD_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_getopt_long=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_getopt_long+:} false; then : - break -fi done -if ${ac_cv_search_getopt_long+:} false; then : + done +IFS=$as_save_IFS -else - ac_cv_search_getopt_long=no fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_getopt_long" >&5 -$as_echo "$ac_cv_search_getopt_long" >&6; } -ac_res=$ac_cv_search_getopt_long -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - +PTHREAD_CC=$ac_cv_prog_PTHREAD_CC +if test -n "$PTHREAD_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PTHREAD_CC" >&5 +$as_echo "$PTHREAD_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing crypt" >&5 -$as_echo_n "checking for library containing crypt... " >&6; } -if ${ac_cv_search_crypt+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char crypt (); -int -main () -{ -return crypt (); - ; - return 0; -} -_ACEOF -for ac_lib in '' crypt; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_crypt=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_crypt+:} false; then : - break -fi + test -n "$PTHREAD_CC" && break done -if ${ac_cv_search_crypt+:} false; then : +test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" + ;; +esac ;; #( + *) : + ;; +esac + ;; + esac + fi +fi + +test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" + + + + + +# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: +if test x"$ax_pthread_ok" = xyes; then + +$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h + : else - ac_cv_search_crypt=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_crypt" >&5 -$as_echo "$ac_cv_search_crypt" >&6; } -ac_res=$ac_cv_search_crypt -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + ax_pthread_ok=no fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shm_open" >&5 -$as_echo_n "checking for library containing shm_open... " >&6; } -if ${ac_cv_search_shm_open+:} false; then : + # set thread flags + +# Some platforms use these, so just define them. They can't hurt if they +# are not supported. For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS +# enables 5-arg getpwuid_r, among other things. 
+PTHREAD_CFLAGS="$PTHREAD_CFLAGS -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS" + +# Check for *_r functions +_CFLAGS="$CFLAGS" +_LIBS="$LIBS" +CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +LIBS="$LIBS $PTHREAD_LIBS" + +if test "$PORTNAME" != "win32"; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ +#include +#include +#include +#include -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char shm_open (); int main () { -return shm_open (); + ; return 0; } _ACEOF -for ac_lib in '' rt; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_shm_open=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_shm_open+:} false; then : - break +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no fi -done -if ${ac_cv_search_shm_open+:} false; then : +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : else - ac_cv_search_shm_open=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS + ac_cv_header_stdc=no fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shm_open" >&5 -$as_echo "$ac_cv_search_shm_open" >&6; } -ac_res=$ac_cv_search_shm_open -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +rm -f conftest* fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shm_unlink" >&5 -$as_echo_n "checking for library containing shm_unlink... " >&6; } -if ${ac_cv_search_shm_unlink+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ +#include -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char shm_unlink (); -int -main () -{ -return shm_unlink (); - ; - return 0; -} _ACEOF -for ac_lib in '' rt; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_shm_unlink=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_shm_unlink+:} false; then : - break -fi -done -if ${ac_cv_search_shm_unlink+:} false; then : +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : else - ac_cv_search_shm_unlink=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS + ac_cv_header_stdc=no fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shm_unlink" >&5 -$as_echo "$ac_cv_search_shm_unlink" >&6; } -ac_res=$ac_cv_search_shm_unlink -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +rm -f conftest* fi -# Solaris: -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing fdatasync" >&5 -$as_echo_n "checking for library containing fdatasync... " >&6; } -if ${ac_cv_search_fdatasync+:} false; then : - $as_echo_n "(cached) " >&6 +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif -char fdatasync (); + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { -return fdatasync (); - ; + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; return 0; } _ACEOF -for ac_lib in '' rt posix4; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_fdatasync=$ac_res +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_fdatasync+:} false; then : - break +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. 
+for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + fi + done -if ${ac_cv_search_fdatasync+:} false; then : + + +ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default" +if test "x$ac_cv_header_pthread_h" = xyes; then : else - ac_cv_search_fdatasync=no + as_fn_error $? " +pthread.h not found; use --disable-thread-safety to disable thread safety" "$LINENO" 5 fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS + + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_fdatasync" >&5 -$as_echo "$ac_cv_search_fdatasync" >&6; } -ac_res=$ac_cv_search_fdatasync -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +for ac_func in strerror_r getpwuid_r gethostbyname_r +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF fi +done -# Required for thread_test.c on Solaris -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing sched_yield" >&5 -$as_echo_n "checking for library containing sched_yield... " >&6; } -if ${ac_cv_search_sched_yield+:} false; then : + +# Do test here with the proper thread flags +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns int" >&5 +$as_echo_n "checking whether strerror_r returns int... " >&6; } +if ${pgac_cv_func_strerror_r_int+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +#ifndef _AIX +int strerror_r(int, char *, size_t); +#else +/* Older AIX has 'int' for the third argument so we don't test the args. */ +int strerror_r(); +#endif + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_func_strerror_r_int=yes +else + pgac_cv_func_strerror_r_int=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_strerror_r_int" >&5 +$as_echo "$pgac_cv_func_strerror_r_int" >&6; } +if test x"$pgac_cv_func_strerror_r_int" = xyes ; then + +$as_echo "#define STRERROR_R_INT 1" >>confdefs.h + +fi + + +CFLAGS="$_CFLAGS" +LIBS="$_LIBS" + +else +# do not use values from template file +PTHREAD_CFLAGS= +PTHREAD_LIBS= +fi + + + + + +## +## Libraries +## +## Most libraries are included only if they demonstrably provide a function +## we need, but libm is an exception: always include it, because there are +## too many compilers that play cute optimization games that will break +## probes for standard functions such as pow(). +## + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lm" >&5 +$as_echo_n "checking for main in -lm... " >&6; } +if ${ac_cv_lib_m_main+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lm $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + + +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_m_main=yes +else + ac_cv_lib_m_main=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_m_main" >&5 +$as_echo "$ac_cv_lib_m_main" >&6; } +if test "x$ac_cv_lib_m_main" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBM 1 +_ACEOF + + LIBS="-lm $LIBS" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing setproctitle" >&5 +$as_echo_n "checking for library containing setproctitle... " >&6; } +if ${ac_cv_search_setproctitle+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8245,16 +8349,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char sched_yield (); +char setproctitle (); int main () { -return sched_yield (); +return setproctitle (); ; return 0; } _ACEOF -for ac_lib in '' rt; do +for ac_lib in '' util; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8262,35 +8366,33 @@ for ac_lib in '' rt; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_sched_yield=$ac_res + ac_cv_search_setproctitle=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_sched_yield+:} false; then : + if ${ac_cv_search_setproctitle+:} false; then : break fi done -if ${ac_cv_search_sched_yield+:} false; then : +if ${ac_cv_search_setproctitle+:} false; then : else - ac_cv_search_sched_yield=no + ac_cv_search_setproctitle=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_sched_yield" >&5 -$as_echo "$ac_cv_search_sched_yield" >&6; } -ac_res=$ac_cv_search_sched_yield +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_setproctitle" >&5 +$as_echo "$ac_cv_search_setproctitle" >&6; } +ac_res=$ac_cv_search_setproctitle if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -# Required for thread_test.c on Solaris 2.5: -# Other ports use it too (HP-UX) so test unconditionally -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gethostbyname_r" >&5 -$as_echo_n "checking for library containing gethostbyname_r... " >&6; } -if ${ac_cv_search_gethostbyname_r+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 +$as_echo_n "checking for library containing dlopen... 
" >&6; } +if ${ac_cv_search_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8303,16 +8405,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char gethostbyname_r (); +char dlopen (); int main () { -return gethostbyname_r (); +return dlopen (); ; return 0; } _ACEOF -for ac_lib in '' nsl; do +for ac_lib in '' dl; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8320,34 +8422,33 @@ for ac_lib in '' nsl; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_gethostbyname_r=$ac_res + ac_cv_search_dlopen=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_gethostbyname_r+:} false; then : + if ${ac_cv_search_dlopen+:} false; then : break fi done -if ${ac_cv_search_gethostbyname_r+:} false; then : +if ${ac_cv_search_dlopen+:} false; then : else - ac_cv_search_gethostbyname_r=no + ac_cv_search_dlopen=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gethostbyname_r" >&5 -$as_echo "$ac_cv_search_gethostbyname_r" >&6; } -ac_res=$ac_cv_search_gethostbyname_r +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 +$as_echo "$ac_cv_search_dlopen" >&6; } +ac_res=$ac_cv_search_dlopen if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -# Cygwin: -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shmget" >&5 -$as_echo_n "checking for library containing shmget... " >&6; } -if ${ac_cv_search_shmget+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing socket" >&5 +$as_echo_n "checking for library containing socket... " >&6; } +if ${ac_cv_search_socket+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8360,16 +8461,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char shmget (); +char socket (); int main () { -return shmget (); +return socket (); ; return 0; } _ACEOF -for ac_lib in '' cygipc; do +for ac_lib in '' socket ws2_32; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8377,49 +8478,37 @@ for ac_lib in '' cygipc; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_shmget=$ac_res + ac_cv_search_socket=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_shmget+:} false; then : + if ${ac_cv_search_socket+:} false; then : break fi done -if ${ac_cv_search_shmget+:} false; then : +if ${ac_cv_search_socket+:} false; then : else - ac_cv_search_shmget=no + ac_cv_search_socket=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shmget" >&5 -$as_echo "$ac_cv_search_shmget" >&6; } -ac_res=$ac_cv_search_shmget +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_socket" >&5 +$as_echo "$ac_cv_search_socket" >&6; } +ac_res=$ac_cv_search_socket if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi - -if test "$with_readline" = yes; then - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing readline" >&5 -$as_echo_n "checking for library containing readline... 
" >&6; } -if ${pgac_cv_check_readline+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shl_load" >&5 +$as_echo_n "checking for library containing shl_load... " >&6; } +if ${ac_cv_search_shl_load+:} false; then : $as_echo_n "(cached) " >&6 else - pgac_cv_check_readline=no -pgac_save_LIBS=$LIBS -if test x"$with_libedit_preferred" != x"yes" -then READLINE_ORDER="-lreadline -ledit" -else READLINE_ORDER="-ledit -lreadline" -fi -for pgac_rllib in $READLINE_ORDER ; do - for pgac_lib in "" " -ltermcap" " -lncurses" " -lcurses" ; do - LIBS="${pgac_rllib}${pgac_lib} $pgac_save_LIBS" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -8428,67 +8517,56 @@ for pgac_rllib in $READLINE_ORDER ; do #ifdef __cplusplus extern "C" #endif -char readline (); +char shl_load (); int main () { -return readline (); +return shl_load (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - - # Older NetBSD, OpenBSD, and Irix have a broken linker that does not - # recognize dependent libraries; assume curses is needed if we didn't - # find any dependency. - case $host_os in - netbsd* | openbsd* | irix*) - if test x"$pgac_lib" = x"" ; then - pgac_lib=" -lcurses" - fi ;; - esac - - pgac_cv_check_readline="${pgac_rllib}${pgac_lib}" - break - +for ac_lib in '' dld; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_shl_load=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - if test "$pgac_cv_check_readline" != no ; then - break - fi + conftest$ac_exeext + if ${ac_cv_search_shl_load+:} false; then : + break +fi done -LIBS=$pgac_save_LIBS +if ${ac_cv_search_shl_load+:} false; then : +else + ac_cv_search_shl_load=no fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_check_readline" >&5 -$as_echo "$pgac_cv_check_readline" >&6; } -if test "$pgac_cv_check_readline" != no ; then - LIBS="$pgac_cv_check_readline $LIBS" - -$as_echo "#define HAVE_LIBREADLINE 1" >>confdefs.h - +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shl_load" >&5 +$as_echo "$ac_cv_search_shl_load" >&6; } +ac_res=$ac_cv_search_shl_load +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - - if test x"$pgac_cv_check_readline" = x"no"; then - as_fn_error $? "readline library not found -If you have readline already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-readline to disable readline support." "$LINENO" 5 - fi fi -if test "$with_zlib" = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflate in -lz" >&5 -$as_echo_n "checking for inflate in -lz... " >&6; } -if ${ac_cv_lib_z_inflate+:} false; then : +# We only use libld in port/dynloader/aix.c +case $host_os in + aix*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing ldopen" >&5 +$as_echo_n "checking for library containing ldopen... " >&6; } +if ${ac_cv_search_ldopen+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lz $LIBS" + ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -8498,69 +8576,52 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char inflate (); +char ldopen (); int main () { -return inflate (); +return ldopen (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_z_inflate=yes -else - ac_cv_lib_z_inflate=no +for ac_lib in '' ld; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_ldopen=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + conftest$ac_exeext + if ${ac_cv_search_ldopen+:} false; then : + break fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_inflate" >&5 -$as_echo "$ac_cv_lib_z_inflate" >&6; } -if test "x$ac_cv_lib_z_inflate" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBZ 1 -_ACEOF - - LIBS="-lz $LIBS" +done +if ${ac_cv_search_ldopen+:} false; then : else - as_fn_error $? "zlib library not found -If you have zlib already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-zlib to disable zlib support." "$LINENO" 5 -fi - + ac_cv_search_ldopen=no fi - -if test "$enable_spinlocks" = yes; then - -$as_echo "#define HAVE_SPINLOCKS 1" >>confdefs.h - -else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: -*** Not using spinlocks will cause poor performance." >&5 -$as_echo "$as_me: WARNING: -*** Not using spinlocks will cause poor performance." >&2;} +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_ldopen" >&5 +$as_echo "$ac_cv_search_ldopen" >&6; } +ac_res=$ac_cv_search_ldopen +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" -if test "$enable_atomics" = yes; then - -$as_echo "#define HAVE_ATOMICS 1" >>confdefs.h - -else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: -*** Not using atomic operations will cause poor performance." >&5 -$as_echo "$as_me: WARNING: -*** Not using atomic operations will cause poor performance." >&2;} fi -if test "$with_gssapi" = yes ; then - if test "$PORTNAME" != "win32"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gss_init_sec_context" >&5 -$as_echo_n "checking for library containing gss_init_sec_context... " >&6; } -if ${ac_cv_search_gss_init_sec_context+:} false; then : + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing getopt_long" >&5 +$as_echo_n "checking for library containing getopt_long... 
" >&6; } +if ${ac_cv_search_getopt_long+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8573,16 +8634,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char gss_init_sec_context (); +char getopt_long (); int main () { -return gss_init_sec_context (); +return getopt_long (); ; return 0; } _ACEOF -for ac_lib in '' gssapi_krb5 gss 'gssapi -lkrb5 -lcrypto'; do +for ac_lib in '' getopt gnugetopt; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8590,46 +8651,36 @@ for ac_lib in '' gssapi_krb5 gss 'gssapi -lkrb5 -lcrypto'; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_gss_init_sec_context=$ac_res + ac_cv_search_getopt_long=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_gss_init_sec_context+:} false; then : + if ${ac_cv_search_getopt_long+:} false; then : break fi done -if ${ac_cv_search_gss_init_sec_context+:} false; then : +if ${ac_cv_search_getopt_long+:} false; then : else - ac_cv_search_gss_init_sec_context=no + ac_cv_search_getopt_long=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gss_init_sec_context" >&5 -$as_echo "$ac_cv_search_gss_init_sec_context" >&6; } -ac_res=$ac_cv_search_gss_init_sec_context +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_getopt_long" >&5 +$as_echo "$ac_cv_search_getopt_long" >&6; } +ac_res=$ac_cv_search_getopt_long if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" -else - as_fn_error $? "could not find function 'gss_init_sec_context' required for GSSAPI" "$LINENO" 5 -fi - - else - LIBS="$LIBS -lgssapi32" - fi fi -if test "$with_openssl" = yes ; then - if test "$PORTNAME" != "win32"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CRYPTO_new_ex_data in -lcrypto" >&5 -$as_echo_n "checking for CRYPTO_new_ex_data in -lcrypto... " >&6; } -if ${ac_cv_lib_crypto_CRYPTO_new_ex_data+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing crypt" >&5 +$as_echo_n "checking for library containing crypt... " >&6; } +if ${ac_cv_search_crypt+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lcrypto $LIBS" + ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -8639,44 +8690,53 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char CRYPTO_new_ex_data (); +char crypt (); int main () { -return CRYPTO_new_ex_data (); +return crypt (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_crypto_CRYPTO_new_ex_data=yes -else - ac_cv_lib_crypto_CRYPTO_new_ex_data=no +for ac_lib in '' crypt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_crypt=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + conftest$ac_exeext + if ${ac_cv_search_crypt+:} false; then : + break fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_CRYPTO_new_ex_data" >&5 -$as_echo "$ac_cv_lib_crypto_CRYPTO_new_ex_data" >&6; } -if test "x$ac_cv_lib_crypto_CRYPTO_new_ex_data" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBCRYPTO 1 -_ACEOF - - LIBS="-lcrypto $LIBS" +done +if ${ac_cv_search_crypt+:} false; then : else - as_fn_error $? "library 'crypto' is required for OpenSSL" "$LINENO" 5 + ac_cv_search_crypt=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_crypt" >&5 +$as_echo "$ac_cv_search_crypt" >&6; } +ac_res=$ac_cv_search_crypt +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SSL_library_init in -lssl" >&5 -$as_echo_n "checking for SSL_library_init in -lssl... " >&6; } -if ${ac_cv_lib_ssl_SSL_library_init+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shm_open" >&5 +$as_echo_n "checking for library containing shm_open... " >&6; } +if ${ac_cv_search_shm_open+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lssl $LIBS" + ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -8686,41 +8746,50 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char SSL_library_init (); +char shm_open (); int main () { -return SSL_library_init (); +return shm_open (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ssl_SSL_library_init=yes -else - ac_cv_lib_ssl_SSL_library_init=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_shm_open=$ac_res fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ssl_SSL_library_init" >&5 -$as_echo "$ac_cv_lib_ssl_SSL_library_init" >&6; } -if test "x$ac_cv_lib_ssl_SSL_library_init" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBSSL 1 -_ACEOF - - LIBS="-lssl $LIBS" +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_shm_open+:} false; then : + break +fi +done +if ${ac_cv_search_shm_open+:} false; then : else - as_fn_error $? 
"library 'ssl' is required for OpenSSL" "$LINENO" 5 + ac_cv_search_shm_open=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shm_open" >&5 +$as_echo "$ac_cv_search_shm_open" >&6; } +ac_res=$ac_cv_search_shm_open +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing CRYPTO_new_ex_data" >&5 -$as_echo_n "checking for library containing CRYPTO_new_ex_data... " >&6; } -if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shm_unlink" >&5 +$as_echo_n "checking for library containing shm_unlink... " >&6; } +if ${ac_cv_search_shm_unlink+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8733,16 +8802,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char CRYPTO_new_ex_data (); +char shm_unlink (); int main () { -return CRYPTO_new_ex_data (); +return shm_unlink (); ; return 0; } _ACEOF -for ac_lib in '' eay32 crypto; do +for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8750,35 +8819,34 @@ for ac_lib in '' eay32 crypto; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_CRYPTO_new_ex_data=$ac_res + ac_cv_search_shm_unlink=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : + if ${ac_cv_search_shm_unlink+:} false; then : break fi done -if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : +if ${ac_cv_search_shm_unlink+:} false; then : else - ac_cv_search_CRYPTO_new_ex_data=no + ac_cv_search_shm_unlink=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_CRYPTO_new_ex_data" >&5 -$as_echo "$ac_cv_search_CRYPTO_new_ex_data" >&6; } -ac_res=$ac_cv_search_CRYPTO_new_ex_data +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shm_unlink" >&5 +$as_echo "$ac_cv_search_shm_unlink" >&6; } +ac_res=$ac_cv_search_shm_unlink if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" -else - as_fn_error $? "library 'eay32' or 'crypto' is required for OpenSSL" "$LINENO" 5 fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing SSL_library_init" >&5 -$as_echo_n "checking for library containing SSL_library_init... " >&6; } -if ${ac_cv_search_SSL_library_init+:} false; then : +# Solaris: +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing fdatasync" >&5 +$as_echo_n "checking for library containing fdatasync... 
" >&6; } +if ${ac_cv_search_fdatasync+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8791,16 +8859,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char SSL_library_init (); +char fdatasync (); int main () { -return SSL_library_init (); +return fdatasync (); ; return 0; } _ACEOF -for ac_lib in '' ssleay32 ssl; do +for ac_lib in '' rt posix4; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8808,54 +8876,37 @@ for ac_lib in '' ssleay32 ssl; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_SSL_library_init=$ac_res + ac_cv_search_fdatasync=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_SSL_library_init+:} false; then : + if ${ac_cv_search_fdatasync+:} false; then : break fi done -if ${ac_cv_search_SSL_library_init+:} false; then : +if ${ac_cv_search_fdatasync+:} false; then : else - ac_cv_search_SSL_library_init=no + ac_cv_search_fdatasync=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_SSL_library_init" >&5 -$as_echo "$ac_cv_search_SSL_library_init" >&6; } -ac_res=$ac_cv_search_SSL_library_init +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_fdatasync" >&5 +$as_echo "$ac_cv_search_fdatasync" >&6; } +ac_res=$ac_cv_search_fdatasync if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" -else - as_fn_error $? "library 'ssleay32' or 'ssl' is required for OpenSSL" "$LINENO" 5 -fi - - fi - for ac_func in SSL_get_current_compression -do : - ac_fn_c_check_func "$LINENO" "SSL_get_current_compression" "ac_cv_func_SSL_get_current_compression" -if test "x$ac_cv_func_SSL_get_current_compression" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_SSL_GET_CURRENT_COMPRESSION 1 -_ACEOF - -fi -done - fi -if test "$with_pam" = yes ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pam_start in -lpam" >&5 -$as_echo_n "checking for pam_start in -lpam... " >&6; } -if ${ac_cv_lib_pam_pam_start+:} false; then : +# Required for thread_test.c on Solaris +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing sched_yield" >&5 +$as_echo_n "checking for library containing sched_yield... " >&6; } +if ${ac_cv_search_sched_yield+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lpam $LIBS" + ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -8865,47 +8916,55 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char pam_start (); +char sched_yield (); int main () { -return pam_start (); +return sched_yield (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_pam_pam_start=yes -else - ac_cv_lib_pam_pam_start=no +for ac_lib in '' rt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_sched_yield=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + conftest$ac_exeext + if ${ac_cv_search_sched_yield+:} false; then : + break fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pam_pam_start" >&5 -$as_echo "$ac_cv_lib_pam_pam_start" >&6; } -if test "x$ac_cv_lib_pam_pam_start" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBPAM 1 -_ACEOF - - LIBS="-lpam $LIBS" +done +if ${ac_cv_search_sched_yield+:} false; then : else - as_fn_error $? "library 'pam' is required for PAM" "$LINENO" 5 + ac_cv_search_sched_yield=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_sched_yield" >&5 +$as_echo "$ac_cv_search_sched_yield" >&6; } +ac_res=$ac_cv_search_sched_yield +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -if test "$with_libxml" = yes ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xmlSaveToBuffer in -lxml2" >&5 -$as_echo_n "checking for xmlSaveToBuffer in -lxml2... " >&6; } -if ${ac_cv_lib_xml2_xmlSaveToBuffer+:} false; then : +# Required for thread_test.c on Solaris 2.5: +# Other ports use it too (HP-UX) so test unconditionally +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gethostbyname_r" >&5 +$as_echo_n "checking for library containing gethostbyname_r... " >&6; } +if ${ac_cv_search_gethostbyname_r+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lxml2 $LIBS" + ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -8915,47 +8974,54 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char xmlSaveToBuffer (); +char gethostbyname_r (); int main () { -return xmlSaveToBuffer (); +return gethostbyname_r (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_xml2_xmlSaveToBuffer=yes -else - ac_cv_lib_xml2_xmlSaveToBuffer=no +for ac_lib in '' nsl; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_gethostbyname_r=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + conftest$ac_exeext + if ${ac_cv_search_gethostbyname_r+:} false; then : + break fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xml2_xmlSaveToBuffer" >&5 -$as_echo "$ac_cv_lib_xml2_xmlSaveToBuffer" >&6; } -if test "x$ac_cv_lib_xml2_xmlSaveToBuffer" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBXML2 1 -_ACEOF - - LIBS="-lxml2 $LIBS" +done +if ${ac_cv_search_gethostbyname_r+:} false; then : else - as_fn_error $? 
"library 'xml2' (version >= 2.6.23) is required for XML support" "$LINENO" 5 + ac_cv_search_gethostbyname_r=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gethostbyname_r" >&5 +$as_echo "$ac_cv_search_gethostbyname_r" >&6; } +ac_res=$ac_cv_search_gethostbyname_r +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -if test "$with_libxslt" = yes ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xsltCleanupGlobals in -lxslt" >&5 -$as_echo_n "checking for xsltCleanupGlobals in -lxslt... " >&6; } -if ${ac_cv_lib_xslt_xsltCleanupGlobals+:} false; then : +# Cygwin: +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shmget" >&5 +$as_echo_n "checking for library containing shmget... " >&6; } +if ${ac_cv_search_shmget+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lxslt $LIBS" + ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -8965,49 +9031,66 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char xsltCleanupGlobals (); +char shmget (); int main () { -return xsltCleanupGlobals (); +return shmget (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_xslt_xsltCleanupGlobals=yes -else - ac_cv_lib_xslt_xsltCleanupGlobals=no +for ac_lib in '' cygipc; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_shmget=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + conftest$ac_exeext + if ${ac_cv_search_shmget+:} false; then : + break fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xslt_xsltCleanupGlobals" >&5 -$as_echo "$ac_cv_lib_xslt_xsltCleanupGlobals" >&6; } -if test "x$ac_cv_lib_xslt_xsltCleanupGlobals" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBXSLT 1 -_ACEOF - - LIBS="-lxslt $LIBS" +done +if ${ac_cv_search_shmget+:} false; then : else - as_fn_error $? "library 'xslt' is required for XSLT support" "$LINENO" 5 + ac_cv_search_shmget=no fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shmget" >&5 +$as_echo "$ac_cv_search_shmget" >&6; } +ac_res=$ac_cv_search_shmget +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -# for contrib/sepgsql -if test "$with_selinux" = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for security_compute_create_name in -lselinux" >&5 -$as_echo_n "checking for security_compute_create_name in -lselinux... " >&6; } -if ${ac_cv_lib_selinux_security_compute_create_name+:} false; then : + +if test "$with_readline" = yes; then + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing readline" >&5 +$as_echo_n "checking for library containing readline... 
" >&6; } +if ${pgac_cv_check_readline+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lselinux $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + pgac_cv_check_readline=no +pgac_save_LIBS=$LIBS +if test x"$with_libedit_preferred" != x"yes" +then READLINE_ORDER="-lreadline -ledit" +else READLINE_ORDER="-ledit -lreadline" +fi +for pgac_rllib in $READLINE_ORDER ; do + for pgac_lib in "" " -ltermcap" " -lncurses" " -lcurses" ; do + LIBS="${pgac_rllib}${pgac_lib} $pgac_save_LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -9016,62 +9099,67 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char security_compute_create_name (); +char readline (); int main () { -return security_compute_create_name (); +return readline (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_selinux_security_compute_create_name=yes -else - ac_cv_lib_selinux_security_compute_create_name=no + + # Older NetBSD, OpenBSD, and Irix have a broken linker that does not + # recognize dependent libraries; assume curses is needed if we didn't + # find any dependency. + case $host_os in + netbsd* | openbsd* | irix*) + if test x"$pgac_lib" = x"" ; then + pgac_lib=" -lcurses" + fi ;; + esac + + pgac_cv_check_readline="${pgac_rllib}${pgac_lib}" + break + fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + done + if test "$pgac_cv_check_readline" != no ; then + break + fi +done +LIBS=$pgac_save_LIBS + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_selinux_security_compute_create_name" >&5 -$as_echo "$ac_cv_lib_selinux_security_compute_create_name" >&6; } -if test "x$ac_cv_lib_selinux_security_compute_create_name" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBSELINUX 1 -_ACEOF +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_check_readline" >&5 +$as_echo "$pgac_cv_check_readline" >&6; } +if test "$pgac_cv_check_readline" != no ; then + LIBS="$pgac_cv_check_readline $LIBS" - LIBS="-lselinux $LIBS" +$as_echo "#define HAVE_LIBREADLINE 1" >>confdefs.h -else - as_fn_error $? "library 'libselinux', version 2.1.10 or newer, is required for SELinux support" "$LINENO" 5 fi -fi -# for contrib/uuid-ossp -if test "$with_uuid" = bsd ; then - # On BSD, the UUID functions are in libc - ac_fn_c_check_func "$LINENO" "uuid_to_string" "ac_cv_func_uuid_to_string" -if test "x$ac_cv_func_uuid_to_string" = xyes; then : - UUID_LIBS="" -else - as_fn_error $? "BSD UUID functions are not present" "$LINENO" 5 + if test x"$pgac_cv_check_readline" = x"no"; then + as_fn_error $? "readline library not found +If you have readline already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-readline to disable readline support." "$LINENO" 5 + fi fi -elif test "$with_uuid" = e2fs ; then - # On OS X, the UUID functions are in libc - ac_fn_c_check_func "$LINENO" "uuid_generate" "ac_cv_func_uuid_generate" -if test "x$ac_cv_func_uuid_generate" = xyes; then : - UUID_LIBS="" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_generate in -luuid" >&5 -$as_echo_n "checking for uuid_generate in -luuid... 
" >&6; } -if ${ac_cv_lib_uuid_uuid_generate+:} false; then : +if test "$with_zlib" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflate in -lz" >&5 +$as_echo_n "checking for inflate in -lz... " >&6; } +if ${ac_cv_lib_z_inflate+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS -LIBS="-luuid $LIBS" +LIBS="-lz $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -9081,42 +9169,72 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char uuid_generate (); +char inflate (); int main () { -return uuid_generate (); +return inflate (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_uuid_uuid_generate=yes + ac_cv_lib_z_inflate=yes else - ac_cv_lib_uuid_uuid_generate=no + ac_cv_lib_z_inflate=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uuid_uuid_generate" >&5 -$as_echo "$ac_cv_lib_uuid_uuid_generate" >&6; } -if test "x$ac_cv_lib_uuid_uuid_generate" = xyes; then : - UUID_LIBS="-luuid" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_inflate" >&5 +$as_echo "$ac_cv_lib_z_inflate" >&6; } +if test "x$ac_cv_lib_z_inflate" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBZ 1 +_ACEOF + + LIBS="-lz $LIBS" + else - as_fn_error $? "library 'uuid' is required for E2FS UUID" "$LINENO" 5 + as_fn_error $? "zlib library not found +If you have zlib already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-zlib to disable zlib support." "$LINENO" 5 fi fi -elif test "$with_uuid" = ossp ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_export in -lossp-uuid" >&5 -$as_echo_n "checking for uuid_export in -lossp-uuid... " >&6; } -if ${ac_cv_lib_ossp_uuid_uuid_export+:} false; then : +if test "$enable_spinlocks" = yes; then + +$as_echo "#define HAVE_SPINLOCKS 1" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: +*** Not using spinlocks will cause poor performance." >&5 +$as_echo "$as_me: WARNING: +*** Not using spinlocks will cause poor performance." >&2;} +fi + +if test "$enable_atomics" = yes; then + +$as_echo "#define HAVE_ATOMICS 1" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: +*** Not using atomic operations will cause poor performance." >&5 +$as_echo "$as_me: WARNING: +*** Not using atomic operations will cause poor performance." >&2;} +fi + +if test "$with_gssapi" = yes ; then + if test "$PORTNAME" != "win32"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gss_init_sec_context" >&5 +$as_echo_n "checking for library containing gss_init_sec_context... " >&6; } +if ${ac_cv_search_gss_init_sec_context+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lossp-uuid $LIBS" + ac_func_search_save_LIBS=$LIBS cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -9126,36 +9244,63 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char uuid_export (); +char gss_init_sec_context (); int main () { -return uuid_export (); +return gss_init_sec_context (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ossp_uuid_uuid_export=yes -else - ac_cv_lib_ossp_uuid_uuid_export=no +for ac_lib in '' gssapi_krb5 gss 'gssapi -lkrb5 -lcrypto'; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_gss_init_sec_context=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + conftest$ac_exeext + if ${ac_cv_search_gss_init_sec_context+:} false; then : + break fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ossp_uuid_uuid_export" >&5 -$as_echo "$ac_cv_lib_ossp_uuid_uuid_export" >&6; } -if test "x$ac_cv_lib_ossp_uuid_uuid_export" = xyes; then : - UUID_LIBS="-lossp-uuid" +done +if ${ac_cv_search_gss_init_sec_context+:} false; then : + else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_export in -luuid" >&5 -$as_echo_n "checking for uuid_export in -luuid... " >&6; } -if ${ac_cv_lib_uuid_uuid_export+:} false; then : + ac_cv_search_gss_init_sec_context=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gss_init_sec_context" >&5 +$as_echo "$ac_cv_search_gss_init_sec_context" >&6; } +ac_res=$ac_cv_search_gss_init_sec_context +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + +else + as_fn_error $? "could not find function 'gss_init_sec_context' required for GSSAPI" "$LINENO" 5 +fi + + else + LIBS="$LIBS -lgssapi32" + fi +fi + +if test "$with_openssl" = yes ; then + if test "$PORTNAME" != "win32"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CRYPTO_new_ex_data in -lcrypto" >&5 +$as_echo_n "checking for CRYPTO_new_ex_data in -lcrypto... " >&6; } +if ${ac_cv_lib_crypto_CRYPTO_new_ex_data+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS -LIBS="-luuid $LIBS" +LIBS="-lcrypto $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -9165,685 +9310,916 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char uuid_export (); +char CRYPTO_new_ex_data (); int main () { -return uuid_export (); +return CRYPTO_new_ex_data (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_uuid_uuid_export=yes + ac_cv_lib_crypto_CRYPTO_new_ex_data=yes else - ac_cv_lib_uuid_uuid_export=no + ac_cv_lib_crypto_CRYPTO_new_ex_data=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uuid_uuid_export" >&5 -$as_echo "$ac_cv_lib_uuid_uuid_export" >&6; } -if test "x$ac_cv_lib_uuid_uuid_export" = xyes; then : - UUID_LIBS="-luuid" -else - as_fn_error $? "library 'ossp-uuid' or 'uuid' is required for OSSP UUID" "$LINENO" 5 -fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_CRYPTO_new_ex_data" >&5 +$as_echo "$ac_cv_lib_crypto_CRYPTO_new_ex_data" >&6; } +if test "x$ac_cv_lib_crypto_CRYPTO_new_ex_data" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBCRYPTO 1 +_ACEOF -fi + LIBS="-lcrypto $LIBS" +else + as_fn_error $? 
"library 'crypto' is required for OpenSSL" "$LINENO" 5 fi - - -## -## Header files -## - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if ${ac_cv_header_stdc+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SSL_library_init in -lssl" >&5 +$as_echo_n "checking for SSL_library_init in -lssl... " >&6; } +if ${ac_cv_lib_ssl_SSL_library_init+:} false; then : $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_check_lib_save_LIBS=$LIBS +LIBS="-lssl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -#include -#include -#include +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char SSL_library_init (); int main () { - +return SSL_library_init (); ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ssl_SSL_library_init=yes else - ac_cv_header_stdc=no + ac_cv_lib_ssl_SSL_library_init=no fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ssl_SSL_library_init" >&5 +$as_echo "$ac_cv_lib_ssl_SSL_library_init" >&6; } +if test "x$ac_cv_lib_ssl_SSL_library_init" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBSSL 1 _ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : -else - ac_cv_header_stdc=no -fi -rm -f conftest* + LIBS="-lssl $LIBS" +else + as_fn_error $? "library 'ssl' is required for OpenSSL" "$LINENO" 5 fi -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing CRYPTO_new_ex_data" >&5 +$as_echo_n "checking for library containing CRYPTO_new_ex_data... " >&6; } +if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char CRYPTO_new_ex_data (); +int +main () +{ +return CRYPTO_new_ex_data (); + ; + return 0; +} _ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : +for ac_lib in '' eay32 crypto; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_CRYPTO_new_ex_data=$ac_res +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : + break +fi +done +if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : else - ac_cv_header_stdc=no + ac_cv_search_CRYPTO_new_ex_data=no fi -rm -f conftest* +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_CRYPTO_new_ex_data" >&5 +$as_echo "$ac_cv_search_CRYPTO_new_ex_data" >&6; } +ac_res=$ac_cv_search_CRYPTO_new_ex_data +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +else + as_fn_error $? "library 'eay32' or 'crypto' is required for OpenSSL" "$LINENO" 5 fi -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing SSL_library_init" >&5 +$as_echo_n "checking for library containing SSL_library_init... " >&6; } +if ${ac_cv_search_SSL_library_init+:} false; then : + $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char SSL_library_init (); int main () { - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; +return SSL_library_init (); + ; return 0; } _ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no +for ac_lib in '' ssleay32 ssl; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_SSL_library_init=$ac_res fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_SSL_library_init+:} false; then : + break fi +done +if ${ac_cv_search_SSL_library_init+:} false; then : +else + ac_cv_search_SSL_library_init=no fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_SSL_library_init" >&5 +$as_echo "$ac_cv_search_SSL_library_init" >&6; } +ac_res=$ac_cv_search_SSL_library_init +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +else + as_fn_error $? "library 'ssleay32' or 'ssl' is required for OpenSSL" "$LINENO" 5 fi -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h + fi + for ac_func in SSL_get_current_compression do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + ac_fn_c_check_func "$LINENO" "SSL_get_current_compression" "ac_cv_func_SSL_get_current_compression" +if test "x$ac_cv_func_SSL_get_current_compression" = xyes; then : cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +#define HAVE_SSL_GET_CURRENT_COMPRESSION 1 _ACEOF fi - done +fi -for ac_header in atomic.h crypt.h dld.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h pwd.h sys/ioctl.h sys/ipc.h sys/poll.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/socket.h sys/sockio.h sys/tas.h sys/time.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : +if test "$with_pam" = yes ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pam_start in -lpam" >&5 +$as_echo_n "checking for pam_start in -lpam... " >&6; } +if ${ac_cv_lib_pam_pam_start+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpam $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char pam_start (); +int +main () +{ +return pam_start (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_pam_pam_start=yes +else + ac_cv_lib_pam_pam_start=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pam_pam_start" >&5 +$as_echo "$ac_cv_lib_pam_pam_start" >&6; } +if test "x$ac_cv_lib_pam_pam_start" = xyes; then : cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +#define HAVE_LIBPAM 1 _ACEOF + LIBS="-lpam $LIBS" + +else + as_fn_error $? "library 'pam' is required for PAM" "$LINENO" 5 fi -done +fi +if test "$with_libxml" = yes ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xmlSaveToBuffer in -lxml2" >&5 +$as_echo_n "checking for xmlSaveToBuffer in -lxml2... " >&6; } +if ${ac_cv_lib_xml2_xmlSaveToBuffer+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lxml2 $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ -# On BSD, test for net/if.h will fail unless sys/socket.h -# is included first. -for ac_header in net/if.h -do : - ac_fn_c_check_header_compile "$LINENO" "net/if.h" "ac_cv_header_net_if_h" "$ac_includes_default -#ifdef HAVE_SYS_SOCKET_H -#include +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" #endif - -" -if test "x$ac_cv_header_net_if_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_NET_IF_H 1 +char xmlSaveToBuffer (); +int +main () +{ +return xmlSaveToBuffer (); + ; + return 0; +} _ACEOF - +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_xml2_xmlSaveToBuffer=yes +else + ac_cv_lib_xml2_xmlSaveToBuffer=no fi - -done - - -# On OpenBSD, test for sys/ucred.h will fail unless sys/param.h -# is included first. -for ac_header in sys/ucred.h -do : - ac_fn_c_check_header_compile "$LINENO" "sys/ucred.h" "ac_cv_header_sys_ucred_h" "$ac_includes_default -#include - -" -if test "x$ac_cv_header_sys_ucred_h" = xyes; then : +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xml2_xmlSaveToBuffer" >&5 +$as_echo "$ac_cv_lib_xml2_xmlSaveToBuffer" >&6; } +if test "x$ac_cv_lib_xml2_xmlSaveToBuffer" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_SYS_UCRED_H 1 +#define HAVE_LIBXML2 1 _ACEOF -fi - -done - + LIBS="-lxml2 $LIBS" -# At least on IRIX, test for netinet/tcp.h will fail unless -# netinet/in.h is included first. -for ac_header in netinet/in.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "netinet/in.h" "ac_cv_header_netinet_in_h" "$ac_includes_default" -if test "x$ac_cv_header_netinet_in_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_NETINET_IN_H 1 -_ACEOF +else + as_fn_error $? "library 'xml2' (version >= 2.6.23) is required for XML support" "$LINENO" 5 +fi fi -done +if test "$with_libxslt" = yes ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xsltCleanupGlobals in -lxslt" >&5 +$as_echo_n "checking for xsltCleanupGlobals in -lxslt... 
" >&6; } +if ${ac_cv_lib_xslt_xsltCleanupGlobals+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lxslt $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ -for ac_header in netinet/tcp.h -do : - ac_fn_c_check_header_compile "$LINENO" "netinet/tcp.h" "ac_cv_header_netinet_tcp_h" "$ac_includes_default -#ifdef HAVE_NETINET_IN_H -#include +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" #endif - -" -if test "x$ac_cv_header_netinet_tcp_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_NETINET_TCP_H 1 +char xsltCleanupGlobals (); +int +main () +{ +return xsltCleanupGlobals (); + ; + return 0; +} _ACEOF - +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_xslt_xsltCleanupGlobals=yes +else + ac_cv_lib_xslt_xsltCleanupGlobals=no fi - -done - - -if expr x"$pgac_cv_check_readline" : 'x-lreadline' >/dev/null ; then - for ac_header in readline/readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline/readline.h" "ac_cv_header_readline_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_readline_h" = xyes; then : +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xslt_xsltCleanupGlobals" >&5 +$as_echo "$ac_cv_lib_xslt_xsltCleanupGlobals" >&6; } +if test "x$ac_cv_lib_xslt_xsltCleanupGlobals" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_READLINE_H 1 +#define HAVE_LIBXSLT 1 _ACEOF -else - for ac_header in readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_H 1 -_ACEOF + LIBS="-lxslt $LIBS" else - as_fn_error $? "readline header not found -If you have readline already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-readline to disable readline support." "$LINENO" 5 + as_fn_error $? "library 'xslt' is required for XSLT support" "$LINENO" 5 fi -done - fi -done +# Note: We can test for libldap_r only after we know PTHREAD_LIBS +if test "$with_ldap" = yes ; then + _LIBS="$LIBS" + if test "$PORTNAME" != "win32"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_bind in -lldap" >&5 +$as_echo_n "checking for ldap_bind in -lldap... " >&6; } +if ${ac_cv_lib_ldap_ldap_bind+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lldap $EXTRA_LDAP_LIBS $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ - for ac_header in readline/history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline/history.h" "ac_cv_header_readline_history_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_history_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_HISTORY_H 1 +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char ldap_bind (); +int +main () +{ +return ldap_bind (); + ; + return 0; +} _ACEOF - +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ldap_ldap_bind=yes else - for ac_header in history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "history.h" "ac_cv_header_history_h" "$ac_includes_default" -if test "x$ac_cv_header_history_h" = xyes; then : + ac_cv_lib_ldap_ldap_bind=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_ldap_bind" >&5 +$as_echo "$ac_cv_lib_ldap_ldap_bind" >&6; } +if test "x$ac_cv_lib_ldap_ldap_bind" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_HISTORY_H 1 +#define HAVE_LIBLDAP 1 _ACEOF + LIBS="-lldap $LIBS" + else - as_fn_error $? "history header not found -If you have readline already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-readline to disable readline support." "$LINENO" 5 + as_fn_error $? "library 'ldap' is required for LDAP" "$LINENO" 5 fi -done + LDAP_LIBS_BE="-lldap $EXTRA_LDAP_LIBS" + if test "$enable_thread_safety" = yes; then + # on some platforms ldap_r fails to link without PTHREAD_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_simple_bind in -lldap_r" >&5 +$as_echo_n "checking for ldap_simple_bind in -lldap_r... " >&6; } +if ${ac_cv_lib_ldap_r_ldap_simple_bind+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lldap_r $PTHREAD_CFLAGS $PTHREAD_LIBS $EXTRA_LDAP_LIBS $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char ldap_simple_bind (); +int +main () +{ +return ldap_simple_bind (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ldap_r_ldap_simple_bind=yes +else + ac_cv_lib_ldap_r_ldap_simple_bind=no fi - -done - +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_r_ldap_simple_bind" >&5 +$as_echo "$ac_cv_lib_ldap_r_ldap_simple_bind" >&6; } +if test "x$ac_cv_lib_ldap_r_ldap_simple_bind" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBLDAP_R 1 +_ACEOF -if expr x"$pgac_cv_check_readline" : 'x-ledit' >/dev/null ; then -# Some installations of libedit usurp /usr/include/readline/, which seems -# bad practice, since in combined installations readline will have its headers -# there. We might have to resort to AC_EGREP checks to make sure we found -# the proper header... - for ac_header in editline/readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "editline/readline.h" "ac_cv_header_editline_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_editline_readline_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_EDITLINE_READLINE_H 1 -_ACEOF + LIBS="-lldap_r $LIBS" else - for ac_header in readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_H 1 -_ACEOF + as_fn_error $? 
"library 'ldap_r' is required for LDAP" "$LINENO" 5 +fi + LDAP_LIBS_FE="-lldap_r $EXTRA_LDAP_LIBS" + else + LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_bind in -lwldap32" >&5 +$as_echo_n "checking for ldap_bind in -lwldap32... " >&6; } +if ${ac_cv_lib_wldap32_ldap_bind+:} false; then : + $as_echo_n "(cached) " >&6 else - for ac_header in readline/readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline/readline.h" "ac_cv_header_readline_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_readline_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_READLINE_H 1 -_ACEOF + ac_check_lib_save_LIBS=$LIBS +LIBS="-lwldap32 $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char ldap_bind (); +int +main () +{ +return ldap_bind (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_wldap32_ldap_bind=yes else - as_fn_error $? "readline header not found -If you have libedit already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-readline to disable libedit support." "$LINENO" 5 -fi - -done - + ac_cv_lib_wldap32_ldap_bind=no fi - -done - +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi - -done - -# Note: in a libedit installation, history.h is sometimes a dummy, and may -# not be there at all. Hence, don't complain if not found. We must check -# though, since in yet other versions it is an independent header. - for ac_header in editline/history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "editline/history.h" "ac_cv_header_editline_history_h" "$ac_includes_default" -if test "x$ac_cv_header_editline_history_h" = xyes; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_wldap32_ldap_bind" >&5 +$as_echo "$ac_cv_lib_wldap32_ldap_bind" >&6; } +if test "x$ac_cv_lib_wldap32_ldap_bind" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_EDITLINE_HISTORY_H 1 +#define HAVE_LIBWLDAP32 1 _ACEOF -else - for ac_header in history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "history.h" "ac_cv_header_history_h" "$ac_includes_default" -if test "x$ac_cv_header_history_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_HISTORY_H 1 -_ACEOF + LIBS="-lwldap32 $LIBS" else - for ac_header in readline/history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline/history.h" "ac_cv_header_readline_history_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_history_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_HISTORY_H 1 -_ACEOF - -fi - -done - + as_fn_error $? "library 'wldap32' is required for LDAP" "$LINENO" 5 fi -done - + LDAP_LIBS_FE="-lwldap32" + LDAP_LIBS_BE="-lwldap32" + fi + LIBS="$_LIBS" fi -done -fi -if test "$with_zlib" = yes; then - ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" -if test "x$ac_cv_header_zlib_h" = xyes; then : +# for contrib/sepgsql +if test "$with_selinux" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for security_compute_create_name in -lselinux" >&5 +$as_echo_n "checking for security_compute_create_name in -lselinux... 
" >&6; } +if ${ac_cv_lib_selinux_security_compute_create_name+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lselinux $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char security_compute_create_name (); +int +main () +{ +return security_compute_create_name (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_selinux_security_compute_create_name=yes else - as_fn_error $? "zlib header not found -If you have zlib already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-zlib to disable zlib support." "$LINENO" 5 + ac_cv_lib_selinux_security_compute_create_name=no fi - - +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi - -if test "$with_gssapi" = yes ; then - for ac_header in gssapi/gssapi.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "gssapi/gssapi.h" "ac_cv_header_gssapi_gssapi_h" "$ac_includes_default" -if test "x$ac_cv_header_gssapi_gssapi_h" = xyes; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_selinux_security_compute_create_name" >&5 +$as_echo "$ac_cv_lib_selinux_security_compute_create_name" >&6; } +if test "x$ac_cv_lib_selinux_security_compute_create_name" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_GSSAPI_GSSAPI_H 1 +#define HAVE_LIBSELINUX 1 _ACEOF -else - for ac_header in gssapi.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "gssapi.h" "ac_cv_header_gssapi_h" "$ac_includes_default" -if test "x$ac_cv_header_gssapi_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_GSSAPI_H 1 -_ACEOF + LIBS="-lselinux $LIBS" else - as_fn_error $? "gssapi.h header file is required for GSSAPI" "$LINENO" 5 + as_fn_error $? "library 'libselinux', version 2.1.10 or newer, is required for SELinux support" "$LINENO" 5 fi -done - fi -done - +# for contrib/uuid-ossp +if test "$with_uuid" = bsd ; then + # On BSD, the UUID functions are in libc + ac_fn_c_check_func "$LINENO" "uuid_to_string" "ac_cv_func_uuid_to_string" +if test "x$ac_cv_func_uuid_to_string" = xyes; then : + UUID_LIBS="" +else + as_fn_error $? "BSD UUID functions are not present" "$LINENO" 5 fi -if test "$with_openssl" = yes ; then - ac_fn_c_check_header_mongrel "$LINENO" "openssl/ssl.h" "ac_cv_header_openssl_ssl_h" "$ac_includes_default" -if test "x$ac_cv_header_openssl_ssl_h" = xyes; then : +elif test "$with_uuid" = e2fs ; then + # On OS X, the UUID functions are in libc + ac_fn_c_check_func "$LINENO" "uuid_generate" "ac_cv_func_uuid_generate" +if test "x$ac_cv_func_uuid_generate" = xyes; then : + UUID_LIBS="" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_generate in -luuid" >&5 +$as_echo_n "checking for uuid_generate in -luuid... " >&6; } +if ${ac_cv_lib_uuid_uuid_generate+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-luuid $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char uuid_generate (); +int +main () +{ +return uuid_generate (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_uuid_uuid_generate=yes else - as_fn_error $? "header file is required for OpenSSL" "$LINENO" 5 + ac_cv_lib_uuid_uuid_generate=no fi - - - ac_fn_c_check_header_mongrel "$LINENO" "openssl/err.h" "ac_cv_header_openssl_err_h" "$ac_includes_default" -if test "x$ac_cv_header_openssl_err_h" = xyes; then : - +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uuid_uuid_generate" >&5 +$as_echo "$ac_cv_lib_uuid_uuid_generate" >&6; } +if test "x$ac_cv_lib_uuid_uuid_generate" = xyes; then : + UUID_LIBS="-luuid" else - as_fn_error $? "header file is required for OpenSSL" "$LINENO" 5 + as_fn_error $? "library 'uuid' is required for E2FS UUID" "$LINENO" 5 fi - fi -if test "$with_pam" = yes ; then - for ac_header in security/pam_appl.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "security/pam_appl.h" "ac_cv_header_security_pam_appl_h" "$ac_includes_default" -if test "x$ac_cv_header_security_pam_appl_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_SECURITY_PAM_APPL_H 1 -_ACEOF - +elif test "$with_uuid" = ossp ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_export in -lossp-uuid" >&5 +$as_echo_n "checking for uuid_export in -lossp-uuid... " >&6; } +if ${ac_cv_lib_ossp_uuid_uuid_export+:} false; then : + $as_echo_n "(cached) " >&6 else - for ac_header in pam/pam_appl.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "pam/pam_appl.h" "ac_cv_header_pam_pam_appl_h" "$ac_includes_default" -if test "x$ac_cv_header_pam_pam_appl_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_PAM_PAM_APPL_H 1 -_ACEOF + ac_check_lib_save_LIBS=$LIBS +LIBS="-lossp-uuid $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char uuid_export (); +int +main () +{ +return uuid_export (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ossp_uuid_uuid_export=yes else - as_fn_error $? "header file or is required for PAM." "$LINENO" 5 + ac_cv_lib_ossp_uuid_uuid_export=no fi - -done - +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ossp_uuid_uuid_export" >&5 +$as_echo "$ac_cv_lib_ossp_uuid_uuid_export" >&6; } +if test "x$ac_cv_lib_ossp_uuid_uuid_export" = xyes; then : + UUID_LIBS="-lossp-uuid" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_export in -luuid" >&5 +$as_echo_n "checking for uuid_export in -luuid... " >&6; } +if ${ac_cv_lib_uuid_uuid_export+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-luuid $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ -done - +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char uuid_export (); +int +main () +{ +return uuid_export (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_uuid_uuid_export=yes +else + ac_cv_lib_uuid_uuid_export=no fi - -if test "$with_libxml" = yes ; then - ac_fn_c_check_header_mongrel "$LINENO" "libxml/parser.h" "ac_cv_header_libxml_parser_h" "$ac_includes_default" -if test "x$ac_cv_header_libxml_parser_h" = xyes; then : - +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uuid_uuid_export" >&5 +$as_echo "$ac_cv_lib_uuid_uuid_export" >&6; } +if test "x$ac_cv_lib_uuid_uuid_export" = xyes; then : + UUID_LIBS="-luuid" else - as_fn_error $? "header file is required for XML support" "$LINENO" 5 + as_fn_error $? "library 'ossp-uuid' or 'uuid' is required for OSSP UUID" "$LINENO" 5 fi - fi -if test "$with_libxslt" = yes ; then - ac_fn_c_check_header_mongrel "$LINENO" "libxslt/xslt.h" "ac_cv_header_libxslt_xslt_h" "$ac_includes_default" -if test "x$ac_cv_header_libxslt_xslt_h" = xyes; then : - -else - as_fn_error $? "header file is required for XSLT support" "$LINENO" 5 fi -fi -if test "$with_ldap" = yes ; then - if test "$PORTNAME" != "win32"; then - for ac_header in ldap.h +## +## Header files +## + +for ac_header in atomic.h crypt.h dld.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h pwd.h sys/ioctl.h sys/ipc.h sys/poll.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/socket.h sys/sockio.h sys/tas.h sys/time.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h do : - ac_fn_c_check_header_mongrel "$LINENO" "ldap.h" "ac_cv_header_ldap_h" "$ac_includes_default" -if test "x$ac_cv_header_ldap_h" = xyes; then : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF -#define HAVE_LDAP_H 1 +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF -else - as_fn_error $? "header file is required for LDAP" "$LINENO" 5 fi done - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for compatible LDAP implementation" >&5 -$as_echo_n "checking for compatible LDAP implementation... " >&6; } -if ${pgac_cv_ldap_safe+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -#if !defined(LDAP_VENDOR_VERSION) || \ - (defined(LDAP_API_FEATURE_X_OPENLDAP) && \ - LDAP_VENDOR_VERSION >= 20424 && LDAP_VENDOR_VERSION <= 20431) -choke me + +# On BSD, test for net/if.h will fail unless sys/socket.h +# is included first. 
+for ac_header in net/if.h +do : + ac_fn_c_check_header_compile "$LINENO" "net/if.h" "ac_cv_header_net_if_h" "$ac_includes_default +#ifdef HAVE_SYS_SOCKET_H +#include #endif -int -main () -{ - ; - return 0; -} +" +if test "x$ac_cv_header_net_if_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_NET_IF_H 1 _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_ldap_safe=yes -else - pgac_cv_ldap_safe=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_ldap_safe" >&5 -$as_echo "$pgac_cv_ldap_safe" >&6; } -if test "$pgac_cv_ldap_safe" != yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: -*** With OpenLDAP versions 2.4.24 through 2.4.31, inclusive, each backend -*** process that loads libpq (via WAL receiver, dblink, or postgres_fdw) and -*** also uses LDAP will crash on exit." >&5 -$as_echo "$as_me: WARNING: -*** With OpenLDAP versions 2.4.24 through 2.4.31, inclusive, each backend -*** process that loads libpq (via WAL receiver, dblink, or postgres_fdw) and -*** also uses LDAP will crash on exit." >&2;} fi - else - for ac_header in winldap.h + +done + + +# On OpenBSD, test for sys/ucred.h will fail unless sys/param.h +# is included first. +for ac_header in sys/ucred.h do : - ac_fn_c_check_header_compile "$LINENO" "winldap.h" "ac_cv_header_winldap_h" "$ac_includes_default -#include + ac_fn_c_check_header_compile "$LINENO" "sys/ucred.h" "ac_cv_header_sys_ucred_h" "$ac_includes_default +#include " -if test "x$ac_cv_header_winldap_h" = xyes; then : +if test "x$ac_cv_header_sys_ucred_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_WINLDAP_H 1 +#define HAVE_SYS_UCRED_H 1 _ACEOF -else - as_fn_error $? "header file is required for LDAP" "$LINENO" 5 fi done - fi -fi -if test "$with_bonjour" = yes ; then - ac_fn_c_check_header_mongrel "$LINENO" "dns_sd.h" "ac_cv_header_dns_sd_h" "$ac_includes_default" -if test "x$ac_cv_header_dns_sd_h" = xyes; then : +# At least on IRIX, test for netinet/tcp.h will fail unless +# netinet/in.h is included first. +for ac_header in netinet/in.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "netinet/in.h" "ac_cv_header_netinet_in_h" "$ac_includes_default" +if test "x$ac_cv_header_netinet_in_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_NETINET_IN_H 1 +_ACEOF -else - as_fn_error $? "header file is required for Bonjour" "$LINENO" 5 fi +done + +for ac_header in netinet/tcp.h +do : + ac_fn_c_check_header_compile "$LINENO" "netinet/tcp.h" "ac_cv_header_netinet_tcp_h" "$ac_includes_default +#ifdef HAVE_NETINET_IN_H +#include +#endif + +" +if test "x$ac_cv_header_netinet_tcp_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_NETINET_TCP_H 1 +_ACEOF fi -# for contrib/uuid-ossp -if test "$with_uuid" = bsd ; then - for ac_header in uuid.h +done + + +if expr x"$pgac_cv_check_readline" : 'x-lreadline' >/dev/null ; then + for ac_header in readline/readline.h do : - ac_fn_c_check_header_mongrel "$LINENO" "uuid.h" "ac_cv_header_uuid_h" "$ac_includes_default" -if test "x$ac_cv_header_uuid_h" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "readline/readline.h" "ac_cv_header_readline_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_readline_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_UUID_H 1 +#define HAVE_READLINE_READLINE_H 1 _ACEOF - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include +else + for ac_header in readline.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_READLINE_H 1 _ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "uuid_to_string" >/dev/null 2>&1; then : else - as_fn_error $? "header file does not match BSD UUID library" "$LINENO" 5 + as_fn_error $? "readline header not found +If you have readline already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-readline to disable readline support." "$LINENO" 5 fi -rm -f conftest* -else - as_fn_error $? "header file is required for BSD UUID" "$LINENO" 5 +done + fi done -elif test "$with_uuid" = e2fs ; then - for ac_header in uuid/uuid.h + for ac_header in readline/history.h do : - ac_fn_c_check_header_mongrel "$LINENO" "uuid/uuid.h" "ac_cv_header_uuid_uuid_h" "$ac_includes_default" -if test "x$ac_cv_header_uuid_uuid_h" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "readline/history.h" "ac_cv_header_readline_history_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_history_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_UUID_UUID_H 1 +#define HAVE_READLINE_HISTORY_H 1 _ACEOF - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include +else + for ac_header in history.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "history.h" "ac_cv_header_history_h" "$ac_includes_default" +if test "x$ac_cv_header_history_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_HISTORY_H 1 _ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "uuid_generate" >/dev/null 2>&1; then : else - as_fn_error $? "header file does not match E2FS UUID library" "$LINENO" 5 + as_fn_error $? "history header not found +If you have readline already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-readline to disable readline support." "$LINENO" 5 fi -rm -f conftest* + +done + +fi + +done + +fi + +if expr x"$pgac_cv_check_readline" : 'x-ledit' >/dev/null ; then +# Some installations of libedit usurp /usr/include/readline/, which seems +# bad practice, since in combined installations readline will have its headers +# there. We might have to resort to AC_EGREP checks to make sure we found +# the proper header... + for ac_header in editline/readline.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "editline/readline.h" "ac_cv_header_editline_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_editline_readline_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_EDITLINE_READLINE_H 1 +_ACEOF else - for ac_header in uuid.h + for ac_header in readline.h do : - ac_fn_c_check_header_mongrel "$LINENO" "uuid.h" "ac_cv_header_uuid_h" "$ac_includes_default" -if test "x$ac_cv_header_uuid_h" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_UUID_H 1 +#define HAVE_READLINE_H 1 _ACEOF - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include +else + for ac_header in readline/readline.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "readline/readline.h" "ac_cv_header_readline_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_readline_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_READLINE_READLINE_H 1 _ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "uuid_generate" >/dev/null 2>&1; then : else - as_fn_error $? "header file does not match E2FS UUID library" "$LINENO" 5 + as_fn_error $? "readline header not found +If you have libedit already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-readline to disable libedit support." "$LINENO" 5 fi -rm -f conftest* -else - as_fn_error $? "header file or is required for E2FS UUID" "$LINENO" 5 +done + fi done @@ -9852,50 +10228,39 @@ fi done -elif test "$with_uuid" = ossp ; then - for ac_header in ossp/uuid.h +# Note: in a libedit installation, history.h is sometimes a dummy, and may +# not be there at all. Hence, don't complain if not found. We must check +# though, since in yet other versions it is an independent header. + for ac_header in editline/history.h do : - ac_fn_c_check_header_mongrel "$LINENO" "ossp/uuid.h" "ac_cv_header_ossp_uuid_h" "$ac_includes_default" -if test "x$ac_cv_header_ossp_uuid_h" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "editline/history.h" "ac_cv_header_editline_history_h" "$ac_includes_default" +if test "x$ac_cv_header_editline_history_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_OSSP_UUID_H 1 -_ACEOF - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - +#define HAVE_EDITLINE_HISTORY_H 1 _ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "uuid_export" >/dev/null 2>&1; then : - -else - as_fn_error $? "header file does not match OSSP UUID library" "$LINENO" 5 -fi -rm -f conftest* else - for ac_header in uuid.h + for ac_header in history.h do : - ac_fn_c_check_header_mongrel "$LINENO" "uuid.h" "ac_cv_header_uuid_h" "$ac_includes_default" -if test "x$ac_cv_header_uuid_h" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "history.h" "ac_cv_header_history_h" "$ac_includes_default" +if test "x$ac_cv_header_history_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_UUID_H 1 +#define HAVE_HISTORY_H 1 _ACEOF - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include +else + for ac_header in readline/history.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "readline/history.h" "ac_cv_header_readline_history_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_history_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_READLINE_HISTORY_H 1 _ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "uuid_export" >/dev/null 2>&1; then : -else - as_fn_error $? "header file does not match OSSP UUID library" "$LINENO" 5 fi -rm -f conftest* -else - as_fn_error $? "header file or is required for OSSP UUID" "$LINENO" 5 +done + fi done @@ -9906,341 +10271,150 @@ done fi -if test "$PORTNAME" = "win32" ; then - for ac_header in crtdefs.h +if test "$with_zlib" = yes; then + ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" +if test "x$ac_cv_header_zlib_h" = xyes; then : + +else + as_fn_error $? "zlib header not found +If you have zlib already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. 
+Use --without-zlib to disable zlib support." "$LINENO" 5 +fi + + +fi + +if test "$with_gssapi" = yes ; then + for ac_header in gssapi/gssapi.h do : - ac_fn_c_check_header_mongrel "$LINENO" "crtdefs.h" "ac_cv_header_crtdefs_h" "$ac_includes_default" -if test "x$ac_cv_header_crtdefs_h" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "gssapi/gssapi.h" "ac_cv_header_gssapi_gssapi_h" "$ac_includes_default" +if test "x$ac_cv_header_gssapi_gssapi_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_CRTDEFS_H 1 +#define HAVE_GSSAPI_GSSAPI_H 1 +_ACEOF + +else + for ac_header in gssapi.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "gssapi.h" "ac_cv_header_gssapi_h" "$ac_includes_default" +if test "x$ac_cv_header_gssapi_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_GSSAPI_H 1 _ACEOF +else + as_fn_error $? "gssapi.h header file is required for GSSAPI" "$LINENO" 5 fi done fi -## -## Types, structures, compiler characteristics -## +done - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 -$as_echo_n "checking whether byte ordering is bigendian... " >&6; } -if ${ac_cv_c_bigendian+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_cv_c_bigendian=unknown - # See if we're dealing with a universal compiler. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifndef __APPLE_CC__ - not a universal capable compiler - #endif - typedef int dummy; +fi -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : +if test "$with_openssl" = yes ; then + ac_fn_c_check_header_mongrel "$LINENO" "openssl/ssl.h" "ac_cv_header_openssl_ssl_h" "$ac_includes_default" +if test "x$ac_cv_header_openssl_ssl_h" = xyes; then : - # Check for potential -arch flags. It is not universal unless - # there are at least two -arch flags with different values. - ac_arch= - ac_prev= - for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do - if test -n "$ac_prev"; then - case $ac_word in - i?86 | x86_64 | ppc | ppc64) - if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then - ac_arch=$ac_word - else - ac_cv_c_bigendian=universal - break - fi - ;; - esac - ac_prev= - elif test "x$ac_word" = "x-arch"; then - ac_prev=arch - fi - done +else + as_fn_error $? "header file is required for OpenSSL" "$LINENO" 5 fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - if test $ac_cv_c_bigendian = unknown; then - # See if sys/param.h defines the BYTE_ORDER macro. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - #include - -int -main () -{ -#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ - && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ - && LITTLE_ENDIAN) - bogus endian macros - #endif - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - # It does; now see whether it defined to BIG_ENDIAN or not. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - #include -int -main () -{ -#if BYTE_ORDER != BIG_ENDIAN - not big endian - #endif + ac_fn_c_check_header_mongrel "$LINENO" "openssl/err.h" "ac_cv_header_openssl_err_h" "$ac_includes_default" +if test "x$ac_cv_header_openssl_err_h" = xyes; then : - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c_bigendian=yes else - ac_cv_c_bigendian=no + as_fn_error $? 
"header file is required for OpenSSL" "$LINENO" 5 fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + + fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - fi - if test $ac_cv_c_bigendian = unknown; then - # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) - bogus endian macros - #endif - - ; - return 0; -} +if test "$with_pam" = yes ; then + for ac_header in security/pam_appl.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "security/pam_appl.h" "ac_cv_header_security_pam_appl_h" "$ac_includes_default" +if test "x$ac_cv_header_security_pam_appl_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SECURITY_PAM_APPL_H 1 _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - # It does; now see whether it defined to _BIG_ENDIAN or not. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -int -main () -{ -#ifndef _BIG_ENDIAN - not big endian - #endif - ; - return 0; -} +else + for ac_header in pam/pam_appl.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "pam/pam_appl.h" "ac_cv_header_pam_pam_appl_h" "$ac_includes_default" +if test "x$ac_cv_header_pam_pam_appl_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_PAM_PAM_APPL_H 1 _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c_bigendian=yes + else - ac_cv_c_bigendian=no + as_fn_error $? "header file or is required for PAM." "$LINENO" 5 fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +done + fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - fi - if test $ac_cv_c_bigendian = unknown; then - # Compile a test program. - if test "$cross_compiling" = yes; then : - # Try to guess by grepping values from an object file. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -short int ascii_mm[] = - { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; - short int ascii_ii[] = - { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; - int use_ascii (int i) { - return ascii_mm[i] + ascii_ii[i]; - } - short int ebcdic_ii[] = - { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; - short int ebcdic_mm[] = - { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; - int use_ebcdic (int i) { - return ebcdic_mm[i] + ebcdic_ii[i]; - } - extern int foo; -int -main () -{ -return use_ascii (foo) == use_ebcdic (foo); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then - ac_cv_c_bigendian=yes - fi - if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then - if test "$ac_cv_c_bigendian" = unknown; then - ac_cv_c_bigendian=no - else - # finding both strings is unlikely to happen, but who knows? - ac_cv_c_bigendian=unknown - fi - fi +done + fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -$ac_includes_default -int -main () -{ - /* Are we little or big endian? From Harbison&Steele. 
*/ - union - { - long int l; - char c[sizeof (long int)]; - } u; - u.l = 1; - return u.c[sizeof (long int) - 1] == 1; +if test "$with_libxml" = yes ; then + ac_fn_c_check_header_mongrel "$LINENO" "libxml/parser.h" "ac_cv_header_libxml_parser_h" "$ac_includes_default" +if test "x$ac_cv_header_libxml_parser_h" = xyes; then : - ; - return 0; -} -_ACEOF -if ac_fn_c_try_run "$LINENO"; then : - ac_cv_c_bigendian=no else - ac_cv_c_bigendian=yes -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext + as_fn_error $? "header file is required for XML support" "$LINENO" 5 fi - fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 -$as_echo "$ac_cv_c_bigendian" >&6; } - case $ac_cv_c_bigendian in #( - yes) - $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h -;; #( - no) - ;; #( - universal) -$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h +fi - ;; #( - *) - as_fn_error $? "unknown endianness - presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; - esac +if test "$with_libxslt" = yes ; then + ac_fn_c_check_header_mongrel "$LINENO" "libxslt/xslt.h" "ac_cv_header_libxslt_xslt_h" "$ac_includes_default" +if test "x$ac_cv_header_libxslt_xslt_h" = xyes; then : -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 -$as_echo_n "checking for inline... " >&6; } -if ${ac_cv_c_inline+:} false; then : - $as_echo_n "(cached) " >&6 else - ac_cv_c_inline=no -for ac_kw in inline __inline__ __inline; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifndef __cplusplus -typedef int foo_t; -static $ac_kw foo_t static_foo () {return 0; } -$ac_kw foo_t foo () {return 0; } -#endif - -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c_inline=$ac_kw + as_fn_error $? "header file is required for XSLT support" "$LINENO" 5 fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - test "$ac_cv_c_inline" != no && break -done + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 -$as_echo "$ac_cv_c_inline" >&6; } -case $ac_cv_c_inline in - inline | yes) ;; - *) - case $ac_cv_c_inline in - no) ac_val=;; - *) ac_val=$ac_cv_c_inline;; - esac - cat >>confdefs.h <<_ACEOF -#ifndef __cplusplus -#define inline $ac_val -#endif +if test "$with_ldap" = yes ; then + if test "$PORTNAME" != "win32"; then + for ac_header in ldap.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "ldap.h" "ac_cv_header_ldap_h" "$ac_includes_default" +if test "x$ac_cv_header_ldap_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LDAP_H 1 _ACEOF - ;; -esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for quiet inline (no complaint if unreferenced)" >&5 -$as_echo_n "checking for quiet inline (no complaint if unreferenced)... " >&6; } -if ${pgac_cv_c_inline_quietly+:} false; then : - $as_echo_n "(cached) " >&6 else - pgac_cv_c_inline_quietly=no - if test "$ac_cv_c_inline" != no; then - pgac_c_inline_save_werror=$ac_c_werror_flag - ac_c_werror_flag=yes - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include "$srcdir/config/test_quiet_include.h" -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_c_inline_quietly=yes -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - ac_c_werror_flag=$pgac_c_inline_save_werror - fi + as_fn_error $? 
"header file is required for LDAP" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_c_inline_quietly" >&5 -$as_echo "$pgac_cv_c_inline_quietly" >&6; } -if test "$pgac_cv_c_inline_quietly" != no; then - -cat >>confdefs.h <<_ACEOF -#define PG_USE_INLINE 1 -_ACEOF -fi +done -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for printf format archetype" >&5 -$as_echo_n "checking for printf format archetype... " >&6; } -if ${pgac_cv_printf_archetype+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for compatible LDAP implementation" >&5 +$as_echo_n "checking for compatible LDAP implementation... " >&6; } +if ${pgac_cv_ldap_safe+:} false; then : $as_echo_n "(cached) " >&6 else - ac_save_c_werror_flag=$ac_c_werror_flag -ac_c_werror_flag=yes -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -extern int -pgac_write(int ignore, const char *fmt,...) -__attribute__((format(gnu_printf, 2, 3))); +#include +#if !defined(LDAP_VENDOR_VERSION) || \ + (defined(LDAP_API_FEATURE_X_OPENLDAP) && \ + LDAP_VENDOR_VERSION >= 20424 && LDAP_VENDOR_VERSION <= 20431) +choke me +#endif int main () { @@ -10250,2187 +10424,2181 @@ main () } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_printf_archetype=gnu_printf + pgac_cv_ldap_safe=yes else - pgac_cv_printf_archetype=printf + pgac_cv_ldap_safe=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -ac_c_werror_flag=$ac_save_c_werror_flag fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_printf_archetype" >&5 -$as_echo "$pgac_cv_printf_archetype" >&6; } - -cat >>confdefs.h <<_ACEOF -#define PG_PRINTF_ATTRIBUTE $pgac_cv_printf_archetype -_ACEOF - +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_ldap_safe" >&5 +$as_echo "$pgac_cv_ldap_safe" >&6; } +if test "$pgac_cv_ldap_safe" != yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: +*** With OpenLDAP versions 2.4.24 through 2.4.31, inclusive, each backend +*** process that loads libpq (via WAL receiver, dblink, or postgres_fdw) and +*** also uses LDAP will crash on exit." >&5 +$as_echo "$as_me: WARNING: +*** With OpenLDAP versions 2.4.24 through 2.4.31, inclusive, each backend +*** process that loads libpq (via WAL receiver, dblink, or postgres_fdw) and +*** also uses LDAP will crash on exit." >&2;} +fi + else + for ac_header in winldap.h +do : + ac_fn_c_check_header_compile "$LINENO" "winldap.h" "ac_cv_header_winldap_h" "$ac_includes_default +#include - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for flexible array members" >&5 -$as_echo_n "checking for flexible array members... " >&6; } -if ${ac_cv_c_flexmember+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - #include - #include - struct s { int n; double d[]; }; -int -main () -{ -int m = getchar (); - struct s *p = malloc (offsetof (struct s, d) - + m * sizeof (double)); - p->d[0] = 0.0; - return p->d != (double *) NULL; - ; - return 0; -} +" +if test "x$ac_cv_header_winldap_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_WINLDAP_H 1 _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_c_flexmember=yes + else - ac_cv_c_flexmember=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + as_fn_error $? 
"header file is required for LDAP" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_flexmember" >&5 -$as_echo "$ac_cv_c_flexmember" >&6; } - if test $ac_cv_c_flexmember = yes; then -$as_echo "#define FLEXIBLE_ARRAY_MEMBER /**/" >>confdefs.h - - else - $as_echo "#define FLEXIBLE_ARRAY_MEMBER 1" >>confdefs.h +done fi +fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for signed types" >&5 -$as_echo_n "checking for signed types... " >&6; } -if ${pgac_cv_c_signed+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ +if test "$with_bonjour" = yes ; then + ac_fn_c_check_header_mongrel "$LINENO" "dns_sd.h" "ac_cv_header_dns_sd_h" "$ac_includes_default" +if test "x$ac_cv_header_dns_sd_h" = xyes; then : -int -main () -{ -signed char c; signed short s; signed int i; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_c_signed=yes else - pgac_cv_c_signed=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + as_fn_error $? "header file is required for Bonjour" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_c_signed" >&5 -$as_echo "$pgac_cv_c_signed" >&6; } -if test x"$pgac_cv_c_signed" = xno ; then -$as_echo "#define signed /**/" >>confdefs.h fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __func__" >&5 -$as_echo_n "checking for __func__... " >&6; } -if ${pgac_cv_funcname_func_support+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + +# for contrib/uuid-ossp +if test "$with_uuid" = bsd ; then + for ac_header in uuid.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "uuid.h" "ac_cv_header_uuid_h" "$ac_includes_default" +if test "x$ac_cv_header_uuid_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_UUID_H 1 +_ACEOF + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -int -main () -{ -printf("%s\n", __func__); - ; - return 0; -} +#include + _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_funcname_func_support=yes +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "uuid_to_string" >/dev/null 2>&1; then : + else - pgac_cv_funcname_func_support=no + as_fn_error $? "header file does not match BSD UUID library" "$LINENO" 5 fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f conftest* + +else + as_fn_error $? "header file is required for BSD UUID" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_funcname_func_support" >&5 -$as_echo "$pgac_cv_funcname_func_support" >&6; } -if test x"$pgac_cv_funcname_func_support" = xyes ; then -$as_echo "#define HAVE_FUNCNAME__FUNC 1" >>confdefs.h +done -else -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __FUNCTION__" >&5 -$as_echo_n "checking for __FUNCTION__... " >&6; } -if ${pgac_cv_funcname_function_support+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext +elif test "$with_uuid" = e2fs ; then + for ac_header in uuid/uuid.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "uuid/uuid.h" "ac_cv_header_uuid_uuid_h" "$ac_includes_default" +if test "x$ac_cv_header_uuid_uuid_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_UUID_UUID_H 1 +_ACEOF + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ -#include -int -main () -{ -printf("%s\n", __FUNCTION__); - ; - return 0; -} +#include + _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_funcname_function_support=yes +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "uuid_generate" >/dev/null 2>&1; then : + else - pgac_cv_funcname_function_support=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + as_fn_error $? "header file does not match E2FS UUID library" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_funcname_function_support" >&5 -$as_echo "$pgac_cv_funcname_function_support" >&6; } -if test x"$pgac_cv_funcname_function_support" = xyes ; then - -$as_echo "#define HAVE_FUNCNAME__FUNCTION 1" >>confdefs.h +rm -f conftest* -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Static_assert" >&5 -$as_echo_n "checking for _Static_assert... " >&6; } -if ${pgac_cv__static_assert+:} false; then : - $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + for ac_header in uuid.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "uuid.h" "ac_cv_header_uuid_h" "$ac_includes_default" +if test "x$ac_cv_header_uuid_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_UUID_H 1 +_ACEOF + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ +#include -int -main () -{ -({ _Static_assert(1, "foo"); }) - ; - return 0; -} _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - pgac_cv__static_assert=yes +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "uuid_generate" >/dev/null 2>&1; then : + else - pgac_cv__static_assert=no + as_fn_error $? "header file does not match E2FS UUID library" "$LINENO" 5 fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext +rm -f conftest* + +else + as_fn_error $? "header file or is required for E2FS UUID" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__static_assert" >&5 -$as_echo "$pgac_cv__static_assert" >&6; } -if test x"$pgac_cv__static_assert" = xyes ; then -$as_echo "#define HAVE__STATIC_ASSERT 1" >>confdefs.h +done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_types_compatible_p" >&5 -$as_echo_n "checking for __builtin_types_compatible_p... " >&6; } -if ${pgac_cv__types_compatible+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + +done + +elif test "$with_uuid" = ossp ; then + for ac_header in ossp/uuid.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "ossp/uuid.h" "ac_cv_header_ossp_uuid_h" "$ac_includes_default" +if test "x$ac_cv_header_ossp_uuid_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_OSSP_UUID_H 1 +_ACEOF + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ +#include -int -main () -{ - int x; static int y[__builtin_types_compatible_p(__typeof__(x), int)]; - ; - return 0; -} _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv__types_compatible=yes +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "uuid_export" >/dev/null 2>&1; then : + else - pgac_cv__types_compatible=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + as_fn_error $? 
"header file does not match OSSP UUID library" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__types_compatible" >&5 -$as_echo "$pgac_cv__types_compatible" >&6; } -if test x"$pgac_cv__types_compatible" = xyes ; then - -$as_echo "#define HAVE__BUILTIN_TYPES_COMPATIBLE_P 1" >>confdefs.h +rm -f conftest* -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap32" >&5 -$as_echo_n "checking for __builtin_bswap32... " >&6; } -if ${pgac_cv__builtin_bswap32+:} false; then : - $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + for ac_header in uuid.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "uuid.h" "ac_cv_header_uuid_h" "$ac_includes_default" +if test "x$ac_cv_header_uuid_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_UUID_H 1 +_ACEOF + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -static unsigned long int x = __builtin_bswap32(0xaabbccdd); -int -main () -{ +#include - ; - return 0; -} _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv__builtin_bswap32=yes +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "uuid_export" >/dev/null 2>&1; then : + else - pgac_cv__builtin_bswap32=no + as_fn_error $? "header file does not match OSSP UUID library" "$LINENO" 5 fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f conftest* + +else + as_fn_error $? "header file or is required for OSSP UUID" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_bswap32" >&5 -$as_echo "$pgac_cv__builtin_bswap32" >&6; } -if test x"$pgac_cv__builtin_bswap32" = xyes ; then -$as_echo "#define HAVE__BUILTIN_BSWAP32 1" >>confdefs.h +done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_constant_p" >&5 -$as_echo_n "checking for __builtin_constant_p... " >&6; } -if ${pgac_cv__builtin_constant_p+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -static int x; static int y[__builtin_constant_p(x) ? x : 1]; -int -main () -{ - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv__builtin_constant_p=yes -else - pgac_cv__builtin_constant_p=no +done + fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test "$PORTNAME" = "win32" ; then + for ac_header in crtdefs.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "crtdefs.h" "ac_cv_header_crtdefs_h" "$ac_includes_default" +if test "x$ac_cv_header_crtdefs_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_CRTDEFS_H 1 +_ACEOF + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_constant_p" >&5 -$as_echo "$pgac_cv__builtin_constant_p" >&6; } -if test x"$pgac_cv__builtin_constant_p" = xyes ; then -$as_echo "#define HAVE__BUILTIN_CONSTANT_P 1" >>confdefs.h +done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_unreachable" >&5 -$as_echo_n "checking for __builtin_unreachable... " >&6; } -if ${pgac_cv__builtin_unreachable+:} false; then : + +## +## Types, structures, compiler characteristics +## + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether byte ordering is bigendian" >&5 +$as_echo_n "checking whether byte ordering is bigendian... " >&6; } +if ${ac_cv_c_bigendian+:} false; then : $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_cv_c_bigendian=unknown + # See if we're dealing with a universal compiler. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ +#ifndef __APPLE_CC__ + not a universal capable compiler + #endif + typedef int dummy; -int -main () -{ -__builtin_unreachable(); - ; - return 0; -} _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - pgac_cv__builtin_unreachable=yes -else - pgac_cv__builtin_unreachable=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_unreachable" >&5 -$as_echo "$pgac_cv__builtin_unreachable" >&6; } -if test x"$pgac_cv__builtin_unreachable" = xyes ; then - -$as_echo "#define HAVE__BUILTIN_UNREACHABLE 1" >>confdefs.h +if ac_fn_c_try_compile "$LINENO"; then : + # Check for potential -arch flags. It is not universal unless + # there are at least two -arch flags with different values. + ac_arch= + ac_prev= + for ac_word in $CC $CFLAGS $CPPFLAGS $LDFLAGS; do + if test -n "$ac_prev"; then + case $ac_word in + i?86 | x86_64 | ppc | ppc64) + if test -z "$ac_arch" || test "$ac_arch" = "$ac_word"; then + ac_arch=$ac_word + else + ac_cv_c_bigendian=universal + break + fi + ;; + esac + ac_prev= + elif test "x$ac_word" = "x-arch"; then + ac_prev=arch + fi + done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __VA_ARGS__" >&5 -$as_echo_n "checking for __VA_ARGS__... " >&6; } -if ${pgac_cv__va_args+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + if test $ac_cv_c_bigendian = unknown; then + # See if sys/param.h defines the BYTE_ORDER macro. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include +#include + #include + int main () { -#define debug(...) fprintf(stderr, __VA_ARGS__) -debug("%s", "blarg"); +#if ! (defined BYTE_ORDER && defined BIG_ENDIAN \ + && defined LITTLE_ENDIAN && BYTE_ORDER && BIG_ENDIAN \ + && LITTLE_ENDIAN) + bogus endian macros + #endif ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv__va_args=yes -else - pgac_cv__va_args=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__va_args" >&5 -$as_echo "$pgac_cv__va_args" >&6; } -if test x"$pgac_cv__va_args" = xyes ; then - -$as_echo "#define HAVE__VA_ARGS 1" >>confdefs.h - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 -$as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } -if ${ac_cv_struct_tm+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + # It does; now see whether it defined to BIG_ENDIAN or not. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ #include -#include + #include int main () { -struct tm tm; - int *p = &tm.tm_sec; - return !p; +#if BYTE_ORDER != BIG_ENDIAN + not big endian + #endif + ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_struct_tm=time.h + ac_cv_c_bigendian=yes else - ac_cv_struct_tm=sys/time.h + ac_cv_c_bigendian=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5 -$as_echo "$ac_cv_struct_tm" >&6; } -if test $ac_cv_struct_tm = sys/time.h; then - -$as_echo "#define TM_IN_SYS_TIME 1" >>confdefs.h - -fi - -ac_fn_c_check_member "$LINENO" "struct tm" "tm_zone" "ac_cv_member_struct_tm_tm_zone" "#include -#include <$ac_cv_struct_tm> +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + if test $ac_cv_c_bigendian = unknown; then + # See if defines _LITTLE_ENDIAN or _BIG_ENDIAN (e.g., Solaris). + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include -" -if test "x$ac_cv_member_struct_tm_tm_zone" = xyes; then : +int +main () +{ +#if ! (defined _LITTLE_ENDIAN || defined _BIG_ENDIAN) + bogus endian macros + #endif -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_TM_TM_ZONE 1 + ; + return 0; +} _ACEOF - - -fi - -if test "$ac_cv_member_struct_tm_tm_zone" = yes; then - -$as_echo "#define HAVE_TM_ZONE 1" >>confdefs.h - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for tzname" >&5 -$as_echo_n "checking for tzname... " >&6; } -if ${ac_cv_var_tzname+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext +if ac_fn_c_try_compile "$LINENO"; then : + # It does; now see whether it defined to _BIG_ENDIAN or not. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -#ifndef tzname /* For SGI. */ -extern char *tzname[]; /* RS6000 and others reject char **tzname. */ -#endif +#include int main () { -atoi(*tzname); +#ifndef _BIG_ENDIAN + not big endian + #endif + ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_var_tzname=yes +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_bigendian=yes else - ac_cv_var_tzname=no + ac_cv_c_bigendian=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_var_tzname" >&5 -$as_echo "$ac_cv_var_tzname" >&6; } -if test $ac_cv_var_tzname = yes; then - -$as_echo "#define HAVE_TZNAME 1" >>confdefs.h +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + fi + if test $ac_cv_c_bigendian = unknown; then + # Compile a test program. + if test "$cross_compiling" = yes; then : + # Try to guess by grepping values from an object file. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +short int ascii_mm[] = + { 0x4249, 0x4765, 0x6E44, 0x6961, 0x6E53, 0x7953, 0 }; + short int ascii_ii[] = + { 0x694C, 0x5454, 0x656C, 0x6E45, 0x6944, 0x6E61, 0 }; + int use_ascii (int i) { + return ascii_mm[i] + ascii_ii[i]; + } + short int ebcdic_ii[] = + { 0x89D3, 0xE3E3, 0x8593, 0x95C5, 0x89C4, 0x9581, 0 }; + short int ebcdic_mm[] = + { 0xC2C9, 0xC785, 0x95C4, 0x8981, 0x95E2, 0xA8E2, 0 }; + int use_ebcdic (int i) { + return ebcdic_mm[i] + ebcdic_ii[i]; + } + extern int foo; +int +main () +{ +return use_ascii (foo) == use_ebcdic (foo); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + if grep BIGenDianSyS conftest.$ac_objext >/dev/null; then + ac_cv_c_bigendian=yes + fi + if grep LiTTleEnDian conftest.$ac_objext >/dev/null ; then + if test "$ac_cv_c_bigendian" = unknown; then + ac_cv_c_bigendian=no + else + # finding both strings is unlikely to happen, but who knows? + ac_cv_c_bigendian=unknown + fi + fi fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ -ac_fn_c_check_type "$LINENO" "union semun" "ac_cv_type_union_semun" "#include -#include -#include -" -if test "x$ac_cv_type_union_semun" = xyes; then : + /* Are we little or big endian? From Harbison&Steele. */ + union + { + long int l; + char c[sizeof (long int)]; + } u; + u.l = 1; + return u.c[sizeof (long int) - 1] == 1; -cat >>confdefs.h <<_ACEOF -#define HAVE_UNION_SEMUN 1 + ; + return 0; +} _ACEOF - - +if ac_fn_c_try_run "$LINENO"; then : + ac_cv_c_bigendian=no +else + ac_cv_c_bigendian=yes +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext fi -ac_fn_c_check_type "$LINENO" "struct sockaddr_un" "ac_cv_type_struct_sockaddr_un" "#include -#ifdef HAVE_SYS_UN_H -#include -#endif - -" -if test "x$ac_cv_type_struct_sockaddr_un" = xyes; then : + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_bigendian" >&5 +$as_echo "$ac_cv_c_bigendian" >&6; } + case $ac_cv_c_bigendian in #( + yes) + $as_echo "#define WORDS_BIGENDIAN 1" >>confdefs.h +;; #( + no) + ;; #( + universal) -$as_echo "#define HAVE_UNIX_SOCKETS 1" >>confdefs.h +$as_echo "#define AC_APPLE_UNIVERSAL_BUILD 1" >>confdefs.h -fi + ;; #( + *) + as_fn_error $? "unknown endianness + presetting ac_cv_c_bigendian=no (or yes) will help" "$LINENO" 5 ;; + esac -ac_fn_c_check_type "$LINENO" "struct sockaddr_storage" "ac_cv_type_struct_sockaddr_storage" "#include -#ifdef HAVE_SYS_SOCKET_H -#include +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for inline" >&5 +$as_echo_n "checking for inline... " >&6; } +if ${ac_cv_c_inline+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_c_inline=no +for ac_kw in inline __inline__ __inline; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#ifndef __cplusplus +typedef int foo_t; +static $ac_kw foo_t static_foo () {return 0; } +$ac_kw foo_t foo () {return 0; } #endif -" -if test "x$ac_cv_type_struct_sockaddr_storage" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_SOCKADDR_STORAGE 1 _ACEOF - +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_c_inline=$ac_kw +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + test "$ac_cv_c_inline" != no && break +done fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_inline" >&5 +$as_echo "$ac_cv_c_inline" >&6; } -ac_fn_c_check_member "$LINENO" "struct sockaddr_storage" "ss_family" "ac_cv_member_struct_sockaddr_storage_ss_family" "#include -#ifdef HAVE_SYS_SOCKET_H -#include +case $ac_cv_c_inline in + inline | yes) ;; + *) + case $ac_cv_c_inline in + no) ac_val=;; + *) ac_val=$ac_cv_c_inline;; + esac + cat >>confdefs.h <<_ACEOF +#ifndef __cplusplus +#define inline $ac_val #endif - -" -if test "x$ac_cv_member_struct_sockaddr_storage_ss_family" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY 1 _ACEOF + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for quiet inline (no complaint if unreferenced)" >&5 +$as_echo_n "checking for quiet inline (no complaint if unreferenced)... " >&6; } +if ${pgac_cv_c_inline_quietly+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_cv_c_inline_quietly=no + if test "$ac_cv_c_inline" != no; then + pgac_c_inline_save_werror=$ac_c_werror_flag + ac_c_werror_flag=yes + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include "$srcdir/config/test_quiet_include.h" +int +main () +{ + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_c_inline_quietly=yes fi -ac_fn_c_check_member "$LINENO" "struct sockaddr_storage" "__ss_family" "ac_cv_member_struct_sockaddr_storage___ss_family" "#include -#ifdef HAVE_SYS_SOCKET_H -#include -#endif - -" -if test "x$ac_cv_member_struct_sockaddr_storage___ss_family" = xyes; then : +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_c_werror_flag=$pgac_c_inline_save_werror + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_c_inline_quietly" >&5 +$as_echo "$pgac_cv_c_inline_quietly" >&6; } +if test "$pgac_cv_c_inline_quietly" != no; then cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY 1 +#define PG_USE_INLINE 1 _ACEOF - fi -ac_fn_c_check_member "$LINENO" "struct sockaddr_storage" "ss_len" "ac_cv_member_struct_sockaddr_storage_ss_len" "#include -#ifdef HAVE_SYS_SOCKET_H -#include -#endif -" -if test "x$ac_cv_member_struct_sockaddr_storage_ss_len" = xyes; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for printf format archetype" >&5 +$as_echo_n "checking for printf format archetype... " >&6; } +if ${pgac_cv_printf_archetype+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +extern int +pgac_write(int ignore, const char *fmt,...) 
+__attribute__((format(gnu_printf, 2, 3))); +int +main () +{ -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN 1 + ; + return 0; +} _ACEOF - - +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_printf_archetype=gnu_printf +else + pgac_cv_printf_archetype=printf fi -ac_fn_c_check_member "$LINENO" "struct sockaddr_storage" "__ss_len" "ac_cv_member_struct_sockaddr_storage___ss_len" "#include -#ifdef HAVE_SYS_SOCKET_H -#include -#endif - -" -if test "x$ac_cv_member_struct_sockaddr_storage___ss_len" = xyes; then : +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_printf_archetype" >&5 +$as_echo "$pgac_cv_printf_archetype" >&6; } cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_SOCKADDR_STORAGE___SS_LEN 1 +#define PG_PRINTF_ATTRIBUTE $pgac_cv_printf_archetype _ACEOF -fi -ac_fn_c_check_member "$LINENO" "struct sockaddr" "sa_len" "ac_cv_member_struct_sockaddr_sa_len" "#include -#ifdef HAVE_SYS_SOCKET_H -#include -#endif - -" -if test "x$ac_cv_member_struct_sockaddr_sa_len" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 -_ACEOF - - -fi - -ac_fn_c_check_type "$LINENO" "struct addrinfo" "ac_cv_type_struct_addrinfo" "#include -#include -#include - -" -if test "x$ac_cv_type_struct_addrinfo" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_ADDRINFO 1 -_ACEOF - - -fi - - - ac_fn_c_check_type "$LINENO" "intptr_t" "ac_cv_type_intptr_t" "$ac_includes_default" -if test "x$ac_cv_type_intptr_t" = xyes; then : - -$as_echo "#define HAVE_INTPTR_T 1" >>confdefs.h + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for flexible array members" >&5 +$as_echo_n "checking for flexible array members... " >&6; } +if ${ac_cv_c_flexmember+:} false; then : + $as_echo_n "(cached) " >&6 else - for ac_type in 'int' 'long int' 'long long int'; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -$ac_includes_default +#include + #include + #include + struct s { int n; double d[]; }; int main () { -static int test_array [1 - 2 * !(sizeof (void *) <= sizeof ($ac_type))]; -test_array [0] = 0; -return test_array [0]; - +int m = getchar (); + struct s *p = malloc (offsetof (struct s, d) + + m * sizeof (double)); + p->d[0] = 0.0; + return p->d != (double *) NULL; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - -cat >>confdefs.h <<_ACEOF -#define intptr_t $ac_type -_ACEOF - - ac_type= + ac_cv_c_flexmember=yes +else + ac_cv_c_flexmember=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - test -z "$ac_type" && break - done fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_flexmember" >&5 +$as_echo "$ac_cv_c_flexmember" >&6; } + if test $ac_cv_c_flexmember = yes; then +$as_echo "#define FLEXIBLE_ARRAY_MEMBER /**/" >>confdefs.h + else + $as_echo "#define FLEXIBLE_ARRAY_MEMBER 1" >>confdefs.h - ac_fn_c_check_type "$LINENO" "uintptr_t" "ac_cv_type_uintptr_t" "$ac_includes_default" -if test "x$ac_cv_type_uintptr_t" = xyes; then : - -$as_echo "#define HAVE_UINTPTR_T 1" >>confdefs.h + fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for signed types" >&5 +$as_echo_n "checking for signed types... 
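The flexible-array-member probe compiles essentially the C99 idiom below; the struct name and element count here are illustrative, but the allocation pattern is the one the conftest exercises:

#include <stddef.h>
#include <stdlib.h>

struct varlen
{
    int     n;
    double  d[];        /* flexible array member, C99 */
};

int
main(void)
{
    int     m = 8;      /* element count chosen at run time */
    struct varlen *p = malloc(offsetof(struct varlen, d) + m * sizeof(double));

    if (p == NULL)
        return 1;
    p->n = m;
    p->d[0] = 0.0;
    free(p);
    return 0;
}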
" >&6; } +if ${pgac_cv_c_signed+:} false; then : + $as_echo_n "(cached) " >&6 else - for ac_type in 'unsigned int' 'unsigned long int' \ - 'unsigned long long int'; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -$ac_includes_default + int main () { -static int test_array [1 - 2 * !(sizeof (void *) <= sizeof ($ac_type))]; -test_array [0] = 0; -return test_array [0]; - +signed char c; signed short s; signed int i; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - -cat >>confdefs.h <<_ACEOF -#define uintptr_t $ac_type -_ACEOF - - ac_type= + pgac_cv_c_signed=yes +else + pgac_cv_c_signed=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - test -z "$ac_type" && break - done fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_c_signed" >&5 +$as_echo "$pgac_cv_c_signed" >&6; } +if test x"$pgac_cv_c_signed" = xno ; then +$as_echo "#define signed /**/" >>confdefs.h - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unsigned long long int" >&5 -$as_echo_n "checking for unsigned long long int... " >&6; } -if ${ac_cv_type_unsigned_long_long_int+:} false; then : +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __func__" >&5 +$as_echo_n "checking for __func__... " >&6; } +if ${pgac_cv_funcname_func_support+:} false; then : $as_echo_n "(cached) " >&6 else - ac_cv_type_unsigned_long_long_int=yes - if test "x${ac_cv_prog_cc_c99-no}" = xno; then - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ - - /* For now, do not test the preprocessor; as of 2007 there are too many - implementations with broken preprocessors. Perhaps this can - be revisited in 2012. In the meantime, code should not expect - #if to work with literals wider than 32 bits. */ - /* Test literals. */ - long long int ll = 9223372036854775807ll; - long long int nll = -9223372036854775807LL; - unsigned long long int ull = 18446744073709551615ULL; - /* Test constant expressions. */ - typedef int a[((-9223372036854775807LL < 0 && 0 < 9223372036854775807ll) - ? 1 : -1)]; - typedef int b[(18446744073709551615ULL <= (unsigned long long int) -1 - ? 1 : -1)]; - int i = 63; +#include int main () { -/* Test availability of runtime routines for shift and division. 
*/ - long long int llmax = 9223372036854775807ll; - unsigned long long int ullmax = 18446744073709551615ull; - return ((ll << 63) | (ll >> 63) | (ll < i) | (ll > i) - | (llmax / ll) | (llmax % ll) - | (ull << 63) | (ull >> 63) | (ull << i) | (ull >> i) - | (ullmax / ull) | (ullmax % ull)); +printf("%s\n", __func__); ; return 0; } - _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_funcname_func_support=yes else - ac_cv_type_unsigned_long_long_int=no + pgac_cv_funcname_func_support=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_unsigned_long_long_int" >&5 -$as_echo "$ac_cv_type_unsigned_long_long_int" >&6; } - if test $ac_cv_type_unsigned_long_long_int = yes; then - -$as_echo "#define HAVE_UNSIGNED_LONG_LONG_INT 1" >>confdefs.h - - fi - +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_funcname_func_support" >&5 +$as_echo "$pgac_cv_funcname_func_support" >&6; } +if test x"$pgac_cv_funcname_func_support" = xyes ; then +$as_echo "#define HAVE_FUNCNAME__FUNC 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for long long int" >&5 -$as_echo_n "checking for long long int... " >&6; } -if ${ac_cv_type_long_long_int+:} false; then : - $as_echo_n "(cached) " >&6 else - ac_cv_type_long_long_int=yes - if test "x${ac_cv_prog_cc_c99-no}" = xno; then - ac_cv_type_long_long_int=$ac_cv_type_unsigned_long_long_int - if test $ac_cv_type_long_long_int = yes; then - if test "$cross_compiling" = yes; then : - : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __FUNCTION__" >&5 +$as_echo_n "checking for __FUNCTION__... " >&6; } +if ${pgac_cv_funcname_function_support+:} false; then : + $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include - #ifndef LLONG_MAX - # define HALF \ - (1LL << (sizeof (long long int) * CHAR_BIT - 2)) - # define LLONG_MAX (HALF - 1 + HALF) - #endif +#include int main () { -long long int n = 1; - int i; - for (i = 0; ; i++) - { - long long int m = n << i; - if (m >> i != n) - return 1; - if (LLONG_MAX / 2 < m) - break; - } - return 0; +printf("%s\n", __FUNCTION__); ; return 0; } _ACEOF -if ac_fn_c_try_run "$LINENO"; then : - +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_funcname_function_support=yes else - ac_cv_type_long_long_int=no -fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext + pgac_cv_funcname_function_support=no fi - - fi - fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_long_long_int" >&5 -$as_echo "$ac_cv_type_long_long_int" >&6; } - if test $ac_cv_type_long_long_int = yes; then - -$as_echo "#define HAVE_LONG_LONG_INT 1" >>confdefs.h - - fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_funcname_function_support" >&5 +$as_echo "$pgac_cv_funcname_function_support" >&6; } +if test x"$pgac_cv_funcname_function_support" = xyes ; then +$as_echo "#define HAVE_FUNCNAME__FUNCTION 1" >>confdefs.h -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for locale_t" >&5 -$as_echo_n "checking for locale_t... " >&6; } -if ${pgac_cv_type_locale_t+:} false; then : +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for _Static_assert" >&5 +$as_echo_n "checking for _Static_assert... 
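Assuming HAVE_FUNCNAME__FUNC / HAVE_FUNCNAME__FUNCTION end up defined as probed above, a consumer would typically pick the function-name macro along these lines; the FUNCNAME wrapper and its "unknown" fallback are hypothetical, added only so the sketch compiles on its own:

#include <stdio.h>

#if defined(HAVE_FUNCNAME__FUNC)
#define FUNCNAME __func__
#elif defined(HAVE_FUNCNAME__FUNCTION)
#define FUNCNAME __FUNCTION__
#else
#define FUNCNAME "unknown"      /* fallback for standalone compilation */
#endif

static void
report(void)
{
    printf("in %s\n", FUNCNAME);
}

int
main(void)
{
    report();
    return 0;
}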
" >&6; } +if ${pgac_cv__static_assert+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -locale_t x; + int main () { - +({ _Static_assert(1, "foo"); }) ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_type_locale_t=yes +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv__static_assert=yes +else + pgac_cv__static_assert=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__static_assert" >&5 +$as_echo "$pgac_cv__static_assert" >&6; } +if test x"$pgac_cv__static_assert" = xyes ; then + +$as_echo "#define HAVE__STATIC_ASSERT 1" >>confdefs.h + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_types_compatible_p" >&5 +$as_echo_n "checking for __builtin_types_compatible_p... " >&6; } +if ${pgac_cv__types_compatible+:} false; then : + $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -locale_t x; + int main () { - + int x; static int y[__builtin_types_compatible_p(__typeof__(x), int)]; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_type_locale_t='yes (in xlocale.h)' + pgac_cv__types_compatible=yes else - pgac_cv_type_locale_t=no + pgac_cv__types_compatible=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_type_locale_t" >&5 -$as_echo "$pgac_cv_type_locale_t" >&6; } -if test "$pgac_cv_type_locale_t" != no; then - -$as_echo "#define HAVE_LOCALE_T 1" >>confdefs.h - -fi -if test "$pgac_cv_type_locale_t" = 'yes (in xlocale.h)'; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__types_compatible" >&5 +$as_echo "$pgac_cv__types_compatible" >&6; } +if test x"$pgac_cv__types_compatible" = xyes ; then -$as_echo "#define LOCALE_T_IN_XLOCALE 1" >>confdefs.h +$as_echo "#define HAVE__BUILTIN_TYPES_COMPATIBLE_P 1" >>confdefs.h fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_bswap32" >&5 +$as_echo_n "checking for __builtin_bswap32... " >&6; } +if ${pgac_cv__builtin_bswap32+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +static unsigned long int x = __builtin_bswap32(0xaabbccdd); +int +main () +{ -ac_fn_c_check_type "$LINENO" "struct cmsgcred" "ac_cv_type_struct_cmsgcred" "#include -#include -#ifdef HAVE_SYS_UCRED_H -#include -#endif -" -if test "x$ac_cv_type_struct_cmsgcred" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_CMSGCRED 1 + ; + return 0; +} _ACEOF - - +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv__builtin_bswap32=yes +else + pgac_cv__builtin_bswap32=no fi - - -ac_fn_c_check_type "$LINENO" "struct option" "ac_cv_type_struct_option" "#ifdef HAVE_GETOPT_H -#include -#endif -" -if test "x$ac_cv_type_struct_option" = xyes; then : - -cat >>confdefs.h <<_ACEOF -#define HAVE_STRUCT_OPTION 1 -_ACEOF - - +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_bswap32" >&5 +$as_echo "$pgac_cv__builtin_bswap32" >&6; } +if test x"$pgac_cv__builtin_bswap32" = xyes ; then - -if test "$with_zlib" = yes; then - # Check that defines z_streamp (versions before about 1.0.4 - # did not). 
While we could work around the lack of z_streamp, it - # seems unwise to encourage people to use such old zlib versions... - ac_fn_c_check_type "$LINENO" "z_streamp" "ac_cv_type_z_streamp" "#include -" -if test "x$ac_cv_type_z_streamp" = xyes; then : - -else - as_fn_error $? "zlib version is too old -Use --without-zlib to disable zlib support." "$LINENO" 5 -fi +$as_echo "#define HAVE__BUILTIN_BSWAP32 1" >>confdefs.h fi - -# On PPC, check if assembler supports LWARX instruction's mutex hint bit -case $host_cpu in - ppc*|powerpc*) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether assembler supports lwarx hint bit" >&5 -$as_echo_n "checking whether assembler supports lwarx hint bit... " >&6; } - cat confdefs.h - <<_ACEOF >conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_constant_p" >&5 +$as_echo_n "checking for __builtin_constant_p... " >&6; } +if ${pgac_cv__builtin_constant_p+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ - +static int x; static int y[__builtin_constant_p(x) ? x : 1]; int main () { -int a = 0; int *p = &a; int r; - __asm__ __volatile__ (" lwarx %0,0,%1,1\n" : "=&r"(r) : "r"(p)); + ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_have_ppc_mutex_hint=yes + pgac_cv__builtin_constant_p=yes else - pgac_cv_have_ppc_mutex_hint=no + pgac_cv__builtin_constant_p=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_have_ppc_mutex_hint" >&5 -$as_echo "$pgac_cv_have_ppc_mutex_hint" >&6; } - if test x"$pgac_cv_have_ppc_mutex_hint" = xyes ; then - -$as_echo "#define HAVE_PPC_LWARX_MUTEX_HINT 1" >>confdefs.h - - fi - ;; -esac - -# Check largefile support. You might think this is a system service not a -# compiler characteristic, but you'd be wrong. We must check this before -# probing existence of related functions such as fseeko, since the largefile -# defines can affect what is generated for that. -if test "$PORTNAME" != "win32"; then - # Check whether --enable-largefile was given. -if test "${enable_largefile+set}" = set; then : - enableval=$enable_largefile; fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_constant_p" >&5 +$as_echo "$pgac_cv__builtin_constant_p" >&6; } +if test x"$pgac_cv__builtin_constant_p" = xyes ; then -if test "$enable_largefile" != no; then +$as_echo "#define HAVE__BUILTIN_CONSTANT_P 1" >>confdefs.h - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 -$as_echo_n "checking for special C compiler options needed for large files... " >&6; } -if ${ac_cv_sys_largefile_CC+:} false; then : +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __builtin_unreachable" >&5 +$as_echo_n "checking for __builtin_unreachable... " >&6; } +if ${pgac_cv__builtin_unreachable+:} false; then : $as_echo_n "(cached) " >&6 else - ac_cv_sys_largefile_CC=no - if test "$GCC" != yes; then - ac_save_CC=$CC - while :; do - # IRIX 6.2 and later do not support large files by default, - # so use the C compiler's -n32 option if that helps. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. 
*/ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; + int main () { - +__builtin_unreachable(); ; return 0; } _ACEOF - if ac_fn_c_try_compile "$LINENO"; then : - break -fi -rm -f core conftest.err conftest.$ac_objext - CC="$CC -n32" - if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_largefile_CC=' -n32'; break +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv__builtin_unreachable=yes +else + pgac_cv__builtin_unreachable=no fi -rm -f core conftest.err conftest.$ac_objext - break - done - CC=$ac_save_CC - rm -f conftest.$ac_ext - fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 -$as_echo "$ac_cv_sys_largefile_CC" >&6; } - if test "$ac_cv_sys_largefile_CC" != no; then - CC=$CC$ac_cv_sys_largefile_CC - fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__builtin_unreachable" >&5 +$as_echo "$pgac_cv__builtin_unreachable" >&6; } +if test x"$pgac_cv__builtin_unreachable" = xyes ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 -$as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } -if ${ac_cv_sys_file_offset_bits+:} false; then : +$as_echo "#define HAVE__BUILTIN_UNREACHABLE 1" >>confdefs.h + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for __VA_ARGS__" >&5 +$as_echo_n "checking for __VA_ARGS__... " >&6; } +if ${pgac_cv__va_args+:} false; then : $as_echo_n "(cached) " >&6 else - while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; +#include int main () { +#define debug(...) fprintf(stderr, __VA_ARGS__) +debug("%s", "blarg"); ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_file_offset_bits=no; break + pgac_cv__va_args=yes +else + pgac_cv__va_args=no fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv__va_args" >&5 +$as_echo "$pgac_cv__va_args" >&6; } +if test x"$pgac_cv__va_args" = xyes ; then + +$as_echo "#define HAVE__VA_ARGS 1" >>confdefs.h + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether struct tm is in sys/time.h or time.h" >&5 +$as_echo_n "checking whether struct tm is in sys/time.h or time.h... " >&6; } +if ${ac_cv_struct_tm+:} false; then : + $as_echo_n "(cached) " >&6 +else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#define _FILE_OFFSET_BITS 64 #include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 
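Assuming HAVE__BUILTIN_UNREACHABLE gets defined by the check above, the usual consumption pattern is a wrapper macro like the hypothetical UNREACHABLE() below; the abort() fallback is only there so the sketch stands alone:

#include <stdlib.h>

#ifdef HAVE__BUILTIN_UNREACHABLE
#define UNREACHABLE() __builtin_unreachable()
#else
#define UNREACHABLE() abort()
#endif

static int
sign(int x)
{
    if (x > 0)
        return 1;
    if (x < 0)
        return -1;
    if (x == 0)
        return 0;
    UNREACHABLE();              /* quiets "control reaches end of non-void function" */
}

int
main(void)
{
    return sign(-5) + 1;        /* exits with status 0 */
}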
1 : -1]; +#include + int main () { - +struct tm tm; + int *p = &tm.tm_sec; + return !p; ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_file_offset_bits=64; break + ac_cv_struct_tm=time.h +else + ac_cv_struct_tm=sys/time.h fi rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_cv_sys_file_offset_bits=unknown - break -done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 -$as_echo "$ac_cv_sys_file_offset_bits" >&6; } -case $ac_cv_sys_file_offset_bits in #( - no | unknown) ;; - *) +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_struct_tm" >&5 +$as_echo "$ac_cv_struct_tm" >&6; } +if test $ac_cv_struct_tm = sys/time.h; then + +$as_echo "#define TM_IN_SYS_TIME 1" >>confdefs.h + +fi + +ac_fn_c_check_member "$LINENO" "struct tm" "tm_zone" "ac_cv_member_struct_tm_tm_zone" "#include +#include <$ac_cv_struct_tm> + +" +if test "x$ac_cv_member_struct_tm_tm_zone" = xyes; then : + cat >>confdefs.h <<_ACEOF -#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits +#define HAVE_STRUCT_TM_TM_ZONE 1 _ACEOF -;; -esac -rm -rf conftest* - if test $ac_cv_sys_file_offset_bits = unknown; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 -$as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } -if ${ac_cv_sys_large_files+:} false; then : + + +fi + +if test "$ac_cv_member_struct_tm_tm_zone" = yes; then + +$as_echo "#define HAVE_TM_ZONE 1" >>confdefs.h + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for tzname" >&5 +$as_echo_n "checking for tzname... " >&6; } +if ${ac_cv_var_tzname+:} false; then : $as_echo_n "(cached) " >&6 else - while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 1 : -1]; -int -main () -{ +#include +#ifndef tzname /* For SGI. */ +extern char *tzname[]; /* RS6000 and others reject char **tzname. */ +#endif - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_large_files=no; break -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#define _LARGE_FILES 1 -#include - /* Check that off_t can represent 2**63 - 1 correctly. - We can't simply define LARGE_OFF_T to be 9223372036854775807, - since some C++ compilers masquerading as C compilers - incorrectly reject 9223372036854775807. */ -#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) - int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 - && LARGE_OFF_T % 2147483647 == 1) - ? 
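TM_IN_SYS_TIME, defined above when struct tm is declared by the sys/time.h header rather than time.h, is consumed with the conventional autoconf include pattern; the header names below are the standard ones assumed for illustration:

#include <time.h>
#ifdef TM_IN_SYS_TIME
#include <sys/time.h>           /* struct tm lives here on some older systems */
#endif
#include <stdio.h>

int
main(void)
{
    time_t      now = time(NULL);
    struct tm  *t = localtime(&now);

    printf("%04d-%02d-%02d\n", t->tm_year + 1900, t->tm_mon + 1, t->tm_mday);
    return 0;
}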
1 : -1]; int main () { - +atoi(*tzname); ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_sys_large_files=1; break +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_var_tzname=yes +else + ac_cv_var_tzname=no fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - ac_cv_sys_large_files=unknown - break -done +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 -$as_echo "$ac_cv_sys_large_files" >&6; } -case $ac_cv_sys_large_files in #( - no | unknown) ;; - *) -cat >>confdefs.h <<_ACEOF -#define _LARGE_FILES $ac_cv_sys_large_files -_ACEOF -;; -esac -rm -rf conftest* - fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_var_tzname" >&5 +$as_echo "$ac_cv_var_tzname" >&6; } +if test $ac_cv_var_tzname = yes; then +$as_echo "#define HAVE_TZNAME 1" >>confdefs.h fi +ac_fn_c_check_type "$LINENO" "union semun" "ac_cv_type_union_semun" "#include +#include +#include +" +if test "x$ac_cv_type_union_semun" = xyes; then : -fi +cat >>confdefs.h <<_ACEOF +#define HAVE_UNION_SEMUN 1 +_ACEOF -# Check for largefile support (must be after AC_SYS_LARGEFILE) -# The cast to long int works around a bug in the HP C Compiler -# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects -# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. -# This bug is HP SR number 8606223364. -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of off_t" >&5 -$as_echo_n "checking size of off_t... " >&6; } -if ${ac_cv_sizeof_off_t+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (off_t))" "ac_cv_sizeof_off_t" "$ac_includes_default"; then : -else - if test "$ac_cv_type_off_t" = yes; then - { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 -$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} -as_fn_error 77 "cannot compute sizeof (off_t) -See \`config.log' for more details" "$LINENO" 5; } - else - ac_cv_sizeof_off_t=0 - fi fi +ac_fn_c_check_type "$LINENO" "struct sockaddr_un" "ac_cv_type_struct_sockaddr_un" "#include +#ifdef HAVE_SYS_UN_H +#include +#endif + +" +if test "x$ac_cv_type_struct_sockaddr_un" = xyes; then : + +$as_echo "#define HAVE_UNIX_SOCKETS 1" >>confdefs.h + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_off_t" >&5 -$as_echo "$ac_cv_sizeof_off_t" >&6; } +ac_fn_c_check_type "$LINENO" "struct sockaddr_storage" "ac_cv_type_struct_sockaddr_storage" "#include +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +" +if test "x$ac_cv_type_struct_sockaddr_storage" = xyes; then : cat >>confdefs.h <<_ACEOF -#define SIZEOF_OFF_T $ac_cv_sizeof_off_t +#define HAVE_STRUCT_SOCKADDR_STORAGE 1 _ACEOF - -# If we don't have largefile support, can't handle segsize >= 2GB. -if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then - as_fn_error $? "Large file support is not enabled. Segment size cannot be larger than 1GB." "$LINENO" 5 fi +ac_fn_c_check_member "$LINENO" "struct sockaddr_storage" "ss_family" "ac_cv_member_struct_sockaddr_storage_ss_family" "#include +#ifdef HAVE_SYS_SOCKET_H +#include +#endif -## -## Functions, global variables -## +" +if test "x$ac_cv_member_struct_sockaddr_storage_ss_family" = xyes; then : -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for int timezone" >&5 -$as_echo_n "checking for int timezone... 
" >&6; } -if ${pgac_cv_var_int_timezone+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int res; -int -main () -{ -#ifndef __CYGWIN__ -res = timezone / 60; -#else -res = _timezone / 60; -#endif - ; - return 0; -} +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_SOCKADDR_STORAGE_SS_FAMILY 1 _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_var_int_timezone=yes -else - pgac_cv_var_int_timezone=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_timezone" >&5 -$as_echo "$pgac_cv_var_int_timezone" >&6; } -if test x"$pgac_cv_var_int_timezone" = xyes ; then -$as_echo "#define HAVE_INT_TIMEZONE 1" >>confdefs.h fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking types of arguments for accept()" >&5 -$as_echo_n "checking types of arguments for accept()... " >&6; } - if ${ac_cv_func_accept_return+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ${ac_cv_func_accept_arg1+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ${ac_cv_func_accept_arg2+:} false; then : - $as_echo_n "(cached) " >&6 -else - if ${ac_cv_func_accept_arg3+:} false; then : - $as_echo_n "(cached) " >&6 -else - for ac_cv_func_accept_return in 'int' 'unsigned int PASCAL' 'SOCKET WSAAPI'; do - for ac_cv_func_accept_arg1 in 'int' 'unsigned int' 'SOCKET'; do - for ac_cv_func_accept_arg2 in 'struct sockaddr *' 'const struct sockaddr *' 'void *'; do - for ac_cv_func_accept_arg3 in 'int' 'size_t' 'socklen_t' 'unsigned int' 'void'; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#ifdef HAVE_SYS_TYPES_H -#include -#endif +ac_fn_c_check_member "$LINENO" "struct sockaddr_storage" "__ss_family" "ac_cv_member_struct_sockaddr_storage___ss_family" "#include #ifdef HAVE_SYS_SOCKET_H #include #endif -extern $ac_cv_func_accept_return accept ($ac_cv_func_accept_arg1, $ac_cv_func_accept_arg2, $ac_cv_func_accept_arg3 *); -int -main () -{ - - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_not_found=no; break 4 -else - ac_not_found=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - done - done - done - done - if test "$ac_not_found" = yes; then - as_fn_error $? 
"could not determine argument types" "$LINENO" 5 - fi - if test "$ac_cv_func_accept_arg3" = "void"; then - ac_cv_func_accept_arg3=int - fi -fi +" +if test "x$ac_cv_member_struct_sockaddr_storage___ss_family" = xyes; then : -fi +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_SOCKADDR_STORAGE___SS_FAMILY 1 +_ACEOF -fi fi - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_accept_return, $ac_cv_func_accept_arg1, $ac_cv_func_accept_arg2, $ac_cv_func_accept_arg3 *" >&5 -$as_echo "$ac_cv_func_accept_return, $ac_cv_func_accept_arg1, $ac_cv_func_accept_arg2, $ac_cv_func_accept_arg3 *" >&6; } - -cat >>confdefs.h <<_ACEOF -#define ACCEPT_TYPE_RETURN $ac_cv_func_accept_return -_ACEOF +ac_fn_c_check_member "$LINENO" "struct sockaddr_storage" "ss_len" "ac_cv_member_struct_sockaddr_storage_ss_len" "#include +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +" +if test "x$ac_cv_member_struct_sockaddr_storage_ss_len" = xyes; then : cat >>confdefs.h <<_ACEOF -#define ACCEPT_TYPE_ARG1 $ac_cv_func_accept_arg1 +#define HAVE_STRUCT_SOCKADDR_STORAGE_SS_LEN 1 _ACEOF -cat >>confdefs.h <<_ACEOF -#define ACCEPT_TYPE_ARG2 $ac_cv_func_accept_arg2 +fi +ac_fn_c_check_member "$LINENO" "struct sockaddr_storage" "__ss_len" "ac_cv_member_struct_sockaddr_storage___ss_len" "#include +#ifdef HAVE_SYS_SOCKET_H +#include +#endif + +" +if test "x$ac_cv_member_struct_sockaddr_storage___ss_len" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_SOCKADDR_STORAGE___SS_LEN 1 _ACEOF +fi +ac_fn_c_check_member "$LINENO" "struct sockaddr" "sa_len" "ac_cv_member_struct_sockaddr_sa_len" "#include +#ifdef HAVE_SYS_SOCKET_H +#include +#endif + +" +if test "x$ac_cv_member_struct_sockaddr_sa_len" = xyes; then : + cat >>confdefs.h <<_ACEOF -#define ACCEPT_TYPE_ARG3 $ac_cv_func_accept_arg3 +#define HAVE_STRUCT_SOCKADDR_SA_LEN 1 _ACEOF -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether gettimeofday takes only one argument" >&5 -$as_echo_n "checking whether gettimeofday takes only one argument... " >&6; } -if ${pgac_cv_func_gettimeofday_1arg+:} false; then : - $as_echo_n "(cached) " >&6 +fi + +ac_fn_c_check_type "$LINENO" "struct addrinfo" "ac_cv_type_struct_addrinfo" "#include +#include +#include + +" +if test "x$ac_cv_type_struct_addrinfo" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_STRUCT_ADDRINFO 1 +_ACEOF + + +fi + + + ac_fn_c_check_type "$LINENO" "intptr_t" "ac_cv_type_intptr_t" "$ac_includes_default" +if test "x$ac_cv_type_intptr_t" = xyes; then : + +$as_echo "#define HAVE_INTPTR_T 1" >>confdefs.h + else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + for ac_type in 'int' 'long int' 'long long int'; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ -#include +$ac_includes_default int main () { -struct timeval *tp; -struct timezone *tzp; -gettimeofday(tp,tzp); +static int test_array [1 - 2 * !(sizeof (void *) <= sizeof ($ac_type))]; +test_array [0] = 0; +return test_array [0]; + ; return 0; } _ACEOF if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_func_gettimeofday_1arg=no -else - pgac_cv_func_gettimeofday_1arg=yes -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_gettimeofday_1arg" >&5 -$as_echo "$pgac_cv_func_gettimeofday_1arg" >&6; } -if test x"$pgac_cv_func_gettimeofday_1arg" = xyes ; then -$as_echo "#define GETTIMEOFDAY_1ARG 1" >>confdefs.h +cat >>confdefs.h <<_ACEOF +#define intptr_t $ac_type +_ACEOF + ac_type= +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + test -z "$ac_type" && break + done fi -# Some versions of libedit contain strlcpy(), setproctitle(), and other -# symbols that that library has no business exposing to the world. Pending -# acquisition of a clue by those developers, ignore libedit (including its -# possible alias of libreadline) while checking for everything else. -LIBS_including_readline="$LIBS" -LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'` -for ac_func in cbrt dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll pstat pthread_is_threaded_np readlink setproctitle setsid shm_open sigprocmask symlink sync_file_range towlower utime utimes wcstombs wcstombs_l -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF + ac_fn_c_check_type "$LINENO" "uintptr_t" "ac_cv_type_uintptr_t" "$ac_includes_default" +if test "x$ac_cv_type_uintptr_t" = xyes; then : -fi -done +$as_echo "#define HAVE_UINTPTR_T 1" >>confdefs.h +else + for ac_type in 'unsigned int' 'unsigned long int' \ + 'unsigned long long int'; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +$ac_includes_default +int +main () +{ +static int test_array [1 - 2 * !(sizeof (void *) <= sizeof ($ac_type))]; +test_array [0] = 0; +return test_array [0]; -ac_fn_c_check_func "$LINENO" "fseeko" "ac_cv_func_fseeko" -if test "x$ac_cv_func_fseeko" = xyes; then : - $as_echo "#define HAVE_FSEEKO 1" >>confdefs.h + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : -else - case " $LIBOBJS " in - *" fseeko.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS fseeko.$ac_objext" - ;; -esac +cat >>confdefs.h <<_ACEOF +#define uintptr_t $ac_type +_ACEOF + ac_type= +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + test -z "$ac_type" && break + done fi -case $host_os in - # NetBSD uses a custom fseeko/ftello built on fsetpos/fgetpos - # Mingw uses macros to access Win32 API calls - netbsd*|mingw*) - -$as_echo "#define HAVE_FSEEKO 1" >>confdefs.h - ac_cv_func_fseeko=yes;; - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGEFILE_SOURCE value needed for large files" >&5 -$as_echo_n "checking for _LARGEFILE_SOURCE value needed for large files... " >&6; } -if ${ac_cv_sys_largefile_source+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for unsigned long long int" >&5 +$as_echo_n "checking for unsigned long long int... 
" >&6; } +if ${ac_cv_type_unsigned_long_long_int+:} false; then : $as_echo_n "(cached) " >&6 else - while :; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_cv_type_unsigned_long_long_int=yes + if test "x${ac_cv_prog_cc_c99-no}" = xno; then + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include /* for off_t */ - #include + + /* For now, do not test the preprocessor; as of 2007 there are too many + implementations with broken preprocessors. Perhaps this can + be revisited in 2012. In the meantime, code should not expect + #if to work with literals wider than 32 bits. */ + /* Test literals. */ + long long int ll = 9223372036854775807ll; + long long int nll = -9223372036854775807LL; + unsigned long long int ull = 18446744073709551615ULL; + /* Test constant expressions. */ + typedef int a[((-9223372036854775807LL < 0 && 0 < 9223372036854775807ll) + ? 1 : -1)]; + typedef int b[(18446744073709551615ULL <= (unsigned long long int) -1 + ? 1 : -1)]; + int i = 63; int main () { -int (*fp) (FILE *, off_t, int) = fseeko; - return fseeko (stdin, 0, 0) && fp (stdin, 0, 0); +/* Test availability of runtime routines for shift and division. */ + long long int llmax = 9223372036854775807ll; + unsigned long long int ullmax = 18446744073709551615ull; + return ((ll << 63) | (ll >> 63) | (ll < i) | (ll > i) + | (llmax / ll) | (llmax % ll) + | (ull << 63) | (ull >> 63) | (ull << i) | (ull >> i) + | (ullmax / ull) | (ullmax % ull)); ; return 0; } + _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_sys_largefile_source=no; break + +else + ac_cv_type_unsigned_long_long_int=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_unsigned_long_long_int" >&5 +$as_echo "$ac_cv_type_unsigned_long_long_int" >&6; } + if test $ac_cv_type_unsigned_long_long_int = yes; then + +$as_echo "#define HAVE_UNSIGNED_LONG_LONG_INT 1" >>confdefs.h + + fi + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for long long int" >&5 +$as_echo_n "checking for long long int... " >&6; } +if ${ac_cv_type_long_long_int+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_type_long_long_int=yes + if test "x${ac_cv_prog_cc_c99-no}" = xno; then + ac_cv_type_long_long_int=$ac_cv_type_unsigned_long_long_int + if test $ac_cv_type_long_long_int = yes; then + if test "$cross_compiling" = yes; then : + : +else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ -#define _LARGEFILE_SOURCE 1 -#include /* for off_t */ - #include +#include + #ifndef LLONG_MAX + # define HALF \ + (1LL << (sizeof (long long int) * CHAR_BIT - 2)) + # define LLONG_MAX (HALF - 1 + HALF) + #endif int main () { -int (*fp) (FILE *, off_t, int) = fseeko; - return fseeko (stdin, 0, 0) && fp (stdin, 0, 0); +long long int n = 1; + int i; + for (i = 0; ; i++) + { + long long int m = n << i; + if (m >> i != n) + return 1; + if (LLONG_MAX / 2 < m) + break; + } + return 0; ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_sys_largefile_source=1; break +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_type_long_long_int=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - ac_cv_sys_largefile_source=unknown - break -done +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_source" >&5 -$as_echo "$ac_cv_sys_largefile_source" >&6; } -case $ac_cv_sys_largefile_source in #( - no | unknown) ;; - *) -cat >>confdefs.h <<_ACEOF -#define _LARGEFILE_SOURCE $ac_cv_sys_largefile_source -_ACEOF -;; -esac -rm -rf conftest* - -# We used to try defining _XOPEN_SOURCE=500 too, to work around a bug -# in glibc 2.1.3, but that breaks too many other things. -# If you want fseeko and ftello with glibc, upgrade to a fixed glibc. -if test $ac_cv_sys_largefile_source != unknown; then - -$as_echo "#define HAVE_FSEEKO 1" >>confdefs.h + fi + fi fi -;; -esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_type_long_long_int" >&5 +$as_echo "$ac_cv_type_long_long_int" >&6; } + if test $ac_cv_type_long_long_int = yes; then -# posix_fadvise() is a no-op on Solaris, so don't incur function overhead -# by calling it, 2009-04-02 -# http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/posix_fadvise.c -if test "$PORTNAME" != "solaris"; then -for ac_func in posix_fadvise -do : - ac_fn_c_check_func "$LINENO" "posix_fadvise" "ac_cv_func_posix_fadvise" -if test "x$ac_cv_func_posix_fadvise" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_POSIX_FADVISE 1 -_ACEOF +$as_echo "#define HAVE_LONG_LONG_INT 1" >>confdefs.h -fi -done + fi -ac_fn_c_check_decl "$LINENO" "posix_fadvise" "ac_cv_have_decl_posix_fadvise" "#include -" -if test "x$ac_cv_have_decl_posix_fadvise" = xyes; then : - ac_have_decl=1 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for locale_t" >&5 +$as_echo_n "checking for locale_t... " >&6; } +if ${pgac_cv_type_locale_t+:} false; then : + $as_echo_n "(cached) " >&6 else - ac_have_decl=0 -fi + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +locale_t x; +int +main () +{ -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_POSIX_FADVISE $ac_have_decl + ; + return 0; +} _ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_type_locale_t=yes +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +locale_t x; +int +main () +{ + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_type_locale_t='yes (in xlocale.h)' +else + pgac_cv_type_locale_t=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_type_locale_t" >&5 +$as_echo "$pgac_cv_type_locale_t" >&6; } +if test "$pgac_cv_type_locale_t" != no; then + +$as_echo "#define HAVE_LOCALE_T 1" >>confdefs.h -ac_fn_c_check_decl "$LINENO" "fdatasync" "ac_cv_have_decl_fdatasync" "#include -" -if test "x$ac_cv_have_decl_fdatasync" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 fi +if test "$pgac_cv_type_locale_t" = 'yes (in xlocale.h)'; then -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_FDATASYNC $ac_have_decl -_ACEOF +$as_echo "#define LOCALE_T_IN_XLOCALE 1" >>confdefs.h -ac_fn_c_check_decl "$LINENO" "strlcat" "ac_cv_have_decl_strlcat" "$ac_includes_default" -if test "x$ac_cv_have_decl_strlcat" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 fi +ac_fn_c_check_type "$LINENO" "struct cmsgcred" "ac_cv_type_struct_cmsgcred" "#include +#include +#ifdef HAVE_SYS_UCRED_H +#include +#endif +" +if test "x$ac_cv_type_struct_cmsgcred" = xyes; then : + cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_STRLCAT $ac_have_decl +#define HAVE_STRUCT_CMSGCRED 1 _ACEOF -ac_fn_c_check_decl "$LINENO" "strlcpy" "ac_cv_have_decl_strlcpy" "$ac_includes_default" -if test "x$ac_cv_have_decl_strlcpy" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 + + fi -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_STRLCPY $ac_have_decl -_ACEOF -# This is probably only present on Darwin, but may as well check always -ac_fn_c_check_decl "$LINENO" "F_FULLFSYNC" "ac_cv_have_decl_F_FULLFSYNC" "#include +ac_fn_c_check_type "$LINENO" "struct option" "ac_cv_type_struct_option" "#ifdef HAVE_GETOPT_H +#include +#endif " -if test "x$ac_cv_have_decl_F_FULLFSYNC" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 -fi +if test "x$ac_cv_type_struct_option" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_F_FULLFSYNC $ac_have_decl +#define HAVE_STRUCT_OPTION 1 _ACEOF -HAVE_IPV6=no -ac_fn_c_check_type "$LINENO" "struct sockaddr_in6" "ac_cv_type_struct_sockaddr_in6" "$ac_includes_default -#include -" -if test "x$ac_cv_type_struct_sockaddr_in6" = xyes; then : - -$as_echo "#define HAVE_IPV6 1" >>confdefs.h - - HAVE_IPV6=yes fi +if test "$with_zlib" = yes; then + # Check that defines z_streamp (versions before about 1.0.4 + # did not). While we could work around the lack of z_streamp, it + # seems unwise to encourage people to use such old zlib versions... + ac_fn_c_check_type "$LINENO" "z_streamp" "ac_cv_type_z_streamp" "#include +" +if test "x$ac_cv_type_z_streamp" = xyes; then : -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PS_STRINGS" >&5 -$as_echo_n "checking for PS_STRINGS... " >&6; } -if ${pgac_cv_var_PS_STRINGS+:} false; then : - $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + as_fn_error $? "zlib version is too old +Use --without-zlib to disable zlib support." "$LINENO" 5 +fi + +fi + +# On PPC, check if assembler supports LWARX instruction's mutex hint bit +case $host_cpu in + ppc*|powerpc*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether assembler supports lwarx hint bit" >&5 +$as_echo_n "checking whether assembler supports lwarx hint bit... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
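HAVE_LOCALE_T and LOCALE_T_IN_XLOCALE, set just above, would typically be consumed as sketched below; this assumes a POSIX.1-2008 newlocale() and is illustrative rather than code from the patch:

#include <locale.h>
#ifdef LOCALE_T_IN_XLOCALE
#include <xlocale.h>            /* older macOS/FreeBSD declare locale_t here */
#endif
#include <stdio.h>

int
main(void)
{
#ifdef HAVE_LOCALE_T
    locale_t    loc = newlocale(LC_ALL_MASK, "C", (locale_t) 0);

    if (loc != (locale_t) 0)
    {
        printf("created a C locale handle\n");
        freelocale(loc);
    }
#endif
    return 0;
}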
*/ -#include -#include int main () { -PS_STRINGS->ps_nargvstr = 1; -PS_STRINGS->ps_argvstr = "foo"; +int a = 0; int *p = &a; int r; + __asm__ __volatile__ (" lwarx %0,0,%1,1\n" : "=&r"(r) : "r"(p)); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_var_PS_STRINGS=yes +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_have_ppc_mutex_hint=yes else - pgac_cv_var_PS_STRINGS=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_PS_STRINGS" >&5 -$as_echo "$pgac_cv_var_PS_STRINGS" >&6; } -if test "$pgac_cv_var_PS_STRINGS" = yes ; then - -$as_echo "#define HAVE_PS_STRINGS 1" >>confdefs.h - + pgac_cv_have_ppc_mutex_hint=no fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_have_ppc_mutex_hint" >&5 +$as_echo "$pgac_cv_have_ppc_mutex_hint" >&6; } + if test x"$pgac_cv_have_ppc_mutex_hint" = xyes ; then +$as_echo "#define HAVE_PPC_LWARX_MUTEX_HINT 1" >>confdefs.h -# We use our snprintf.c emulation if either snprintf() or vsnprintf() -# is missing. Yes, there are machines that have only one. We may -# also decide to use snprintf.c if snprintf() is present but does not -# have all the features we need --- see below. - -if test "$PORTNAME" = "win32"; then - # Win32 gets snprintf.c built unconditionally. - # - # To properly translate all NLS languages strings, we must support the - # *printf() %$ format, which allows *printf() arguments to be selected - # by position in the translated string. - # - # libintl versions < 0.13 use the native *printf() functions, and Win32 - # *printf() doesn't understand %$, so we must use our /port versions, - # which do understand %$. libintl versions >= 0.13 include their own - # *printf versions on Win32. The libintl 0.13 release note text is: - # - # C format strings with positions, as they arise when a translator - # needs to reorder a sentence, are now supported on all platforms. - # On those few platforms (NetBSD and Woe32) for which the native - # printf()/fprintf()/... functions don't support such format - # strings, replacements are provided through . - # - # We could use libintl >= 0.13's *printf() if we were sure that we had - # a litint >= 0.13 at runtime, but seeing that there is no clean way - # to guarantee that, it is best to just use our own, so we are sure to - # get %$ support. In include/port.h we disable the *printf() macros - # that might have been defined by libintl. - # - # We do this unconditionally whether NLS is used or not so we are sure - # that all Win32 libraries and binaries behave the same. - pgac_need_repl_snprintf=yes -else - pgac_need_repl_snprintf=no - for ac_func in snprintf -do : - ac_fn_c_check_func "$LINENO" "snprintf" "ac_cv_func_snprintf" -if test "x$ac_cv_func_snprintf" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_SNPRINTF 1 -_ACEOF + fi + ;; +esac -else - pgac_need_repl_snprintf=yes +# Check largefile support. You might think this is a system service not a +# compiler characteristic, but you'd be wrong. We must check this before +# probing existence of related functions such as fseeko, since the largefile +# defines can affect what is generated for that. +if test "$PORTNAME" != "win32"; then + # Check whether --enable-largefile was given. 
+if test "${enable_largefile+set}" = set; then : + enableval=$enable_largefile; fi -done - for ac_func in vsnprintf -do : - ac_fn_c_check_func "$LINENO" "vsnprintf" "ac_cv_func_vsnprintf" -if test "x$ac_cv_func_vsnprintf" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_VSNPRINTF 1 -_ACEOF +if test "$enable_largefile" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for special C compiler options needed for large files" >&5 +$as_echo_n "checking for special C compiler options needed for large files... " >&6; } +if ${ac_cv_sys_largefile_CC+:} false; then : + $as_echo_n "(cached) " >&6 else - pgac_need_repl_snprintf=yes -fi -done - -fi - + ac_cv_sys_largefile_CC=no + if test "$GCC" != yes; then + ac_save_CC=$CC + while :; do + # IRIX 6.2 and later do not support large files by default, + # so use the C compiler's -n32 option if that helps. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ -# Check whether declares snprintf() and vsnprintf(); if not, -# include/c.h will provide declarations. Note this is a separate test -# from whether the functions exist in the C library --- there are -# systems that have the functions but don't bother to declare them :-( + ; + return 0; +} +_ACEOF + if ac_fn_c_try_compile "$LINENO"; then : + break +fi +rm -f core conftest.err conftest.$ac_objext + CC="$CC -n32" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_largefile_CC=' -n32'; break +fi +rm -f core conftest.err conftest.$ac_objext + break + done + CC=$ac_save_CC + rm -f conftest.$ac_ext + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_CC" >&5 +$as_echo "$ac_cv_sys_largefile_CC" >&6; } + if test "$ac_cv_sys_largefile_CC" != no; then + CC=$CC$ac_cv_sys_largefile_CC + fi -ac_fn_c_check_decl "$LINENO" "snprintf" "ac_cv_have_decl_snprintf" "$ac_includes_default" -if test "x$ac_cv_have_decl_snprintf" = xyes; then : - ac_have_decl=1 + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _FILE_OFFSET_BITS value needed for large files" >&5 +$as_echo_n "checking for _FILE_OFFSET_BITS value needed for large files... " >&6; } +if ${ac_cv_sys_file_offset_bits+:} false; then : + $as_echo_n "(cached) " >&6 else - ac_have_decl=0 -fi + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 
1 : -1]; +int +main () +{ -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_SNPRINTF $ac_have_decl + ; + return 0; +} _ACEOF -ac_fn_c_check_decl "$LINENO" "vsnprintf" "ac_cv_have_decl_vsnprintf" "$ac_includes_default" -if test "x$ac_cv_have_decl_vsnprintf" = xyes; then : - ac_have_decl=1 -else - ac_have_decl=0 +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_file_offset_bits=no; break fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#define _FILE_OFFSET_BITS 64 +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_file_offset_bits=64; break +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cv_sys_file_offset_bits=unknown + break +done +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_file_offset_bits" >&5 +$as_echo "$ac_cv_sys_file_offset_bits" >&6; } +case $ac_cv_sys_file_offset_bits in #( + no | unknown) ;; + *) cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_VSNPRINTF $ac_have_decl +#define _FILE_OFFSET_BITS $ac_cv_sys_file_offset_bits _ACEOF - - - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for isinf" >&5 -$as_echo_n "checking for isinf... " >&6; } -if ${ac_cv_func_isinf+:} false; then : +;; +esac +rm -rf conftest* + if test $ac_cv_sys_file_offset_bits = unknown; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGE_FILES value needed for large files" >&5 +$as_echo_n "checking for _LARGE_FILES value needed for large files... " >&6; } +if ${ac_cv_sys_large_files+:} false; then : $as_echo_n "(cached) " >&6 else + while :; do cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; +int +main () +{ -#include -double glob_double; - + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_large_files=no; break +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#define _LARGE_FILES 1 +#include + /* Check that off_t can represent 2**63 - 1 correctly. + We can't simply define LARGE_OFF_T to be 9223372036854775807, + since some C++ compilers masquerading as C compilers + incorrectly reject 9223372036854775807. */ +#define LARGE_OFF_T (((off_t) 1 << 62) - 1 + ((off_t) 1 << 62)) + int off_t_is_large[(LARGE_OFF_T % 2147483629 == 721 + && LARGE_OFF_T % 2147483647 == 1) + ? 1 : -1]; int main () { -return isinf(glob_double) ? 
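The effect being probed here can be seen directly in a tiny standalone program: on a 32-bit glibc system it prints 4 without the define and 8 with it (a sketch, not part of the configure machinery):

/* Must come before any system header to take effect. */
#define _FILE_OFFSET_BITS 64

#include <sys/types.h>
#include <stdio.h>

int
main(void)
{
    printf("sizeof(off_t) = %zu\n", sizeof(off_t));
    return 0;
}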
0 : 1; + ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_func_isinf=yes -else - ac_cv_func_isinf=no +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_sys_large_files=1; break fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cv_sys_large_files=unknown + break +done fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_isinf" >&5 -$as_echo "$ac_cv_func_isinf" >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_large_files" >&5 +$as_echo "$ac_cv_sys_large_files" >&6; } +case $ac_cv_sys_large_files in #( + no | unknown) ;; + *) +cat >>confdefs.h <<_ACEOF +#define _LARGE_FILES $ac_cv_sys_large_files +_ACEOF +;; +esac +rm -rf conftest* + fi -if test $ac_cv_func_isinf = yes ; then -$as_echo "#define HAVE_ISINF 1" >>confdefs.h +fi + +fi + +# Check for largefile support (must be after AC_SYS_LARGEFILE) +# The cast to long int works around a bug in the HP C Compiler +# version HP92453-01 B.11.11.23709.GP, which incorrectly rejects +# declarations like `int a3[[(sizeof (unsigned char)) >= 0]];'. +# This bug is HP SR number 8606223364. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking size of off_t" >&5 +$as_echo_n "checking size of off_t... " >&6; } +if ${ac_cv_sizeof_off_t+:} false; then : + $as_echo_n "(cached) " >&6 else - case " $LIBOBJS " in - *" isinf.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS isinf.$ac_objext" - ;; -esac + if ac_fn_c_compute_int "$LINENO" "(long int) (sizeof (off_t))" "ac_cv_sizeof_off_t" "$ac_includes_default"; then : - # Look for a way to implement a substitute for isinf() - for ac_func in fpclass fp_class fp_class_d class -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - break +else + if test "$ac_cv_type_off_t" = yes; then + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "cannot compute sizeof (off_t) +See \`config.log' for more details" "$LINENO" 5; } + else + ac_cv_sizeof_off_t=0 + fi fi -done fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sizeof_off_t" >&5 +$as_echo "$ac_cv_sizeof_off_t" >&6; } -ac_fn_c_check_func "$LINENO" "crypt" "ac_cv_func_crypt" -if test "x$ac_cv_func_crypt" = xyes; then : - $as_echo "#define HAVE_CRYPT 1" >>confdefs.h -else - case " $LIBOBJS " in - *" crypt.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS crypt.$ac_objext" - ;; -esac -fi +cat >>confdefs.h <<_ACEOF +#define SIZEOF_OFF_T $ac_cv_sizeof_off_t +_ACEOF -ac_fn_c_check_func "$LINENO" "fls" "ac_cv_func_fls" -if test "x$ac_cv_func_fls" = xyes; then : - $as_echo "#define HAVE_FLS 1" >>confdefs.h -else - case " $LIBOBJS " in - *" fls.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS fls.$ac_objext" - ;; -esac +# If we don't have largefile support, can't handle segsize >= 2GB. +if test "$ac_cv_sizeof_off_t" -lt 8 -a "$segsize" != "1"; then + as_fn_error $? "Large file support is not enabled. Segment size cannot be larger than 1GB." 
"$LINENO" 5 fi -ac_fn_c_check_func "$LINENO" "getopt" "ac_cv_func_getopt" -if test "x$ac_cv_func_getopt" = xyes; then : - $as_echo "#define HAVE_GETOPT 1" >>confdefs.h -else - case " $LIBOBJS " in - *" getopt.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt.$ac_objext" - ;; -esac +## +## Functions, global variables +## +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for int timezone" >&5 +$as_echo_n "checking for int timezone... " >&6; } +if ${pgac_cv_var_int_timezone+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int res; +int +main () +{ +#ifndef __CYGWIN__ +res = timezone / 60; +#else +res = _timezone / 60; +#endif + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_var_int_timezone=yes +else + pgac_cv_var_int_timezone=no fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_timezone" >&5 +$as_echo "$pgac_cv_var_int_timezone" >&6; } +if test x"$pgac_cv_var_int_timezone" = xyes ; then -ac_fn_c_check_func "$LINENO" "getrusage" "ac_cv_func_getrusage" -if test "x$ac_cv_func_getrusage" = xyes; then : - $as_echo "#define HAVE_GETRUSAGE 1" >>confdefs.h - -else - case " $LIBOBJS " in - *" getrusage.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getrusage.$ac_objext" - ;; -esac +$as_echo "#define HAVE_INT_TIMEZONE 1" >>confdefs.h fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking types of arguments for accept()" >&5 +$as_echo_n "checking types of arguments for accept()... " >&6; } + if ${ac_cv_func_accept_return+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ${ac_cv_func_accept_arg1+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ${ac_cv_func_accept_arg2+:} false; then : + $as_echo_n "(cached) " >&6 +else + if ${ac_cv_func_accept_arg3+:} false; then : + $as_echo_n "(cached) " >&6 +else + for ac_cv_func_accept_return in 'int' 'unsigned int PASCAL' 'SOCKET WSAAPI'; do + for ac_cv_func_accept_arg1 in 'int' 'unsigned int' 'SOCKET'; do + for ac_cv_func_accept_arg2 in 'struct sockaddr *' 'const struct sockaddr *' 'void *'; do + for ac_cv_func_accept_arg3 in 'int' 'size_t' 'socklen_t' 'unsigned int' 'void'; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef HAVE_SYS_TYPES_H +#include +#endif +#ifdef HAVE_SYS_SOCKET_H +#include +#endif +extern $ac_cv_func_accept_return accept ($ac_cv_func_accept_arg1, $ac_cv_func_accept_arg2, $ac_cv_func_accept_arg3 *); +int +main () +{ -ac_fn_c_check_func "$LINENO" "inet_aton" "ac_cv_func_inet_aton" -if test "x$ac_cv_func_inet_aton" = xyes; then : - $as_echo "#define HAVE_INET_ATON 1" >>confdefs.h - + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_not_found=no; break 4 else - case " $LIBOBJS " in - *" inet_aton.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS inet_aton.$ac_objext" - ;; -esac + ac_not_found=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + done + done + done + done + if test "$ac_not_found" = yes; then + as_fn_error $? 
"could not determine argument types" "$LINENO" 5 + fi + if test "$ac_cv_func_accept_arg3" = "void"; then + ac_cv_func_accept_arg3=int + fi fi -ac_fn_c_check_func "$LINENO" "mkdtemp" "ac_cv_func_mkdtemp" -if test "x$ac_cv_func_mkdtemp" = xyes; then : - $as_echo "#define HAVE_MKDTEMP 1" >>confdefs.h +fi -else - case " $LIBOBJS " in - *" mkdtemp.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS mkdtemp.$ac_objext" - ;; -esac +fi fi + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_accept_return, $ac_cv_func_accept_arg1, $ac_cv_func_accept_arg2, $ac_cv_func_accept_arg3 *" >&5 +$as_echo "$ac_cv_func_accept_return, $ac_cv_func_accept_arg1, $ac_cv_func_accept_arg2, $ac_cv_func_accept_arg3 *" >&6; } -ac_fn_c_check_func "$LINENO" "random" "ac_cv_func_random" -if test "x$ac_cv_func_random" = xyes; then : - $as_echo "#define HAVE_RANDOM 1" >>confdefs.h +cat >>confdefs.h <<_ACEOF +#define ACCEPT_TYPE_RETURN $ac_cv_func_accept_return +_ACEOF -else - case " $LIBOBJS " in - *" random.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS random.$ac_objext" - ;; -esac -fi +cat >>confdefs.h <<_ACEOF +#define ACCEPT_TYPE_ARG1 $ac_cv_func_accept_arg1 +_ACEOF -ac_fn_c_check_func "$LINENO" "rint" "ac_cv_func_rint" -if test "x$ac_cv_func_rint" = xyes; then : - $as_echo "#define HAVE_RINT 1" >>confdefs.h -else - case " $LIBOBJS " in - *" rint.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS rint.$ac_objext" - ;; -esac +cat >>confdefs.h <<_ACEOF +#define ACCEPT_TYPE_ARG2 $ac_cv_func_accept_arg2 +_ACEOF -fi -ac_fn_c_check_func "$LINENO" "srandom" "ac_cv_func_srandom" -if test "x$ac_cv_func_srandom" = xyes; then : - $as_echo "#define HAVE_SRANDOM 1" >>confdefs.h +cat >>confdefs.h <<_ACEOF +#define ACCEPT_TYPE_ARG3 $ac_cv_func_accept_arg3 +_ACEOF -else - case " $LIBOBJS " in - *" srandom.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS srandom.$ac_objext" - ;; -esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether gettimeofday takes only one argument" >&5 +$as_echo_n "checking whether gettimeofday takes only one argument... " >&6; } +if ${pgac_cv_func_gettimeofday_1arg+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +struct timeval *tp; +struct timezone *tzp; +gettimeofday(tp,tzp); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_func_gettimeofday_1arg=no +else + pgac_cv_func_gettimeofday_1arg=yes fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_gettimeofday_1arg" >&5 +$as_echo "$pgac_cv_func_gettimeofday_1arg" >&6; } +if test x"$pgac_cv_func_gettimeofday_1arg" = xyes ; then -ac_fn_c_check_func "$LINENO" "strerror" "ac_cv_func_strerror" -if test "x$ac_cv_func_strerror" = xyes; then : - $as_echo "#define HAVE_STRERROR 1" >>confdefs.h - -else - case " $LIBOBJS " in - *" strerror.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS strerror.$ac_objext" - ;; -esac +$as_echo "#define GETTIMEOFDAY_1ARG 1" >>confdefs.h fi -ac_fn_c_check_func "$LINENO" "strlcat" "ac_cv_func_strlcat" -if test "x$ac_cv_func_strlcat" = xyes; then : - $as_echo "#define HAVE_STRLCAT 1" >>confdefs.h -else - case " $LIBOBJS " in - *" strlcat.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS strlcat.$ac_objext" - ;; -esac +# Some versions of libedit contain strlcpy(), setproctitle(), and other +# symbols that that library has no business exposing to the world. 
Pending +# acquisition of a clue by those developers, ignore libedit (including its +# possible alias of libreadline) while checking for everything else. +LIBS_including_readline="$LIBS" +LIBS=`echo "$LIBS" | sed -e 's/-ledit//g' -e 's/-lreadline//g'` + +for ac_func in cbrt dlopen fdatasync getifaddrs getpeerucred getrlimit mbstowcs_l memmove poll pstat pthread_is_threaded_np readlink setproctitle setsid shm_open sigprocmask symlink sync_file_range towlower utime utimes wcstombs wcstombs_l +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF fi +done -ac_fn_c_check_func "$LINENO" "strlcpy" "ac_cv_func_strlcpy" -if test "x$ac_cv_func_strlcpy" = xyes; then : - $as_echo "#define HAVE_STRLCPY 1" >>confdefs.h + +ac_fn_c_check_func "$LINENO" "fseeko" "ac_cv_func_fseeko" +if test "x$ac_cv_func_fseeko" = xyes; then : + $as_echo "#define HAVE_FSEEKO 1" >>confdefs.h else case " $LIBOBJS " in - *" strlcpy.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS strlcpy.$ac_objext" + *" fseeko.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS fseeko.$ac_objext" ;; esac fi - case $host_os in + # NetBSD uses a custom fseeko/ftello built on fsetpos/fgetpos + # Mingw uses macros to access Win32 API calls + netbsd*|mingw*) - # Windows uses a specialised env handler - # and doesn't need a replacement getpeereid because it doesn't use - # Unix sockets. - mingw*) - -$as_echo "#define HAVE_UNSETENV 1" >>confdefs.h - - -$as_echo "#define HAVE_GETPEEREID 1" >>confdefs.h - - ac_cv_func_unsetenv=yes - ac_cv_func_getpeereid=yes;; - *) - ac_fn_c_check_func "$LINENO" "unsetenv" "ac_cv_func_unsetenv" -if test "x$ac_cv_func_unsetenv" = xyes; then : - $as_echo "#define HAVE_UNSETENV 1" >>confdefs.h +$as_echo "#define HAVE_FSEEKO 1" >>confdefs.h + ac_cv_func_fseeko=yes;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for _LARGEFILE_SOURCE value needed for large files" >&5 +$as_echo_n "checking for _LARGEFILE_SOURCE value needed for large files... " >&6; } +if ${ac_cv_sys_largefile_source+:} false; then : + $as_echo_n "(cached) " >&6 else - case " $LIBOBJS " in - *" unsetenv.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS unsetenv.$ac_objext" - ;; + while :; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include /* for off_t */ + #include +int +main () +{ +int (*fp) (FILE *, off_t, int) = fseeko; + return fseeko (stdin, 0, 0) && fp (stdin, 0, 0); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_sys_largefile_source=no; break +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#define _LARGEFILE_SOURCE 1 +#include /* for off_t */ + #include +int +main () +{ +int (*fp) (FILE *, off_t, int) = fseeko; + return fseeko (stdin, 0, 0) && fp (stdin, 0, 0); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_sys_largefile_source=1; break +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_cv_sys_largefile_source=unknown + break +done +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_sys_largefile_source" >&5 +$as_echo "$ac_cv_sys_largefile_source" >&6; } +case $ac_cv_sys_largefile_source in #( + no | unknown) ;; + *) +cat >>confdefs.h <<_ACEOF +#define _LARGEFILE_SOURCE $ac_cv_sys_largefile_source +_ACEOF +;; esac +rm -rf conftest* -fi +# We used to try defining _XOPEN_SOURCE=500 too, to work around a bug +# in glibc 2.1.3, but that breaks too many other things. +# If you want fseeko and ftello with glibc, upgrade to a fixed glibc. +if test $ac_cv_sys_largefile_source != unknown; then -ac_fn_c_check_func "$LINENO" "getpeereid" "ac_cv_func_getpeereid" -if test "x$ac_cv_func_getpeereid" = xyes; then : - $as_echo "#define HAVE_GETPEEREID 1" >>confdefs.h +$as_echo "#define HAVE_FSEEKO 1" >>confdefs.h -else - case " $LIBOBJS " in - *" getpeereid.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getpeereid.$ac_objext" - ;; +fi +;; esac +# posix_fadvise() is a no-op on Solaris, so don't incur function overhead +# by calling it, 2009-04-02 +# http://src.opensolaris.org/source/xref/onnv/onnv-gate/usr/src/lib/libc/port/gen/posix_fadvise.c +if test "$PORTNAME" != "solaris"; then +for ac_func in posix_fadvise +do : + ac_fn_c_check_func "$LINENO" "posix_fadvise" "ac_cv_func_posix_fadvise" +if test "x$ac_cv_func_posix_fadvise" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_POSIX_FADVISE 1 +_ACEOF + fi +done +ac_fn_c_check_decl "$LINENO" "posix_fadvise" "ac_cv_have_decl_posix_fadvise" "#include +" +if test "x$ac_cv_have_decl_posix_fadvise" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi - ;; -esac +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_POSIX_FADVISE $ac_have_decl +_ACEOF -# System's version of getaddrinfo(), if any, may be used only if we found -# a definition for struct addrinfo; see notes in src/include/getaddrinfo.h. -# We use only our own getaddrinfo.c on Windows, but it's time to revisit that. -if test x"$ac_cv_type_struct_addrinfo" = xyes && \ - test "$PORTNAME" != "win32"; then - ac_fn_c_check_func "$LINENO" "getaddrinfo" "ac_cv_func_getaddrinfo" -if test "x$ac_cv_func_getaddrinfo" = xyes; then : - $as_echo "#define HAVE_GETADDRINFO 1" >>confdefs.h +fi +ac_fn_c_check_decl "$LINENO" "fdatasync" "ac_cv_have_decl_fdatasync" "#include +" +if test "x$ac_cv_have_decl_fdatasync" = xyes; then : + ac_have_decl=1 else - case " $LIBOBJS " in - *" getaddrinfo.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getaddrinfo.$ac_objext" - ;; -esac - + ac_have_decl=0 fi +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_FDATASYNC $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "strlcat" "ac_cv_have_decl_strlcat" "$ac_includes_default" +if test "x$ac_cv_have_decl_strlcat" = xyes; then : + ac_have_decl=1 else - case " $LIBOBJS " in - *" getaddrinfo.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getaddrinfo.$ac_objext" - ;; -esac - + ac_have_decl=0 fi -# Similarly, use system's getopt_long() only if system provides struct option. 
-if test x"$ac_cv_type_struct_option" = xyes ; then - ac_fn_c_check_func "$LINENO" "getopt_long" "ac_cv_func_getopt_long" -if test "x$ac_cv_func_getopt_long" = xyes; then : - $as_echo "#define HAVE_GETOPT_LONG 1" >>confdefs.h - +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_STRLCAT $ac_have_decl +_ACEOF +ac_fn_c_check_decl "$LINENO" "strlcpy" "ac_cv_have_decl_strlcpy" "$ac_includes_default" +if test "x$ac_cv_have_decl_strlcpy" = xyes; then : + ac_have_decl=1 else - case " $LIBOBJS " in - *" getopt_long.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" - ;; -esac - + ac_have_decl=0 fi +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_STRLCPY $ac_have_decl +_ACEOF +# This is probably only present on Darwin, but may as well check always +ac_fn_c_check_decl "$LINENO" "F_FULLFSYNC" "ac_cv_have_decl_F_FULLFSYNC" "#include +" +if test "x$ac_cv_have_decl_F_FULLFSYNC" = xyes; then : + ac_have_decl=1 else - case " $LIBOBJS " in - *" getopt_long.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" - ;; -esac - + ac_have_decl=0 fi -# Solaris' getopt() doesn't do what we want for long options, so always use -# our version on that platform. -if test "$PORTNAME" = "solaris"; then - case " $LIBOBJS " in - *" getopt.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt.$ac_objext" - ;; -esac +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_F_FULLFSYNC $ac_have_decl +_ACEOF -fi -# mingw has adopted a GNU-centric interpretation of optind/optreset, -# so always use our version on Windows. -if test "$PORTNAME" = "win32"; then - case " $LIBOBJS " in - *" getopt.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt.$ac_objext" - ;; -esac +HAVE_IPV6=no +ac_fn_c_check_type "$LINENO" "struct sockaddr_in6" "ac_cv_type_struct_sockaddr_in6" "$ac_includes_default +#include +" +if test "x$ac_cv_type_struct_sockaddr_in6" = xyes; then : - case " $LIBOBJS " in - *" getopt_long.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" - ;; -esac +$as_echo "#define HAVE_IPV6 1" >>confdefs.h + HAVE_IPV6=yes fi -# Win32 (really MinGW) support -if test "$PORTNAME" = "win32"; then - ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday" -if test "x$ac_cv_func_gettimeofday" = xyes; then : - $as_echo "#define HAVE_GETTIMEOFDAY 1" >>confdefs.h + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for PS_STRINGS" >&5 +$as_echo_n "checking for PS_STRINGS... " >&6; } +if ${pgac_cv_var_PS_STRINGS+:} false; then : + $as_echo_n "(cached) " >&6 else - case " $LIBOBJS " in - *" gettimeofday.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS gettimeofday.$ac_objext" - ;; -esac + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +#include +int +main () +{ +PS_STRINGS->ps_nargvstr = 1; +PS_STRINGS->ps_argvstr = "foo"; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_var_PS_STRINGS=yes +else + pgac_cv_var_PS_STRINGS=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_PS_STRINGS" >&5 +$as_echo "$pgac_cv_var_PS_STRINGS" >&6; } +if test "$pgac_cv_var_PS_STRINGS" = yes ; then +$as_echo "#define HAVE_PS_STRINGS 1" >>confdefs.h - case " $LIBOBJS " in - *" dirmod.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" - ;; -esac - - case " $LIBOBJS " in - *" kill.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS kill.$ac_objext" - ;; -esac - - case " $LIBOBJS " in - *" open.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS open.$ac_objext" - ;; -esac - - case " $LIBOBJS " in - *" system.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS system.$ac_objext" - ;; -esac - - case " $LIBOBJS " in - *" win32env.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS win32env.$ac_objext" - ;; -esac - - case " $LIBOBJS " in - *" win32error.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS win32error.$ac_objext" - ;; -esac - - case " $LIBOBJS " in - *" win32setlocale.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS win32setlocale.$ac_objext" - ;; -esac - +fi -$as_echo "#define HAVE_SYMLINK 1" >>confdefs.h - ac_fn_c_check_type "$LINENO" "MINIDUMP_TYPE" "ac_cv_type_MINIDUMP_TYPE" " -#define WIN32_LEAN_AND_MEAN -#include -#include -#include -" -if test "x$ac_cv_type_MINIDUMP_TYPE" = xyes; then : +# We use our snprintf.c emulation if either snprintf() or vsnprintf() +# is missing. Yes, there are machines that have only one. We may +# also decide to use snprintf.c if snprintf() is present but does not +# have all the features we need --- see below. -cat >>confdefs.h <<_ACEOF -#define HAVE_MINIDUMP_TYPE 1 +if test "$PORTNAME" = "win32"; then + # Win32 gets snprintf.c built unconditionally. + # + # To properly translate all NLS languages strings, we must support the + # *printf() %$ format, which allows *printf() arguments to be selected + # by position in the translated string. + # + # libintl versions < 0.13 use the native *printf() functions, and Win32 + # *printf() doesn't understand %$, so we must use our /port versions, + # which do understand %$. libintl versions >= 0.13 include their own + # *printf versions on Win32. The libintl 0.13 release note text is: + # + # C format strings with positions, as they arise when a translator + # needs to reorder a sentence, are now supported on all platforms. + # On those few platforms (NetBSD and Woe32) for which the native + # printf()/fprintf()/... functions don't support such format + # strings, replacements are provided through . + # + # We could use libintl >= 0.13's *printf() if we were sure that we had + # a litint >= 0.13 at runtime, but seeing that there is no clean way + # to guarantee that, it is best to just use our own, so we are sure to + # get %$ support. In include/port.h we disable the *printf() macros + # that might have been defined by libintl. + # + # We do this unconditionally whether NLS is used or not so we are sure + # that all Win32 libraries and binaries behave the same. 
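The %$ notation that the comment above describes is the POSIX "positional argument" form of a printf format string. As a minimal illustration (an editorial sketch, not part of the patch), the following C fragment shows why translators need it and why a *printf() implementation without %n$ support forces use of the replacement snprintf.c:

    #include <stdio.h>

    int
    main(void)
    {
        /* English word order: arguments are consumed left to right */
        printf("%s is built with %s\n", "PostgreSQL", "OpenSSL");

        /* A translated message may need the arguments in a different
         * order.  "%2$s" selects the second argument and "%1$s" the
         * first; this is standard POSIX printf() behaviour, but the
         * native Win32 *printf() functions do not understand it. */
        printf("built with %2$s: %1$s\n", "PostgreSQL", "OpenSSL");

        return 0;
    }

(Per POSIX, a single format string must use either all numbered or all unnumbered conversions; each call above is consistent on its own.)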
+ pgac_need_repl_snprintf=yes +else + pgac_need_repl_snprintf=no + for ac_func in snprintf +do : + ac_fn_c_check_func "$LINENO" "snprintf" "ac_cv_func_snprintf" +if test "x$ac_cv_func_snprintf" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SNPRINTF 1 _ACEOF -pgac_minidump_type=yes else - pgac_minidump_type=no + pgac_need_repl_snprintf=yes fi +done -fi -if test x"$pgac_minidump_type" = x"yes" ; then - have_win32_dbghelp=yes + for ac_func in vsnprintf +do : + ac_fn_c_check_func "$LINENO" "vsnprintf" "ac_cv_func_vsnprintf" +if test "x$ac_cv_func_vsnprintf" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_VSNPRINTF 1 +_ACEOF else - have_win32_dbghelp=no - -fi - -# Cygwin needs only a bit of that -if test "$PORTNAME" = "cygwin"; then - case " $LIBOBJS " in - *" dirmod.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" - ;; -esac - + pgac_need_repl_snprintf=yes fi +done -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sigsetjmp" >&5 -$as_echo_n "checking for sigsetjmp... " >&6; } -if ${pgac_cv_func_sigsetjmp+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -sigjmp_buf x; sigsetjmp(x, 1); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_func_sigsetjmp=yes -else - pgac_cv_func_sigsetjmp=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_sigsetjmp" >&5 -$as_echo "$pgac_cv_func_sigsetjmp" >&6; } -if test x"$pgac_cv_func_sigsetjmp" = x"yes"; then -$as_echo "#define HAVE_SIGSETJMP 1" >>confdefs.h - -fi -ac_fn_c_check_decl "$LINENO" "sys_siglist" "ac_cv_have_decl_sys_siglist" "#include -/* NetBSD declares sys_siglist in unistd.h. */ -#ifdef HAVE_UNISTD_H -# include -#endif +# Check whether declares snprintf() and vsnprintf(); if not, +# include/c.h will provide declarations. Note this is a separate test +# from whether the functions exist in the C library --- there are +# systems that have the functions but don't bother to declare them :-( -" -if test "x$ac_cv_have_decl_sys_siglist" = xyes; then : +ac_fn_c_check_decl "$LINENO" "snprintf" "ac_cv_have_decl_snprintf" "$ac_includes_default" +if test "x$ac_cv_have_decl_snprintf" = xyes; then : ac_have_decl=1 else ac_have_decl=0 fi cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_SYS_SIGLIST $ac_have_decl +#define HAVE_DECL_SNPRINTF $ac_have_decl _ACEOF - - - -ac_fn_c_check_func "$LINENO" "syslog" "ac_cv_func_syslog" -if test "x$ac_cv_func_syslog" = xyes; then : - ac_fn_c_check_header_mongrel "$LINENO" "syslog.h" "ac_cv_header_syslog_h" "$ac_includes_default" -if test "x$ac_cv_header_syslog_h" = xyes; then : - -$as_echo "#define HAVE_SYSLOG 1" >>confdefs.h - +ac_fn_c_check_decl "$LINENO" "vsnprintf" "ac_cv_have_decl_vsnprintf" "$ac_includes_default" +if test "x$ac_cv_have_decl_vsnprintf" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 fi +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_VSNPRINTF $ac_have_decl +_ACEOF -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for opterr" >&5 -$as_echo_n "checking for opterr... " >&6; } -if ${pgac_cv_var_int_opterr+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for isinf" >&5 +$as_echo_n "checking for isinf... " >&6; } +if ${ac_cv_func_isinf+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ -#include -int -main () -{ -extern int opterr; opterr = 1; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_var_int_opterr=yes -else - pgac_cv_var_int_opterr=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_opterr" >&5 -$as_echo "$pgac_cv_var_int_opterr" >&6; } -if test x"$pgac_cv_var_int_opterr" = x"yes"; then - -$as_echo "#define HAVE_INT_OPTERR 1" >>confdefs.h -fi +#include +double glob_double; -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for optreset" >&5 -$as_echo_n "checking for optreset... " >&6; } -if ${pgac_cv_var_int_optreset+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include int main () { -extern int optreset; optreset = 1; +return isinf(glob_double) ? 0 : 1; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_var_int_optreset=yes + ac_cv_func_isinf=yes else - pgac_cv_var_int_optreset=no + ac_cv_func_isinf=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_optreset" >&5 -$as_echo "$pgac_cv_var_int_optreset" >&6; } -if test x"$pgac_cv_var_int_optreset" = x"yes"; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_func_isinf" >&5 +$as_echo "$ac_cv_func_isinf" >&6; } -$as_echo "#define HAVE_INT_OPTRESET 1" >>confdefs.h +if test $ac_cv_func_isinf = yes ; then -fi +$as_echo "#define HAVE_ISINF 1" >>confdefs.h -for ac_func in strtoll strtoq -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - break -fi -done +else + case " $LIBOBJS " in + *" isinf.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS isinf.$ac_objext" + ;; +esac -for ac_func in strtoull strtouq + # Look for a way to implement a substitute for isinf() + for ac_func in fpclass fp_class fp_class_d class do : as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" @@ -12442,774 +12610,608 @@ _ACEOF fi done +fi -# Lastly, restore full LIBS list and check for readline/libedit symbols -LIBS="$LIBS_including_readline" +ac_fn_c_check_func "$LINENO" "crypt" "ac_cv_func_crypt" +if test "x$ac_cv_func_crypt" = xyes; then : + $as_echo "#define HAVE_CRYPT 1" >>confdefs.h -if test "$with_readline" = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for rl_completion_append_character" >&5 -$as_echo_n "checking for rl_completion_append_character... " >&6; } -if ${pgac_cv_var_rl_completion_append_character+:} false; then : - $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include -#ifdef HAVE_READLINE_READLINE_H -# include -#elif defined(HAVE_READLINE_H) -# include -#endif + case " $LIBOBJS " in + *" crypt.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS crypt.$ac_objext" + ;; +esac -int -main () -{ -rl_completion_append_character = 'x'; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_var_rl_completion_append_character=yes -else - pgac_cv_var_rl_completion_append_character=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_rl_completion_append_character" >&5 -$as_echo "$pgac_cv_var_rl_completion_append_character" >&6; } -if test x"$pgac_cv_var_rl_completion_append_character" = x"yes"; then -$as_echo "#define HAVE_RL_COMPLETION_APPEND_CHARACTER 1" >>confdefs.h +ac_fn_c_check_func "$LINENO" "fls" "ac_cv_func_fls" +if test "x$ac_cv_func_fls" = xyes; then : + $as_echo "#define HAVE_FLS 1" >>confdefs.h -fi - for ac_func in rl_completion_matches rl_filename_completion_function -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF +else + case " $LIBOBJS " in + *" fls.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS fls.$ac_objext" + ;; +esac fi -done - for ac_func in append_history history_truncate_file -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF +ac_fn_c_check_func "$LINENO" "getopt" "ac_cv_func_getopt" +if test "x$ac_cv_func_getopt" = xyes; then : + $as_echo "#define HAVE_GETOPT 1" >>confdefs.h -fi -done +else + case " $LIBOBJS " in + *" getopt.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt.$ac_objext" + ;; +esac fi +ac_fn_c_check_func "$LINENO" "getrusage" "ac_cv_func_getrusage" +if test "x$ac_cv_func_getrusage" = xyes; then : + $as_echo "#define HAVE_GETRUSAGE 1" >>confdefs.h -# -# Pthreads -# -# For each platform, we need to know about any special compile and link -# libraries, and whether the normal C function names are thread-safe. -# See the comment at the top of src/port/thread.c for more information. -# WIN32 doesn't need the pthread tests; it always uses threads -if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then +else + case " $LIBOBJS " in + *" getrusage.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getrusage.$ac_objext" + ;; +esac +fi +ac_fn_c_check_func "$LINENO" "inet_aton" "ac_cv_func_inet_aton" +if test "x$ac_cv_func_inet_aton" = xyes; then : + $as_echo "#define HAVE_INET_ATON 1" >>confdefs.h -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu +else + case " $LIBOBJS " in + *" inet_aton.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS inet_aton.$ac_objext" + ;; +esac -ax_pthread_ok=no +fi -# We used to check for pthread.h first, but this fails if pthread.h -# requires special compiler flags (e.g. on True64 or Sequent). -# It gets checked for in the link test anyway. 
+ac_fn_c_check_func "$LINENO" "mkdtemp" "ac_cv_func_mkdtemp" +if test "x$ac_cv_func_mkdtemp" = xyes; then : + $as_echo "#define HAVE_MKDTEMP 1" >>confdefs.h -# First of all, check if the user has set any of the PTHREAD_LIBS, -# etcetera environment variables, and if threads linking works using -# them: -if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5 -$as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... " >&6; } - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ +else + case " $LIBOBJS " in + *" mkdtemp.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS mkdtemp.$ac_objext" + ;; +esac -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char pthread_join (); -int -main () -{ -return pthread_join (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ax_pthread_ok=yes fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 -$as_echo "$ax_pthread_ok" >&6; } - if test x"$ax_pthread_ok" = xno; then - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" - fi - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" -fi - -# We must check for the threads library under a number of different -# names; the ordering is very important because some systems -# (e.g. DEC) have both -lpthread and -lpthreads, where one of the -# libraries is broken (non-POSIX). -# Create a list of thread flags to try. Items starting with a "-" are -# C compiler flags, and other items are library names, except for "none" -# which indicates that we try without any flags at all, and "pthread-config" -# which is a program returning the flags for the Pth emulation library. +ac_fn_c_check_func "$LINENO" "random" "ac_cv_func_random" +if test "x$ac_cv_func_random" = xyes; then : + $as_echo "#define HAVE_RANDOM 1" >>confdefs.h -ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" +else + case " $LIBOBJS " in + *" random.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS random.$ac_objext" + ;; +esac -# The ordering *is* (sometimes) important. Some notes on the -# individual items follow: +fi -# pthreads: AIX (must check this before -lpthread) -# none: in case threads are in libc; should be tried before -Kthread and -# other compiler flags to prevent continual compiler warnings -# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) -# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) -# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) -# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) -# -pthreads: Solaris/gcc -# -mthreads: Mingw32/gcc, Lynx/gcc -# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it -# doesn't hurt to check since this sometimes defines pthreads too; -# also defines -D_REENTRANT) -# ... 
-mt is also the pthreads flag for HP/aCC -# pthread: Linux, etcetera -# --thread-safe: KAI C++ -# pthread-config: use pthread-config program (for GNU Pth library) +ac_fn_c_check_func "$LINENO" "rint" "ac_cv_func_rint" +if test "x$ac_cv_func_rint" = xyes; then : + $as_echo "#define HAVE_RINT 1" >>confdefs.h -case ${host_os} in - solaris*) +else + case " $LIBOBJS " in + *" rint.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS rint.$ac_objext" + ;; +esac - # On Solaris (at least, for some versions), libc contains stubbed - # (non-functional) versions of the pthreads routines, so link-based - # tests will erroneously succeed. (We need to link with -pthreads/-mt/ - # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather - # a function called by this macro, so we could check for that, but - # who knows whether they'll stub that too in a future libc.) So, - # we'll just look for -pthreads and -lpthread first: +fi - ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" - ;; +ac_fn_c_check_func "$LINENO" "srandom" "ac_cv_func_srandom" +if test "x$ac_cv_func_srandom" = xyes; then : + $as_echo "#define HAVE_SRANDOM 1" >>confdefs.h - darwin*) - ax_pthread_flags="-pthread $ax_pthread_flags" - ;; +else + case " $LIBOBJS " in + *" srandom.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS srandom.$ac_objext" + ;; esac -# Clang doesn't consider unrecognized options an error unless we specify -# -Werror. We throw in some extra Clang-specific options to ensure that -# this doesn't happen for GCC, which also accepts -Werror. - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler needs -Werror to reject unknown flags" >&5 -$as_echo_n "checking if compiler needs -Werror to reject unknown flags... " >&6; } -save_CFLAGS="$CFLAGS" -ax_pthread_extra_flags="-Werror" -CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int foo(void); -int -main () -{ -foo() - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } -else - ax_pthread_extra_flags= - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -CFLAGS="$save_CFLAGS" -if test x"$ax_pthread_ok" = xno; then -for flag in $ax_pthread_flags; do +ac_fn_c_check_func "$LINENO" "strerror" "ac_cv_func_strerror" +if test "x$ac_cv_func_strerror" = xyes; then : + $as_echo "#define HAVE_STRERROR 1" >>confdefs.h - case $flag in - none) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5 -$as_echo_n "checking whether pthreads work without any flags... " >&6; } - ;; +else + case " $LIBOBJS " in + *" strerror.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS strerror.$ac_objext" + ;; +esac - -*) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5 -$as_echo_n "checking whether pthreads work with $flag... " >&6; } - PTHREAD_CFLAGS="$flag" - ;; +fi + +ac_fn_c_check_func "$LINENO" "strlcat" "ac_cv_func_strlcat" +if test "x$ac_cv_func_strlcat" = xyes; then : + $as_echo "#define HAVE_STRLCAT 1" >>confdefs.h - pthread-config) - # Extract the first word of "pthread-config", so it can be a program name with args. -set dummy pthread-config; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... 
" >&6; } -if ${ac_cv_prog_ax_pthread_config+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$ax_pthread_config"; then - ac_cv_prog_ax_pthread_config="$ax_pthread_config" # Let the user override the test. else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ax_pthread_config="yes" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS + case " $LIBOBJS " in + *" strlcat.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS strlcat.$ac_objext" + ;; +esac - test -z "$ac_cv_prog_ax_pthread_config" && ac_cv_prog_ax_pthread_config="no" -fi fi -ax_pthread_config=$ac_cv_prog_ax_pthread_config -if test -n "$ax_pthread_config"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_config" >&5 -$as_echo "$ax_pthread_config" >&6; } + +ac_fn_c_check_func "$LINENO" "strlcpy" "ac_cv_func_strlcpy" +if test "x$ac_cv_func_strlcpy" = xyes; then : + $as_echo "#define HAVE_STRLCPY 1" >>confdefs.h + else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + case " $LIBOBJS " in + *" strlcpy.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS strlcpy.$ac_objext" + ;; +esac + fi - if test x"$ax_pthread_config" = xno; then continue; fi - PTHREAD_CFLAGS="`pthread-config --cflags`" - PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" - ;; - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5 -$as_echo_n "checking for the pthreads library -l$flag... " >&6; } - PTHREAD_LIBS="-l$flag" - ;; - esac +case $host_os in - save_LIBS="$LIBS" - save_CFLAGS="$CFLAGS" - LIBS="$PTHREAD_LIBS $LIBS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" + # Windows uses a specialised env handler + # and doesn't need a replacement getpeereid because it doesn't use + # Unix sockets. + mingw*) - # Check for various functions. We must include pthread.h, - # since some functions may be macros. (On the Sequent, we - # need a special flag -Kthread to make this header compile.) - # We check for pthread_join because it is in -lpthread on IRIX - # while pthread_create is in libc. We check for pthread_attr_init - # due to DEC craziness with -lpthreads. We check for - # pthread_cleanup_push because it is one of the few pthread - # functions on Solaris that doesn't have a non-functional libc stub. - # We try pthread_create on general principles. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. 
*/ -#include - static void routine(void *a) { a = 0; } - static void *start_routine(void *a) { return a; } -int -main () -{ -pthread_t th; pthread_attr_t attr; - pthread_create(&th, 0, start_routine, 0); - pthread_join(th, 0); - pthread_attr_init(&attr); - pthread_cleanup_push(routine, 0); - pthread_cleanup_pop(0) /* ; */ - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ax_pthread_ok=yes -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext +$as_echo "#define HAVE_UNSETENV 1" >>confdefs.h - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 -$as_echo "$ax_pthread_ok" >&6; } - if test "x$ax_pthread_ok" = xyes; then - break; - fi +$as_echo "#define HAVE_GETPEEREID 1" >>confdefs.h + + ac_cv_func_unsetenv=yes + ac_cv_func_getpeereid=yes;; + *) + ac_fn_c_check_func "$LINENO" "unsetenv" "ac_cv_func_unsetenv" +if test "x$ac_cv_func_unsetenv" = xyes; then : + $as_echo "#define HAVE_UNSETENV 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" unsetenv.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS unsetenv.$ac_objext" + ;; +esac - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" -done fi -# Various other checks: -if test "x$ax_pthread_ok" = xyes; then - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +ac_fn_c_check_func "$LINENO" "getpeereid" "ac_cv_func_getpeereid" +if test "x$ac_cv_func_getpeereid" = xyes; then : + $as_echo "#define HAVE_GETPEEREID 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" getpeereid.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getpeereid.$ac_objext" + ;; +esac - # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for joinable pthread attribute" >&5 -$as_echo_n "checking for joinable pthread attribute... " >&6; } - attr_name=unknown - for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -int attr = $attr; return attr /* ; */ - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - attr_name=$attr; break fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $attr_name" >&5 -$as_echo "$attr_name" >&6; } - if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then -cat >>confdefs.h <<_ACEOF -#define PTHREAD_CREATE_JOINABLE $attr_name -_ACEOF - fi + ;; +esac + +# System's version of getaddrinfo(), if any, may be used only if we found +# a definition for struct addrinfo; see notes in src/include/getaddrinfo.h. +# We use only our own getaddrinfo.c on Windows, but it's time to revisit that. +if test x"$ac_cv_type_struct_addrinfo" = xyes && \ + test "$PORTNAME" != "win32"; then + ac_fn_c_check_func "$LINENO" "getaddrinfo" "ac_cv_func_getaddrinfo" +if test "x$ac_cv_func_getaddrinfo" = xyes; then : + $as_echo "#define HAVE_GETADDRINFO 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" getaddrinfo.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getaddrinfo.$ac_objext" + ;; +esac + +fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if more special flags are required for pthreads" >&5 -$as_echo_n "checking if more special flags are required for pthreads... 
" >&6; } - flag=no - case ${host_os} in - aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; - osf* | hpux*) flag="-D_REENTRANT";; - solaris*) - if test "$GCC" = "yes"; then - flag="-D_REENTRANT" - else - # TODO: What about Clang on Solaris? - flag="-mt -D_REENTRANT" - fi - ;; - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $flag" >&5 -$as_echo "$flag" >&6; } - if test "x$flag" != xno; then - PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" - fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PTHREAD_PRIO_INHERIT" >&5 -$as_echo_n "checking for PTHREAD_PRIO_INHERIT... " >&6; } -if ${ax_cv_PTHREAD_PRIO_INHERIT+:} false; then : - $as_echo_n "(cached) " >&6 else + case " $LIBOBJS " in + *" getaddrinfo.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getaddrinfo.$ac_objext" + ;; +esac - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -int i = PTHREAD_PRIO_INHERIT; - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ax_cv_PTHREAD_PRIO_INHERIT=yes -else - ax_cv_PTHREAD_PRIO_INHERIT=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_PRIO_INHERIT" >&5 -$as_echo "$ax_cv_PTHREAD_PRIO_INHERIT" >&6; } - if test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"; then : +# Similarly, use system's getopt_long() only if system provides struct option. +if test x"$ac_cv_type_struct_option" = xyes ; then + ac_fn_c_check_func "$LINENO" "getopt_long" "ac_cv_func_getopt_long" +if test "x$ac_cv_func_getopt_long" = xyes; then : + $as_echo "#define HAVE_GETOPT_LONG 1" >>confdefs.h -$as_echo "#define HAVE_PTHREAD_PRIO_INHERIT 1" >>confdefs.h +else + case " $LIBOBJS " in + *" getopt_long.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" + ;; +esac fi - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - # More AIX lossage: compile with *_r variant - if test "x$GCC" != xyes; then - case $host_os in - aix*) - case "x/$CC" in #( - x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6) : - #handle absolute path differently from PATH based program lookup - case "x$CC" in #( - x/*) : - if as_fn_executable_p ${CC}_r; then : - PTHREAD_CC="${CC}_r" -fi ;; #( - *) : - for ac_prog in ${CC}_r -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_PTHREAD_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$PTHREAD_CC"; then - ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test. else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. 
- for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_PTHREAD_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS + case " $LIBOBJS " in + *" getopt_long.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" + ;; +esac fi -fi -PTHREAD_CC=$ac_cv_prog_PTHREAD_CC -if test -n "$PTHREAD_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PTHREAD_CC" >&5 -$as_echo "$PTHREAD_CC" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + +# Solaris' getopt() doesn't do what we want for long options, so always use +# our version on that platform. +if test "$PORTNAME" = "solaris"; then + case " $LIBOBJS " in + *" getopt.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt.$ac_objext" + ;; +esac + fi +# mingw has adopted a GNU-centric interpretation of optind/optreset, +# so always use our version on Windows. +if test "$PORTNAME" = "win32"; then + case " $LIBOBJS " in + *" getopt.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt.$ac_objext" + ;; +esac - test -n "$PTHREAD_CC" && break -done -test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" + case " $LIBOBJS " in + *" getopt_long.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" ;; -esac ;; #( - *) : - ;; esac - ;; - esac - fi + fi -test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" +# Win32 (really MinGW) support +if test "$PORTNAME" = "win32"; then + ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday" +if test "x$ac_cv_func_gettimeofday" = xyes; then : + $as_echo "#define HAVE_GETTIMEOFDAY 1" >>confdefs.h +else + case " $LIBOBJS " in + *" gettimeofday.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS gettimeofday.$ac_objext" + ;; +esac +fi + case " $LIBOBJS " in + *" dirmod.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" + ;; +esac -# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: -if test x"$ax_pthread_ok" = xyes; then + case " $LIBOBJS " in + *" kill.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS kill.$ac_objext" + ;; +esac -$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h + case " $LIBOBJS " in + *" open.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS open.$ac_objext" + ;; +esac - : -else - ax_pthread_ok=no + case " $LIBOBJS " in + *" system.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS system.$ac_objext" + ;; +esac -fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu + case " $LIBOBJS " in + *" win32env.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS win32env.$ac_objext" + ;; +esac - # set thread flags + case " $LIBOBJS " in + *" win32error.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS win32error.$ac_objext" + ;; +esac -# Some platforms use these, so just define them. They can't hurt if they -# are not supported. For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS -# enables 5-arg getpwuid_r, among other things. 
-PTHREAD_CFLAGS="$PTHREAD_CFLAGS -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS" + case " $LIBOBJS " in + *" win32setlocale.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS win32setlocale.$ac_objext" + ;; +esac -# Check for *_r functions -_CFLAGS="$CFLAGS" -_LIBS="$LIBS" -CFLAGS="$CFLAGS $PTHREAD_CFLAGS" -LIBS="$LIBS $PTHREAD_LIBS" -if test "$PORTNAME" != "win32"; then -ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default" -if test "x$ac_cv_header_pthread_h" = xyes; then : +$as_echo "#define HAVE_SYMLINK 1" >>confdefs.h + + ac_fn_c_check_type "$LINENO" "MINIDUMP_TYPE" "ac_cv_type_MINIDUMP_TYPE" " +#define WIN32_LEAN_AND_MEAN +#include +#include +#include +" +if test "x$ac_cv_type_MINIDUMP_TYPE" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_MINIDUMP_TYPE 1 +_ACEOF +pgac_minidump_type=yes else - as_fn_error $? " -pthread.h not found; use --disable-thread-safety to disable thread safety" "$LINENO" 5 + pgac_minidump_type=no +fi + fi +if test x"$pgac_minidump_type" = x"yes" ; then + have_win32_dbghelp=yes +else + have_win32_dbghelp=no fi -for ac_func in strerror_r getpwuid_r gethostbyname_r -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF +# Cygwin needs only a bit of that +if test "$PORTNAME" = "cygwin"; then + case " $LIBOBJS " in + *" dirmod.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" + ;; +esac fi -done - -# Do test here with the proper thread flags -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns int" >&5 -$as_echo_n "checking whether strerror_r returns int... " >&6; } -if ${pgac_cv_func_strerror_r_int+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sigsetjmp" >&5 +$as_echo_n "checking for sigsetjmp... " >&6; } +if ${pgac_cv_func_sigsetjmp+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include +#include int main () { -#ifndef _AIX -int strerror_r(int, char *, size_t); -#else -/* Older AIX has 'int' for the third argument so we don't test the args. */ -int strerror_r(); -#endif +sigjmp_buf x; sigsetjmp(x, 1); ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_func_strerror_r_int=yes +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_func_sigsetjmp=yes else - pgac_cv_func_strerror_r_int=no + pgac_cv_func_sigsetjmp=no fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_strerror_r_int" >&5 -$as_echo "$pgac_cv_func_strerror_r_int" >&6; } -if test x"$pgac_cv_func_strerror_r_int" = xyes ; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_sigsetjmp" >&5 +$as_echo "$pgac_cv_func_sigsetjmp" >&6; } +if test x"$pgac_cv_func_sigsetjmp" = x"yes"; then -$as_echo "#define STRERROR_R_INT 1" >>confdefs.h +$as_echo "#define HAVE_SIGSETJMP 1" >>confdefs.h fi +ac_fn_c_check_decl "$LINENO" "sys_siglist" "ac_cv_have_decl_sys_siglist" "#include +/* NetBSD declares sys_siglist in unistd.h. 
*/ +#ifdef HAVE_UNISTD_H +# include +#endif -CFLAGS="$_CFLAGS" -LIBS="$_LIBS" - +" +if test "x$ac_cv_have_decl_sys_siglist" = xyes; then : + ac_have_decl=1 else -# do not use values from template file -PTHREAD_CFLAGS= -PTHREAD_LIBS= + ac_have_decl=0 fi +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_SYS_SIGLIST $ac_have_decl +_ACEOF + + + +ac_fn_c_check_func "$LINENO" "syslog" "ac_cv_func_syslog" +if test "x$ac_cv_func_syslog" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "syslog.h" "ac_cv_header_syslog_h" "$ac_includes_default" +if test "x$ac_cv_header_syslog_h" = xyes; then : + +$as_echo "#define HAVE_SYSLOG 1" >>confdefs.h +fi +fi -# We can test for libldap_r only after we know PTHREAD_LIBS -if test "$with_ldap" = yes ; then - _LIBS="$LIBS" - if test "$PORTNAME" != "win32"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_bind in -lldap" >&5 -$as_echo_n "checking for ldap_bind in -lldap... " >&6; } -if ${ac_cv_lib_ldap_ldap_bind+:} false; then : + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for opterr" >&5 +$as_echo_n "checking for opterr... " >&6; } +if ${pgac_cv_var_int_opterr+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lldap $EXTRA_LDAP_LIBS $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char ldap_bind (); +#include int main () { -return ldap_bind (); +extern int opterr; opterr = 1; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ldap_ldap_bind=yes + pgac_cv_var_int_opterr=yes else - ac_cv_lib_ldap_ldap_bind=no + pgac_cv_var_int_opterr=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_ldap_bind" >&5 -$as_echo "$ac_cv_lib_ldap_ldap_bind" >&6; } -if test "x$ac_cv_lib_ldap_ldap_bind" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBLDAP 1 -_ACEOF +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_opterr" >&5 +$as_echo "$pgac_cv_var_int_opterr" >&6; } +if test x"$pgac_cv_var_int_opterr" = x"yes"; then - LIBS="-lldap $LIBS" +$as_echo "#define HAVE_INT_OPTERR 1" >>confdefs.h -else - as_fn_error $? "library 'ldap' is required for LDAP" "$LINENO" 5 fi - LDAP_LIBS_BE="-lldap $EXTRA_LDAP_LIBS" - if test "$enable_thread_safety" = yes; then - # on some platforms ldap_r fails to link without PTHREAD_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_simple_bind in -lldap_r" >&5 -$as_echo_n "checking for ldap_simple_bind in -lldap_r... " >&6; } -if ${ac_cv_lib_ldap_r_ldap_simple_bind+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for optreset" >&5 +$as_echo_n "checking for optreset... " >&6; } +if ${pgac_cv_var_int_optreset+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lldap_r $PTHREAD_CFLAGS $PTHREAD_LIBS $EXTRA_LDAP_LIBS $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char ldap_simple_bind (); +#include int main () { -return ldap_simple_bind (); +extern int optreset; optreset = 1; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ldap_r_ldap_simple_bind=yes + pgac_cv_var_int_optreset=yes else - ac_cv_lib_ldap_r_ldap_simple_bind=no + pgac_cv_var_int_optreset=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_r_ldap_simple_bind" >&5 -$as_echo "$ac_cv_lib_ldap_r_ldap_simple_bind" >&6; } -if test "x$ac_cv_lib_ldap_r_ldap_simple_bind" = xyes; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_optreset" >&5 +$as_echo "$pgac_cv_var_int_optreset" >&6; } +if test x"$pgac_cv_var_int_optreset" = x"yes"; then + +$as_echo "#define HAVE_INT_OPTRESET 1" >>confdefs.h + +fi + +for ac_func in strtoll strtoq +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF -#define HAVE_LIBLDAP_R 1 +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 _ACEOF + break +fi +done - LIBS="-lldap_r $LIBS" - -else - as_fn_error $? "library 'ldap_r' is required for LDAP" "$LINENO" 5 +for ac_func in strtoull strtouq +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + break fi +done - LDAP_LIBS_FE="-lldap_r $EXTRA_LDAP_LIBS" - else - LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_bind in -lwldap32" >&5 -$as_echo_n "checking for ldap_bind in -lwldap32... " >&6; } -if ${ac_cv_lib_wldap32_ldap_bind+:} false; then : + +# Lastly, restore full LIBS list and check for readline/libedit symbols +LIBS="$LIBS_including_readline" + +if test "$with_readline" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for rl_completion_append_character" >&5 +$as_echo_n "checking for rl_completion_append_character... " >&6; } +if ${pgac_cv_var_rl_completion_append_character+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lwldap32 $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" +#include +#ifdef HAVE_READLINE_READLINE_H +# include +#elif defined(HAVE_READLINE_H) +# include #endif -char ldap_bind (); + int main () { -return ldap_bind (); +rl_completion_append_character = 'x'; ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_wldap32_ldap_bind=yes + pgac_cv_var_rl_completion_append_character=yes else - ac_cv_lib_wldap32_ldap_bind=no + pgac_cv_var_rl_completion_append_character=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_wldap32_ldap_bind" >&5 -$as_echo "$ac_cv_lib_wldap32_ldap_bind" >&6; } -if test "x$ac_cv_lib_wldap32_ldap_bind" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBWLDAP32 1 -_ACEOF +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_rl_completion_append_character" >&5 +$as_echo "$pgac_cv_var_rl_completion_append_character" >&6; } +if test x"$pgac_cv_var_rl_completion_append_character" = x"yes"; then - LIBS="-lwldap32 $LIBS" +$as_echo "#define HAVE_RL_COMPLETION_APPEND_CHARACTER 1" >>confdefs.h -else - as_fn_error $? "library 'wldap32' is required for LDAP" "$LINENO" 5 fi + for ac_func in rl_completion_matches rl_filename_completion_function +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF - LDAP_LIBS_FE="-lwldap32" - LDAP_LIBS_BE="-lwldap32" - fi - LIBS="$_LIBS" fi +done + + for ac_func in append_history history_truncate_file +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF +fi +done +fi # This test makes sure that run tests work at all. Sometimes a shared diff --git a/configure.in b/configure.in index 9f2db8169b4fe..b43c8ec0165ca 100644 --- a/configure.in +++ b/configure.in @@ -947,6 +947,54 @@ program to use during the build.]) fi fi +# +# Pthreads +# +# For each platform, we need to know about any special compile and link +# libraries, and whether the normal C function names are thread-safe. +# See the comment at the top of src/port/thread.c for more information. +# WIN32 doesn't need the pthread tests; it always uses threads +# +# These tests are run before the library-tests, because linking with the +# other libraries can pull in the pthread functions as a side-effect. We +# want to use the -pthread or similar flags directly, and not rely on +# the side-effects of linking with some other library. +if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then +AX_PTHREAD # set thread flags + +# Some platforms use these, so just define them. They can't hurt if they +# are not supported. For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS +# enables 5-arg getpwuid_r, among other things. 
+PTHREAD_CFLAGS="$PTHREAD_CFLAGS -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS" + +# Check for *_r functions +_CFLAGS="$CFLAGS" +_LIBS="$LIBS" +CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +LIBS="$LIBS $PTHREAD_LIBS" + +if test "$PORTNAME" != "win32"; then +AC_CHECK_HEADER(pthread.h, [], [AC_MSG_ERROR([ +pthread.h not found; use --disable-thread-safety to disable thread safety])]) +fi + +AC_CHECK_FUNCS([strerror_r getpwuid_r gethostbyname_r]) + +# Do test here with the proper thread flags +PGAC_FUNC_STRERROR_R_INT + +CFLAGS="$_CFLAGS" +LIBS="$_LIBS" + +else +# do not use values from template file +PTHREAD_CFLAGS= +PTHREAD_LIBS= +fi + +AC_SUBST(PTHREAD_CFLAGS) +AC_SUBST(PTHREAD_LIBS) + ## ## Libraries @@ -1047,6 +1095,33 @@ if test "$with_libxslt" = yes ; then AC_CHECK_LIB(xslt, xsltCleanupGlobals, [], [AC_MSG_ERROR([library 'xslt' is required for XSLT support])]) fi +# Note: We can test for libldap_r only after we know PTHREAD_LIBS +if test "$with_ldap" = yes ; then + _LIBS="$LIBS" + if test "$PORTNAME" != "win32"; then + AC_CHECK_LIB(ldap, ldap_bind, [], + [AC_MSG_ERROR([library 'ldap' is required for LDAP])], + [$EXTRA_LDAP_LIBS]) + LDAP_LIBS_BE="-lldap $EXTRA_LDAP_LIBS" + if test "$enable_thread_safety" = yes; then + # on some platforms ldap_r fails to link without PTHREAD_LIBS + AC_CHECK_LIB(ldap_r, ldap_simple_bind, [], + [AC_MSG_ERROR([library 'ldap_r' is required for LDAP])], + [$PTHREAD_CFLAGS $PTHREAD_LIBS $EXTRA_LDAP_LIBS]) + LDAP_LIBS_FE="-lldap_r $EXTRA_LDAP_LIBS" + else + LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" + fi + else + AC_CHECK_LIB(wldap32, ldap_bind, [], [AC_MSG_ERROR([library 'wldap32' is required for LDAP])]) + LDAP_LIBS_FE="-lwldap32" + LDAP_LIBS_BE="-lwldap32" + fi + LIBS="$_LIBS" +fi +AC_SUBST(LDAP_LIBS_FE) +AC_SUBST(LDAP_LIBS_BE) + # for contrib/sepgsql if test "$with_selinux" = yes; then AC_CHECK_LIB(selinux, security_compute_create_name, [], @@ -1563,78 +1638,6 @@ if test "$with_readline" = yes; then fi -# -# Pthreads -# -# For each platform, we need to know about any special compile and link -# libraries, and whether the normal C function names are thread-safe. -# See the comment at the top of src/port/thread.c for more information. -# WIN32 doesn't need the pthread tests; it always uses threads -if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then -AX_PTHREAD # set thread flags - -# Some platforms use these, so just define them. They can't hurt if they -# are not supported. For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS -# enables 5-arg getpwuid_r, among other things. 
-PTHREAD_CFLAGS="$PTHREAD_CFLAGS -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS" - -# Check for *_r functions -_CFLAGS="$CFLAGS" -_LIBS="$LIBS" -CFLAGS="$CFLAGS $PTHREAD_CFLAGS" -LIBS="$LIBS $PTHREAD_LIBS" - -if test "$PORTNAME" != "win32"; then -AC_CHECK_HEADER(pthread.h, [], [AC_MSG_ERROR([ -pthread.h not found; use --disable-thread-safety to disable thread safety])]) -fi - -AC_CHECK_FUNCS([strerror_r getpwuid_r gethostbyname_r]) - -# Do test here with the proper thread flags -PGAC_FUNC_STRERROR_R_INT - -CFLAGS="$_CFLAGS" -LIBS="$_LIBS" - -else -# do not use values from template file -PTHREAD_CFLAGS= -PTHREAD_LIBS= -fi - -AC_SUBST(PTHREAD_CFLAGS) -AC_SUBST(PTHREAD_LIBS) - - -# We can test for libldap_r only after we know PTHREAD_LIBS -if test "$with_ldap" = yes ; then - _LIBS="$LIBS" - if test "$PORTNAME" != "win32"; then - AC_CHECK_LIB(ldap, ldap_bind, [], - [AC_MSG_ERROR([library 'ldap' is required for LDAP])], - [$EXTRA_LDAP_LIBS]) - LDAP_LIBS_BE="-lldap $EXTRA_LDAP_LIBS" - if test "$enable_thread_safety" = yes; then - # on some platforms ldap_r fails to link without PTHREAD_LIBS - AC_CHECK_LIB(ldap_r, ldap_simple_bind, [], - [AC_MSG_ERROR([library 'ldap_r' is required for LDAP])], - [$PTHREAD_CFLAGS $PTHREAD_LIBS $EXTRA_LDAP_LIBS]) - LDAP_LIBS_FE="-lldap_r $EXTRA_LDAP_LIBS" - else - LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" - fi - else - AC_CHECK_LIB(wldap32, ldap_bind, [], [AC_MSG_ERROR([library 'wldap32' is required for LDAP])]) - LDAP_LIBS_FE="-lwldap32" - LDAP_LIBS_BE="-lwldap32" - fi - LIBS="$_LIBS" -fi -AC_SUBST(LDAP_LIBS_FE) -AC_SUBST(LDAP_LIBS_BE) - - # This test makes sure that run tests work at all. Sometimes a shared # library is found by the linker, but the runtime linker can't find it. # This check should come after all modifications of compiler or linker From fb990ce6c7d99d329843e5d70d4cdaf8d0825b38 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 8 Jul 2015 20:44:21 -0400 Subject: [PATCH 030/442] Fix null pointer dereference in "\c" psql command. The psql crash happened when no current connection existed. (The second new check is optional given today's undocumented NULL argument handling in PQhost() etc.) Back-patch to 9.0 (all supported versions). --- src/bin/psql/command.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/bin/psql/command.c b/src/bin/psql/command.c index 38253fa09886e..467e34b1ffcd8 100644 --- a/src/bin/psql/command.c +++ b/src/bin/psql/command.c @@ -1641,7 +1641,8 @@ do_connect(char *dbname, char *user, char *host, char *port) * positional syntax. */ keep_password = - ((strcmp(user, PQuser(o_conn)) == 0) && + (o_conn && + (strcmp(user, PQuser(o_conn)) == 0) && (!host || strcmp(host, PQhost(o_conn)) == 0) && (strcmp(port, PQport(o_conn)) == 0) && !has_connection_string); @@ -1768,7 +1769,8 @@ do_connect(char *dbname, char *user, char *host, char *port) /* Tell the user about the new connection */ if (!pset.quiet) { - if (param_is_newly_set(PQhost(o_conn), PQhost(pset.db)) || + if (!o_conn || + param_is_newly_set(PQhost(o_conn), PQhost(pset.db)) || param_is_newly_set(PQport(o_conn), PQport(pset.db))) { char *host = PQhost(pset.db); From 8ed6e70ace8e8d2f0747c16a796a21147ffaf404 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 8 Jul 2015 20:44:21 -0400 Subject: [PATCH 031/442] Replace use of "diff -q". POSIX does not specify the -q option, and many implementations do not offer it. Don't bother changing the MSVC build system, because having non-GNU diff on Windows is vanishingly unlikely. 
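A minimal standalone sketch of the portable idiom, with placeholder file names rather than the paths test.sh actually uses: plain diff with its output discarded gives the same pass/fail behaviour as "diff -q" while staying within POSIX.

#!/bin/sh
# Portable file comparison without the non-POSIX -q option:
# POSIX diff exits 0 when the files are identical and non-zero when
# they differ or cannot be read, so redirecting its output is enough.
if diff old_dump.sql new_dump.sql >/dev/null; then
    echo "PASSED"
    exit 0
else
    echo "dumps were not identical"
    exit 1
fi

The hunk below applies the same change to pg_upgrade's test.sh.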
Back-patch to 9.2, where this invocation was introduced. --- src/bin/pg_upgrade/test.sh | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/bin/pg_upgrade/test.sh b/src/bin/pg_upgrade/test.sh index 07002f6a1659a..f4e5d9ae6e920 100644 --- a/src/bin/pg_upgrade/test.sh +++ b/src/bin/pg_upgrade/test.sh @@ -216,10 +216,11 @@ case $testhost in *) sh ./delete_old_cluster.sh ;; esac -if diff -q "$temp_root"/dump1.sql "$temp_root"/dump2.sql; then +if diff "$temp_root"/dump1.sql "$temp_root"/dump2.sql >/dev/null; then echo PASSED exit 0 else + echo "Files $temp_root/dump1.sql and $temp_root/dump2.sql differ" echo "dumps were not identical" exit 1 fi From aaf15ee33a63c582fbb61b67befdd620e85da2ce Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 8 Jul 2015 20:44:21 -0400 Subject: [PATCH 032/442] Revoke support for strxfrm() that write past the specified array length. This formalizes a decision implicit in commit 4ea51cdfe85ceef8afabceb03c446574daa0ac23 and adds clean detection of affected systems. Vendor updates are available for each such known bug. Back-patch to 9.5, where the aforementioned commit first appeared. --- src/backend/main/main.c | 2 ++ src/backend/utils/adt/pg_locale.c | 58 +++++++++++++++++++++++++++++++ src/backend/utils/adt/selfuncs.c | 17 ++++----- src/backend/utils/init/postinit.c | 2 ++ src/include/utils/pg_locale.h | 1 + 5 files changed, 70 insertions(+), 10 deletions(-) diff --git a/src/backend/main/main.c b/src/backend/main/main.c index 2ecadd660cfda..4fad6f3dc5545 100644 --- a/src/backend/main/main.c +++ b/src/backend/main/main.c @@ -149,6 +149,8 @@ main(int argc, char *argv[]) */ unsetenv("LC_ALL"); + check_strxfrm_bug(); + /* * Catch standard options before doing much else, in particular before we * insist on not being root. diff --git a/src/backend/utils/adt/pg_locale.c b/src/backend/utils/adt/pg_locale.c index 84215e07a772a..d91959ea7f840 100644 --- a/src/backend/utils/adt/pg_locale.c +++ b/src/backend/utils/adt/pg_locale.c @@ -854,6 +854,64 @@ IsoLocaleName(const char *winlocname) #endif /* WIN32 && LC_MESSAGES */ +/* + * Detect aging strxfrm() implementations that, in a subset of locales, write + * past the specified buffer length. Affected users must update OS packages + * before using PostgreSQL 9.5 or later. + * + * Assume that the bug can come and go from one postmaster startup to another + * due to physical replication among diverse machines. Assume that the bug's + * presence will not change during the life of a particular postmaster. Given + * those assumptions, call this no less than once per postmaster startup per + * LC_COLLATE setting used. No known-affected system offers strxfrm_l(), so + * there is no need to consider pg_collation locales. + */ +void +check_strxfrm_bug(void) +{ + char buf[32]; + const int canary = 0x7F; + bool ok = true; + + /* + * Given a two-byte ASCII string and length limit 7, 8 or 9, Solaris 10 + * 05/08 returns 18 and modifies 10 bytes. It respects limits above or + * below that range. + * + * The bug is present in Solaris 8 as well; it is absent in Solaris 10 + * 01/13 and Solaris 11.2. Affected locales include is_IS.ISO8859-1, + * en_US.UTF-8, en_US.ISO8859-1, and ru_RU.KOI8-R. Unaffected locales + * include de_DE.UTF-8, de_DE.ISO8859-1, zh_TW.UTF-8, and C. + */ + buf[7] = canary; + (void) strxfrm(buf, "ab", 7); + if (buf[7] != canary) + ok = false; + + /* + * illumos bug #1594 was present in the source tree from 2010-10-11 to + * 2012-02-01. 
Given an ASCII string of any length and length limit 1, + * affected systems ignore the length limit and modify a number of bytes + * one less than the return value. The problem inputs for this bug do not + * overlap those for the Solaris bug, hence a distinct test. + * + * Affected systems include smartos-20110926T021612Z. Affected locales + * include en_US.ISO8859-1 and en_US.UTF-8. Unaffected locales include C. + */ + buf[1] = canary; + (void) strxfrm(buf, "a", 1); + if (buf[1] != canary) + ok = false; + + if (!ok) + ereport(ERROR, + (errcode(ERRCODE_SYSTEM_ERROR), + errmsg_internal("strxfrm(), in locale \"%s\", writes past the specified array length", + setlocale(LC_COLLATE, NULL)), + errhint("Apply system library package updates."))); +} + + /* * Cache mechanism for collation information. * diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 04ed07b762df6..64b6ae4838f24 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -3932,16 +3932,8 @@ convert_string_datum(Datum value, Oid typid) size_t xfrmlen2 PG_USED_FOR_ASSERTS_ONLY; /* - * Note: originally we guessed at a suitable output buffer size, and - * only needed to call strxfrm twice if our guess was too small. - * However, it seems that some versions of Solaris have buggy strxfrm - * that can write past the specified buffer length in that scenario. - * So, do it the dumb way for portability. - * - * Yet other systems (e.g., glibc) sometimes return a smaller value - * from the second call than the first; thus the Assert must be <= not - * == as you'd expect. Can't any of these people program their way - * out of a paper bag? + * XXX: We could guess at a suitable output buffer size and only call + * strxfrm twice if our guess is too small. * * XXX: strxfrm doesn't support UTF-8 encoding on Win32, it can return * bogus data or set an error. This is not really a problem unless it @@ -3974,6 +3966,11 @@ convert_string_datum(Datum value, Oid typid) #endif xfrmstr = (char *) palloc(xfrmlen + 1); xfrmlen2 = strxfrm(xfrmstr, val, xfrmlen + 1); + + /* + * Some systems (e.g., glibc) can return a smaller value from the + * second call than the first; thus the Assert must be <= not ==. + */ Assert(xfrmlen2 <= xfrmlen); pfree(val); val = xfrmstr; diff --git a/src/backend/utils/init/postinit.c b/src/backend/utils/init/postinit.c index 063b0653b4941..c172d3a30e96c 100644 --- a/src/backend/utils/init/postinit.c +++ b/src/backend/utils/init/postinit.c @@ -378,6 +378,8 @@ CheckMyDatabase(const char *name, bool am_superuser) SetConfigOption("lc_collate", collate, PGC_INTERNAL, PGC_S_OVERRIDE); SetConfigOption("lc_ctype", ctype, PGC_INTERNAL, PGC_S_OVERRIDE); + check_strxfrm_bug(); + ReleaseSysCache(tup); } diff --git a/src/include/utils/pg_locale.h b/src/include/utils/pg_locale.h index 3b5613852bf8c..8e91033a41dd6 100644 --- a/src/include/utils/pg_locale.h +++ b/src/include/utils/pg_locale.h @@ -44,6 +44,7 @@ extern void assign_locale_time(const char *newval, void *extra); extern bool check_locale(int category, const char *locale, char **canonname); extern char *pg_perm_setlocale(int category, const char *locale); +extern void check_strxfrm_bug(void); extern bool lc_collate_is_c(Oid collation); extern bool lc_ctype_is_c(Oid collation); From abf5190c07a7e4de2b10b01dc38723aaa28339f6 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 8 Jul 2015 20:44:21 -0400 Subject: [PATCH 033/442] Finish generic-xlc.h draft atomics implementation. 
Back-patch to 9.5, where commit b64d92f1a5602c55ee8b27a7ac474f03b7aee340 introduced this file. --- src/include/port/atomics/generic-xlc.h | 26 ++++++-------------------- 1 file changed, 6 insertions(+), 20 deletions(-) diff --git a/src/include/port/atomics/generic-xlc.h b/src/include/port/atomics/generic-xlc.h index 1c743f2bc8060..0ad9168ed2849 100644 --- a/src/include/port/atomics/generic-xlc.h +++ b/src/include/port/atomics/generic-xlc.h @@ -18,8 +18,6 @@ #if defined(HAVE_ATOMICS) -#include - #define PG_HAVE_ATOMIC_U32_SUPPORT typedef struct pg_atomic_uint32 { @@ -48,9 +46,6 @@ static inline bool pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, uint32 *expected, uint32 newval) { - bool ret; - uint64 current; - /* * xlc's documentation tells us: * "If __compare_and_swap is used as a locking primitive, insert a call to @@ -62,18 +57,15 @@ pg_atomic_compare_exchange_u32_impl(volatile pg_atomic_uint32 *ptr, * XXX: __compare_and_swap is defined to take signed parameters, but that * shouldn't matter since we don't perform any arithmetic operations. */ - current = (uint32)__compare_and_swap((volatile int*)ptr->value, - (int)*expected, (int)newval); - ret = current == *expected; - *expected = current; - return ret; + return __compare_and_swap((volatile int*)&ptr->value, + (int *)expected, (int)newval); } #define PG_HAVE_ATOMIC_FETCH_ADD_U32 static inline uint32 pg_atomic_fetch_add_u32_impl(volatile pg_atomic_uint32 *ptr, int32 add_) { - return __fetch_and_add(&ptr->value, add_); + return __fetch_and_add((volatile int *)&ptr->value, add_); } #ifdef PG_HAVE_ATOMIC_U64_SUPPORT @@ -83,23 +75,17 @@ static inline bool pg_atomic_compare_exchange_u64_impl(volatile pg_atomic_uint64 *ptr, uint64 *expected, uint64 newval) { - bool ret; - uint64 current; - __isync(); - current = (uint64)__compare_and_swaplp((volatile long*)ptr->value, - (long)*expected, (long)newval); - ret = current == *expected; - *expected = current; - return ret; + return __compare_and_swaplp((volatile long*)&ptr->value, + (long *)expected, (long)newval);; } #define PG_HAVE_ATOMIC_FETCH_ADD_U64 static inline uint64 pg_atomic_fetch_add_u64_impl(volatile pg_atomic_uint64 *ptr, int64 add_) { - return __fetch_and_addlp(&ptr->value, add_); + return __fetch_and_addlp((volatile long *)&ptr->value, add_); } #endif /* PG_HAVE_ATOMIC_U64_SUPPORT */ From c0d7342f1650b6fdefc865c6da33e1f092778af0 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 8 Jul 2015 20:44:21 -0400 Subject: [PATCH 034/442] Given a gcc-compatible xlc compiler, prefer xlc-style atomics. This evades a ppc64le "IBM XL C/C++ for Linux" compiler bug. Back-patch to 9.5, where the atomics facility was introduced. --- src/include/port/atomics.h | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/include/port/atomics.h b/src/include/port/atomics.h index 1a4c748cb9717..97a00641119d0 100644 --- a/src/include/port/atomics.h +++ b/src/include/port/atomics.h @@ -81,8 +81,15 @@ * * pg_atomic_compare_exchange_u32(), pg_atomic_fetch_add_u32() * using compiler intrinsics are a good idea. */ +/* + * Given a gcc-compatible xlc compiler, prefer the xlc implementation. The + * ppc64le "IBM XL C/C++ for Linux, V13.1.2" implements both interfaces, but + * __sync_lock_test_and_set() of one-byte types elicits SIGSEGV. 
+ */ +#if defined(__IBMC__) || defined(__IBMCPP__) +#include "port/atomics/generic-xlc.h" /* gcc or compatible, including clang and icc */ -#if defined(__GNUC__) || defined(__INTEL_COMPILER) +#elif defined(__GNUC__) || defined(__INTEL_COMPILER) #include "port/atomics/generic-gcc.h" #elif defined(WIN32_ONLY_COMPILER) #include "port/atomics/generic-msvc.h" @@ -90,8 +97,6 @@ #include "port/atomics/generic-acc.h" #elif defined(__SUNPRO_C) && !defined(__GNUC__) #include "port/atomics/generic-sunpro.h" -#elif (defined(__IBMC__) || defined(__IBMCPP__)) && !defined(__GNUC__) -#include "port/atomics/generic-xlc.h" #else /* * Unsupported compiler, we'll likely use slower fallbacks... At least From c1fb42127944d5613df2f3d330c19448fc10ed01 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 8 Jul 2015 20:44:22 -0400 Subject: [PATCH 035/442] Link pg_stat_statements with libm. The AIX 7.1 libm is static, and AIX postgres executables do not export symbols acquired from libraries. Back-patch to 9.5, where commit cfe12763c32437bc708a64ce88a90c7544f16185 added a sqrt() call. --- contrib/pg_stat_statements/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/pg_stat_statements/Makefile b/contrib/pg_stat_statements/Makefile index 975a637897cdf..835ec82fc6056 100644 --- a/contrib/pg_stat_statements/Makefile +++ b/contrib/pg_stat_statements/Makefile @@ -9,6 +9,8 @@ DATA = pg_stat_statements--1.3.sql pg_stat_statements--1.2--1.3.sql \ pg_stat_statements--unpackaged--1.0.sql PGFILEDESC = "pg_stat_statements - execution statistics of SQL statements" +LDFLAGS_SL += $(filter -lm, $(LIBS)) + ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) From 7f06c7082a34f4ea564e31ee01114784a788b9fa Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 9 Jul 2015 10:58:24 +0300 Subject: [PATCH 036/442] Revert changes to pthread configure tests on REL9_5_STABLE. Some buildfarm animals are still unhappy. These changes are becoming too invasive for backpatch, for little benefit. This reverts commits 080c4dab3d9575449b81604051b160597cfd55c3 and ce0da6261004ac15f01c21d8b94f11af7a098243. --- aclocal.m4 | 2 +- config/acx_pthread.m4 | 171 ++ config/ax_pthread.m4 | 332 ---- configure | 3886 ++++++++++++++++++++--------------------- configure.in | 147 +- 5 files changed, 2092 insertions(+), 2446 deletions(-) create mode 100644 config/acx_pthread.m4 delete mode 100644 config/ax_pthread.m4 diff --git a/aclocal.m4 b/aclocal.m4 index 6f930b6fc1be2..eaf98007e5b19 100644 --- a/aclocal.m4 +++ b/aclocal.m4 @@ -1,6 +1,6 @@ dnl aclocal.m4 m4_include([config/ac_func_accept_argtypes.m4]) -m4_include([config/ax_pthread.m4]) +m4_include([config/acx_pthread.m4]) m4_include([config/c-compiler.m4]) m4_include([config/c-library.m4]) m4_include([config/docbook.m4]) diff --git a/config/acx_pthread.m4 b/config/acx_pthread.m4 new file mode 100644 index 0000000000000..581164b1e559e --- /dev/null +++ b/config/acx_pthread.m4 @@ -0,0 +1,171 @@ +dnl This is based on an old macro from the GNU Autoconf Macro Archive at: +dnl http://www.gnu.org/software/ac-archive/htmldoc/acx_pthread.html +dnl but it's been rather heavily hacked --- beware of blindly dropping in +dnl upstream changes! +dnl +AC_DEFUN([ACX_PTHREAD], [ +AC_REQUIRE([AC_CANONICAL_HOST]) +AC_LANG_SAVE +AC_LANG_C +acx_pthread_ok=no + +# We used to check for pthread.h first, but this fails if pthread.h +# requires special compiler flags (e.g. on True64 or Sequent). +# It gets checked for in the link test anyway. 
+ +# First of all, check if the user has set any of the PTHREAD_LIBS, +# etcetera environment variables, and if threads linking works using +# them: +if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) + AC_TRY_LINK_FUNC(pthread_join, acx_pthread_ok=yes) + AC_MSG_RESULT($acx_pthread_ok) + if test x"$acx_pthread_ok" = xno; then + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" + fi + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" +fi + +# We must check for the threads library under a number of different +# names; the ordering is very important because some systems +# (e.g. DEC) have both -lpthread and -lpthreads, where one of the +# libraries is broken (non-POSIX). + +# Create a list of thread flags to try. Items starting with a "-" are +# C compiler flags, and other items are library names, except for "none" +# which indicates that we try without any flags at all, and "pthread-config" +# which is a program returning the flags for the Pth emulation library. + +acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config pthreadGC2" + +# The ordering *is* (sometimes) important. Some notes on the +# individual items follow: + +# pthreads: AIX (must check this before -lpthread) +# none: in case threads are in libc; should be tried before -Kthread and +# other compiler flags to prevent continual compiler warnings +# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) +# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) +# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) +# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) +# -pthreads: Solaris/gcc +# -mthreads: Mingw32/gcc, Lynx/gcc +# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it +# doesn't hurt to check since this sometimes defines pthreads too; +# also defines -D_REENTRANT) +# pthread: Linux, etcetera +# --thread-safe: KAI C++ +# pthread-config: use pthread-config program (for GNU Pth library) + +case "${host_cpu}-${host_os}" in + *solaris*) + + # On Solaris (at least, for some versions), libc contains stubbed + # (non-functional) versions of the pthreads routines, so link-based + # tests will erroneously succeed. (We need to link with -pthread or + # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather + # a function called by this macro, so we could check for that, but + # who knows whether they'll stub that too in a future libc.) 
So, + # we'll just look for -pthreads and -lpthread first: + + acx_pthread_flags="-pthread -pthreads pthread -mt $acx_pthread_flags" + ;; +esac + +if test x"$acx_pthread_ok" = xno; then +for flag in $acx_pthread_flags; do + + tryPTHREAD_CFLAGS="" + tryPTHREAD_LIBS="" + case $flag in + none) + AC_MSG_CHECKING([whether pthreads work without any flags]) + ;; + + -*) + AC_MSG_CHECKING([whether pthreads work with $flag]) + tryPTHREAD_CFLAGS="$flag" + ;; + + pthread-config) + # skip this if we already have flags defined, for PostgreSQL + if test x"$PTHREAD_CFLAGS" != x -o x"$PTHREAD_LIBS" != x; then continue; fi + AC_CHECK_PROG(acx_pthread_config, pthread-config, yes, no) + if test x"$acx_pthread_config" = xno; then continue; fi + tryPTHREAD_CFLAGS="`pthread-config --cflags`" + tryPTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" + ;; + + *) + AC_MSG_CHECKING([for the pthreads library -l$flag]) + tryPTHREAD_LIBS="-l$flag" + ;; + esac + + save_LIBS="$LIBS" + save_CFLAGS="$CFLAGS" + LIBS="$tryPTHREAD_LIBS $PTHREAD_LIBS $LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS $tryPTHREAD_CFLAGS" + + # Check for various functions. We must include pthread.h, + # since some functions may be macros. (On the Sequent, we + # need a special flag -Kthread to make this header compile.) + # We check for pthread_join because it is in -lpthread on IRIX + # while pthread_create is in libc. We check for pthread_attr_init + # due to DEC craziness with -lpthreads. We check for + # pthread_cleanup_push because it is one of the few pthread + # functions on Solaris that doesn't have a non-functional libc stub. + # We try pthread_create on general principles. + AC_TRY_LINK([#include ], + [pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); ], + [acx_pthread_ok=yes], [acx_pthread_ok=no]) + + if test "x$acx_pthread_ok" = xyes; then + # Don't use options that are ignored by the compiler. + # We find them by checking stderror. + cat >conftest.$ac_ext <<_ACEOF +int +main (int argc, char **argv) +{ + (void) argc; + (void) argv; + return 0; +} +_ACEOF + rm -f conftest.$ac_objext conftest$ac_exeext + # Check both linking and compiling, because they might tolerate different options. + if test "`(eval $ac_link 2>&1 1>&5)`" = "" && test "`(eval $ac_compile 2>&1 1>&5)`" = ""; then + # The original macro breaks out of the loop at this point, + # but we continue trying flags because Linux needs -lpthread + # too to build libpq successfully. The test above only + # tests for building binaries, not shared libraries. + PTHREAD_LIBS=" $tryPTHREAD_LIBS $PTHREAD_LIBS" + PTHREAD_CFLAGS="$PTHREAD_CFLAGS $tryPTHREAD_CFLAGS" + else acx_pthread_ok=no + fi + fi + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + + AC_MSG_RESULT($acx_pthread_ok) +done +fi + +# The original macro has a bunch of other tests here, which we have removed +# because (a) Postgres doesn't need them, and (b) $acx_pthread_ok is not +# meaningful at this point. 
+ +AC_SUBST(PTHREAD_LIBS) +AC_SUBST(PTHREAD_CFLAGS) + +AC_LANG_RESTORE +])dnl ACX_PTHREAD diff --git a/config/ax_pthread.m4 b/config/ax_pthread.m4 deleted file mode 100644 index d383ad5c6d6a5..0000000000000 --- a/config/ax_pthread.m4 +++ /dev/null @@ -1,332 +0,0 @@ -# =========================================================================== -# http://www.gnu.org/software/autoconf-archive/ax_pthread.html -# =========================================================================== -# -# SYNOPSIS -# -# AX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) -# -# DESCRIPTION -# -# This macro figures out how to build C programs using POSIX threads. It -# sets the PTHREAD_LIBS output variable to the threads library and linker -# flags, and the PTHREAD_CFLAGS output variable to any special C compiler -# flags that are needed. (The user can also force certain compiler -# flags/libs to be tested by setting these environment variables.) -# -# Also sets PTHREAD_CC to any special C compiler that is needed for -# multi-threaded programs (defaults to the value of CC otherwise). (This -# is necessary on AIX to use the special cc_r compiler alias.) -# -# NOTE: You are assumed to not only compile your program with these flags, -# but also link it with them as well. e.g. you should link with -# $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... $PTHREAD_LIBS $LIBS -# -# If you are only building threads programs, you may wish to use these -# variables in your default LIBS, CFLAGS, and CC: -# -# LIBS="$PTHREAD_LIBS $LIBS" -# CFLAGS="$CFLAGS $PTHREAD_CFLAGS" -# CC="$PTHREAD_CC" -# -# In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute constant -# has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to that name -# (e.g. PTHREAD_CREATE_UNDETACHED on AIX). -# -# Also HAVE_PTHREAD_PRIO_INHERIT is defined if pthread is found and the -# PTHREAD_PRIO_INHERIT symbol is defined when compiling with -# PTHREAD_CFLAGS. -# -# ACTION-IF-FOUND is a list of shell commands to run if a threads library -# is found, and ACTION-IF-NOT-FOUND is a list of commands to run it if it -# is not found. If ACTION-IF-FOUND is not specified, the default action -# will define HAVE_PTHREAD. -# -# Please let the authors know if this macro fails on any platform, or if -# you have any other suggestions or comments. This macro was based on work -# by SGJ on autoconf scripts for FFTW (http://www.fftw.org/) (with help -# from M. Frigo), as well as ac_pthread and hb_pthread macros posted by -# Alejandro Forero Cuervo to the autoconf macro repository. We are also -# grateful for the helpful feedback of numerous users. -# -# Updated for Autoconf 2.68 by Daniel Richard G. -# -# LICENSE -# -# Copyright (c) 2008 Steven G. Johnson -# Copyright (c) 2011 Daniel Richard G. -# -# This program is free software: you can redistribute it and/or modify it -# under the terms of the GNU General Public License as published by the -# Free Software Foundation, either version 3 of the License, or (at your -# option) any later version. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General -# Public License for more details. -# -# You should have received a copy of the GNU General Public License along -# with this program. If not, see . 
-# -# As a special exception, the respective Autoconf Macro's copyright owner -# gives unlimited permission to copy, distribute and modify the configure -# scripts that are the output of Autoconf when processing the Macro. You -# need not follow the terms of the GNU General Public License when using -# or distributing such scripts, even though portions of the text of the -# Macro appear in them. The GNU General Public License (GPL) does govern -# all other use of the material that constitutes the Autoconf Macro. -# -# This special exception to the GPL applies to versions of the Autoconf -# Macro released by the Autoconf Archive. When you make and distribute a -# modified version of the Autoconf Macro, you may extend this special -# exception to the GPL to apply to your modified version as well. - -#serial 21 - -AU_ALIAS([ACX_PTHREAD], [AX_PTHREAD]) -AC_DEFUN([AX_PTHREAD], [ -AC_REQUIRE([AC_CANONICAL_HOST]) -AC_LANG_PUSH([C]) -ax_pthread_ok=no - -# We used to check for pthread.h first, but this fails if pthread.h -# requires special compiler flags (e.g. on True64 or Sequent). -# It gets checked for in the link test anyway. - -# First of all, check if the user has set any of the PTHREAD_LIBS, -# etcetera environment variables, and if threads linking works using -# them: -if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) - AC_TRY_LINK_FUNC([pthread_join], [ax_pthread_ok=yes]) - AC_MSG_RESULT([$ax_pthread_ok]) - if test x"$ax_pthread_ok" = xno; then - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" - fi - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" -fi - -# We must check for the threads library under a number of different -# names; the ordering is very important because some systems -# (e.g. DEC) have both -lpthread and -lpthreads, where one of the -# libraries is broken (non-POSIX). - -# Create a list of thread flags to try. Items starting with a "-" are -# C compiler flags, and other items are library names, except for "none" -# which indicates that we try without any flags at all, and "pthread-config" -# which is a program returning the flags for the Pth emulation library. - -ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" - -# The ordering *is* (sometimes) important. Some notes on the -# individual items follow: - -# pthreads: AIX (must check this before -lpthread) -# none: in case threads are in libc; should be tried before -Kthread and -# other compiler flags to prevent continual compiler warnings -# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) -# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) -# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) -# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) -# -pthreads: Solaris/gcc -# -mthreads: Mingw32/gcc, Lynx/gcc -# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it -# doesn't hurt to check since this sometimes defines pthreads too; -# also defines -D_REENTRANT) -# ... 
-mt is also the pthreads flag for HP/aCC -# pthread: Linux, etcetera -# --thread-safe: KAI C++ -# pthread-config: use pthread-config program (for GNU Pth library) - -case ${host_os} in - solaris*) - - # On Solaris (at least, for some versions), libc contains stubbed - # (non-functional) versions of the pthreads routines, so link-based - # tests will erroneously succeed. (We need to link with -pthreads/-mt/ - # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather - # a function called by this macro, so we could check for that, but - # who knows whether they'll stub that too in a future libc.) So, - # we'll just look for -pthreads and -lpthread first: - - ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" - ;; - - darwin*) - ax_pthread_flags="-pthread $ax_pthread_flags" - ;; -esac - -# Clang doesn't consider unrecognized options an error unless we specify -# -Werror. We throw in some extra Clang-specific options to ensure that -# this doesn't happen for GCC, which also accepts -Werror. - -AC_MSG_CHECKING([if compiler needs -Werror to reject unknown flags]) -save_CFLAGS="$CFLAGS" -ax_pthread_extra_flags="-Werror" -CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" -AC_COMPILE_IFELSE([AC_LANG_PROGRAM([int foo(void);],[foo()])], - [AC_MSG_RESULT([yes])], - [ax_pthread_extra_flags= - AC_MSG_RESULT([no])]) -CFLAGS="$save_CFLAGS" - -if test x"$ax_pthread_ok" = xno; then -for flag in $ax_pthread_flags; do - - case $flag in - none) - AC_MSG_CHECKING([whether pthreads work without any flags]) - ;; - - -*) - AC_MSG_CHECKING([whether pthreads work with $flag]) - PTHREAD_CFLAGS="$flag" - ;; - - pthread-config) - AC_CHECK_PROG([ax_pthread_config], [pthread-config], [yes], [no]) - if test x"$ax_pthread_config" = xno; then continue; fi - PTHREAD_CFLAGS="`pthread-config --cflags`" - PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" - ;; - - *) - AC_MSG_CHECKING([for the pthreads library -l$flag]) - PTHREAD_LIBS="-l$flag" - ;; - esac - - save_LIBS="$LIBS" - save_CFLAGS="$CFLAGS" - LIBS="$PTHREAD_LIBS $LIBS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" - - # Check for various functions. We must include pthread.h, - # since some functions may be macros. (On the Sequent, we - # need a special flag -Kthread to make this header compile.) - # We check for pthread_join because it is in -lpthread on IRIX - # while pthread_create is in libc. We check for pthread_attr_init - # due to DEC craziness with -lpthreads. We check for - # pthread_cleanup_push because it is one of the few pthread - # functions on Solaris that doesn't have a non-functional libc stub. - # We try pthread_create on general principles. - AC_LINK_IFELSE([AC_LANG_PROGRAM([#include - static void routine(void *a) { a = 0; } - static void *start_routine(void *a) { return a; }], - [pthread_t th; pthread_attr_t attr; - pthread_create(&th, 0, start_routine, 0); - pthread_join(th, 0); - pthread_attr_init(&attr); - pthread_cleanup_push(routine, 0); - pthread_cleanup_pop(0) /* ; */])], - [ax_pthread_ok=yes], - []) - - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - - AC_MSG_RESULT([$ax_pthread_ok]) - if test "x$ax_pthread_ok" = xyes; then - break; - fi - - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" -done -fi - -# Various other checks: -if test "x$ax_pthread_ok" = xyes; then - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - - # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. 
- AC_MSG_CHECKING([for joinable pthread attribute]) - attr_name=unknown - for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do - AC_LINK_IFELSE([AC_LANG_PROGRAM([#include ], - [int attr = $attr; return attr /* ; */])], - [attr_name=$attr; break], - []) - done - AC_MSG_RESULT([$attr_name]) - if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then - AC_DEFINE_UNQUOTED([PTHREAD_CREATE_JOINABLE], [$attr_name], - [Define to necessary symbol if this constant - uses a non-standard name on your system.]) - fi - - AC_MSG_CHECKING([if more special flags are required for pthreads]) - flag=no - case ${host_os} in - aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; - osf* | hpux*) flag="-D_REENTRANT";; - solaris*) - if test "$GCC" = "yes"; then - flag="-D_REENTRANT" - else - # TODO: What about Clang on Solaris? - flag="-mt -D_REENTRANT" - fi - ;; - esac - AC_MSG_RESULT([$flag]) - if test "x$flag" != xno; then - PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" - fi - - AC_CACHE_CHECK([for PTHREAD_PRIO_INHERIT], - [ax_cv_PTHREAD_PRIO_INHERIT], [ - AC_LINK_IFELSE([AC_LANG_PROGRAM([[#include ]], - [[int i = PTHREAD_PRIO_INHERIT;]])], - [ax_cv_PTHREAD_PRIO_INHERIT=yes], - [ax_cv_PTHREAD_PRIO_INHERIT=no]) - ]) - AS_IF([test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"], - [AC_DEFINE([HAVE_PTHREAD_PRIO_INHERIT], [1], [Have PTHREAD_PRIO_INHERIT.])]) - - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - - # More AIX lossage: compile with *_r variant - if test "x$GCC" != xyes; then - case $host_os in - aix*) - AS_CASE(["x/$CC"], - [x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6], - [#handle absolute path differently from PATH based program lookup - AS_CASE(["x$CC"], - [x/*], - [AS_IF([AS_EXECUTABLE_P([${CC}_r])],[PTHREAD_CC="${CC}_r"])], - [AC_CHECK_PROGS([PTHREAD_CC],[${CC}_r],[$CC])])]) - ;; - esac - fi -fi - -test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" - -AC_SUBST([PTHREAD_LIBS]) -AC_SUBST([PTHREAD_CFLAGS]) -AC_SUBST([PTHREAD_CC]) - -# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: -if test x"$ax_pthread_ok" = xyes; then - ifelse([$1],,[AC_DEFINE([HAVE_PTHREAD],[1],[Define if you have POSIX threads libraries and header files.])],[$1]) - : -else - ax_pthread_ok=no - $2 -fi -AC_LANG_POP -])dnl AX_PTHREAD diff --git a/configure b/configure index 2b973ae96be97..38cec0fe70c11 100755 --- a/configure +++ b/configure @@ -652,16 +652,15 @@ MSGFMT HAVE_POSIX_SIGNALS PG_CRC32C_OBJS CFLAGS_SSE42 -have_win32_dbghelp -HAVE_IPV6 -LIBOBJS -UUID_LIBS LDAP_LIBS_BE LDAP_LIBS_FE PTHREAD_CFLAGS PTHREAD_LIBS -PTHREAD_CC -ax_pthread_config +acx_pthread_config +have_win32_dbghelp +HAVE_IPV6 +LIBOBJS +UUID_LIBS ZIC python_additional_libs python_libspec @@ -1748,6 +1747,73 @@ fi } # ac_fn_c_try_cpp +# ac_fn_c_check_func LINENO FUNC VAR +# ---------------------------------- +# Tests whether FUNC exists, setting the cache variable VAR accordingly +ac_fn_c_check_func () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +/* Define $2 to an innocuous variant, in case declares $2. + For example, HP-UX 11i declares gettimeofday. */ +#define $2 innocuous_$2 + +/* System header to define __stub macros and hopefully few prototypes, + which can conflict with char $2 (); below. 
+ Prefer to if __STDC__ is defined, since + exists even on freestanding compilers. */ + +#ifdef __STDC__ +# include +#else +# include +#endif + +#undef $2 + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char $2 (); +/* The GNU C library defines this for functions which it implements + to always fail with ENOSYS. Some functions are actually named + something starting with __ and the normal name is an alias. */ +#if defined __stub_$2 || defined __stub___$2 +choke me +#endif + +int +main () +{ +return $2 (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + eval "$3=yes" +else + eval "$3=no" +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +eval ac_res=\$$3 + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 +$as_echo "$ac_res" >&6; } + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + +} # ac_fn_c_check_func + # ac_fn_c_check_header_mongrel LINENO HEADER VAR INCLUDES # ------------------------------------------------------- # Tests whether HEADER exists, giving a warning if it cannot be compiled using @@ -1912,73 +1978,6 @@ $as_echo "$ac_res" >&6; } } # ac_fn_c_check_header_compile -# ac_fn_c_check_func LINENO FUNC VAR -# ---------------------------------- -# Tests whether FUNC exists, setting the cache variable VAR accordingly -ac_fn_c_check_func () -{ - as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 -$as_echo_n "checking for $2... " >&6; } -if eval \${$3+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Define $2 to an innocuous variant, in case declares $2. - For example, HP-UX 11i declares gettimeofday. */ -#define $2 innocuous_$2 - -/* System header to define __stub macros and hopefully few prototypes, - which can conflict with char $2 (); below. - Prefer to if __STDC__ is defined, since - exists even on freestanding compilers. */ - -#ifdef __STDC__ -# include -#else -# include -#endif - -#undef $2 - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char $2 (); -/* The GNU C library defines this for functions which it implements - to always fail with ENOSYS. Some functions are actually named - something starting with __ and the normal name is an alias. */ -#if defined __stub_$2 || defined __stub___$2 -choke me -#endif - -int -main () -{ -return $2 (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - eval "$3=yes" -else - eval "$3=no" -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -fi -eval ac_res=\$$3 - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5 -$as_echo "$ac_res" >&6; } - eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno - -} # ac_fn_c_check_func - # ac_fn_c_check_member LINENO AGGR MEMBER VAR INCLUDES # ---------------------------------------------------- # Tries to find if the field MEMBER exists in type AGGR, after including @@ -7614,44 +7613,62 @@ program to use during the build." 
"$LINENO" 5 fi fi -# -# Pthreads -# -# For each platform, we need to know about any special compile and link -# libraries, and whether the normal C function names are thread-safe. -# See the comment at the top of src/port/thread.c for more information. -# WIN32 doesn't need the pthread tests; it always uses threads -# -# These tests are run before the library-tests, because linking with the -# other libraries can pull in the pthread functions as a side-effect. We -# want to use the -pthread or similar flags directly, and not rely on -# the side-effects of linking with some other library. -if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then +## +## Libraries +## +## Most libraries are included only if they demonstrably provide a function +## we need, but libm is an exception: always include it, because there are +## too many compilers that play cute optimization games that will break +## probes for standard functions such as pow(). +## -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lm" >&5 +$as_echo_n "checking for main in -lm... " >&6; } +if ${ac_cv_lib_m_main+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lm $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ -ax_pthread_ok=no -# We used to check for pthread.h first, but this fails if pthread.h -# requires special compiler flags (e.g. on True64 or Sequent). -# It gets checked for in the link test anyway. +int +main () +{ +return main (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_m_main=yes +else + ac_cv_lib_m_main=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_m_main" >&5 +$as_echo "$ac_cv_lib_m_main" >&6; } +if test "x$ac_cv_lib_m_main" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBM 1 +_ACEOF -# First of all, check if the user has set any of the PTHREAD_LIBS, -# etcetera environment variables, and if threads linking works using -# them: -if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5 -$as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... " >&6; } - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + LIBS="-lm $LIBS" + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing setproctitle" >&5 +$as_echo_n "checking for library containing setproctitle... " >&6; } +if ${ac_cv_search_setproctitle+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
@@ -7660,688 +7677,340 @@ $as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD #ifdef __cplusplus extern "C" #endif -char pthread_join (); +char setproctitle (); int main () { -return pthread_join (); +return setproctitle (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ax_pthread_ok=yes +for ac_lib in '' util; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_setproctitle=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 -$as_echo "$ax_pthread_ok" >&6; } - if test x"$ax_pthread_ok" = xno; then - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" - fi - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" + conftest$ac_exeext + if ${ac_cv_search_setproctitle+:} false; then : + break fi +done +if ${ac_cv_search_setproctitle+:} false; then : -# We must check for the threads library under a number of different -# names; the ordering is very important because some systems -# (e.g. DEC) have both -lpthread and -lpthreads, where one of the -# libraries is broken (non-POSIX). - -# Create a list of thread flags to try. Items starting with a "-" are -# C compiler flags, and other items are library names, except for "none" -# which indicates that we try without any flags at all, and "pthread-config" -# which is a program returning the flags for the Pth emulation library. - -ax_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" - -# The ordering *is* (sometimes) important. Some notes on the -# individual items follow: - -# pthreads: AIX (must check this before -lpthread) -# none: in case threads are in libc; should be tried before -Kthread and -# other compiler flags to prevent continual compiler warnings -# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) -# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) -# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) -# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) -# -pthreads: Solaris/gcc -# -mthreads: Mingw32/gcc, Lynx/gcc -# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it -# doesn't hurt to check since this sometimes defines pthreads too; -# also defines -D_REENTRANT) -# ... -mt is also the pthreads flag for HP/aCC -# pthread: Linux, etcetera -# --thread-safe: KAI C++ -# pthread-config: use pthread-config program (for GNU Pth library) - -case ${host_os} in - solaris*) - - # On Solaris (at least, for some versions), libc contains stubbed - # (non-functional) versions of the pthreads routines, so link-based - # tests will erroneously succeed. (We need to link with -pthreads/-mt/ - # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather - # a function called by this macro, so we could check for that, but - # who knows whether they'll stub that too in a future libc.) So, - # we'll just look for -pthreads and -lpthread first: - - ax_pthread_flags="-pthreads pthread -mt -pthread $ax_pthread_flags" - ;; - - darwin*) - ax_pthread_flags="-pthread $ax_pthread_flags" - ;; -esac - -# Clang doesn't consider unrecognized options an error unless we specify -# -Werror. We throw in some extra Clang-specific options to ensure that -# this doesn't happen for GCC, which also accepts -Werror. 
- -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if compiler needs -Werror to reject unknown flags" >&5 -$as_echo_n "checking if compiler needs -Werror to reject unknown flags... " >&6; } -save_CFLAGS="$CFLAGS" -ax_pthread_extra_flags="-Werror" -CFLAGS="$CFLAGS $ax_pthread_extra_flags -Wunknown-warning-option -Wsizeof-array-argument" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -int foo(void); -int -main () -{ -foo() - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 -$as_echo "yes" >&6; } else - ax_pthread_extra_flags= - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + ac_cv_search_setproctitle=no fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -CFLAGS="$save_CFLAGS" - -if test x"$ax_pthread_ok" = xno; then -for flag in $ax_pthread_flags; do - - case $flag in - none) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5 -$as_echo_n "checking whether pthreads work without any flags... " >&6; } - ;; +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_setproctitle" >&5 +$as_echo "$ac_cv_search_setproctitle" >&6; } +ac_res=$ac_cv_search_setproctitle +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - -*) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5 -$as_echo_n "checking whether pthreads work with $flag... " >&6; } - PTHREAD_CFLAGS="$flag" - ;; +fi - pthread-config) - # Extract the first word of "pthread-config", so it can be a program name with args. -set dummy pthread-config; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_ax_pthread_config+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 +$as_echo_n "checking for library containing dlopen... " >&6; } +if ${ac_cv_search_dlopen+:} false; then : $as_echo_n "(cached) " >&6 else - if test -n "$ax_pthread_config"; then - ac_cv_prog_ax_pthread_config="$ax_pthread_config" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_ax_pthread_config="yes" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi -done - done -IFS=$as_save_IFS - - test -z "$ac_cv_prog_ax_pthread_config" && ac_cv_prog_ax_pthread_config="no" -fi -fi -ax_pthread_config=$ac_cv_prog_ax_pthread_config -if test -n "$ax_pthread_config"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_config" >&5 -$as_echo "$ax_pthread_config" >&6; } -else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } -fi - - - if test x"$ax_pthread_config" = xno; then continue; fi - PTHREAD_CFLAGS="`pthread-config --cflags`" - PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" - ;; - - *) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5 -$as_echo_n "checking for the pthreads library -l$flag... 
" >&6; } - PTHREAD_LIBS="-l$flag" - ;; - esac - - save_LIBS="$LIBS" - save_CFLAGS="$CFLAGS" - LIBS="$PTHREAD_LIBS $LIBS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS $ax_pthread_extra_flags" - - # Check for various functions. We must include pthread.h, - # since some functions may be macros. (On the Sequent, we - # need a special flag -Kthread to make this header compile.) - # We check for pthread_join because it is in -lpthread on IRIX - # while pthread_create is in libc. We check for pthread_attr_init - # due to DEC craziness with -lpthreads. We check for - # pthread_cleanup_push because it is one of the few pthread - # functions on Solaris that doesn't have a non-functional libc stub. - # We try pthread_create on general principles. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include - static void routine(void *a) { a = 0; } - static void *start_routine(void *a) { return a; } + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); int main () { -pthread_t th; pthread_attr_t attr; - pthread_create(&th, 0, start_routine, 0); - pthread_join(th, 0); - pthread_attr_init(&attr); - pthread_cleanup_push(routine, 0); - pthread_cleanup_pop(0) /* ; */ +return dlopen (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ax_pthread_ok=yes +for ac_lib in '' dl; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_dlopen=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_pthread_ok" >&5 -$as_echo "$ax_pthread_ok" >&6; } - if test "x$ax_pthread_ok" = xyes; then - break; - fi - - PTHREAD_LIBS="" - PTHREAD_CFLAGS="" + conftest$ac_exeext + if ${ac_cv_search_dlopen+:} false; then : + break +fi done +if ${ac_cv_search_dlopen+:} false; then : + +else + ac_cv_search_dlopen=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 +$as_echo "$ac_cv_search_dlopen" >&6; } +ac_res=$ac_cv_search_dlopen +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" -# Various other checks: -if test "x$ax_pthread_ok" = xyes; then - save_LIBS="$LIBS" - LIBS="$PTHREAD_LIBS $LIBS" - save_CFLAGS="$CFLAGS" - CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +fi - # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for joinable pthread attribute" >&5 -$as_echo_n "checking for joinable pthread attribute... " >&6; } - attr_name=unknown - for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do - cat confdefs.h - <<_ACEOF >conftest.$ac_ext +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing socket" >&5 +$as_echo_n "checking for library containing socket... " >&6; } +if ${ac_cv_search_socket+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include + +/* Override any GCC internal prototype to avoid an error. 
+ Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char socket (); int main () { -int attr = $attr; return attr /* ; */ +return socket (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - attr_name=$attr; break +for ac_lib in '' socket ws2_32; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_socket=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - done - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $attr_name" >&5 -$as_echo "$attr_name" >&6; } - if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then - -cat >>confdefs.h <<_ACEOF -#define PTHREAD_CREATE_JOINABLE $attr_name -_ACEOF + conftest$ac_exeext + if ${ac_cv_search_socket+:} false; then : + break +fi +done +if ${ac_cv_search_socket+:} false; then : - fi +else + ac_cv_search_socket=no +fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_socket" >&5 +$as_echo "$ac_cv_search_socket" >&6; } +ac_res=$ac_cv_search_socket +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - { $as_echo "$as_me:${as_lineno-$LINENO}: checking if more special flags are required for pthreads" >&5 -$as_echo_n "checking if more special flags are required for pthreads... " >&6; } - flag=no - case ${host_os} in - aix* | freebsd* | darwin*) flag="-D_THREAD_SAFE";; - osf* | hpux*) flag="-D_REENTRANT";; - solaris*) - if test "$GCC" = "yes"; then - flag="-D_REENTRANT" - else - # TODO: What about Clang on Solaris? - flag="-mt -D_REENTRANT" - fi - ;; - esac - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $flag" >&5 -$as_echo "$flag" >&6; } - if test "x$flag" != xno; then - PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" - fi +fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for PTHREAD_PRIO_INHERIT" >&5 -$as_echo_n "checking for PTHREAD_PRIO_INHERIT... " >&6; } -if ${ax_cv_PTHREAD_PRIO_INHERIT+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shl_load" >&5 +$as_echo_n "checking for library containing shl_load... " >&6; } +if ${ac_cv_search_shl_load+:} false; then : $as_echo_n "(cached) " >&6 else - - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); int main () { -int i = PTHREAD_PRIO_INHERIT; +return shl_load (); ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ax_cv_PTHREAD_PRIO_INHERIT=yes -else - ax_cv_PTHREAD_PRIO_INHERIT=no +for ac_lib in '' dld; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_shl_load=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext - -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ax_cv_PTHREAD_PRIO_INHERIT" >&5 -$as_echo "$ax_cv_PTHREAD_PRIO_INHERIT" >&6; } - if test "x$ax_cv_PTHREAD_PRIO_INHERIT" = "xyes"; then : - -$as_echo "#define HAVE_PTHREAD_PRIO_INHERIT 1" >>confdefs.h - + conftest$ac_exeext + if ${ac_cv_search_shl_load+:} false; then : + break fi - - LIBS="$save_LIBS" - CFLAGS="$save_CFLAGS" - - # More AIX lossage: compile with *_r variant - if test "x$GCC" != xyes; then - case $host_os in - aix*) - case "x/$CC" in #( - x*/c89|x*/c89_128|x*/c99|x*/c99_128|x*/cc|x*/cc128|x*/xlc|x*/xlc_v6|x*/xlc128|x*/xlc128_v6) : - #handle absolute path differently from PATH based program lookup - case "x$CC" in #( - x/*) : - if as_fn_executable_p ${CC}_r; then : - PTHREAD_CC="${CC}_r" -fi ;; #( - *) : - for ac_prog in ${CC}_r -do - # Extract the first word of "$ac_prog", so it can be a program name with args. -set dummy $ac_prog; ac_word=$2 -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 -$as_echo_n "checking for $ac_word... " >&6; } -if ${ac_cv_prog_PTHREAD_CC+:} false; then : - $as_echo_n "(cached) " >&6 -else - if test -n "$PTHREAD_CC"; then - ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test. -else -as_save_IFS=$IFS; IFS=$PATH_SEPARATOR -for as_dir in $PATH -do - IFS=$as_save_IFS - test -z "$as_dir" && as_dir=. - for ac_exec_ext in '' $ac_executable_extensions; do - if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then - ac_cv_prog_PTHREAD_CC="$ac_prog" - $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 - break 2 - fi done - done -IFS=$as_save_IFS +if ${ac_cv_search_shl_load+:} false; then : -fi -fi -PTHREAD_CC=$ac_cv_prog_PTHREAD_CC -if test -n "$PTHREAD_CC"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PTHREAD_CC" >&5 -$as_echo "$PTHREAD_CC" >&6; } else - { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 -$as_echo "no" >&6; } + ac_cv_search_shl_load=no fi - - - test -n "$PTHREAD_CC" && break -done -test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" - ;; -esac ;; #( - *) : - ;; -esac - ;; - esac - fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS fi - -test -n "$PTHREAD_CC" || PTHREAD_CC="$CC" - - - - - -# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: -if test x"$ax_pthread_ok" = xyes; then - -$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h - - : -else - ax_pthread_ok=no +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shl_load" >&5 +$as_echo "$ac_cv_search_shl_load" >&6; } +ac_res=$ac_cv_search_shl_load +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -ac_ext=c -ac_cpp='$CPP $CPPFLAGS' -ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' -ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' -ac_compiler_gnu=$ac_cv_c_compiler_gnu - - # set thread flags - -# Some platforms use these, so just define them. They can't hurt if they -# are not supported. 
For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS -# enables 5-arg getpwuid_r, among other things. -PTHREAD_CFLAGS="$PTHREAD_CFLAGS -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS" - -# Check for *_r functions -_CFLAGS="$CFLAGS" -_LIBS="$LIBS" -CFLAGS="$CFLAGS $PTHREAD_CFLAGS" -LIBS="$LIBS $PTHREAD_LIBS" -if test "$PORTNAME" != "win32"; then -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 -$as_echo_n "checking for ANSI C header files... " >&6; } -if ${ac_cv_header_stdc+:} false; then : +# We only use libld in port/dynloader/aix.c +case $host_os in + aix*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing ldopen" >&5 +$as_echo_n "checking for library containing ldopen... " >&6; } +if ${ac_cv_search_ldopen+:} false; then : $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -#include -#include -#include +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char ldopen (); int main () { - +return ldopen (); ; return 0; } _ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - ac_cv_header_stdc=yes -else - ac_cv_header_stdc=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - -if test $ac_cv_header_stdc = yes; then - # SunOS 4.x string.h does not declare mem*, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "memchr" >/dev/null 2>&1; then : - -else - ac_cv_header_stdc=no +for ac_lib in '' ld; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_ldopen=$ac_res fi -rm -f conftest* - +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_ldopen+:} false; then : + break fi - -if test $ac_cv_header_stdc = yes; then - # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include - -_ACEOF -if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | - $EGREP "free" >/dev/null 2>&1; then : +done +if ${ac_cv_search_ldopen+:} false; then : else - ac_cv_header_stdc=no + ac_cv_search_ldopen=no fi -rm -f conftest* +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_ldopen" >&5 +$as_echo "$ac_cv_search_ldopen" >&6; } +ac_res=$ac_cv_search_ldopen +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -if test $ac_cv_header_stdc = yes; then - # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. - if test "$cross_compiling" = yes; then : - : + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing getopt_long" >&5 +$as_echo_n "checking for library containing getopt_long... " >&6; } +if ${ac_cv_search_getopt_long+:} false; then : + $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -#include -#if ((' ' & 0x0FF) == 0x020) -# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') -# define TOUPPER(c) (ISLOWER(c) ? 
'A' + ((c) - 'a') : (c)) -#else -# define ISLOWER(c) \ - (('a' <= (c) && (c) <= 'i') \ - || ('j' <= (c) && (c) <= 'r') \ - || ('s' <= (c) && (c) <= 'z')) -# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) -#endif -#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char getopt_long (); int main () { - int i; - for (i = 0; i < 256; i++) - if (XOR (islower (i), ISLOWER (i)) - || toupper (i) != TOUPPER (i)) - return 2; +return getopt_long (); + ; return 0; } _ACEOF -if ac_fn_c_try_run "$LINENO"; then : - -else - ac_cv_header_stdc=no +for ac_lib in '' getopt gnugetopt; do + if test -z "$ac_lib"; then + ac_res="none required" + else + ac_res=-l$ac_lib + LIBS="-l$ac_lib $ac_func_search_save_LIBS" + fi + if ac_fn_c_try_link "$LINENO"; then : + ac_cv_search_getopt_long=$ac_res fi -rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ - conftest.$ac_objext conftest.beam conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext + if ${ac_cv_search_getopt_long+:} false; then : + break fi +done +if ${ac_cv_search_getopt_long+:} false; then : +else + ac_cv_search_getopt_long=no fi +rm conftest.$ac_ext +LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 -$as_echo "$ac_cv_header_stdc" >&6; } -if test $ac_cv_header_stdc = yes; then - -$as_echo "#define STDC_HEADERS 1" >>confdefs.h +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_getopt_long" >&5 +$as_echo "$ac_cv_search_getopt_long" >&6; } +ac_res=$ac_cv_search_getopt_long +if test "$ac_res" != no; then : + test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -# On IRIX 5.3, sys/types and inttypes.h are conflicting. -for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ - inttypes.h stdint.h unistd.h -do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default -" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 -_ACEOF - -fi - -done - - -ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default" -if test "x$ac_cv_header_pthread_h" = xyes; then : - -else - as_fn_error $? " -pthread.h not found; use --disable-thread-safety to disable thread safety" "$LINENO" 5 -fi - - -fi - -for ac_func in strerror_r getpwuid_r gethostbyname_r -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - -fi -done - - -# Do test here with the proper thread flags -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns int" >&5 -$as_echo_n "checking whether strerror_r returns int... " >&6; } -if ${pgac_cv_func_strerror_r_int+:} false; then : - $as_echo_n "(cached) " >&6 -else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -#include -int -main () -{ -#ifndef _AIX -int strerror_r(int, char *, size_t); -#else -/* Older AIX has 'int' for the third argument so we don't test the args. 
*/ -int strerror_r(); -#endif - ; - return 0; -} -_ACEOF -if ac_fn_c_try_compile "$LINENO"; then : - pgac_cv_func_strerror_r_int=yes -else - pgac_cv_func_strerror_r_int=no -fi -rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_strerror_r_int" >&5 -$as_echo "$pgac_cv_func_strerror_r_int" >&6; } -if test x"$pgac_cv_func_strerror_r_int" = xyes ; then - -$as_echo "#define STRERROR_R_INT 1" >>confdefs.h - -fi - - -CFLAGS="$_CFLAGS" -LIBS="$_LIBS" - -else -# do not use values from template file -PTHREAD_CFLAGS= -PTHREAD_LIBS= -fi - - - - - -## -## Libraries -## -## Most libraries are included only if they demonstrably provide a function -## we need, but libm is an exception: always include it, because there are -## too many compilers that play cute optimization games that will break -## probes for standard functions such as pow(). -## - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for main in -lm" >&5 -$as_echo_n "checking for main in -lm... " >&6; } -if ${ac_cv_lib_m_main+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lm $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - - -int -main () -{ -return main (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_m_main=yes -else - ac_cv_lib_m_main=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_m_main" >&5 -$as_echo "$ac_cv_lib_m_main" >&6; } -if test "x$ac_cv_lib_m_main" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBM 1 -_ACEOF - - LIBS="-lm $LIBS" - -fi - -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing setproctitle" >&5 -$as_echo_n "checking for library containing setproctitle... " >&6; } -if ${ac_cv_search_setproctitle+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing crypt" >&5 +$as_echo_n "checking for library containing crypt... " >&6; } +if ${ac_cv_search_crypt+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_func_search_save_LIBS=$LIBS +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. 
Use char because int might match the return type of a GCC @@ -8349,16 +8018,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char setproctitle (); +char crypt (); int main () { -return setproctitle (); +return crypt (); ; return 0; } _ACEOF -for ac_lib in '' util; do +for ac_lib in '' crypt; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8366,33 +8035,33 @@ for ac_lib in '' util; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_setproctitle=$ac_res + ac_cv_search_crypt=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_setproctitle+:} false; then : + if ${ac_cv_search_crypt+:} false; then : break fi done -if ${ac_cv_search_setproctitle+:} false; then : +if ${ac_cv_search_crypt+:} false; then : else - ac_cv_search_setproctitle=no + ac_cv_search_crypt=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_setproctitle" >&5 -$as_echo "$ac_cv_search_setproctitle" >&6; } -ac_res=$ac_cv_search_setproctitle +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_crypt" >&5 +$as_echo "$ac_cv_search_crypt" >&6; } +ac_res=$ac_cv_search_crypt if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing dlopen" >&5 -$as_echo_n "checking for library containing dlopen... " >&6; } -if ${ac_cv_search_dlopen+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shm_open" >&5 +$as_echo_n "checking for library containing shm_open... " >&6; } +if ${ac_cv_search_shm_open+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8405,16 +8074,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char dlopen (); +char shm_open (); int main () { -return dlopen (); +return shm_open (); ; return 0; } _ACEOF -for ac_lib in '' dl; do +for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8422,33 +8091,33 @@ for ac_lib in '' dl; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_dlopen=$ac_res + ac_cv_search_shm_open=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_dlopen+:} false; then : + if ${ac_cv_search_shm_open+:} false; then : break fi done -if ${ac_cv_search_dlopen+:} false; then : +if ${ac_cv_search_shm_open+:} false; then : else - ac_cv_search_dlopen=no + ac_cv_search_shm_open=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_dlopen" >&5 -$as_echo "$ac_cv_search_dlopen" >&6; } -ac_res=$ac_cv_search_dlopen +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shm_open" >&5 +$as_echo "$ac_cv_search_shm_open" >&6; } +ac_res=$ac_cv_search_shm_open if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing socket" >&5 -$as_echo_n "checking for library containing socket... " >&6; } -if ${ac_cv_search_socket+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shm_unlink" >&5 +$as_echo_n "checking for library containing shm_unlink... 
" >&6; } +if ${ac_cv_search_shm_unlink+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8461,16 +8130,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char socket (); +char shm_unlink (); int main () { -return socket (); +return shm_unlink (); ; return 0; } _ACEOF -for ac_lib in '' socket ws2_32; do +for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8478,33 +8147,34 @@ for ac_lib in '' socket ws2_32; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_socket=$ac_res + ac_cv_search_shm_unlink=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_socket+:} false; then : + if ${ac_cv_search_shm_unlink+:} false; then : break fi done -if ${ac_cv_search_socket+:} false; then : +if ${ac_cv_search_shm_unlink+:} false; then : else - ac_cv_search_socket=no + ac_cv_search_shm_unlink=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_socket" >&5 -$as_echo "$ac_cv_search_socket" >&6; } -ac_res=$ac_cv_search_socket +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shm_unlink" >&5 +$as_echo "$ac_cv_search_shm_unlink" >&6; } +ac_res=$ac_cv_search_shm_unlink if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shl_load" >&5 -$as_echo_n "checking for library containing shl_load... " >&6; } -if ${ac_cv_search_shl_load+:} false; then : +# Solaris: +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing fdatasync" >&5 +$as_echo_n "checking for library containing fdatasync... " >&6; } +if ${ac_cv_search_fdatasync+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8517,16 +8187,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char shl_load (); +char fdatasync (); int main () { -return shl_load (); +return fdatasync (); ; return 0; } _ACEOF -for ac_lib in '' dld; do +for ac_lib in '' rt posix4; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8534,36 +8204,34 @@ for ac_lib in '' dld; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_shl_load=$ac_res + ac_cv_search_fdatasync=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_shl_load+:} false; then : + if ${ac_cv_search_fdatasync+:} false; then : break fi done -if ${ac_cv_search_shl_load+:} false; then : +if ${ac_cv_search_fdatasync+:} false; then : else - ac_cv_search_shl_load=no + ac_cv_search_fdatasync=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shl_load" >&5 -$as_echo "$ac_cv_search_shl_load" >&6; } -ac_res=$ac_cv_search_shl_load +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_fdatasync" >&5 +$as_echo "$ac_cv_search_fdatasync" >&6; } +ac_res=$ac_cv_search_fdatasync if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -# We only use libld in port/dynloader/aix.c -case $host_os in - aix*) - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing ldopen" >&5 -$as_echo_n "checking for library containing ldopen... 
" >&6; } -if ${ac_cv_search_ldopen+:} false; then : +# Required for thread_test.c on Solaris +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing sched_yield" >&5 +$as_echo_n "checking for library containing sched_yield... " >&6; } +if ${ac_cv_search_sched_yield+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8576,16 +8244,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char ldopen (); +char sched_yield (); int main () { -return ldopen (); +return sched_yield (); ; return 0; } _ACEOF -for ac_lib in '' ld; do +for ac_lib in '' rt; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8593,35 +8261,35 @@ for ac_lib in '' ld; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_ldopen=$ac_res + ac_cv_search_sched_yield=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_ldopen+:} false; then : + if ${ac_cv_search_sched_yield+:} false; then : break fi done -if ${ac_cv_search_ldopen+:} false; then : +if ${ac_cv_search_sched_yield+:} false; then : else - ac_cv_search_ldopen=no + ac_cv_search_sched_yield=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_ldopen" >&5 -$as_echo "$ac_cv_search_ldopen" >&6; } -ac_res=$ac_cv_search_ldopen +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_sched_yield" >&5 +$as_echo "$ac_cv_search_sched_yield" >&6; } +ac_res=$ac_cv_search_sched_yield if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi - ;; -esac -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing getopt_long" >&5 -$as_echo_n "checking for library containing getopt_long... " >&6; } -if ${ac_cv_search_getopt_long+:} false; then : +# Required for thread_test.c on Solaris 2.5: +# Other ports use it too (HP-UX) so test unconditionally +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gethostbyname_r" >&5 +$as_echo_n "checking for library containing gethostbyname_r... 
" >&6; } +if ${ac_cv_search_gethostbyname_r+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8634,16 +8302,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char getopt_long (); +char gethostbyname_r (); int main () { -return getopt_long (); +return gethostbyname_r (); ; return 0; } _ACEOF -for ac_lib in '' getopt gnugetopt; do +for ac_lib in '' nsl; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8651,33 +8319,34 @@ for ac_lib in '' getopt gnugetopt; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_getopt_long=$ac_res + ac_cv_search_gethostbyname_r=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_getopt_long+:} false; then : + if ${ac_cv_search_gethostbyname_r+:} false; then : break fi done -if ${ac_cv_search_getopt_long+:} false; then : +if ${ac_cv_search_gethostbyname_r+:} false; then : else - ac_cv_search_getopt_long=no + ac_cv_search_gethostbyname_r=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_getopt_long" >&5 -$as_echo "$ac_cv_search_getopt_long" >&6; } -ac_res=$ac_cv_search_getopt_long +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gethostbyname_r" >&5 +$as_echo "$ac_cv_search_gethostbyname_r" >&6; } +ac_res=$ac_cv_search_gethostbyname_r if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing crypt" >&5 -$as_echo_n "checking for library containing crypt... " >&6; } -if ${ac_cv_search_crypt+:} false; then : +# Cygwin: +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shmget" >&5 +$as_echo_n "checking for library containing shmget... " >&6; } +if ${ac_cv_search_shmget+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8690,16 +8359,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char crypt (); +char shmget (); int main () { -return crypt (); +return shmget (); ; return 0; } _ACEOF -for ac_lib in '' crypt; do +for ac_lib in '' cygipc; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8707,37 +8376,49 @@ for ac_lib in '' crypt; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_crypt=$ac_res + ac_cv_search_shmget=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_crypt+:} false; then : + if ${ac_cv_search_shmget+:} false; then : break fi done -if ${ac_cv_search_crypt+:} false; then : +if ${ac_cv_search_shmget+:} false; then : else - ac_cv_search_crypt=no + ac_cv_search_shmget=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_crypt" >&5 -$as_echo "$ac_cv_search_crypt" >&6; } -ac_res=$ac_cv_search_crypt +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shmget" >&5 +$as_echo "$ac_cv_search_shmget" >&6; } +ac_res=$ac_cv_search_shmget if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shm_open" >&5 -$as_echo_n "checking for library containing shm_open... 
" >&6; } -if ${ac_cv_search_shm_open+:} false; then : + +if test "$with_readline" = yes; then + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing readline" >&5 +$as_echo_n "checking for library containing readline... " >&6; } +if ${pgac_cv_check_readline+:} false; then : $as_echo_n "(cached) " >&6 else - ac_func_search_save_LIBS=$LIBS -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + pgac_cv_check_readline=no +pgac_save_LIBS=$LIBS +if test x"$with_libedit_preferred" != x"yes" +then READLINE_ORDER="-lreadline -ledit" +else READLINE_ORDER="-ledit -lreadline" +fi +for pgac_rllib in $READLINE_ORDER ; do + for pgac_lib in "" " -ltermcap" " -lncurses" " -lcurses" ; do + LIBS="${pgac_rllib}${pgac_lib} $pgac_save_LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -8746,53 +8427,67 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char shm_open (); +char readline (); int main () { -return shm_open (); +return readline (); ; return 0; } _ACEOF -for ac_lib in '' rt; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_shm_open=$ac_res +if ac_fn_c_try_link "$LINENO"; then : + + # Older NetBSD, OpenBSD, and Irix have a broken linker that does not + # recognize dependent libraries; assume curses is needed if we didn't + # find any dependency. + case $host_os in + netbsd* | openbsd* | irix*) + if test x"$pgac_lib" = x"" ; then + pgac_lib=" -lcurses" + fi ;; + esac + + pgac_cv_check_readline="${pgac_rllib}${pgac_lib}" + break + fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_shm_open+:} false; then : - break -fi + conftest$ac_exeext conftest.$ac_ext + done + if test "$pgac_cv_check_readline" != no ; then + break + fi done -if ${ac_cv_search_shm_open+:} false; then : +LIBS=$pgac_save_LIBS -else - ac_cv_search_shm_open=no fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_check_readline" >&5 +$as_echo "$pgac_cv_check_readline" >&6; } +if test "$pgac_cv_check_readline" != no ; then + LIBS="$pgac_cv_check_readline $LIBS" + +$as_echo "#define HAVE_LIBREADLINE 1" >>confdefs.h + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shm_open" >&5 -$as_echo "$ac_cv_search_shm_open" >&6; } -ac_res=$ac_cv_search_shm_open -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + + if test x"$pgac_cv_check_readline" = x"no"; then + as_fn_error $? "readline library not found +If you have readline already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-readline to disable readline support." "$LINENO" 5 + fi fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shm_unlink" >&5 -$as_echo_n "checking for library containing shm_unlink... " >&6; } -if ${ac_cv_search_shm_unlink+:} false; then : +if test "$with_zlib" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflate in -lz" >&5 +$as_echo_n "checking for inflate in -lz... " >&6; } +if ${ac_cv_lib_z_inflate+:} false; then : $as_echo_n "(cached) " >&6 else - ac_func_search_save_LIBS=$LIBS + ac_check_lib_save_LIBS=$LIBS +LIBS="-lz $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -8802,51 +8497,69 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char shm_unlink (); +char inflate (); int main () { -return shm_unlink (); +return inflate (); ; return 0; } _ACEOF -for ac_lib in '' rt; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_shm_unlink=$ac_res +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_z_inflate=yes +else + ac_cv_lib_z_inflate=no fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_shm_unlink+:} false; then : - break + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -done -if ${ac_cv_search_shm_unlink+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_inflate" >&5 +$as_echo "$ac_cv_lib_z_inflate" >&6; } +if test "x$ac_cv_lib_z_inflate" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBZ 1 +_ACEOF + + LIBS="-lz $LIBS" else - ac_cv_search_shm_unlink=no + as_fn_error $? "zlib library not found +If you have zlib already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-zlib to disable zlib support." "$LINENO" 5 fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shm_unlink" >&5 -$as_echo "$ac_cv_search_shm_unlink" >&6; } -ac_res=$ac_cv_search_shm_unlink -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +if test "$enable_spinlocks" = yes; then + +$as_echo "#define HAVE_SPINLOCKS 1" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: +*** Not using spinlocks will cause poor performance." >&5 +$as_echo "$as_me: WARNING: +*** Not using spinlocks will cause poor performance." >&2;} fi -# Solaris: -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing fdatasync" >&5 -$as_echo_n "checking for library containing fdatasync... " >&6; } -if ${ac_cv_search_fdatasync+:} false; then : +if test "$enable_atomics" = yes; then + +$as_echo "#define HAVE_ATOMICS 1" >>confdefs.h + +else + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: +*** Not using atomic operations will cause poor performance." >&5 +$as_echo "$as_me: WARNING: +*** Not using atomic operations will cause poor performance." >&2;} +fi + +if test "$with_gssapi" = yes ; then + if test "$PORTNAME" != "win32"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gss_init_sec_context" >&5 +$as_echo_n "checking for library containing gss_init_sec_context... 
" >&6; } +if ${ac_cv_search_gss_init_sec_context+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8859,16 +8572,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char fdatasync (); +char gss_init_sec_context (); int main () { -return fdatasync (); +return gss_init_sec_context (); ; return 0; } _ACEOF -for ac_lib in '' rt posix4; do +for ac_lib in '' gssapi_krb5 gss 'gssapi -lkrb5 -lcrypto'; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8876,37 +8589,46 @@ for ac_lib in '' rt posix4; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_fdatasync=$ac_res + ac_cv_search_gss_init_sec_context=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_fdatasync+:} false; then : + if ${ac_cv_search_gss_init_sec_context+:} false; then : break fi done -if ${ac_cv_search_fdatasync+:} false; then : +if ${ac_cv_search_gss_init_sec_context+:} false; then : else - ac_cv_search_fdatasync=no + ac_cv_search_gss_init_sec_context=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_fdatasync" >&5 -$as_echo "$ac_cv_search_fdatasync" >&6; } -ac_res=$ac_cv_search_fdatasync +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gss_init_sec_context" >&5 +$as_echo "$ac_cv_search_gss_init_sec_context" >&6; } +ac_res=$ac_cv_search_gss_init_sec_context if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" -fi +else + as_fn_error $? "could not find function 'gss_init_sec_context' required for GSSAPI" "$LINENO" 5 +fi -# Required for thread_test.c on Solaris -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing sched_yield" >&5 -$as_echo_n "checking for library containing sched_yield... " >&6; } -if ${ac_cv_search_sched_yield+:} false; then : + else + LIBS="$LIBS -lgssapi32" + fi +fi + +if test "$with_openssl" = yes ; then + if test "$PORTNAME" != "win32"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CRYPTO_new_ex_data in -lcrypto" >&5 +$as_echo_n "checking for CRYPTO_new_ex_data in -lcrypto... " >&6; } +if ${ac_cv_lib_crypto_CRYPTO_new_ex_data+:} false; then : $as_echo_n "(cached) " >&6 else - ac_func_search_save_LIBS=$LIBS + ac_check_lib_save_LIBS=$LIBS +LIBS="-lcrypto $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -8916,52 +8638,88 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char sched_yield (); +char CRYPTO_new_ex_data (); int main () { -return sched_yield (); +return CRYPTO_new_ex_data (); ; return 0; } _ACEOF -for ac_lib in '' rt; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_sched_yield=$ac_res +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_crypto_CRYPTO_new_ex_data=yes +else + ac_cv_lib_crypto_CRYPTO_new_ex_data=no fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_sched_yield+:} false; then : - break + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -done -if ${ac_cv_search_sched_yield+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_CRYPTO_new_ex_data" >&5 +$as_echo "$ac_cv_lib_crypto_CRYPTO_new_ex_data" >&6; } +if test "x$ac_cv_lib_crypto_CRYPTO_new_ex_data" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBCRYPTO 1 +_ACEOF + + LIBS="-lcrypto $LIBS" else - ac_cv_search_sched_yield=no + as_fn_error $? "library 'crypto' is required for OpenSSL" "$LINENO" 5 fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SSL_library_init in -lssl" >&5 +$as_echo_n "checking for SSL_library_init in -lssl... " >&6; } +if ${ac_cv_lib_ssl_SSL_library_init+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lssl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char SSL_library_init (); +int +main () +{ +return SSL_library_init (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ssl_SSL_library_init=yes +else + ac_cv_lib_ssl_SSL_library_init=no fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_sched_yield" >&5 -$as_echo "$ac_cv_search_sched_yield" >&6; } -ac_res=$ac_cv_search_sched_yield -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ssl_SSL_library_init" >&5 +$as_echo "$ac_cv_lib_ssl_SSL_library_init" >&6; } +if test "x$ac_cv_lib_ssl_SSL_library_init" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBSSL 1 +_ACEOF + + LIBS="-lssl $LIBS" +else + as_fn_error $? "library 'ssl' is required for OpenSSL" "$LINENO" 5 fi -# Required for thread_test.c on Solaris 2.5: -# Other ports use it too (HP-UX) so test unconditionally -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gethostbyname_r" >&5 -$as_echo_n "checking for library containing gethostbyname_r... " >&6; } -if ${ac_cv_search_gethostbyname_r+:} false; then : + else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing CRYPTO_new_ex_data" >&5 +$as_echo_n "checking for library containing CRYPTO_new_ex_data... 
" >&6; } +if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -8974,16 +8732,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char gethostbyname_r (); +char CRYPTO_new_ex_data (); int main () { -return gethostbyname_r (); +return CRYPTO_new_ex_data (); ; return 0; } _ACEOF -for ac_lib in '' nsl; do +for ac_lib in '' eay32 crypto; do if test -z "$ac_lib"; then ac_res="none required" else @@ -8991,34 +8749,35 @@ for ac_lib in '' nsl; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_gethostbyname_r=$ac_res + ac_cv_search_CRYPTO_new_ex_data=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_gethostbyname_r+:} false; then : + if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : break fi done -if ${ac_cv_search_gethostbyname_r+:} false; then : +if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : else - ac_cv_search_gethostbyname_r=no + ac_cv_search_CRYPTO_new_ex_data=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gethostbyname_r" >&5 -$as_echo "$ac_cv_search_gethostbyname_r" >&6; } -ac_res=$ac_cv_search_gethostbyname_r +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_CRYPTO_new_ex_data" >&5 +$as_echo "$ac_cv_search_CRYPTO_new_ex_data" >&6; } +ac_res=$ac_cv_search_CRYPTO_new_ex_data if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +else + as_fn_error $? "library 'eay32' or 'crypto' is required for OpenSSL" "$LINENO" 5 fi -# Cygwin: -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing shmget" >&5 -$as_echo_n "checking for library containing shmget... " >&6; } -if ${ac_cv_search_shmget+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing SSL_library_init" >&5 +$as_echo_n "checking for library containing SSL_library_init... " >&6; } +if ${ac_cv_search_SSL_library_init+:} false; then : $as_echo_n "(cached) " >&6 else ac_func_search_save_LIBS=$LIBS @@ -9031,16 +8790,16 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char shmget (); +char SSL_library_init (); int main () { -return shmget (); +return SSL_library_init (); ; return 0; } _ACEOF -for ac_lib in '' cygipc; do +for ac_lib in '' ssleay32 ssl; do if test -z "$ac_lib"; then ac_res="none required" else @@ -9048,49 +8807,55 @@ for ac_lib in '' cygipc; do LIBS="-l$ac_lib $ac_func_search_save_LIBS" fi if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_shmget=$ac_res + ac_cv_search_SSL_library_init=$ac_res fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext - if ${ac_cv_search_shmget+:} false; then : + if ${ac_cv_search_SSL_library_init+:} false; then : break fi done -if ${ac_cv_search_shmget+:} false; then : +if ${ac_cv_search_SSL_library_init+:} false; then : else - ac_cv_search_shmget=no + ac_cv_search_SSL_library_init=no fi rm conftest.$ac_ext LIBS=$ac_func_search_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_shmget" >&5 -$as_echo "$ac_cv_search_shmget" >&6; } -ac_res=$ac_cv_search_shmget +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_SSL_library_init" >&5 +$as_echo "$ac_cv_search_SSL_library_init" >&6; } +ac_res=$ac_cv_search_SSL_library_init if test "$ac_res" != no; then : test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" +else + as_fn_error $? 
"library 'ssleay32' or 'ssl' is required for OpenSSL" "$LINENO" 5 fi + fi + for ac_func in SSL_get_current_compression +do : + ac_fn_c_check_func "$LINENO" "SSL_get_current_compression" "ac_cv_func_SSL_get_current_compression" +if test "x$ac_cv_func_SSL_get_current_compression" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_SSL_GET_CURRENT_COMPRESSION 1 +_ACEOF -if test "$with_readline" = yes; then +fi +done +fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing readline" >&5 -$as_echo_n "checking for library containing readline... " >&6; } -if ${pgac_cv_check_readline+:} false; then : +if test "$with_pam" = yes ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pam_start in -lpam" >&5 +$as_echo_n "checking for pam_start in -lpam... " >&6; } +if ${ac_cv_lib_pam_pam_start+:} false; then : $as_echo_n "(cached) " >&6 else - pgac_cv_check_readline=no -pgac_save_LIBS=$LIBS -if test x"$with_libedit_preferred" != x"yes" -then READLINE_ORDER="-lreadline -ledit" -else READLINE_ORDER="-ledit -lreadline" -fi -for pgac_rllib in $READLINE_ORDER ; do - for pgac_lib in "" " -ltermcap" " -lncurses" " -lcurses" ; do - LIBS="${pgac_rllib}${pgac_lib} $pgac_save_LIBS" - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_check_lib_save_LIBS=$LIBS +LIBS="-lpam $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ /* Override any GCC internal prototype to avoid an error. @@ -9099,67 +8864,47 @@ for pgac_rllib in $READLINE_ORDER ; do #ifdef __cplusplus extern "C" #endif -char readline (); +char pam_start (); int main () { -return readline (); +return pam_start (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - - # Older NetBSD, OpenBSD, and Irix have a broken linker that does not - # recognize dependent libraries; assume curses is needed if we didn't - # find any dependency. - case $host_os in - netbsd* | openbsd* | irix*) - if test x"$pgac_lib" = x"" ; then - pgac_lib=" -lcurses" - fi ;; - esac - - pgac_cv_check_readline="${pgac_rllib}${pgac_lib}" - break - + ac_cv_lib_pam_pam_start=yes +else + ac_cv_lib_pam_pam_start=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext - done - if test "$pgac_cv_check_readline" != no ; then - break - fi -done -LIBS=$pgac_save_LIBS - +LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_check_readline" >&5 -$as_echo "$pgac_cv_check_readline" >&6; } -if test "$pgac_cv_check_readline" != no ; then - LIBS="$pgac_cv_check_readline $LIBS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pam_pam_start" >&5 +$as_echo "$ac_cv_lib_pam_pam_start" >&6; } +if test "x$ac_cv_lib_pam_pam_start" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBPAM 1 +_ACEOF -$as_echo "#define HAVE_LIBREADLINE 1" >>confdefs.h + LIBS="-lpam $LIBS" +else + as_fn_error $? "library 'pam' is required for PAM" "$LINENO" 5 fi - - if test x"$pgac_cv_check_readline" = x"no"; then - as_fn_error $? "readline library not found -If you have readline already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-readline to disable readline support." "$LINENO" 5 - fi fi -if test "$with_zlib" = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for inflate in -lz" >&5 -$as_echo_n "checking for inflate in -lz... 
" >&6; } -if ${ac_cv_lib_z_inflate+:} false; then : +if test "$with_libxml" = yes ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xmlSaveToBuffer in -lxml2" >&5 +$as_echo_n "checking for xmlSaveToBuffer in -lxml2... " >&6; } +if ${ac_cv_lib_xml2_xmlSaveToBuffer+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS -LIBS="-lz $LIBS" +LIBS="-lxml2 $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -9169,72 +8914,47 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char inflate (); +char xmlSaveToBuffer (); int main () { -return inflate (); +return xmlSaveToBuffer (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_z_inflate=yes + ac_cv_lib_xml2_xmlSaveToBuffer=yes else - ac_cv_lib_z_inflate=no + ac_cv_lib_xml2_xmlSaveToBuffer=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_z_inflate" >&5 -$as_echo "$ac_cv_lib_z_inflate" >&6; } -if test "x$ac_cv_lib_z_inflate" = xyes; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xml2_xmlSaveToBuffer" >&5 +$as_echo "$ac_cv_lib_xml2_xmlSaveToBuffer" >&6; } +if test "x$ac_cv_lib_xml2_xmlSaveToBuffer" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_LIBZ 1 +#define HAVE_LIBXML2 1 _ACEOF - LIBS="-lz $LIBS" - -else - as_fn_error $? "zlib library not found -If you have zlib already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-zlib to disable zlib support." "$LINENO" 5 -fi - -fi - -if test "$enable_spinlocks" = yes; then - -$as_echo "#define HAVE_SPINLOCKS 1" >>confdefs.h + LIBS="-lxml2 $LIBS" else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: -*** Not using spinlocks will cause poor performance." >&5 -$as_echo "$as_me: WARNING: -*** Not using spinlocks will cause poor performance." >&2;} + as_fn_error $? "library 'xml2' (version >= 2.6.23) is required for XML support" "$LINENO" 5 fi -if test "$enable_atomics" = yes; then - -$as_echo "#define HAVE_ATOMICS 1" >>confdefs.h - -else - { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: -*** Not using atomic operations will cause poor performance." >&5 -$as_echo "$as_me: WARNING: -*** Not using atomic operations will cause poor performance." >&2;} fi -if test "$with_gssapi" = yes ; then - if test "$PORTNAME" != "win32"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing gss_init_sec_context" >&5 -$as_echo_n "checking for library containing gss_init_sec_context... " >&6; } -if ${ac_cv_search_gss_init_sec_context+:} false; then : +if test "$with_libxslt" = yes ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xsltCleanupGlobals in -lxslt" >&5 +$as_echo_n "checking for xsltCleanupGlobals in -lxslt... " >&6; } +if ${ac_cv_lib_xslt_xsltCleanupGlobals+:} false; then : $as_echo_n "(cached) " >&6 else - ac_func_search_save_LIBS=$LIBS + ac_check_lib_save_LIBS=$LIBS +LIBS="-lxslt $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -9244,63 +8964,48 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char gss_init_sec_context (); +char xsltCleanupGlobals (); int main () { -return gss_init_sec_context (); +return xsltCleanupGlobals (); ; return 0; } _ACEOF -for ac_lib in '' gssapi_krb5 gss 'gssapi -lkrb5 -lcrypto'; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_gss_init_sec_context=$ac_res +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_xslt_xsltCleanupGlobals=yes +else + ac_cv_lib_xslt_xsltCleanupGlobals=no fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_gss_init_sec_context+:} false; then : - break + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -done -if ${ac_cv_search_gss_init_sec_context+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xslt_xsltCleanupGlobals" >&5 +$as_echo "$ac_cv_lib_xslt_xsltCleanupGlobals" >&6; } +if test "x$ac_cv_lib_xslt_xsltCleanupGlobals" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBXSLT 1 +_ACEOF -else - ac_cv_search_gss_init_sec_context=no -fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_gss_init_sec_context" >&5 -$as_echo "$ac_cv_search_gss_init_sec_context" >&6; } -ac_res=$ac_cv_search_gss_init_sec_context -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" + LIBS="-lxslt $LIBS" else - as_fn_error $? "could not find function 'gss_init_sec_context' required for GSSAPI" "$LINENO" 5 + as_fn_error $? "library 'xslt' is required for XSLT support" "$LINENO" 5 fi - else - LIBS="$LIBS -lgssapi32" - fi fi -if test "$with_openssl" = yes ; then - if test "$PORTNAME" != "win32"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for CRYPTO_new_ex_data in -lcrypto" >&5 -$as_echo_n "checking for CRYPTO_new_ex_data in -lcrypto... " >&6; } -if ${ac_cv_lib_crypto_CRYPTO_new_ex_data+:} false; then : +# for contrib/sepgsql +if test "$with_selinux" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for security_compute_create_name in -lselinux" >&5 +$as_echo_n "checking for security_compute_create_name in -lselinux... " >&6; } +if ${ac_cv_lib_selinux_security_compute_create_name+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS -LIBS="-lcrypto $LIBS" +LIBS="-lselinux $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -9310,44 +9015,62 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char CRYPTO_new_ex_data (); +char security_compute_create_name (); int main () { -return CRYPTO_new_ex_data (); +return security_compute_create_name (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_crypto_CRYPTO_new_ex_data=yes + ac_cv_lib_selinux_security_compute_create_name=yes else - ac_cv_lib_crypto_CRYPTO_new_ex_data=no + ac_cv_lib_selinux_security_compute_create_name=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_crypto_CRYPTO_new_ex_data" >&5 -$as_echo "$ac_cv_lib_crypto_CRYPTO_new_ex_data" >&6; } -if test "x$ac_cv_lib_crypto_CRYPTO_new_ex_data" = xyes; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_selinux_security_compute_create_name" >&5 +$as_echo "$ac_cv_lib_selinux_security_compute_create_name" >&6; } +if test "x$ac_cv_lib_selinux_security_compute_create_name" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_LIBCRYPTO 1 +#define HAVE_LIBSELINUX 1 _ACEOF - LIBS="-lcrypto $LIBS" + LIBS="-lselinux $LIBS" else - as_fn_error $? "library 'crypto' is required for OpenSSL" "$LINENO" 5 + as_fn_error $? "library 'libselinux', version 2.1.10 or newer, is required for SELinux support" "$LINENO" 5 fi - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for SSL_library_init in -lssl" >&5 -$as_echo_n "checking for SSL_library_init in -lssl... " >&6; } -if ${ac_cv_lib_ssl_SSL_library_init+:} false; then : +fi + +# for contrib/uuid-ossp +if test "$with_uuid" = bsd ; then + # On BSD, the UUID functions are in libc + ac_fn_c_check_func "$LINENO" "uuid_to_string" "ac_cv_func_uuid_to_string" +if test "x$ac_cv_func_uuid_to_string" = xyes; then : + UUID_LIBS="" +else + as_fn_error $? "BSD UUID functions are not present" "$LINENO" 5 +fi + +elif test "$with_uuid" = e2fs ; then + # On OS X, the UUID functions are in libc + ac_fn_c_check_func "$LINENO" "uuid_generate" "ac_cv_func_uuid_generate" +if test "x$ac_cv_func_uuid_generate" = xyes; then : + UUID_LIBS="" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_generate in -luuid" >&5 +$as_echo_n "checking for uuid_generate in -luuid... " >&6; } +if ${ac_cv_lib_uuid_uuid_generate+:} false; then : $as_echo_n "(cached) " >&6 else ac_check_lib_save_LIBS=$LIBS -LIBS="-lssl $LIBS" +LIBS="-luuid $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. 
*/ @@ -9357,44 +9080,42 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char SSL_library_init (); +char uuid_generate (); int main () { -return SSL_library_init (); +return uuid_generate (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ssl_SSL_library_init=yes + ac_cv_lib_uuid_uuid_generate=yes else - ac_cv_lib_ssl_SSL_library_init=no + ac_cv_lib_uuid_uuid_generate=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ssl_SSL_library_init" >&5 -$as_echo "$ac_cv_lib_ssl_SSL_library_init" >&6; } -if test "x$ac_cv_lib_ssl_SSL_library_init" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBSSL 1 -_ACEOF - - LIBS="-lssl $LIBS" - +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uuid_uuid_generate" >&5 +$as_echo "$ac_cv_lib_uuid_uuid_generate" >&6; } +if test "x$ac_cv_lib_uuid_uuid_generate" = xyes; then : + UUID_LIBS="-luuid" else - as_fn_error $? "library 'ssl' is required for OpenSSL" "$LINENO" 5 + as_fn_error $? "library 'uuid' is required for E2FS UUID" "$LINENO" 5 fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing CRYPTO_new_ex_data" >&5 -$as_echo_n "checking for library containing CRYPTO_new_ex_data... " >&6; } -if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : +fi + +elif test "$with_uuid" = ossp ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_export in -lossp-uuid" >&5 +$as_echo_n "checking for uuid_export in -lossp-uuid... " >&6; } +if ${ac_cv_lib_ossp_uuid_uuid_export+:} false; then : $as_echo_n "(cached) " >&6 else - ac_func_search_save_LIBS=$LIBS + ac_check_lib_save_LIBS=$LIBS +LIBS="-lossp-uuid $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -9404,55 +9125,36 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char CRYPTO_new_ex_data (); +char uuid_export (); int main () { -return CRYPTO_new_ex_data (); +return uuid_export (); ; return 0; } _ACEOF -for ac_lib in '' eay32 crypto; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_CRYPTO_new_ex_data=$ac_res -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : - break -fi -done -if ${ac_cv_search_CRYPTO_new_ex_data+:} false; then : - +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_ossp_uuid_uuid_export=yes else - ac_cv_search_CRYPTO_new_ex_data=no + ac_cv_lib_ossp_uuid_uuid_export=no fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_CRYPTO_new_ex_data" >&5 -$as_echo "$ac_cv_search_CRYPTO_new_ex_data" >&6; } -ac_res=$ac_cv_search_CRYPTO_new_ex_data -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" - +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ossp_uuid_uuid_export" >&5 +$as_echo "$ac_cv_lib_ossp_uuid_uuid_export" >&6; } +if test "x$ac_cv_lib_ossp_uuid_uuid_export" = xyes; then : + UUID_LIBS="-lossp-uuid" else - as_fn_error $? 
"library 'eay32' or 'crypto' is required for OpenSSL" "$LINENO" 5 -fi - - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for library containing SSL_library_init" >&5 -$as_echo_n "checking for library containing SSL_library_init... " >&6; } -if ${ac_cv_search_SSL_library_init+:} false; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_export in -luuid" >&5 +$as_echo_n "checking for uuid_export in -luuid... " >&6; } +if ${ac_cv_lib_uuid_uuid_export+:} false; then : $as_echo_n "(cached) " >&6 else - ac_func_search_save_LIBS=$LIBS + ac_check_lib_save_LIBS=$LIBS +LIBS="-luuid $LIBS" cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ @@ -9462,828 +9164,422 @@ cat confdefs.h - <<_ACEOF >conftest.$ac_ext #ifdef __cplusplus extern "C" #endif -char SSL_library_init (); +char uuid_export (); int main () { -return SSL_library_init (); +return uuid_export (); ; return 0; } _ACEOF -for ac_lib in '' ssleay32 ssl; do - if test -z "$ac_lib"; then - ac_res="none required" - else - ac_res=-l$ac_lib - LIBS="-l$ac_lib $ac_func_search_save_LIBS" - fi - if ac_fn_c_try_link "$LINENO"; then : - ac_cv_search_SSL_library_init=$ac_res +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_uuid_uuid_export=yes +else + ac_cv_lib_uuid_uuid_export=no fi rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext - if ${ac_cv_search_SSL_library_init+:} false; then : - break + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -done -if ${ac_cv_search_SSL_library_init+:} false; then : - +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uuid_uuid_export" >&5 +$as_echo "$ac_cv_lib_uuid_uuid_export" >&6; } +if test "x$ac_cv_lib_uuid_uuid_export" = xyes; then : + UUID_LIBS="-luuid" else - ac_cv_search_SSL_library_init=no + as_fn_error $? "library 'ossp-uuid' or 'uuid' is required for OSSP UUID" "$LINENO" 5 fi -rm conftest.$ac_ext -LIBS=$ac_func_search_save_LIBS + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_search_SSL_library_init" >&5 -$as_echo "$ac_cv_search_SSL_library_init" >&6; } -ac_res=$ac_cv_search_SSL_library_init -if test "$ac_res" != no; then : - test "$ac_res" = "none required" || LIBS="$ac_res $LIBS" -else - as_fn_error $? "library 'ssleay32' or 'ssl' is required for OpenSSL" "$LINENO" 5 fi - fi - for ac_func in SSL_get_current_compression -do : - ac_fn_c_check_func "$LINENO" "SSL_get_current_compression" "ac_cv_func_SSL_get_current_compression" -if test "x$ac_cv_func_SSL_get_current_compression" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_SSL_GET_CURRENT_COMPRESSION 1 -_ACEOF -fi -done -fi +## +## Header files +## -if test "$with_pam" = yes ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pam_start in -lpam" >&5 -$as_echo_n "checking for pam_start in -lpam... " >&6; } -if ${ac_cv_lib_pam_pam_start+:} false; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lpam $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ +#include +#include +#include +#include -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char pam_start (); int main () { -return pam_start (); + ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_pam_pam_start=yes +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes else - ac_cv_lib_pam_pam_start=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + ac_cv_header_stdc=no fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_pam_pam_start" >&5 -$as_echo "$ac_cv_lib_pam_pam_start" >&6; } -if test "x$ac_cv_lib_pam_pam_start" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBPAM 1 -_ACEOF +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext - LIBS="-lpam $LIBS" +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : else - as_fn_error $? "library 'pam' is required for PAM" "$LINENO" 5 + ac_cv_header_stdc=no fi +rm -f conftest* fi -if test "$with_libxml" = yes ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xmlSaveToBuffer in -lxml2" >&5 -$as_echo_n "checking for xmlSaveToBuffer in -lxml2... " >&6; } -if ${ac_cv_lib_xml2_xmlSaveToBuffer+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lxml2 $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ +#include -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char xmlSaveToBuffer (); -int -main () -{ -return xmlSaveToBuffer (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_xml2_xmlSaveToBuffer=yes -else - ac_cv_lib_xml2_xmlSaveToBuffer=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xml2_xmlSaveToBuffer" >&5 -$as_echo "$ac_cv_lib_xml2_xmlSaveToBuffer" >&6; } -if test "x$ac_cv_lib_xml2_xmlSaveToBuffer" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBXML2 1 _ACEOF - - LIBS="-lxml2 $LIBS" +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : else - as_fn_error $? "library 'xml2' (version >= 2.6.23) is required for XML support" "$LINENO" 5 + ac_cv_header_stdc=no fi +rm -f conftest* fi -if test "$with_libxslt" = yes ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for xsltCleanupGlobals in -lxslt" >&5 -$as_echo_n "checking for xsltCleanupGlobals in -lxslt... " >&6; } -if ${ac_cv_lib_xslt_xsltCleanupGlobals+:} false; then : - $as_echo_n "(cached) " >&6 +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lxslt $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext + cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. 
- Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) #endif -char xsltCleanupGlobals (); + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) int main () { -return xsltCleanupGlobals (); - ; + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_xslt_xsltCleanupGlobals=yes +if ac_fn_c_try_run "$LINENO"; then : + else - ac_cv_lib_xslt_xsltCleanupGlobals=no + ac_cv_header_stdc=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_xslt_xsltCleanupGlobals" >&5 -$as_echo "$ac_cv_lib_xslt_xsltCleanupGlobals" >&6; } -if test "x$ac_cv_lib_xslt_xsltCleanupGlobals" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBXSLT 1 -_ACEOF - LIBS="-lxslt $LIBS" +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h -else - as_fn_error $? "library 'xslt' is required for XSLT support" "$LINENO" 5 fi +# On IRIX 5.3, sys/types and inttypes.h are conflicting. +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + fi -# Note: We can test for libldap_r only after we know PTHREAD_LIBS -if test "$with_ldap" = yes ; then - _LIBS="$LIBS" - if test "$PORTNAME" != "win32"; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_bind in -lldap" >&5 -$as_echo_n "checking for ldap_bind in -lldap... " >&6; } -if ${ac_cv_lib_ldap_ldap_bind+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lldap $EXTRA_LDAP_LIBS $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ +done -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char ldap_bind (); -int -main () -{ -return ldap_bind (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ldap_ldap_bind=yes -else - ac_cv_lib_ldap_ldap_bind=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_ldap_bind" >&5 -$as_echo "$ac_cv_lib_ldap_ldap_bind" >&6; } -if test "x$ac_cv_lib_ldap_ldap_bind" = xyes; then : + +for ac_header in atomic.h crypt.h dld.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h pwd.h sys/ioctl.h sys/ipc.h sys/poll.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/socket.h sys/sockio.h sys/tas.h sys/time.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : cat >>confdefs.h <<_ACEOF -#define HAVE_LIBLDAP 1 +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 _ACEOF - LIBS="-lldap $LIBS" - -else - as_fn_error $? "library 'ldap' is required for LDAP" "$LINENO" 5 fi - LDAP_LIBS_BE="-lldap $EXTRA_LDAP_LIBS" - if test "$enable_thread_safety" = yes; then - # on some platforms ldap_r fails to link without PTHREAD_LIBS - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_simple_bind in -lldap_r" >&5 -$as_echo_n "checking for ldap_simple_bind in -lldap_r... " >&6; } -if ${ac_cv_lib_ldap_r_ldap_simple_bind+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lldap_r $PTHREAD_CFLAGS $PTHREAD_LIBS $EXTRA_LDAP_LIBS $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ +done -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" + +# On BSD, test for net/if.h will fail unless sys/socket.h +# is included first. +for ac_header in net/if.h +do : + ac_fn_c_check_header_compile "$LINENO" "net/if.h" "ac_cv_header_net_if_h" "$ac_includes_default +#ifdef HAVE_SYS_SOCKET_H +#include #endif -char ldap_simple_bind (); -int -main () -{ -return ldap_simple_bind (); - ; - return 0; -} + +" +if test "x$ac_cv_header_net_if_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_NET_IF_H 1 _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ldap_r_ldap_simple_bind=yes -else - ac_cv_lib_ldap_r_ldap_simple_bind=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_r_ldap_simple_bind" >&5 -$as_echo "$ac_cv_lib_ldap_r_ldap_simple_bind" >&6; } -if test "x$ac_cv_lib_ldap_r_ldap_simple_bind" = xyes; then : + +done + + +# On OpenBSD, test for sys/ucred.h will fail unless sys/param.h +# is included first. +for ac_header in sys/ucred.h +do : + ac_fn_c_check_header_compile "$LINENO" "sys/ucred.h" "ac_cv_header_sys_ucred_h" "$ac_includes_default +#include + +" +if test "x$ac_cv_header_sys_ucred_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_LIBLDAP_R 1 +#define HAVE_SYS_UCRED_H 1 _ACEOF - LIBS="-lldap_r $LIBS" +fi + +done + + +# At least on IRIX, test for netinet/tcp.h will fail unless +# netinet/in.h is included first. 
+for ac_header in netinet/in.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "netinet/in.h" "ac_cv_header_netinet_in_h" "$ac_includes_default" +if test "x$ac_cv_header_netinet_in_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_NETINET_IN_H 1 +_ACEOF -else - as_fn_error $? "library 'ldap_r' is required for LDAP" "$LINENO" 5 fi - LDAP_LIBS_FE="-lldap_r $EXTRA_LDAP_LIBS" - else - LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" - fi - else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_bind in -lwldap32" >&5 -$as_echo_n "checking for ldap_bind in -lwldap32... " >&6; } -if ${ac_cv_lib_wldap32_ldap_bind+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lwldap32 $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ +done -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" +for ac_header in netinet/tcp.h +do : + ac_fn_c_check_header_compile "$LINENO" "netinet/tcp.h" "ac_cv_header_netinet_tcp_h" "$ac_includes_default +#ifdef HAVE_NETINET_IN_H +#include #endif -char ldap_bind (); -int -main () -{ -return ldap_bind (); - ; - return 0; -} + +" +if test "x$ac_cv_header_netinet_tcp_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_NETINET_TCP_H 1 _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_wldap32_ldap_bind=yes -else - ac_cv_lib_wldap32_ldap_bind=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_wldap32_ldap_bind" >&5 -$as_echo "$ac_cv_lib_wldap32_ldap_bind" >&6; } -if test "x$ac_cv_lib_wldap32_ldap_bind" = xyes; then : + +done + + +if expr x"$pgac_cv_check_readline" : 'x-lreadline' >/dev/null ; then + for ac_header in readline/readline.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "readline/readline.h" "ac_cv_header_readline_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_readline_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_LIBWLDAP32 1 +#define HAVE_READLINE_READLINE_H 1 _ACEOF - LIBS="-lwldap32 $LIBS" +else + for ac_header in readline.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_READLINE_H 1 +_ACEOF else - as_fn_error $? "library 'wldap32' is required for LDAP" "$LINENO" 5 + as_fn_error $? "readline header not found +If you have readline already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-readline to disable readline support." "$LINENO" 5 fi - LDAP_LIBS_FE="-lwldap32" - LDAP_LIBS_BE="-lwldap32" - fi - LIBS="$_LIBS" +done + fi +done + for ac_header in readline/history.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "readline/history.h" "ac_cv_header_readline_history_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_history_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_READLINE_HISTORY_H 1 +_ACEOF -# for contrib/sepgsql -if test "$with_selinux" = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for security_compute_create_name in -lselinux" >&5 -$as_echo_n "checking for security_compute_create_name in -lselinux... 
" >&6; } -if ${ac_cv_lib_selinux_security_compute_create_name+:} false; then : - $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lselinux $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char security_compute_create_name (); -int -main () -{ -return security_compute_create_name (); - ; - return 0; -} + for ac_header in history.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "history.h" "ac_cv_header_history_h" "$ac_includes_default" +if test "x$ac_cv_header_history_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_HISTORY_H 1 _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_selinux_security_compute_create_name=yes + else - ac_cv_lib_selinux_security_compute_create_name=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS + as_fn_error $? "history header not found +If you have readline already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-readline to disable readline support." "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_selinux_security_compute_create_name" >&5 -$as_echo "$ac_cv_lib_selinux_security_compute_create_name" >&6; } -if test "x$ac_cv_lib_selinux_security_compute_create_name" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_LIBSELINUX 1 -_ACEOF - LIBS="-lselinux $LIBS" +done -else - as_fn_error $? "library 'libselinux', version 2.1.10 or newer, is required for SELinux support" "$LINENO" 5 fi +done + fi -# for contrib/uuid-ossp -if test "$with_uuid" = bsd ; then - # On BSD, the UUID functions are in libc - ac_fn_c_check_func "$LINENO" "uuid_to_string" "ac_cv_func_uuid_to_string" -if test "x$ac_cv_func_uuid_to_string" = xyes; then : - UUID_LIBS="" -else - as_fn_error $? "BSD UUID functions are not present" "$LINENO" 5 -fi +if expr x"$pgac_cv_check_readline" : 'x-ledit' >/dev/null ; then +# Some installations of libedit usurp /usr/include/readline/, which seems +# bad practice, since in combined installations readline will have its headers +# there. We might have to resort to AC_EGREP checks to make sure we found +# the proper header... + for ac_header in editline/readline.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "editline/readline.h" "ac_cv_header_editline_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_editline_readline_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_EDITLINE_READLINE_H 1 +_ACEOF -elif test "$with_uuid" = e2fs ; then - # On OS X, the UUID functions are in libc - ac_fn_c_check_func "$LINENO" "uuid_generate" "ac_cv_func_uuid_generate" -if test "x$ac_cv_func_uuid_generate" = xyes; then : - UUID_LIBS="" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_generate in -luuid" >&5 -$as_echo_n "checking for uuid_generate in -luuid... " >&6; } -if ${ac_cv_lib_uuid_uuid_generate+:} false; then : - $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-luuid $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. 
*/ -#ifdef __cplusplus -extern "C" -#endif -char uuid_generate (); -int -main () -{ -return uuid_generate (); - ; - return 0; -} + for ac_header in readline.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_READLINE_H 1 _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_uuid_uuid_generate=yes -else - ac_cv_lib_uuid_uuid_generate=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uuid_uuid_generate" >&5 -$as_echo "$ac_cv_lib_uuid_uuid_generate" >&6; } -if test "x$ac_cv_lib_uuid_uuid_generate" = xyes; then : - UUID_LIBS="-luuid" -else - as_fn_error $? "library 'uuid' is required for E2FS UUID" "$LINENO" 5 -fi -fi - -elif test "$with_uuid" = ossp ; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_export in -lossp-uuid" >&5 -$as_echo_n "checking for uuid_export in -lossp-uuid... " >&6; } -if ${ac_cv_lib_ossp_uuid_uuid_export+:} false; then : - $as_echo_n "(cached) " >&6 else - ac_check_lib_save_LIBS=$LIBS -LIBS="-lossp-uuid $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ - -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char uuid_export (); -int -main () -{ -return uuid_export (); - ; - return 0; -} + for ac_header in readline/readline.h +do : + ac_fn_c_check_header_mongrel "$LINENO" "readline/readline.h" "ac_cv_header_readline_readline_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_readline_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_READLINE_READLINE_H 1 _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_ossp_uuid_uuid_export=yes -else - ac_cv_lib_ossp_uuid_uuid_export=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ossp_uuid_uuid_export" >&5 -$as_echo "$ac_cv_lib_ossp_uuid_uuid_export" >&6; } -if test "x$ac_cv_lib_ossp_uuid_uuid_export" = xyes; then : - UUID_LIBS="-lossp-uuid" -else - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for uuid_export in -luuid" >&5 -$as_echo_n "checking for uuid_export in -luuid... " >&6; } -if ${ac_cv_lib_uuid_uuid_export+:} false; then : - $as_echo_n "(cached) " >&6 -else - ac_check_lib_save_LIBS=$LIBS -LIBS="-luuid $LIBS" -cat confdefs.h - <<_ACEOF >conftest.$ac_ext -/* end confdefs.h. */ -/* Override any GCC internal prototype to avoid an error. - Use char because int might match the return type of a GCC - builtin and then its argument prototype would still apply. */ -#ifdef __cplusplus -extern "C" -#endif -char uuid_export (); -int -main () -{ -return uuid_export (); - ; - return 0; -} -_ACEOF -if ac_fn_c_try_link "$LINENO"; then : - ac_cv_lib_uuid_uuid_export=yes else - ac_cv_lib_uuid_uuid_export=no -fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext -LIBS=$ac_check_lib_save_LIBS -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_uuid_uuid_export" >&5 -$as_echo "$ac_cv_lib_uuid_uuid_export" >&6; } -if test "x$ac_cv_lib_uuid_uuid_export" = xyes; then : - UUID_LIBS="-luuid" -else - as_fn_error $? 
"library 'ossp-uuid' or 'uuid' is required for OSSP UUID" "$LINENO" 5 + as_fn_error $? "readline header not found +If you have libedit already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-readline to disable libedit support." "$LINENO" 5 fi -fi +done fi +done +fi -## -## Header files -## +done -for ac_header in atomic.h crypt.h dld.h fp_class.h getopt.h ieeefp.h ifaddrs.h langinfo.h mbarrier.h poll.h pwd.h sys/ioctl.h sys/ipc.h sys/poll.h sys/pstat.h sys/resource.h sys/select.h sys/sem.h sys/shm.h sys/socket.h sys/sockio.h sys/tas.h sys/time.h sys/un.h termios.h ucred.h utime.h wchar.h wctype.h +# Note: in a libedit installation, history.h is sometimes a dummy, and may +# not be there at all. Hence, don't complain if not found. We must check +# though, since in yet other versions it is an independent header. + for ac_header in editline/history.h do : - as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` -ac_fn_c_check_header_mongrel "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default" -if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + ac_fn_c_check_header_mongrel "$LINENO" "editline/history.h" "ac_cv_header_editline_history_h" "$ac_includes_default" +if test "x$ac_cv_header_editline_history_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +#define HAVE_EDITLINE_HISTORY_H 1 _ACEOF -fi - -done - - -# On BSD, test for net/if.h will fail unless sys/socket.h -# is included first. -for ac_header in net/if.h +else + for ac_header in history.h do : - ac_fn_c_check_header_compile "$LINENO" "net/if.h" "ac_cv_header_net_if_h" "$ac_includes_default -#ifdef HAVE_SYS_SOCKET_H -#include -#endif - -" -if test "x$ac_cv_header_net_if_h" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "history.h" "ac_cv_header_history_h" "$ac_includes_default" +if test "x$ac_cv_header_history_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_NET_IF_H 1 +#define HAVE_HISTORY_H 1 _ACEOF -fi - -done - - -# On OpenBSD, test for sys/ucred.h will fail unless sys/param.h -# is included first. -for ac_header in sys/ucred.h +else + for ac_header in readline/history.h do : - ac_fn_c_check_header_compile "$LINENO" "sys/ucred.h" "ac_cv_header_sys_ucred_h" "$ac_includes_default -#include - -" -if test "x$ac_cv_header_sys_ucred_h" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "readline/history.h" "ac_cv_header_readline_history_h" "$ac_includes_default" +if test "x$ac_cv_header_readline_history_h" = xyes; then : cat >>confdefs.h <<_ACEOF -#define HAVE_SYS_UCRED_H 1 +#define HAVE_READLINE_HISTORY_H 1 _ACEOF fi done +fi -# At least on IRIX, test for netinet/tcp.h will fail unless -# netinet/in.h is included first. 
-for ac_header in netinet/in.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "netinet/in.h" "ac_cv_header_netinet_in_h" "$ac_includes_default" -if test "x$ac_cv_header_netinet_in_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_NETINET_IN_H 1 -_ACEOF +done fi done -for ac_header in netinet/tcp.h -do : - ac_fn_c_check_header_compile "$LINENO" "netinet/tcp.h" "ac_cv_header_netinet_tcp_h" "$ac_includes_default -#ifdef HAVE_NETINET_IN_H -#include -#endif +fi -" -if test "x$ac_cv_header_netinet_tcp_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_NETINET_TCP_H 1 -_ACEOF +if test "$with_zlib" = yes; then + ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" +if test "x$ac_cv_header_zlib_h" = xyes; then : +else + as_fn_error $? "zlib header not found +If you have zlib already installed, see config.log for details on the +failure. It is possible the compiler isn't looking in the proper directory. +Use --without-zlib to disable zlib support." "$LINENO" 5 fi -done - -if expr x"$pgac_cv_check_readline" : 'x-lreadline' >/dev/null ; then - for ac_header in readline/readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline/readline.h" "ac_cv_header_readline_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_readline_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_READLINE_H 1 -_ACEOF - -else - for ac_header in readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_H 1 -_ACEOF - -else - as_fn_error $? "readline header not found -If you have readline already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-readline to disable readline support." "$LINENO" 5 -fi - -done - -fi - -done - - for ac_header in readline/history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline/history.h" "ac_cv_header_readline_history_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_history_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_HISTORY_H 1 -_ACEOF - -else - for ac_header in history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "history.h" "ac_cv_header_history_h" "$ac_includes_default" -if test "x$ac_cv_header_history_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_HISTORY_H 1 -_ACEOF - -else - as_fn_error $? "history header not found -If you have readline already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-readline to disable readline support." "$LINENO" 5 -fi - -done - -fi - -done - -fi - -if expr x"$pgac_cv_check_readline" : 'x-ledit' >/dev/null ; then -# Some installations of libedit usurp /usr/include/readline/, which seems -# bad practice, since in combined installations readline will have its headers -# there. We might have to resort to AC_EGREP checks to make sure we found -# the proper header... 
- for ac_header in editline/readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "editline/readline.h" "ac_cv_header_editline_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_editline_readline_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_EDITLINE_READLINE_H 1 -_ACEOF - -else - for ac_header in readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline.h" "ac_cv_header_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_H 1 -_ACEOF - -else - for ac_header in readline/readline.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline/readline.h" "ac_cv_header_readline_readline_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_readline_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_READLINE_H 1 -_ACEOF - -else - as_fn_error $? "readline header not found -If you have libedit already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-readline to disable libedit support." "$LINENO" 5 -fi - -done - -fi - -done - -fi - -done - -# Note: in a libedit installation, history.h is sometimes a dummy, and may -# not be there at all. Hence, don't complain if not found. We must check -# though, since in yet other versions it is an independent header. - for ac_header in editline/history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "editline/history.h" "ac_cv_header_editline_history_h" "$ac_includes_default" -if test "x$ac_cv_header_editline_history_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_EDITLINE_HISTORY_H 1 -_ACEOF - -else - for ac_header in history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "history.h" "ac_cv_header_history_h" "$ac_includes_default" -if test "x$ac_cv_header_history_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_HISTORY_H 1 -_ACEOF - -else - for ac_header in readline/history.h -do : - ac_fn_c_check_header_mongrel "$LINENO" "readline/history.h" "ac_cv_header_readline_history_h" "$ac_includes_default" -if test "x$ac_cv_header_readline_history_h" = xyes; then : - cat >>confdefs.h <<_ACEOF -#define HAVE_READLINE_HISTORY_H 1 -_ACEOF - -fi - -done - -fi - -done - -fi - -done - -fi - -if test "$with_zlib" = yes; then - ac_fn_c_check_header_mongrel "$LINENO" "zlib.h" "ac_cv_header_zlib_h" "$ac_includes_default" -if test "x$ac_cv_header_zlib_h" = xyes; then : - -else - as_fn_error $? "zlib header not found -If you have zlib already installed, see config.log for details on the -failure. It is possible the compiler isn't looking in the proper directory. -Use --without-zlib to disable zlib support." "$LINENO" 5 -fi - - -fi +fi if test "$with_gssapi" = yes ; then for ac_header in gssapi/gssapi.h @@ -12824,396 +12120,910 @@ if test x"$ac_cv_type_struct_addrinfo" = xyes && \ if test "x$ac_cv_func_getaddrinfo" = xyes; then : $as_echo "#define HAVE_GETADDRINFO 1" >>confdefs.h -else - case " $LIBOBJS " in - *" getaddrinfo.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getaddrinfo.$ac_objext" - ;; -esac +else + case " $LIBOBJS " in + *" getaddrinfo.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getaddrinfo.$ac_objext" + ;; +esac + +fi + + +else + case " $LIBOBJS " in + *" getaddrinfo.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getaddrinfo.$ac_objext" + ;; +esac + +fi + +# Similarly, use system's getopt_long() only if system provides struct option. 
+if test x"$ac_cv_type_struct_option" = xyes ; then + ac_fn_c_check_func "$LINENO" "getopt_long" "ac_cv_func_getopt_long" +if test "x$ac_cv_func_getopt_long" = xyes; then : + $as_echo "#define HAVE_GETOPT_LONG 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" getopt_long.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" + ;; +esac + +fi + + +else + case " $LIBOBJS " in + *" getopt_long.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" + ;; +esac + +fi + +# Solaris' getopt() doesn't do what we want for long options, so always use +# our version on that platform. +if test "$PORTNAME" = "solaris"; then + case " $LIBOBJS " in + *" getopt.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt.$ac_objext" + ;; +esac + +fi + +# mingw has adopted a GNU-centric interpretation of optind/optreset, +# so always use our version on Windows. +if test "$PORTNAME" = "win32"; then + case " $LIBOBJS " in + *" getopt.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt.$ac_objext" + ;; +esac + + case " $LIBOBJS " in + *" getopt_long.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" + ;; +esac + +fi + +# Win32 (really MinGW) support +if test "$PORTNAME" = "win32"; then + ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday" +if test "x$ac_cv_func_gettimeofday" = xyes; then : + $as_echo "#define HAVE_GETTIMEOFDAY 1" >>confdefs.h + +else + case " $LIBOBJS " in + *" gettimeofday.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS gettimeofday.$ac_objext" + ;; +esac + +fi + + + case " $LIBOBJS " in + *" dirmod.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" + ;; +esac + + case " $LIBOBJS " in + *" kill.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS kill.$ac_objext" + ;; +esac + + case " $LIBOBJS " in + *" open.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS open.$ac_objext" + ;; +esac + + case " $LIBOBJS " in + *" system.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS system.$ac_objext" + ;; +esac + + case " $LIBOBJS " in + *" win32env.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS win32env.$ac_objext" + ;; +esac + + case " $LIBOBJS " in + *" win32error.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS win32error.$ac_objext" + ;; +esac + + case " $LIBOBJS " in + *" win32setlocale.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS win32setlocale.$ac_objext" + ;; +esac + + +$as_echo "#define HAVE_SYMLINK 1" >>confdefs.h + + ac_fn_c_check_type "$LINENO" "MINIDUMP_TYPE" "ac_cv_type_MINIDUMP_TYPE" " +#define WIN32_LEAN_AND_MEAN +#include +#include +#include +" +if test "x$ac_cv_type_MINIDUMP_TYPE" = xyes; then : + +cat >>confdefs.h <<_ACEOF +#define HAVE_MINIDUMP_TYPE 1 +_ACEOF + +pgac_minidump_type=yes +else + pgac_minidump_type=no +fi + +fi +if test x"$pgac_minidump_type" = x"yes" ; then + have_win32_dbghelp=yes + +else + have_win32_dbghelp=no + +fi + +# Cygwin needs only a bit of that +if test "$PORTNAME" = "cygwin"; then + case " $LIBOBJS " in + *" dirmod.$ac_objext "* ) ;; + *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" + ;; +esac + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sigsetjmp" >&5 +$as_echo_n "checking for sigsetjmp... " >&6; } +if ${pgac_cv_func_sigsetjmp+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ +sigjmp_buf x; sigsetjmp(x, 1); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_func_sigsetjmp=yes +else + pgac_cv_func_sigsetjmp=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_sigsetjmp" >&5 +$as_echo "$pgac_cv_func_sigsetjmp" >&6; } +if test x"$pgac_cv_func_sigsetjmp" = x"yes"; then + +$as_echo "#define HAVE_SIGSETJMP 1" >>confdefs.h + +fi + +ac_fn_c_check_decl "$LINENO" "sys_siglist" "ac_cv_have_decl_sys_siglist" "#include +/* NetBSD declares sys_siglist in unistd.h. */ +#ifdef HAVE_UNISTD_H +# include +#endif + +" +if test "x$ac_cv_have_decl_sys_siglist" = xyes; then : + ac_have_decl=1 +else + ac_have_decl=0 +fi + +cat >>confdefs.h <<_ACEOF +#define HAVE_DECL_SYS_SIGLIST $ac_have_decl +_ACEOF + + + +ac_fn_c_check_func "$LINENO" "syslog" "ac_cv_func_syslog" +if test "x$ac_cv_func_syslog" = xyes; then : + ac_fn_c_check_header_mongrel "$LINENO" "syslog.h" "ac_cv_header_syslog_h" "$ac_includes_default" +if test "x$ac_cv_header_syslog_h" = xyes; then : + +$as_echo "#define HAVE_SYSLOG 1" >>confdefs.h + +fi + + +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for opterr" >&5 +$as_echo_n "checking for opterr... " >&6; } +if ${pgac_cv_var_int_opterr+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +extern int opterr; opterr = 1; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_var_int_opterr=yes +else + pgac_cv_var_int_opterr=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_opterr" >&5 +$as_echo "$pgac_cv_var_int_opterr" >&6; } +if test x"$pgac_cv_var_int_opterr" = x"yes"; then + +$as_echo "#define HAVE_INT_OPTERR 1" >>confdefs.h + +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for optreset" >&5 +$as_echo_n "checking for optreset... " >&6; } +if ${pgac_cv_var_int_optreset+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include +int +main () +{ +extern int optreset; optreset = 1; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_var_int_optreset=yes +else + pgac_cv_var_int_optreset=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_optreset" >&5 +$as_echo "$pgac_cv_var_int_optreset" >&6; } +if test x"$pgac_cv_var_int_optreset" = x"yes"; then + +$as_echo "#define HAVE_INT_OPTRESET 1" >>confdefs.h + +fi + +for ac_func in strtoll strtoq +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + break +fi +done + +for ac_func in strtoull strtouq +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + break +fi +done + + +# Lastly, restore full LIBS list and check for readline/libedit symbols +LIBS="$LIBS_including_readline" + +if test "$with_readline" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for rl_completion_append_character" >&5 +$as_echo_n "checking for rl_completion_append_character... " >&6; } +if ${pgac_cv_var_rl_completion_append_character+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#ifdef HAVE_READLINE_READLINE_H +# include +#elif defined(HAVE_READLINE_H) +# include +#endif + +int +main () +{ +rl_completion_append_character = 'x'; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + pgac_cv_var_rl_completion_append_character=yes +else + pgac_cv_var_rl_completion_append_character=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_rl_completion_append_character" >&5 +$as_echo "$pgac_cv_var_rl_completion_append_character" >&6; } +if test x"$pgac_cv_var_rl_completion_append_character" = x"yes"; then + +$as_echo "#define HAVE_RL_COMPLETION_APPEND_CHARACTER 1" >>confdefs.h + +fi + for ac_func in rl_completion_matches rl_filename_completion_function +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + + for ac_func in append_history history_truncate_file +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF + +fi +done + +fi + + +# +# Pthreads +# +# For each platform, we need to know about any special compile and link +# libraries, and whether the normal C function names are thread-safe. +# See the comment at the top of src/port/thread.c for more information. 
+# WIN32 doesn't need the pthread tests; it always uses threads +if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +acx_pthread_ok=no + +# We used to check for pthread.h first, but this fails if pthread.h +# requires special compiler flags (e.g. on True64 or Sequent). +# It gets checked for in the link test anyway. + +# First of all, check if the user has set any of the PTHREAD_LIBS, +# etcetera environment variables, and if threads linking works using +# them: +if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5 +$as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_join (); +int +main () +{ +return pthread_join (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + acx_pthread_ok=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_pthread_ok" >&5 +$as_echo "$acx_pthread_ok" >&6; } + if test x"$acx_pthread_ok" = xno; then + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" + fi + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" +fi + +# We must check for the threads library under a number of different +# names; the ordering is very important because some systems +# (e.g. DEC) have both -lpthread and -lpthreads, where one of the +# libraries is broken (non-POSIX). -fi +# Create a list of thread flags to try. Items starting with a "-" are +# C compiler flags, and other items are library names, except for "none" +# which indicates that we try without any flags at all, and "pthread-config" +# which is a program returning the flags for the Pth emulation library. +acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config pthreadGC2" -else - case " $LIBOBJS " in - *" getaddrinfo.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getaddrinfo.$ac_objext" - ;; -esac +# The ordering *is* (sometimes) important. 
Some notes on the +# individual items follow: -fi +# pthreads: AIX (must check this before -lpthread) +# none: in case threads are in libc; should be tried before -Kthread and +# other compiler flags to prevent continual compiler warnings +# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) +# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) +# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) +# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) +# -pthreads: Solaris/gcc +# -mthreads: Mingw32/gcc, Lynx/gcc +# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it +# doesn't hurt to check since this sometimes defines pthreads too; +# also defines -D_REENTRANT) +# pthread: Linux, etcetera +# --thread-safe: KAI C++ +# pthread-config: use pthread-config program (for GNU Pth library) -# Similarly, use system's getopt_long() only if system provides struct option. -if test x"$ac_cv_type_struct_option" = xyes ; then - ac_fn_c_check_func "$LINENO" "getopt_long" "ac_cv_func_getopt_long" -if test "x$ac_cv_func_getopt_long" = xyes; then : - $as_echo "#define HAVE_GETOPT_LONG 1" >>confdefs.h +case "${host_cpu}-${host_os}" in + *solaris*) -else - case " $LIBOBJS " in - *" getopt_long.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" - ;; + # On Solaris (at least, for some versions), libc contains stubbed + # (non-functional) versions of the pthreads routines, so link-based + # tests will erroneously succeed. (We need to link with -pthread or + # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather + # a function called by this macro, so we could check for that, but + # who knows whether they'll stub that too in a future libc.) So, + # we'll just look for -pthreads and -lpthread first: + + acx_pthread_flags="-pthread -pthreads pthread -mt $acx_pthread_flags" + ;; esac -fi +if test x"$acx_pthread_ok" = xno; then +for flag in $acx_pthread_flags; do + + tryPTHREAD_CFLAGS="" + tryPTHREAD_LIBS="" + case $flag in + none) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5 +$as_echo_n "checking whether pthreads work without any flags... " >&6; } + ;; + -*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5 +$as_echo_n "checking whether pthreads work with $flag... " >&6; } + tryPTHREAD_CFLAGS="$flag" + ;; + pthread-config) + # skip this if we already have flags defined, for PostgreSQL + if test x"$PTHREAD_CFLAGS" != x -o x"$PTHREAD_LIBS" != x; then continue; fi + # Extract the first word of "pthread-config", so it can be a program name with args. +set dummy pthread-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_acx_pthread_config+:} false; then : + $as_echo_n "(cached) " >&6 else - case " $LIBOBJS " in - *" getopt_long.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" - ;; -esac + if test -n "$acx_pthread_config"; then + ac_cv_prog_acx_pthread_config="$acx_pthread_config" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if as_fn_executable_p "$as_dir/$ac_word$ac_exec_ext"; then + ac_cv_prog_acx_pthread_config="yes" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + test -z "$ac_cv_prog_acx_pthread_config" && ac_cv_prog_acx_pthread_config="no" fi - -# Solaris' getopt() doesn't do what we want for long options, so always use -# our version on that platform. -if test "$PORTNAME" = "solaris"; then - case " $LIBOBJS " in - *" getopt.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt.$ac_objext" - ;; -esac - +fi +acx_pthread_config=$ac_cv_prog_acx_pthread_config +if test -n "$acx_pthread_config"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_pthread_config" >&5 +$as_echo "$acx_pthread_config" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } fi -# mingw has adopted a GNU-centric interpretation of optind/optreset, -# so always use our version on Windows. -if test "$PORTNAME" = "win32"; then - case " $LIBOBJS " in - *" getopt.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt.$ac_objext" - ;; -esac - case " $LIBOBJS " in - *" getopt_long.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS getopt_long.$ac_objext" - ;; -esac + if test x"$acx_pthread_config" = xno; then continue; fi + tryPTHREAD_CFLAGS="`pthread-config --cflags`" + tryPTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" + ;; -fi + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5 +$as_echo_n "checking for the pthreads library -l$flag... " >&6; } + tryPTHREAD_LIBS="-l$flag" + ;; + esac -# Win32 (really MinGW) support -if test "$PORTNAME" = "win32"; then - ac_fn_c_check_func "$LINENO" "gettimeofday" "ac_cv_func_gettimeofday" -if test "x$ac_cv_func_gettimeofday" = xyes; then : - $as_echo "#define HAVE_GETTIMEOFDAY 1" >>confdefs.h + save_LIBS="$LIBS" + save_CFLAGS="$CFLAGS" + LIBS="$tryPTHREAD_LIBS $PTHREAD_LIBS $LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS $tryPTHREAD_CFLAGS" + # Check for various functions. We must include pthread.h, + # since some functions may be macros. (On the Sequent, we + # need a special flag -Kthread to make this header compile.) + # We check for pthread_join because it is in -lpthread on IRIX + # while pthread_create is in libc. We check for pthread_attr_init + # due to DEC craziness with -lpthreads. We check for + # pthread_cleanup_push because it is one of the few pthread + # functions on Solaris that doesn't have a non-functional libc stub. + # We try pthread_create on general principles. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +int +main () +{ +pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + acx_pthread_ok=yes else - case " $LIBOBJS " in - *" gettimeofday.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS gettimeofday.$ac_objext" - ;; -esac - + acx_pthread_ok=no fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test "x$acx_pthread_ok" = xyes; then + # Don't use options that are ignored by the compiler. + # We find them by checking stderror. 
+ cat >conftest.$ac_ext <<_ACEOF +int +main (int argc, char **argv) +{ + (void) argc; + (void) argv; + return 0; +} +_ACEOF + rm -f conftest.$ac_objext conftest$ac_exeext + # Check both linking and compiling, because they might tolerate different options. + if test "`(eval $ac_link 2>&1 1>&5)`" = "" && test "`(eval $ac_compile 2>&1 1>&5)`" = ""; then + # The original macro breaks out of the loop at this point, + # but we continue trying flags because Linux needs -lpthread + # too to build libpq successfully. The test above only + # tests for building binaries, not shared libraries. + PTHREAD_LIBS=" $tryPTHREAD_LIBS $PTHREAD_LIBS" + PTHREAD_CFLAGS="$PTHREAD_CFLAGS $tryPTHREAD_CFLAGS" + else acx_pthread_ok=no + fi + fi - case " $LIBOBJS " in - *" dirmod.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" - ;; -esac + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" - case " $LIBOBJS " in - *" kill.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS kill.$ac_objext" - ;; -esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_pthread_ok" >&5 +$as_echo "$acx_pthread_ok" >&6; } +done +fi - case " $LIBOBJS " in - *" open.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS open.$ac_objext" - ;; -esac +# The original macro has a bunch of other tests here, which we have removed +# because (a) Postgres doesn't need them, and (b) $acx_pthread_ok is not +# meaningful at this point. - case " $LIBOBJS " in - *" system.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS system.$ac_objext" - ;; -esac - case " $LIBOBJS " in - *" win32env.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS win32env.$ac_objext" - ;; -esac - case " $LIBOBJS " in - *" win32error.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS win32error.$ac_objext" - ;; -esac - case " $LIBOBJS " in - *" win32setlocale.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS win32setlocale.$ac_objext" - ;; -esac +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + # set thread flags -$as_echo "#define HAVE_SYMLINK 1" >>confdefs.h +# Some platforms use these, so just define them. They can't hurt if they +# are not supported. For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS +# enables 5-arg getpwuid_r, among other things. +PTHREAD_CFLAGS="$PTHREAD_CFLAGS -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS" - ac_fn_c_check_type "$LINENO" "MINIDUMP_TYPE" "ac_cv_type_MINIDUMP_TYPE" " -#define WIN32_LEAN_AND_MEAN -#include -#include -#include -" -if test "x$ac_cv_type_MINIDUMP_TYPE" = xyes; then : +# Check for *_r functions +_CFLAGS="$CFLAGS" +_LIBS="$LIBS" +CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +LIBS="$LIBS $PTHREAD_LIBS" -cat >>confdefs.h <<_ACEOF -#define HAVE_MINIDUMP_TYPE 1 -_ACEOF +if test "$PORTNAME" != "win32"; then +ac_fn_c_check_header_mongrel "$LINENO" "pthread.h" "ac_cv_header_pthread_h" "$ac_includes_default" +if test "x$ac_cv_header_pthread_h" = xyes; then : -pgac_minidump_type=yes else - pgac_minidump_type=no -fi - + as_fn_error $? 
" +pthread.h not found; use --disable-thread-safety to disable thread safety" "$LINENO" 5 fi -if test x"$pgac_minidump_type" = x"yes" ; then - have_win32_dbghelp=yes -else - have_win32_dbghelp=no fi -# Cygwin needs only a bit of that -if test "$PORTNAME" = "cygwin"; then - case " $LIBOBJS " in - *" dirmod.$ac_objext "* ) ;; - *) LIBOBJS="$LIBOBJS dirmod.$ac_objext" - ;; -esac +for ac_func in strerror_r getpwuid_r gethostbyname_r +do : + as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` +ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" +if eval test \"x\$"$as_ac_var"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +_ACEOF fi +done -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sigsetjmp" >&5 -$as_echo_n "checking for sigsetjmp... " >&6; } -if ${pgac_cv_func_sigsetjmp+:} false; then : + +# Do test here with the proper thread flags +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether strerror_r returns int" >&5 +$as_echo_n "checking whether strerror_r returns int... " >&6; } +if ${pgac_cv_func_strerror_r_int+:} false; then : $as_echo_n "(cached) " >&6 else cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include +#include int main () { -sigjmp_buf x; sigsetjmp(x, 1); +#ifndef _AIX +int strerror_r(int, char *, size_t); +#else +/* Older AIX has 'int' for the third argument so we don't test the args. */ +int strerror_r(); +#endif ; return 0; } _ACEOF -if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_func_sigsetjmp=yes +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_func_strerror_r_int=yes else - pgac_cv_func_sigsetjmp=no + pgac_cv_func_strerror_r_int=no fi -rm -f core conftest.err conftest.$ac_objext \ - conftest$ac_exeext conftest.$ac_ext +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_sigsetjmp" >&5 -$as_echo "$pgac_cv_func_sigsetjmp" >&6; } -if test x"$pgac_cv_func_sigsetjmp" = x"yes"; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_func_strerror_r_int" >&5 +$as_echo "$pgac_cv_func_strerror_r_int" >&6; } +if test x"$pgac_cv_func_strerror_r_int" = xyes ; then -$as_echo "#define HAVE_SIGSETJMP 1" >>confdefs.h +$as_echo "#define STRERROR_R_INT 1" >>confdefs.h fi -ac_fn_c_check_decl "$LINENO" "sys_siglist" "ac_cv_have_decl_sys_siglist" "#include -/* NetBSD declares sys_siglist in unistd.h. */ -#ifdef HAVE_UNISTD_H -# include -#endif -" -if test "x$ac_cv_have_decl_sys_siglist" = xyes; then : - ac_have_decl=1 +CFLAGS="$_CFLAGS" +LIBS="$_LIBS" + else - ac_have_decl=0 +# do not use values from template file +PTHREAD_CFLAGS= +PTHREAD_LIBS= fi -cat >>confdefs.h <<_ACEOF -#define HAVE_DECL_SYS_SIGLIST $ac_have_decl -_ACEOF - - - -ac_fn_c_check_func "$LINENO" "syslog" "ac_cv_func_syslog" -if test "x$ac_cv_func_syslog" = xyes; then : - ac_fn_c_check_header_mongrel "$LINENO" "syslog.h" "ac_cv_header_syslog_h" "$ac_includes_default" -if test "x$ac_cv_header_syslog_h" = xyes; then : - -$as_echo "#define HAVE_SYSLOG 1" >>confdefs.h - -fi -fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for opterr" >&5 -$as_echo_n "checking for opterr... " >&6; } -if ${pgac_cv_var_int_opterr+:} false; then : +# We can test for libldap_r only after we know PTHREAD_LIBS +if test "$with_ldap" = yes ; then + _LIBS="$LIBS" + if test "$PORTNAME" != "win32"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_bind in -lldap" >&5 +$as_echo_n "checking for ldap_bind in -lldap... 
" >&6; } +if ${ac_cv_lib_ldap_ldap_bind+:} false; then : $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_check_lib_save_LIBS=$LIBS +LIBS="-lldap $EXTRA_LDAP_LIBS $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char ldap_bind (); int main () { -extern int opterr; opterr = 1; +return ldap_bind (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_var_int_opterr=yes + ac_cv_lib_ldap_ldap_bind=yes else - pgac_cv_var_int_opterr=no + ac_cv_lib_ldap_ldap_bind=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_opterr" >&5 -$as_echo "$pgac_cv_var_int_opterr" >&6; } -if test x"$pgac_cv_var_int_opterr" = x"yes"; then +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_ldap_bind" >&5 +$as_echo "$ac_cv_lib_ldap_ldap_bind" >&6; } +if test "x$ac_cv_lib_ldap_ldap_bind" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_LIBLDAP 1 +_ACEOF -$as_echo "#define HAVE_INT_OPTERR 1" >>confdefs.h + LIBS="-lldap $LIBS" +else + as_fn_error $? "library 'ldap' is required for LDAP" "$LINENO" 5 fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for optreset" >&5 -$as_echo_n "checking for optreset... " >&6; } -if ${pgac_cv_var_int_optreset+:} false; then : + LDAP_LIBS_BE="-lldap $EXTRA_LDAP_LIBS" + if test "$enable_thread_safety" = yes; then + # on some platforms ldap_r fails to link without PTHREAD_LIBS + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_simple_bind in -lldap_r" >&5 +$as_echo_n "checking for ldap_simple_bind in -lldap_r... " >&6; } +if ${ac_cv_lib_ldap_r_ldap_simple_bind+:} false; then : $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_check_lib_save_LIBS=$LIBS +LIBS="-lldap_r $PTHREAD_CFLAGS $PTHREAD_LIBS $EXTRA_LDAP_LIBS $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char ldap_simple_bind (); int main () { -extern int optreset; optreset = 1; +return ldap_simple_bind (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_var_int_optreset=yes + ac_cv_lib_ldap_r_ldap_simple_bind=yes else - pgac_cv_var_int_optreset=no + ac_cv_lib_ldap_r_ldap_simple_bind=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_int_optreset" >&5 -$as_echo "$pgac_cv_var_int_optreset" >&6; } -if test x"$pgac_cv_var_int_optreset" = x"yes"; then - -$as_echo "#define HAVE_INT_OPTRESET 1" >>confdefs.h - -fi - -for ac_func in strtoll strtoq -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF - break -fi -done - -for ac_func in strtoull strtouq -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_ldap_r_ldap_simple_bind" >&5 +$as_echo "$ac_cv_lib_ldap_r_ldap_simple_bind" >&6; } +if test "x$ac_cv_lib_ldap_r_ldap_simple_bind" = xyes; then : cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +#define HAVE_LIBLDAP_R 1 _ACEOF - break -fi -done + LIBS="-lldap_r $LIBS" -# Lastly, restore full LIBS list and check for readline/libedit symbols -LIBS="$LIBS_including_readline" +else + as_fn_error $? "library 'ldap_r' is required for LDAP" "$LINENO" 5 +fi -if test "$with_readline" = yes; then - { $as_echo "$as_me:${as_lineno-$LINENO}: checking for rl_completion_append_character" >&5 -$as_echo_n "checking for rl_completion_append_character... " >&6; } -if ${pgac_cv_var_rl_completion_append_character+:} false; then : + LDAP_LIBS_FE="-lldap_r $EXTRA_LDAP_LIBS" + else + LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ldap_bind in -lwldap32" >&5 +$as_echo_n "checking for ldap_bind in -lwldap32... " >&6; } +if ${ac_cv_lib_wldap32_ldap_bind+:} false; then : $as_echo_n "(cached) " >&6 else - cat confdefs.h - <<_ACEOF >conftest.$ac_ext + ac_check_lib_save_LIBS=$LIBS +LIBS="-lwldap32 $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext /* end confdefs.h. */ -#include -#ifdef HAVE_READLINE_READLINE_H -# include -#elif defined(HAVE_READLINE_H) -# include -#endif +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char ldap_bind (); int main () { -rl_completion_append_character = 'x'; +return ldap_bind (); ; return 0; } _ACEOF if ac_fn_c_try_link "$LINENO"; then : - pgac_cv_var_rl_completion_append_character=yes + ac_cv_lib_wldap32_ldap_bind=yes else - pgac_cv_var_rl_completion_append_character=no + ac_cv_lib_wldap32_ldap_bind=no fi rm -f core conftest.err conftest.$ac_objext \ conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS fi -{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_var_rl_completion_append_character" >&5 -$as_echo "$pgac_cv_var_rl_completion_append_character" >&6; } -if test x"$pgac_cv_var_rl_completion_append_character" = x"yes"; then - -$as_echo "#define HAVE_RL_COMPLETION_APPEND_CHARACTER 1" >>confdefs.h - -fi - for ac_func in rl_completion_matches rl_filename_completion_function -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_wldap32_ldap_bind" >&5 +$as_echo "$ac_cv_lib_wldap32_ldap_bind" >&6; } +if test "x$ac_cv_lib_wldap32_ldap_bind" = xyes; then : cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 +#define HAVE_LIBWLDAP32 1 _ACEOF -fi -done - - for ac_func in append_history history_truncate_file -do : - as_ac_var=`$as_echo "ac_cv_func_$ac_func" | $as_tr_sh` -ac_fn_c_check_func "$LINENO" "$ac_func" "$as_ac_var" -if eval test \"x\$"$as_ac_var"\" = x"yes"; then : - cat >>confdefs.h <<_ACEOF -#define `$as_echo "HAVE_$ac_func" | $as_tr_cpp` 1 -_ACEOF + LIBS="-lwldap32 $LIBS" +else + as_fn_error $? "library 'wldap32' is required for LDAP" "$LINENO" 5 fi -done + LDAP_LIBS_FE="-lwldap32" + LDAP_LIBS_BE="-lwldap32" + fi + LIBS="$_LIBS" fi + + # This test makes sure that run tests work at all. Sometimes a shared # library is found by the linker, but the runtime linker can't find it. # This check should come after all modifications of compiler or linker diff --git a/configure.in b/configure.in index b43c8ec0165ca..143e667ce27eb 100644 --- a/configure.in +++ b/configure.in @@ -947,54 +947,6 @@ program to use during the build.]) fi fi -# -# Pthreads -# -# For each platform, we need to know about any special compile and link -# libraries, and whether the normal C function names are thread-safe. -# See the comment at the top of src/port/thread.c for more information. -# WIN32 doesn't need the pthread tests; it always uses threads -# -# These tests are run before the library-tests, because linking with the -# other libraries can pull in the pthread functions as a side-effect. We -# want to use the -pthread or similar flags directly, and not rely on -# the side-effects of linking with some other library. -if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then -AX_PTHREAD # set thread flags - -# Some platforms use these, so just define them. They can't hurt if they -# are not supported. For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS -# enables 5-arg getpwuid_r, among other things. 
-PTHREAD_CFLAGS="$PTHREAD_CFLAGS -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS" - -# Check for *_r functions -_CFLAGS="$CFLAGS" -_LIBS="$LIBS" -CFLAGS="$CFLAGS $PTHREAD_CFLAGS" -LIBS="$LIBS $PTHREAD_LIBS" - -if test "$PORTNAME" != "win32"; then -AC_CHECK_HEADER(pthread.h, [], [AC_MSG_ERROR([ -pthread.h not found; use --disable-thread-safety to disable thread safety])]) -fi - -AC_CHECK_FUNCS([strerror_r getpwuid_r gethostbyname_r]) - -# Do test here with the proper thread flags -PGAC_FUNC_STRERROR_R_INT - -CFLAGS="$_CFLAGS" -LIBS="$_LIBS" - -else -# do not use values from template file -PTHREAD_CFLAGS= -PTHREAD_LIBS= -fi - -AC_SUBST(PTHREAD_CFLAGS) -AC_SUBST(PTHREAD_LIBS) - ## ## Libraries @@ -1095,33 +1047,6 @@ if test "$with_libxslt" = yes ; then AC_CHECK_LIB(xslt, xsltCleanupGlobals, [], [AC_MSG_ERROR([library 'xslt' is required for XSLT support])]) fi -# Note: We can test for libldap_r only after we know PTHREAD_LIBS -if test "$with_ldap" = yes ; then - _LIBS="$LIBS" - if test "$PORTNAME" != "win32"; then - AC_CHECK_LIB(ldap, ldap_bind, [], - [AC_MSG_ERROR([library 'ldap' is required for LDAP])], - [$EXTRA_LDAP_LIBS]) - LDAP_LIBS_BE="-lldap $EXTRA_LDAP_LIBS" - if test "$enable_thread_safety" = yes; then - # on some platforms ldap_r fails to link without PTHREAD_LIBS - AC_CHECK_LIB(ldap_r, ldap_simple_bind, [], - [AC_MSG_ERROR([library 'ldap_r' is required for LDAP])], - [$PTHREAD_CFLAGS $PTHREAD_LIBS $EXTRA_LDAP_LIBS]) - LDAP_LIBS_FE="-lldap_r $EXTRA_LDAP_LIBS" - else - LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" - fi - else - AC_CHECK_LIB(wldap32, ldap_bind, [], [AC_MSG_ERROR([library 'wldap32' is required for LDAP])]) - LDAP_LIBS_FE="-lwldap32" - LDAP_LIBS_BE="-lwldap32" - fi - LIBS="$_LIBS" -fi -AC_SUBST(LDAP_LIBS_FE) -AC_SUBST(LDAP_LIBS_BE) - # for contrib/sepgsql if test "$with_selinux" = yes; then AC_CHECK_LIB(selinux, security_compute_create_name, [], @@ -1638,6 +1563,78 @@ if test "$with_readline" = yes; then fi +# +# Pthreads +# +# For each platform, we need to know about any special compile and link +# libraries, and whether the normal C function names are thread-safe. +# See the comment at the top of src/port/thread.c for more information. +# WIN32 doesn't need the pthread tests; it always uses threads +if test "$enable_thread_safety" = yes -a "$PORTNAME" != "win32"; then +ACX_PTHREAD # set thread flags + +# Some platforms use these, so just define them. They can't hurt if they +# are not supported. For example, on Solaris -D_POSIX_PTHREAD_SEMANTICS +# enables 5-arg getpwuid_r, among other things. 
+PTHREAD_CFLAGS="$PTHREAD_CFLAGS -D_REENTRANT -D_THREAD_SAFE -D_POSIX_PTHREAD_SEMANTICS" + +# Check for *_r functions +_CFLAGS="$CFLAGS" +_LIBS="$LIBS" +CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +LIBS="$LIBS $PTHREAD_LIBS" + +if test "$PORTNAME" != "win32"; then +AC_CHECK_HEADER(pthread.h, [], [AC_MSG_ERROR([ +pthread.h not found; use --disable-thread-safety to disable thread safety])]) +fi + +AC_CHECK_FUNCS([strerror_r getpwuid_r gethostbyname_r]) + +# Do test here with the proper thread flags +PGAC_FUNC_STRERROR_R_INT + +CFLAGS="$_CFLAGS" +LIBS="$_LIBS" + +else +# do not use values from template file +PTHREAD_CFLAGS= +PTHREAD_LIBS= +fi + +AC_SUBST(PTHREAD_CFLAGS) +AC_SUBST(PTHREAD_LIBS) + + +# We can test for libldap_r only after we know PTHREAD_LIBS +if test "$with_ldap" = yes ; then + _LIBS="$LIBS" + if test "$PORTNAME" != "win32"; then + AC_CHECK_LIB(ldap, ldap_bind, [], + [AC_MSG_ERROR([library 'ldap' is required for LDAP])], + [$EXTRA_LDAP_LIBS]) + LDAP_LIBS_BE="-lldap $EXTRA_LDAP_LIBS" + if test "$enable_thread_safety" = yes; then + # on some platforms ldap_r fails to link without PTHREAD_LIBS + AC_CHECK_LIB(ldap_r, ldap_simple_bind, [], + [AC_MSG_ERROR([library 'ldap_r' is required for LDAP])], + [$PTHREAD_CFLAGS $PTHREAD_LIBS $EXTRA_LDAP_LIBS]) + LDAP_LIBS_FE="-lldap_r $EXTRA_LDAP_LIBS" + else + LDAP_LIBS_FE="-lldap $EXTRA_LDAP_LIBS" + fi + else + AC_CHECK_LIB(wldap32, ldap_bind, [], [AC_MSG_ERROR([library 'wldap32' is required for LDAP])]) + LDAP_LIBS_FE="-lwldap32" + LDAP_LIBS_BE="-lwldap32" + fi + LIBS="$_LIBS" +fi +AC_SUBST(LDAP_LIBS_FE) +AC_SUBST(LDAP_LIBS_BE) + + # This test makes sure that run tests work at all. Sometimes a shared # library is found by the linker, but the runtime linker can't find it. # This check should come after all modifications of compiler or linker From 1a0959b3887f05e55712e1ef27b7d1b3c75d645f Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 9 Jul 2015 16:00:14 +0300 Subject: [PATCH 037/442] Fix another broken link in documentation. Tom fixed another one of these in commit 7f32dbcd, but there was another almost identical one in libpq docs. Per his comment: HP's web server has apparently become case-sensitive sometime recently. Per bug #13479 from Daniel Abraham. Corrected link identified by Alvaro. --- doc/src/sgml/libpq.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/libpq.sgml b/doc/src/sgml/libpq.sgml index de6b3ad86bfdb..fe0a7920cce14 100644 --- a/doc/src/sgml/libpq.sgml +++ b/doc/src/sgml/libpq.sgml @@ -7572,7 +7572,7 @@ ldap://ldap.acme.com/cn=dbserver,cn=hosts?pgconnectinfo?base?(objectclass=*) libpq will not also initialize those libraries. See + url="http://h71000.www7.hp.com/doc/83final/ba554_90007/ch04.html"> for details on the SSL API. From 19a65458159ca5f46d8ac154e62273fa2a8cf13f Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Thu, 9 Jul 2015 22:30:52 +0900 Subject: [PATCH 038/442] Make wal_compression PGC_SUSET rather than PGC_USERSET. When enabling wal_compression, there is a risk to leak data similarly to the BREACH and CRIME attacks on SSL where the compression ratio of a full page image gives a hint of what is the existing data of this page. This vulnerability is quite cumbersome to exploit in practice, but doable. So this patch makes wal_compression PGC_SUSET in order to prevent non-superusers from enabling it and exploiting the vulnerability while DBA thinks the risk very seriously and disables it in postgresql.conf. Back-patch to 9.5 where wal_compression was introduced. 
--- doc/src/sgml/config.sgml | 1 + src/backend/utils/misc/guc.c | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index 4b7bd8a86e643..b91d6c75d276e 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -2303,6 +2303,7 @@ include_dir 'conf.d' is on or during a base backup. A compressed page image will be decompressed during WAL replay. The default value is off. + Only superusers can change this setting. diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 0356ecb48219c..34c23f9560d32 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -995,7 +995,7 @@ static struct config_bool ConfigureNamesBool[] = }, { - {"wal_compression", PGC_USERSET, WAL_SETTINGS, + {"wal_compression", PGC_SUSET, WAL_SETTINGS, gettext_noop("Compresses full-page writes written in WAL file."), NULL }, From 193e0270752b07f8d0700710a39c4ec367f57339 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 9 Jul 2015 13:22:23 -0400 Subject: [PATCH 039/442] Fix postmaster's handling of a startup-process crash. Ordinarily, a failure (unexpected exit status) of the startup subprocess should be considered fatal, so the postmaster should just close up shop and quit. However, if we sent the startup process a SIGQUIT or SIGKILL signal, the failure is hardly "unexpected", and we should attempt restart; this is necessary for recovery from ordinary backend crashes in hot-standby scenarios. I attempted to implement the latter rule with a two-line patch in commit 442231d7f71764b8c628044e7ce2225f9aa43b67, but it now emerges that that patch was a few bricks shy of a load: it failed to distinguish the case of a signaled startup process from the case where the new startup process crashes before reaching database consistency. That resulted in infinitely respawning a new startup process only to have it crash again. To handle this properly, we really must track whether we have sent the *current* startup process a kill signal. Rather than add yet another ad-hoc boolean to the postmaster's state, I chose to unify this with the existing RecoveryError flag into an enum tracking the startup process's state. That seems more consistent with the postmaster's general state machine design. Back-patch to 9.0, like the previous patch. 
--- src/backend/postmaster/postmaster.c | 56 ++++++++++++++++++++--------- 1 file changed, 40 insertions(+), 16 deletions(-) diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index df8037b498d48..1bb3138a03ab0 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -249,6 +249,17 @@ static pid_t StartupPID = 0, PgStatPID = 0, SysLoggerPID = 0; +/* Startup process's status */ +typedef enum +{ + STARTUP_NOT_RUNNING, + STARTUP_RUNNING, + STARTUP_SIGNALED, /* we sent it a SIGQUIT or SIGKILL */ + STARTUP_CRASHED +} StartupStatusEnum; + +static StartupStatusEnum StartupStatus = STARTUP_NOT_RUNNING; + /* Startup/shutdown state */ #define NoShutdown 0 #define SmartShutdown 1 @@ -258,7 +269,6 @@ static pid_t StartupPID = 0, static int Shutdown = NoShutdown; static bool FatalError = false; /* T if recovering from backend crash */ -static bool RecoveryError = false; /* T if WAL recovery failed */ /* * We use a simple state machine to control startup, shutdown, and @@ -301,8 +311,6 @@ static bool RecoveryError = false; /* T if WAL recovery failed */ * states, nor in PM_SHUTDOWN states (because we don't enter those states * when trying to recover from a crash). It can be true in PM_STARTUP state, * because we don't clear it until we've successfully started WAL redo. - * Similarly, RecoveryError means that we have crashed during recovery, and - * should not try to restart. */ typedef enum { @@ -1246,6 +1254,7 @@ PostmasterMain(int argc, char *argv[]) */ StartupPID = StartupDataBase(); Assert(StartupPID != 0); + StartupStatus = STARTUP_RUNNING; pmState = PM_STARTUP; /* Some workers may be scheduled to start now */ @@ -1666,7 +1675,7 @@ ServerLoop(void) /* If we have lost the archiver, try to start a new one. */ if (PgArchPID == 0 && PgArchStartupAllowed()) - PgArchPID = pgarch_start(); + PgArchPID = pgarch_start(); /* If we need to signal the autovacuum launcher, do so now */ if (avlauncher_needs_signal) @@ -2591,6 +2600,7 @@ reaper(SIGNAL_ARGS) if (Shutdown > NoShutdown && (EXIT_STATUS_0(exitstatus) || EXIT_STATUS_1(exitstatus))) { + StartupStatus = STARTUP_NOT_RUNNING; pmState = PM_WAIT_BACKENDS; /* PostmasterStateMachine logic does the rest */ continue; @@ -2600,6 +2610,7 @@ reaper(SIGNAL_ARGS) { ereport(LOG, (errmsg("shutdown at recovery target"))); + StartupStatus = STARTUP_NOT_RUNNING; Shutdown = SmartShutdown; TerminateChildren(SIGTERM); pmState = PM_WAIT_BACKENDS; @@ -2624,16 +2635,18 @@ reaper(SIGNAL_ARGS) /* * After PM_STARTUP, any unexpected exit (including FATAL exit) of * the startup process is catastrophic, so kill other children, - * and set RecoveryError so we don't try to reinitialize after - * they're gone. Exception: if FatalError is already set, that - * implies we previously sent the startup process a SIGQUIT, so + * and set StartupStatus so we don't try to reinitialize after + * they're gone. Exception: if StartupStatus is STARTUP_SIGNALED, + * then we previously sent the startup process a SIGQUIT; so * that's probably the reason it died, and we do want to try to * restart in that case. 
*/ if (!EXIT_STATUS_0(exitstatus)) { - if (!FatalError) - RecoveryError = true; + if (StartupStatus == STARTUP_SIGNALED) + StartupStatus = STARTUP_NOT_RUNNING; + else + StartupStatus = STARTUP_CRASHED; HandleChildCrash(pid, exitstatus, _("startup process")); continue; @@ -2642,6 +2655,7 @@ reaper(SIGNAL_ARGS) /* * Startup succeeded, commence normal operations */ + StartupStatus = STARTUP_NOT_RUNNING; FatalError = false; Assert(AbortStartTime == 0); ReachedNormalRunning = true; @@ -2962,7 +2976,7 @@ CleanupBackgroundWorker(int pid, ReportBackgroundWorkerPID(rw); /* report child death */ LogChildExit(EXIT_STATUS_0(exitstatus) ? DEBUG1 : LOG, - namebuf, pid, exitstatus); + namebuf, pid, exitstatus); return true; } @@ -3190,7 +3204,10 @@ HandleChildCrash(int pid, int exitstatus, const char *procname) /* Take care of the startup process too */ if (pid == StartupPID) + { StartupPID = 0; + StartupStatus = STARTUP_CRASHED; + } else if (StartupPID != 0 && take_action) { ereport(DEBUG2, @@ -3198,6 +3215,7 @@ HandleChildCrash(int pid, int exitstatus, const char *procname) (SendStop ? "SIGSTOP" : "SIGQUIT"), (int) StartupPID))); signal_child(StartupPID, (SendStop ? SIGSTOP : SIGQUIT)); + StartupStatus = STARTUP_SIGNALED; } /* Take care of the bgwriter too */ @@ -3589,13 +3607,14 @@ PostmasterStateMachine(void) } /* - * If recovery failed, or the user does not want an automatic restart - * after backend crashes, wait for all non-syslogger children to exit, and - * then exit postmaster. We don't try to reinitialize when recovery fails, - * because more than likely it will just fail again and we will keep - * trying forever. + * If the startup process failed, or the user does not want an automatic + * restart after backend crashes, wait for all non-syslogger children to + * exit, and then exit postmaster. We don't try to reinitialize when the + * startup process fails, because more than likely it will just fail again + * and we will keep trying forever. */ - if (pmState == PM_NO_CHILDREN && (RecoveryError || !restart_after_crash)) + if (pmState == PM_NO_CHILDREN && + (StartupStatus == STARTUP_CRASHED || !restart_after_crash)) ExitPostmaster(1); /* @@ -3615,6 +3634,7 @@ PostmasterStateMachine(void) StartupPID = StartupDataBase(); Assert(StartupPID != 0); + StartupStatus = STARTUP_RUNNING; pmState = PM_STARTUP; /* crash recovery started, reset SIGKILL flag */ AbortStartTime = 0; @@ -3746,7 +3766,11 @@ TerminateChildren(int signal) { SignalChildren(signal); if (StartupPID != 0) + { signal_child(StartupPID, signal); + if (signal == SIGQUIT || signal == SIGKILL) + StartupStatus = STARTUP_SIGNALED; + } if (BgWriterPID != 0) signal_child(BgWriterPID, signal); if (CheckpointerPID != 0) From 5acc7730c8c93d5755bc6a0bf36df407f48b2b27 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 9 Jul 2015 18:50:31 -0400 Subject: [PATCH 040/442] Improve documentation about array concat operator vs. underlying functions. The documentation implied that there was seldom any reason to use the array_append, array_prepend, and array_cat functions directly. But that's not really true, because they can help make it clear which case is meant, which the || operator can't do since it's overloaded to represent all three cases. Add some discussion and examples illustrating the potentially confusing behavior that can ensue if the parser misinterprets what was meant. Per a complaint from Michael Herold. Back-patch to 9.2, which is where || started to behave this way. 
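
The kind of ambiguity being documented is easiest to see with an undecorated
NULL, where the operator is resolved as array_cat while the function form is
unambiguous (condensed from the examples added in the hunk below):

    SELECT ARRAY[1, 2] || NULL;               -- taken as array_cat: {1,2}
    SELECT array_append(ARRAY[1, 2], NULL);   -- unambiguous: {1,2,NULL}
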
--- doc/src/sgml/array.sgml | 47 +++++++++++++++++++++++++++++++++++------ 1 file changed, 41 insertions(+), 6 deletions(-) diff --git a/doc/src/sgml/array.sgml b/doc/src/sgml/array.sgml index 5e4130aa6dfa4..4385a09cd9798 100644 --- a/doc/src/sgml/array.sgml +++ b/doc/src/sgml/array.sgml @@ -346,7 +346,7 @@ SELECT array_length(schedule, 1) FROM sal_emp WHERE name = 'Carol'; SELECT cardinality(schedule) FROM sal_emp WHERE name = 'Carol'; - cardinality + cardinality ------------- 4 (1 row) @@ -494,11 +494,7 @@ SELECT array_dims(ARRAY[1,2] || ARRAY[[3,4],[5,6]]); array_prepend, array_append, or array_cat. The first two only support one-dimensional arrays, but array_cat supports multidimensional arrays. - - Note that the concatenation operator discussed above is preferred over - direct use of these functions. In fact, these functions primarily exist for use - in implementing the concatenation operator. However, they might be directly - useful in the creation of user-defined aggregates. Some examples: + Some examples: SELECT array_prepend(1, ARRAY[2,3]); @@ -531,6 +527,45 @@ SELECT array_cat(ARRAY[5,6], ARRAY[[1,2],[3,4]]); {{5,6},{1,2},{3,4}} + + + In simple cases, the concatenation operator discussed above is preferred + over direct use of these functions. However, because the concatenation + operator is overloaded to serve all three cases, there are situations where + use of one of the functions is helpful to avoid ambiguity. For example + consider: + + +SELECT ARRAY[1, 2] || '{3, 4}'; -- the untyped literal is taken as an array + ?column? +----------- + {1,2,3,4} + +SELECT ARRAY[1, 2] || '7'; -- so is this one +ERROR: malformed array literal: "7" + +SELECT ARRAY[1, 2] || NULL; -- so is an undecorated NULL + ?column? +---------- + {1,2} +(1 row) + +SELECT array_append(ARRAY[1, 2], NULL); -- this might have been meant + array_append +-------------- + {1,2,NULL} + + + In the examples above, the parser sees an integer array on one side of the + concatenation operator, and a constant of undetermined type on the other. + The heuristic it uses to resolve the constant's type is to assume it's of + the same type as the operator's other input — in this case, + integer array. So the concatenation operator is presumed to + represent array_cat, not array_append. When + that's the wrong choice, it could be fixed by casting the constant to the + array's element type; but explicit use of array_append might + be a preferable solution. + From ebe8bcd94e7191025e0309718284983891a89064 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Fri, 10 Jul 2015 14:28:34 +0300 Subject: [PATCH 041/442] Copy-edit the docs changes of OWNER TO CURRENT/SESSION_USER additions. Commit 31eae602 added new syntax to many DDL commands to use CURRENT_USER or SESSION_USER instead of role name in ALTER ... OWNER TO, but because of a misplaced '{', the syntax in the docs implied that the syntax was "ALTER ... CURRENT_USER", instead of "ALTER ... OWNER TO CURRENT_USER". Fix that, and also the funny indentation in some of the modified syntax blurps. 
--- doc/src/sgml/ref/alter_large_object.sgml | 2 +- doc/src/sgml/ref/alter_opclass.sgml | 6 +++--- doc/src/sgml/ref/alter_operator.sgml | 4 ++-- doc/src/sgml/ref/alter_opfamily.sgml | 6 +++--- 4 files changed, 9 insertions(+), 9 deletions(-) diff --git a/doc/src/sgml/ref/alter_large_object.sgml b/doc/src/sgml/ref/alter_large_object.sgml index a0ed6c22f3449..5748d52db115b 100644 --- a/doc/src/sgml/ref/alter_large_object.sgml +++ b/doc/src/sgml/ref/alter_large_object.sgml @@ -21,7 +21,7 @@ PostgreSQL documentation -ALTER LARGE OBJECT large_object_oid { OWNER TO new_owner | CURRENT_USER | SESSION_USER } +ALTER LARGE OBJECT large_object_oid OWNER TO { new_owner | CURRENT_USER | SESSION_USER } diff --git a/doc/src/sgml/ref/alter_opclass.sgml b/doc/src/sgml/ref/alter_opclass.sgml index 2e561be8d78f4..58de603aa4683 100644 --- a/doc/src/sgml/ref/alter_opclass.sgml +++ b/doc/src/sgml/ref/alter_opclass.sgml @@ -22,13 +22,13 @@ PostgreSQL documentation ALTER OPERATOR CLASS name USING index_method - RENAME TO new_name + RENAME TO new_name ALTER OPERATOR CLASS name USING index_method - { OWNER TO new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_USER | SESSION_USER } ALTER OPERATOR CLASS name USING index_method - SET SCHEMA new_schema + SET SCHEMA new_schema diff --git a/doc/src/sgml/ref/alter_operator.sgml b/doc/src/sgml/ref/alter_operator.sgml index bdb2d029b171a..8a7af50d6049c 100644 --- a/doc/src/sgml/ref/alter_operator.sgml +++ b/doc/src/sgml/ref/alter_operator.sgml @@ -22,10 +22,10 @@ PostgreSQL documentation ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } ) - { OWNER TO new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_USER | SESSION_USER } ALTER OPERATOR name ( { left_type | NONE } , { right_type | NONE } ) - SET SCHEMA new_schema + SET SCHEMA new_schema diff --git a/doc/src/sgml/ref/alter_opfamily.sgml b/doc/src/sgml/ref/alter_opfamily.sgml index b0942b6ea7de2..4511c7f7b24c9 100644 --- a/doc/src/sgml/ref/alter_opfamily.sgml +++ b/doc/src/sgml/ref/alter_opfamily.sgml @@ -34,13 +34,13 @@ ALTER OPERATOR FAMILY name USING index_method - RENAME TO new_name + RENAME TO new_name ALTER OPERATOR FAMILY name USING index_method - OWNER TO { new_owner | CURRENT_USER | SESSION_USER } + OWNER TO { new_owner | CURRENT_USER | SESSION_USER } ALTER OPERATOR FAMILY name USING index_method - SET SCHEMA new_schema + SET SCHEMA new_schema From 7236f5b068ca78bb3e771f62ee1365ba945d4869 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Sat, 11 Jul 2015 14:20:01 -0700 Subject: [PATCH 042/442] Add assign_expr_collations() to CreatePolicy() and AlterPolicy(). As noted by Noah Misch, CreatePolicy() and AlterPolicy() omit to call assign_expr_collations() on the node trees. Fix the omission and add his test case to the rowsecurity regression test. 
--- src/backend/commands/policy.c | 10 ++++++++++ src/test/regress/expected/rowsecurity.out | 21 +++++++++++++++++++++ src/test/regress/sql/rowsecurity.sql | 12 ++++++++++++ 3 files changed, 43 insertions(+) diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 11efc9f30f144..72329834a3137 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -538,6 +538,10 @@ CreatePolicy(CreatePolicyStmt *stmt) EXPR_KIND_WHERE, "POLICY"); + /* Fix up collation information */ + assign_expr_collations(qual_pstate, qual); + assign_expr_collations(with_check_pstate, with_check_qual); + /* Open pg_policy catalog */ pg_policy_rel = heap_open(PolicyRelationId, RowExclusiveLock); @@ -681,6 +685,9 @@ AlterPolicy(AlterPolicyStmt *stmt) EXPR_KIND_WHERE, "POLICY"); + /* Fix up collation information */ + assign_expr_collations(qual_pstate, qual); + qual_parse_rtable = qual_pstate->p_rtable; free_parsestate(qual_pstate); } @@ -701,6 +708,9 @@ AlterPolicy(AlterPolicyStmt *stmt) EXPR_KIND_WHERE, "POLICY"); + /* Fix up collation information */ + assign_expr_collations(with_check_pstate, with_check_qual); + with_check_parse_rtable = with_check_pstate->p_rtable; free_parsestate(with_check_pstate); } diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index 4073c1beea511..eabfd932de9b2 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -2730,6 +2730,27 @@ ERROR: permission denied for relation copy_t RESET SESSION AUTHORIZATION; DROP TABLE copy_t; -- +-- Collation support +-- +BEGIN; +SET row_security = force; +CREATE TABLE coll_t (c) AS VALUES ('bar'::text); +CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C")); +ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY; +SELECT (string_to_array(polqual, ':'))[7] AS inputcollid FROM pg_policy WHERE polrelid = 'coll_t'::regclass; + inputcollid +------------------ + inputcollid 950 +(1 row) + +SELECT * FROM coll_t; + c +----- + bar +(1 row) + +ROLLBACK; +-- -- Clean up objects -- RESET SESSION AUTHORIZATION; diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index fdd9b892ce6a8..782824acfdae7 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -1087,6 +1087,18 @@ COPY copy_t FROM STDIN; --fail - permission denied. 
RESET SESSION AUTHORIZATION; DROP TABLE copy_t; +-- +-- Collation support +-- +BEGIN; +SET row_security = force; +CREATE TABLE coll_t (c) AS VALUES ('bar'::text); +CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C")); +ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY; +SELECT (string_to_array(polqual, ':'))[7] AS inputcollid FROM pg_policy WHERE polrelid = 'coll_t'::regclass; +SELECT * FROM coll_t; +ROLLBACK; + -- -- Clean up objects -- From 5181fc57dfb98b39d059908e04a0628ee6e65efc Mon Sep 17 00:00:00 2001 From: Bruce Momjian Date: Sat, 11 Jul 2015 22:46:28 -0400 Subject: [PATCH 043/442] doc: fix typo in CREATE POLICY manual page Backpatch through 9.5 --- doc/src/sgml/ref/create_policy.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/ref/create_policy.sgml b/doc/src/sgml/ref/create_policy.sgml index e826984633c47..56f0124db4fe9 100644 --- a/doc/src/sgml/ref/create_policy.sgml +++ b/doc/src/sgml/ref/create_policy.sgml @@ -53,7 +53,7 @@ CREATE POLICY name ON Generally, the system will enforce filter conditions imposed using security policies prior to qualifications that appear in the query itself, - in order to the prevent the inadvertent exposure of the protected data to + in order to prevent the inadvertent exposure of the protected data to user-defined functions which might not be trustworthy. However, functions and operators marked by the system (or the system administrator) as LEAKPROOF may be evaluated before policy From ccd062cfb90e68f7e80c4b31c474db9087289b7d Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 11 Jul 2015 23:34:41 -0400 Subject: [PATCH 044/442] Add now-required #include. Fixes compiler warning induced by 808ea8fc7bb259ddd810353719cac66e85a608c8. --- src/backend/commands/policy.c | 1 + 1 file changed, 1 insertion(+) diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 72329834a3137..17b48d49596b1 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -29,6 +29,7 @@ #include "nodes/makefuncs.h" #include "nodes/pg_list.h" #include "parser/parse_clause.h" +#include "parser/parse_collate.h" #include "parser/parse_node.h" #include "parser/parse_relation.h" #include "rewrite/rewriteManip.h" From 0e8e48b0da6ea00f3dbcb659542b0c81a97d1253 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sun, 12 Jul 2015 22:06:27 +0200 Subject: [PATCH 045/442] Optionally don't error out due to preexisting slots in commandline utilities. pg_receivexlog and pg_recvlogical error out when --create-slot is specified and a slot with the same name already exists. In some cases, especially with pg_receivexlog, that's rather annoying and requires additional scripting. Backpatch to 9.5 as slot control functions have newly been added to pg_receivexlog, and there doesn't seem much point leaving it in a less useful state. 
Discussion: 20150619144755.GG29350@alap3.anarazel.de --- doc/src/sgml/ref/pg_receivexlog.sgml | 10 ++++++ doc/src/sgml/ref/pg_recvlogical.sgml | 10 ++++++ src/bin/pg_basebackup/pg_receivexlog.c | 11 +++++-- src/bin/pg_basebackup/pg_recvlogical.c | 9 +++++- src/bin/pg_basebackup/streamutil.c | 44 +++++++++++--------------- src/bin/pg_basebackup/streamutil.h | 4 +-- 6 files changed, 58 insertions(+), 30 deletions(-) diff --git a/doc/src/sgml/ref/pg_receivexlog.sgml b/doc/src/sgml/ref/pg_receivexlog.sgml index fd787649e42eb..a4c98921cb8bd 100644 --- a/doc/src/sgml/ref/pg_receivexlog.sgml +++ b/doc/src/sgml/ref/pg_receivexlog.sgml @@ -92,6 +92,16 @@ PostgreSQL documentation + + + + + Do not not error out when is specified + and a slot with the specified name already exists. + + + + diff --git a/doc/src/sgml/ref/pg_recvlogical.sgml b/doc/src/sgml/ref/pg_recvlogical.sgml index a28dbc3f18441..4eda9ebdd1f42 100644 --- a/doc/src/sgml/ref/pg_recvlogical.sgml +++ b/doc/src/sgml/ref/pg_recvlogical.sgml @@ -154,6 +154,16 @@ PostgreSQL documentation + + + + + Do not not error out when is specified + and a slot with the specified name already exists. + + + + diff --git a/src/bin/pg_basebackup/pg_receivexlog.c b/src/bin/pg_basebackup/pg_receivexlog.c index 5d964e4ee6b27..00536bd0972a5 100644 --- a/src/bin/pg_basebackup/pg_receivexlog.c +++ b/src/bin/pg_basebackup/pg_receivexlog.c @@ -38,6 +38,7 @@ static int noloop = 0; static int standby_message_timeout = 10 * 1000; /* 10 sec = default */ static volatile bool time_to_abort = false; static bool do_create_slot = false; +static bool slot_exists_ok = false; static bool do_drop_slot = false; static bool synchronous = false; @@ -66,6 +67,7 @@ usage(void) printf(_(" %s [OPTION]...\n"), progname); printf(_("\nOptions:\n")); printf(_(" -D, --directory=DIR receive transaction log files into this directory\n")); + printf(_(" --if-not-exists do not treat naming conflicts as an error when creating a slot\n")); printf(_(" -n, --no-loop do not loop on connection lost\n")); printf(_(" -s, --status-interval=SECS\n" " time between status packets sent to server (default: %d)\n"), (standby_message_timeout / 1000)); @@ -371,7 +373,8 @@ main(int argc, char **argv) /* action */ {"create-slot", no_argument, NULL, 1}, {"drop-slot", no_argument, NULL, 2}, - {"synchronous", no_argument, NULL, 3}, + {"if-not-exists", no_argument, NULL, 3}, + {"synchronous", no_argument, NULL, 4}, {NULL, 0, NULL, 0} }; @@ -455,6 +458,9 @@ main(int argc, char **argv) do_drop_slot = true; break; case 3: + slot_exists_ok = true; + break; + case 4: synchronous = true; break; default: @@ -575,7 +581,8 @@ main(int argc, char **argv) _("%s: creating replication slot \"%s\"\n"), progname, replication_slot); - if (!CreateReplicationSlot(conn, replication_slot, NULL, NULL, true)) + if (!CreateReplicationSlot(conn, replication_slot, NULL, true, + slot_exists_ok)) disconnect_and_exit(1); } diff --git a/src/bin/pg_basebackup/pg_recvlogical.c b/src/bin/pg_basebackup/pg_recvlogical.c index 50844e700d960..f189f71eff6a1 100644 --- a/src/bin/pg_basebackup/pg_recvlogical.c +++ b/src/bin/pg_basebackup/pg_recvlogical.c @@ -38,6 +38,7 @@ static int standby_message_timeout = 10 * 1000; /* 10 sec = default */ static int fsync_interval = 10 * 1000; /* 10 sec = default */ static XLogRecPtr startpos = InvalidXLogRecPtr; static bool do_create_slot = false; +static bool slot_exists_ok = false; static bool do_start_slot = false; static bool do_drop_slot = false; @@ -75,6 +76,7 @@ usage(void) printf(_(" -f, --file=FILE receive 
log into this file, - for stdout\n")); printf(_(" -F --fsync-interval=SECS\n" " time between fsyncs to the output file (default: %d)\n"), (fsync_interval / 1000)); + printf(_(" --if-not-exists do not treat naming conflicts as an error when creating a slot\n")); printf(_(" -I, --startpos=LSN where in an existing slot should the streaming start\n")); printf(_(" -n, --no-loop do not loop on connection lost\n")); printf(_(" -o, --option=NAME[=VALUE]\n" @@ -633,6 +635,7 @@ main(int argc, char **argv) {"create-slot", no_argument, NULL, 1}, {"start", no_argument, NULL, 2}, {"drop-slot", no_argument, NULL, 3}, + {"if-not-exists", no_argument, NULL, 4}, {NULL, 0, NULL, 0} }; int c; @@ -764,6 +767,9 @@ main(int argc, char **argv) case 3: do_drop_slot = true; break; + case 4: + slot_exists_ok = true; + break; default: @@ -891,8 +897,9 @@ main(int argc, char **argv) progname, replication_slot); if (!CreateReplicationSlot(conn, replication_slot, plugin, - &startpos, false)) + false, slot_exists_ok)) disconnect_and_exit(1); + startpos = InvalidXLogRecPtr; } if (!do_start_slot) diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c index 0ed61440b0975..a5cad350f8a49 100644 --- a/src/bin/pg_basebackup/streamutil.c +++ b/src/bin/pg_basebackup/streamutil.c @@ -31,6 +31,8 @@ #include "common/fe_memutils.h" #include "datatype/timestamp.h" +#define ERRCODE_DUPLICATE_OBJECT "42710" + const char *progname; char *connection_string = NULL; char *dbhost = NULL; @@ -314,7 +316,7 @@ RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli, */ bool CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, - XLogRecPtr *startpos, bool is_physical) + bool is_physical, bool slot_exists_ok) { PQExpBuffer query; PGresult *res; @@ -336,12 +338,23 @@ CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, res = PQexec(conn, query->data); if (PQresultStatus(res) != PGRES_TUPLES_OK) { - fprintf(stderr, _("%s: could not send replication command \"%s\": %s"), - progname, query->data, PQerrorMessage(conn)); + const char *sqlstate = PQresultErrorField(res, PG_DIAG_SQLSTATE); - destroyPQExpBuffer(query); - PQclear(res); - return false; + if (slot_exists_ok && strcmp(sqlstate, ERRCODE_DUPLICATE_OBJECT) == 0) + { + destroyPQExpBuffer(query); + PQclear(res); + return true; + } + else + { + fprintf(stderr, _("%s: could not send replication command \"%s\": %s"), + progname, query->data, PQerrorMessage(conn)); + + destroyPQExpBuffer(query); + PQclear(res); + return false; + } } if (PQntuples(res) != 1 || PQnfields(res) != 4) @@ -356,25 +369,6 @@ CreateReplicationSlot(PGconn *conn, const char *slot_name, const char *plugin, return false; } - /* Get LSN start position if necessary */ - if (startpos != NULL) - { - uint32 hi, - lo; - - if (sscanf(PQgetvalue(res, 0, 1), "%X/%X", &hi, &lo) != 2) - { - fprintf(stderr, - _("%s: could not parse transaction log location \"%s\"\n"), - progname, PQgetvalue(res, 0, 1)); - - destroyPQExpBuffer(query); - PQclear(res); - return false; - } - *startpos = ((uint64) hi) << 32 | lo; - } - destroyPQExpBuffer(query); PQclear(res); return true; diff --git a/src/bin/pg_basebackup/streamutil.h b/src/bin/pg_basebackup/streamutil.h index 01ab5660a14d9..b95f83f87e032 100644 --- a/src/bin/pg_basebackup/streamutil.h +++ b/src/bin/pg_basebackup/streamutil.h @@ -32,8 +32,8 @@ extern PGconn *GetConnection(void); /* Replication commands */ extern bool CreateReplicationSlot(PGconn *conn, const char *slot_name, - const char *plugin, 
XLogRecPtr *startpos, - bool is_physical); + const char *plugin, bool is_physical, + bool slot_exists_ok); extern bool DropReplicationSlot(PGconn *conn, const char *slot_name); extern bool RunIdentifySystem(PGconn *conn, char **sysid, TimeLineID *starttli, From 1884708e25c444eb9de6b0665b94c268bab25689 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sun, 12 Jul 2015 22:18:57 +0200 Subject: [PATCH 046/442] For consistency add a pfree to ON CONFLICT set_plan_refs code. Backpatch to 9.5 where ON CONFLICT was introduced. Author: Peter Geoghegan --- src/backend/optimizer/plan/setrefs.c | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 820f69dc67c52..46d84d39a00eb 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -776,6 +776,8 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) linitial_int(splan->resultRelations), rtoffset); + pfree(itlist); + splan->exclRelTlist = fix_scan_list(root, splan->exclRelTlist, rtoffset); } From 0e78a610f24463f64d8a03b39f06e995581c923a Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 12 Jul 2015 16:25:51 -0400 Subject: [PATCH 047/442] Fix assorted memory leaks. Per Coverity (not that any of these are so non-obvious that they should not have been caught before commit). The extent of leakage is probably minor to unnoticeable, but a leak is a leak. Back-patch as necessary. Michael Paquier --- src/bin/pg_basebackup/streamutil.c | 1 + src/bin/pg_dump/pg_dump.c | 2 ++ src/bin/pg_rewind/libpq_fetch.c | 8 ++++++-- src/bin/scripts/vacuumdb.c | 7 ++++--- 4 files changed, 13 insertions(+), 5 deletions(-) diff --git a/src/bin/pg_basebackup/streamutil.c b/src/bin/pg_basebackup/streamutil.c index a5cad350f8a49..91f919c34cd6f 100644 --- a/src/bin/pg_basebackup/streamutil.c +++ b/src/bin/pg_basebackup/streamutil.c @@ -414,6 +414,7 @@ DropReplicationSlot(PGconn *conn, const char *slot_name) return false; } + destroyPQExpBuffer(query); PQclear(res); return true; } diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 32ac26f1db476..6664cee3b4cd8 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -6660,6 +6660,7 @@ getTransforms(Archive *fout, int *numTransforms) appendPQExpBuffer(&namebuf, "%s %s", typeInfo->dobj.name, lanname); transforminfo[i].dobj.name = namebuf.data; + free(lanname); } PQclear(res); @@ -15731,6 +15732,7 @@ getExtensionMembership(Archive *fout, DumpOptions *dopt, ExtensionInfo extinfo[] addObjectDependency(&contable->dataObj->dobj, reftable->dataObj->dobj.dumpId); } + PQclear(res); destroyPQExpBuffer(query); } diff --git a/src/bin/pg_rewind/libpq_fetch.c b/src/bin/pg_rewind/libpq_fetch.c index 05aa133cf36ea..1979fbcb8a5f3 100644 --- a/src/bin/pg_rewind/libpq_fetch.c +++ b/src/bin/pg_rewind/libpq_fetch.c @@ -69,7 +69,7 @@ libpqConnect(const char *connstr) pg_free(str); /* - * Also check that full_page_writes is enabled. We can get torn pages if + * Also check that full_page_writes is enabled. We can get torn pages if * a page is modified while we read it with pg_read_binary_file(), and we * rely on full page images to fix them. */ @@ -81,6 +81,7 @@ libpqConnect(const char *connstr) /* * Runs a query that returns a single value. + * The result should be pg_free'd after use. 
*/ static char * run_simple_query(const char *sql) @@ -123,6 +124,8 @@ libpqGetCurrentXlogInsertLocation(void) result = ((uint64) hi) << 32 | lo; + pg_free(val); + return result; } @@ -201,6 +204,7 @@ libpqProcessFileList(void) process_source_file(path, type, filesize, link_target); } + PQclear(res); } /*---- @@ -296,7 +300,7 @@ receiveFileChunks(const char *sql) if (PQgetisnull(res, 0, 2)) { pg_log(PG_DEBUG, - "received NULL chunk for file \"%s\", file has been deleted\n", + "received NULL chunk for file \"%s\", file has been deleted\n", filename); pg_free(filename); PQclear(res); diff --git a/src/bin/scripts/vacuumdb.c b/src/bin/scripts/vacuumdb.c index ca6d00368325f..7e72db1edf7f2 100644 --- a/src/bin/scripts/vacuumdb.c +++ b/src/bin/scripts/vacuumdb.c @@ -393,9 +393,9 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, for (i = 0; i < ntups; i++) { appendPQExpBufferStr(&buf, - fmtQualifiedId(PQserverVersion(conn), - PQgetvalue(res, i, 1), - PQgetvalue(res, i, 0))); + fmtQualifiedId(PQserverVersion(conn), + PQgetvalue(res, i, 1), + PQgetvalue(res, i, 0))); simple_string_list_append(&dbtables, buf.data); resetPQExpBuffer(&buf); @@ -412,6 +412,7 @@ vacuum_one_database(const char *dbname, vacuumingOptions *vacopts, concurrentCons = ntups; if (concurrentCons <= 1) parallel = false; + PQclear(res); } /* From 3096ff924a9d58be7de56e0cae5c8713a51c6b46 Mon Sep 17 00:00:00 2001 From: Bruce Momjian Date: Sun, 12 Jul 2015 17:41:57 -0400 Subject: [PATCH 048/442] release notes: markup: vacuumdb is an application, not command --- doc/src/sgml/release-9.5.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/release-9.5.sgml b/doc/src/sgml/release-9.5.sgml index a010ffcd0f2ab..a1010b29b87ce 100644 --- a/doc/src/sgml/release-9.5.sgml +++ b/doc/src/sgml/release-9.5.sgml @@ -1447,7 +1447,7 @@ - Allow vacuumdb to + Allow vacuumdb to vacuum in parallel using From 6d5031efcbb4bfadc6a7c2f3c68f05a9281315f4 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 14 Jul 2015 11:38:08 +0300 Subject: [PATCH 049/442] Reformat code in ATPostAlterTypeParse. The code in ATPostAlterTypeParse was very deeply indented, mostly because there were two nested switch-case statements, which add a lot of indentation. Use if-else blocks instead, to make the code less indented and more readable. This is in preparation for next patch that makes some actualy changes to the function. These cosmetic parts have been separated to make it easier to see the real changes in the other patch. 
--- src/backend/commands/tablecmds.c | 104 +++++++++++++++---------------- 1 file changed, 51 insertions(+), 53 deletions(-) diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index d3947139c07d5..e7b23f1621cd3 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -8645,69 +8645,67 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, Node *stm = (Node *) lfirst(list_item); AlteredTableInfo *tab; - switch (nodeTag(stm)) + tab = ATGetQueueEntry(wqueue, rel); + + if (IsA(stm, IndexStmt)) + { + IndexStmt *stmt = (IndexStmt *) stm; + AlterTableCmd *newcmd; + + if (!rewrite) + TryReuseIndex(oldId, stmt); + + newcmd = makeNode(AlterTableCmd); + newcmd->subtype = AT_ReAddIndex; + newcmd->def = (Node *) stmt; + tab->subcmds[AT_PASS_OLD_INDEX] = + lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd); + } + else if (IsA(stm, AlterTableStmt)) { - case T_IndexStmt: + AlterTableStmt *stmt = (AlterTableStmt *) stm; + ListCell *lcmd; + + foreach(lcmd, stmt->cmds) + { + AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); + + if (cmd->subtype == AT_AddIndex) { - IndexStmt *stmt = (IndexStmt *) stm; - AlterTableCmd *newcmd; + Assert(IsA(cmd->def, IndexStmt)); if (!rewrite) - TryReuseIndex(oldId, stmt); + TryReuseIndex(get_constraint_index(oldId), + (IndexStmt *) cmd->def); - tab = ATGetQueueEntry(wqueue, rel); - newcmd = makeNode(AlterTableCmd); - newcmd->subtype = AT_ReAddIndex; - newcmd->def = (Node *) stmt; + cmd->subtype = AT_ReAddIndex; tab->subcmds[AT_PASS_OLD_INDEX] = - lappend(tab->subcmds[AT_PASS_OLD_INDEX], newcmd); - break; + lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd); } - case T_AlterTableStmt: + else if (cmd->subtype == AT_AddConstraint) { - AlterTableStmt *stmt = (AlterTableStmt *) stm; - ListCell *lcmd; - - tab = ATGetQueueEntry(wqueue, rel); - foreach(lcmd, stmt->cmds) - { - AlterTableCmd *cmd = (AlterTableCmd *) lfirst(lcmd); - Constraint *con; - - switch (cmd->subtype) - { - case AT_AddIndex: - Assert(IsA(cmd->def, IndexStmt)); - if (!rewrite) - TryReuseIndex(get_constraint_index(oldId), - (IndexStmt *) cmd->def); - cmd->subtype = AT_ReAddIndex; - tab->subcmds[AT_PASS_OLD_INDEX] = - lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd); - break; - case AT_AddConstraint: - Assert(IsA(cmd->def, Constraint)); - con = (Constraint *) cmd->def; - con->old_pktable_oid = refRelId; - /* rewriting neither side of a FK */ - if (con->contype == CONSTR_FOREIGN && - !rewrite && tab->rewrite == 0) - TryReuseForeignKey(oldId, con); - cmd->subtype = AT_ReAddConstraint; - tab->subcmds[AT_PASS_OLD_CONSTR] = - lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); - break; - default: - elog(ERROR, "unexpected statement type: %d", - (int) cmd->subtype); - } - } - break; + Constraint *con; + + Assert(IsA(cmd->def, Constraint)); + + con = (Constraint *) cmd->def; + con->old_pktable_oid = refRelId; + /* rewriting neither side of a FK */ + if (con->contype == CONSTR_FOREIGN && + !rewrite && tab->rewrite == 0) + TryReuseForeignKey(oldId, con); + cmd->subtype = AT_ReAddConstraint; + tab->subcmds[AT_PASS_OLD_CONSTR] = + lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); } - default: - elog(ERROR, "unexpected statement type: %d", - (int) nodeTag(stm)); + else + elog(ERROR, "unexpected statement type: %d", + (int) cmd->subtype); + } } + else + elog(ERROR, "unexpected statement type: %d", + (int) nodeTag(stm)); } relation_close(rel, NoLock); From 9dee48c94b6eb544dd334ec021ff224454f2020f Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 14 Jul 
2015 11:40:22 +0300 Subject: [PATCH 050/442] Retain comments on indexes and constraints at ALTER TABLE ... TYPE ... When a column's datatype is changed, ATExecAlterColumnType() rebuilds all the affected indexes and constraints, and the comments from the old indexes/constraints were not carried over. To fix, create a synthetic COMMENT ON command in the work queue, to re-add any comments on constraints. For indexes, there's a comment field in IndexStmt that is used. This fixes bug #13126, reported by Kirill Simonov. Original patch by Michael Paquier, reviewed by Petr Jelinek and me. This bug is present in all versions, but only backpatch to 9.5. Given how minor the issue is, it doesn't seem worth the work and risk to backpatch further than that. --- src/backend/commands/tablecmds.c | 65 ++++++++++++++++++++++- src/include/nodes/parsenodes.h | 1 + src/test/regress/expected/alter_table.out | 63 ++++++++++++++++++++++ src/test/regress/sql/alter_table.sql | 36 +++++++++++++ 4 files changed, 163 insertions(+), 2 deletions(-) diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index e7b23f1621cd3..1c7eded9a79cd 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -386,6 +386,8 @@ static void ATPostAlterTypeCleanup(List **wqueue, AlteredTableInfo *tab, static void ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, List **wqueue, LOCKMODE lockmode, bool rewrite); +static void RebuildConstraintComment(AlteredTableInfo *tab, int pass, + Oid objid, Relation rel, char *conname); static void TryReuseIndex(Oid oldId, IndexStmt *stmt); static void TryReuseForeignKey(Oid oldId, Constraint *con); static void change_owner_fix_column_acls(Oid relationOid, @@ -3514,6 +3516,9 @@ ATExecCmd(List **wqueue, AlteredTableInfo *tab, Relation rel, ATExecAddConstraint(wqueue, tab, rel, (Constraint *) cmd->def, false, true, lockmode); break; + case AT_ReAddComment: /* Re-add existing comment */ + address = CommentObject((CommentStmt *) cmd->def); + break; case AT_AddIndexConstraint: /* ADD CONSTRAINT USING INDEX */ address = ATExecAddIndexConstraint(tab, rel, (IndexStmt *) cmd->def, lockmode); @@ -8654,6 +8659,8 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, if (!rewrite) TryReuseIndex(oldId, stmt); + /* keep the index's comment */ + stmt->idxcomment = GetComment(oldId, RelationRelationId, 0); newcmd = makeNode(AlterTableCmd); newcmd->subtype = AT_ReAddIndex; @@ -8672,15 +8679,29 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, if (cmd->subtype == AT_AddIndex) { + IndexStmt *indstmt; + Oid indoid; + Assert(IsA(cmd->def, IndexStmt)); + indstmt = (IndexStmt *) cmd->def; + indoid = get_constraint_index(oldId); + if (!rewrite) - TryReuseIndex(get_constraint_index(oldId), - (IndexStmt *) cmd->def); + TryReuseIndex(indoid, indstmt); + /* keep any comment on the index */ + indstmt->idxcomment = GetComment(indoid, + RelationRelationId, 0); cmd->subtype = AT_ReAddIndex; tab->subcmds[AT_PASS_OLD_INDEX] = lappend(tab->subcmds[AT_PASS_OLD_INDEX], cmd); + + /* recreate any comment on the constraint */ + RebuildConstraintComment(tab, + AT_PASS_OLD_INDEX, + oldId, + rel, indstmt->idxname); } else if (cmd->subtype == AT_AddConstraint) { @@ -8697,6 +8718,12 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, cmd->subtype = AT_ReAddConstraint; tab->subcmds[AT_PASS_OLD_CONSTR] = lappend(tab->subcmds[AT_PASS_OLD_CONSTR], cmd); + + /* recreate any comment on the constraint */ + 
RebuildConstraintComment(tab, + AT_PASS_OLD_CONSTR, + oldId, + rel, con->conname); } else elog(ERROR, "unexpected statement type: %d", @@ -8711,6 +8738,40 @@ ATPostAlterTypeParse(Oid oldId, Oid oldRelId, Oid refRelId, char *cmd, relation_close(rel, NoLock); } +/* + * Subroutine for ATPostAlterTypeParse() to recreate a comment entry for + * a constraint that is being re-added. + */ +static void +RebuildConstraintComment(AlteredTableInfo *tab, int pass, Oid objid, + Relation rel, char *conname) +{ + CommentStmt *cmd; + char *comment_str; + AlterTableCmd *newcmd; + + /* Look for comment for object wanted, and leave if none */ + comment_str = GetComment(objid, ConstraintRelationId, 0); + if (comment_str == NULL) + return; + + /* Build node CommentStmt */ + cmd = makeNode(CommentStmt); + cmd->objtype = OBJECT_TABCONSTRAINT; + cmd->objname = list_make3( + makeString(get_namespace_name(RelationGetNamespace(rel))), + makeString(RelationGetRelationName(rel)), + makeString(conname)); + cmd->objargs = NIL; + cmd->comment = comment_str; + + /* Append it to list of commands */ + newcmd = makeNode(AlterTableCmd); + newcmd->subtype = AT_ReAddComment; + newcmd->def = (Node *) cmd; + tab->subcmds[pass] = lappend(tab->subcmds[pass], newcmd); +} + /* * Subroutine for ATPostAlterTypeParse(). Calls out to CheckIndexCompatible() * for the real analysis, then mutates the IndexStmt based on that verdict. diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index 868905b0c16f6..a567c50da7279 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -1474,6 +1474,7 @@ typedef enum AlterTableType AT_AddConstraint, /* add constraint */ AT_AddConstraintRecurse, /* internal to commands/tablecmds.c */ AT_ReAddConstraint, /* internal to commands/tablecmds.c */ + AT_ReAddComment, /* internal to commands/tablecmds.c */ AT_AlterConstraint, /* alter constraint */ AT_ValidateConstraint, /* validate constraint */ AT_ValidateConstraintRecurse, /* internal to commands/tablecmds.c */ diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out index 3ad2c55775c64..6b9291b7242bc 100644 --- a/src/test/regress/expected/alter_table.out +++ b/src/test/regress/expected/alter_table.out @@ -2400,6 +2400,69 @@ Check constraints: DROP TABLE alter2.tt8; DROP SCHEMA alter2; +-- Check that comments on constraints and indexes are not lost at ALTER TABLE. 
+CREATE TABLE comment_test ( + id int, + positive_col int CHECK (positive_col > 0), + indexed_col int, + CONSTRAINT comment_test_pk PRIMARY KEY (id)); +CREATE INDEX comment_test_index ON comment_test(indexed_col); +COMMENT ON COLUMN comment_test.id IS 'Column ''id'' on comment_test'; +COMMENT ON INDEX comment_test_index IS 'Simple index on comment_test'; +COMMENT ON CONSTRAINT comment_test_positive_col_check ON comment_test IS 'CHECK constraint on comment_test.positive_col'; +COMMENT ON CONSTRAINT comment_test_pk ON comment_test IS 'PRIMARY KEY constraint of comment_test'; +COMMENT ON INDEX comment_test_pk IS 'Index backing the PRIMARY KEY of comment_test'; +SELECT col_description('comment_test'::regclass, 1) as comment; + comment +----------------------------- + Column 'id' on comment_test +(1 row) + +SELECT indexrelid::regclass as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass; + index | comment +--------------------+----------------------------------------------- + comment_test_index | Simple index on comment_test + comment_test_pk | Index backing the PRIMARY KEY of comment_test +(2 rows) + +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass; + constraint | comment +---------------------------------+----------------------------------------------- + comment_test_pk | PRIMARY KEY constraint of comment_test + comment_test_positive_col_check | CHECK constraint on comment_test.positive_col +(2 rows) + +-- Change the datatype of all the columns. ALTER TABLE is optimized to not +-- rebuild an index if the new data type is binary compatible with the old +-- one. Check do a dummy ALTER TABLE that doesn't change the datatype +-- first, to test that no-op codepath, and another one that does. +ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE int; +ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE text; +ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int; +ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text; +ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE int; +ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE bigint; +-- Check that the comments are intact. +SELECT col_description('comment_test'::regclass, 1) as comment; + comment +----------------------------- + Column 'id' on comment_test +(1 row) + +SELECT indexrelid::regclass as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass; + index | comment +--------------------+----------------------------------------------- + comment_test_pk | Index backing the PRIMARY KEY of comment_test + comment_test_index | Simple index on comment_test +(2 rows) + +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass; + constraint | comment +---------------------------------+----------------------------------------------- + comment_test_positive_col_check | CHECK constraint on comment_test.positive_col + comment_test_pk | PRIMARY KEY constraint of comment_test +(2 rows) + -- Check that we map relation oids to filenodes and back correctly. Only -- display bad mappings so the test output doesn't change all the time. 
A -- filenode function call can return NULL for a relation dropped concurrently diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql index 29c1875d2eaec..9f755fae33f73 100644 --- a/src/test/regress/sql/alter_table.sql +++ b/src/test/regress/sql/alter_table.sql @@ -1594,6 +1594,42 @@ ALTER TABLE IF EXISTS tt8 SET SCHEMA alter2; DROP TABLE alter2.tt8; DROP SCHEMA alter2; + +-- Check that comments on constraints and indexes are not lost at ALTER TABLE. +CREATE TABLE comment_test ( + id int, + positive_col int CHECK (positive_col > 0), + indexed_col int, + CONSTRAINT comment_test_pk PRIMARY KEY (id)); +CREATE INDEX comment_test_index ON comment_test(indexed_col); + +COMMENT ON COLUMN comment_test.id IS 'Column ''id'' on comment_test'; +COMMENT ON INDEX comment_test_index IS 'Simple index on comment_test'; +COMMENT ON CONSTRAINT comment_test_positive_col_check ON comment_test IS 'CHECK constraint on comment_test.positive_col'; +COMMENT ON CONSTRAINT comment_test_pk ON comment_test IS 'PRIMARY KEY constraint of comment_test'; +COMMENT ON INDEX comment_test_pk IS 'Index backing the PRIMARY KEY of comment_test'; + +SELECT col_description('comment_test'::regclass, 1) as comment; +SELECT indexrelid::regclass as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass; +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass; + +-- Change the datatype of all the columns. ALTER TABLE is optimized to not +-- rebuild an index if the new data type is binary compatible with the old +-- one. Check do a dummy ALTER TABLE that doesn't change the datatype +-- first, to test that no-op codepath, and another one that does. +ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE int; +ALTER TABLE comment_test ALTER COLUMN indexed_col SET DATA TYPE text; +ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE int; +ALTER TABLE comment_test ALTER COLUMN id SET DATA TYPE text; +ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE int; +ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE bigint; + +-- Check that the comments are intact. +SELECT col_description('comment_test'::regclass, 1) as comment; +SELECT indexrelid::regclass as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass; +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass; + + -- Check that we map relation oids to filenodes and back correctly. Only -- display bad mappings so the test output doesn't change all the time. A -- filenode function call can return NULL for a relation dropped concurrently From fe92a72a2bf6f485fc9f08c3e6191838ac3c6441 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 14 Jul 2015 16:16:23 +0300 Subject: [PATCH 051/442] Make regression test output stable. In the test query I added for ALTER TABLE retaining comments, the order of the result rows was not stable, and varied across systems. Add an ORDER BY to make the order predictable. This should fix the buildfarm failures. 
--- src/test/regress/expected/alter_table.out | 12 ++++++------ src/test/regress/sql/alter_table.sql | 8 ++++---- 2 files changed, 10 insertions(+), 10 deletions(-) diff --git a/src/test/regress/expected/alter_table.out b/src/test/regress/expected/alter_table.out index 6b9291b7242bc..8d124961ce614 100644 --- a/src/test/regress/expected/alter_table.out +++ b/src/test/regress/expected/alter_table.out @@ -2418,14 +2418,14 @@ SELECT col_description('comment_test'::regclass, 1) as comment; Column 'id' on comment_test (1 row) -SELECT indexrelid::regclass as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass; +SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2; index | comment --------------------+----------------------------------------------- comment_test_index | Simple index on comment_test comment_test_pk | Index backing the PRIMARY KEY of comment_test (2 rows) -SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass; +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; constraint | comment ---------------------------------+----------------------------------------------- comment_test_pk | PRIMARY KEY constraint of comment_test @@ -2449,18 +2449,18 @@ SELECT col_description('comment_test'::regclass, 1) as comment; Column 'id' on comment_test (1 row) -SELECT indexrelid::regclass as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass; +SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2; index | comment --------------------+----------------------------------------------- - comment_test_pk | Index backing the PRIMARY KEY of comment_test comment_test_index | Simple index on comment_test + comment_test_pk | Index backing the PRIMARY KEY of comment_test (2 rows) -SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass; +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; constraint | comment ---------------------------------+----------------------------------------------- - comment_test_positive_col_check | CHECK constraint on comment_test.positive_col comment_test_pk | PRIMARY KEY constraint of comment_test + comment_test_positive_col_check | CHECK constraint on comment_test.positive_col (2 rows) -- Check that we map relation oids to filenodes and back correctly. 
Only diff --git a/src/test/regress/sql/alter_table.sql b/src/test/regress/sql/alter_table.sql index 9f755fae33f73..3feed7c232dca 100644 --- a/src/test/regress/sql/alter_table.sql +++ b/src/test/regress/sql/alter_table.sql @@ -1610,8 +1610,8 @@ COMMENT ON CONSTRAINT comment_test_pk ON comment_test IS 'PRIMARY KEY constraint COMMENT ON INDEX comment_test_pk IS 'Index backing the PRIMARY KEY of comment_test'; SELECT col_description('comment_test'::regclass, 1) as comment; -SELECT indexrelid::regclass as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass; -SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass; +SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2; +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; -- Change the datatype of all the columns. ALTER TABLE is optimized to not -- rebuild an index if the new data type is binary compatible with the old @@ -1626,8 +1626,8 @@ ALTER TABLE comment_test ALTER COLUMN positive_col SET DATA TYPE bigint; -- Check that the comments are intact. SELECT col_description('comment_test'::regclass, 1) as comment; -SELECT indexrelid::regclass as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass; -SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass; +SELECT indexrelid::regclass::text as index, obj_description(indexrelid, 'pg_class') as comment FROM pg_index where indrelid = 'comment_test'::regclass ORDER BY 1, 2; +SELECT conname as constraint, obj_description(oid, 'pg_constraint') as comment FROM pg_constraint where conrelid = 'comment_test'::regclass ORDER BY 1, 2; -- Check that we map relation oids to filenodes and back correctly. Only From 5658b0dc0425f987c3272a792ea0944bce23a959 Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Tue, 14 Jul 2015 22:36:51 +0900 Subject: [PATCH 052/442] Prevent pgstattuple() from reporting BRIN as unknown index. Also this patch removes obsolete comment. Back-patch to 9.5 where BRIN index was added. --- contrib/pgstattuple/pgstattuple.c | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/contrib/pgstattuple/pgstattuple.c b/contrib/pgstattuple/pgstattuple.c index c3a8b1d424ab3..4e221c3682663 100644 --- a/contrib/pgstattuple/pgstattuple.c +++ b/contrib/pgstattuple/pgstattuple.c @@ -151,7 +151,6 @@ build_pgstattuple_type(pgstattuple_type *stat, FunctionCallInfo fcinfo) * * C FUNCTION definition * pgstattuple(text) returns pgstattuple_type - * see pgstattuple.sql for pgstattuple_type * ---------- */ @@ -234,6 +233,9 @@ pgstat_relation(Relation rel, FunctionCallInfo fcinfo) case SPGIST_AM_OID: err = "spgist index"; break; + case BRIN_AM_OID: + err = "brin index"; + break; default: err = "unknown index"; break; From 70446994959d67880373b6478ff3fb5f5efd2c87 Mon Sep 17 00:00:00 2001 From: Robert Haas Date: Tue, 14 Jul 2015 16:19:44 -0400 Subject: [PATCH 053/442] Remove regression test added on auto-pilot. Test does not match the comment which precedes it. 
Peter Geoghegan --- src/test/regress/expected/insert_conflict.out | 1 - src/test/regress/sql/insert_conflict.sql | 1 - 2 files changed, 2 deletions(-) diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out index eca9690592db1..325e88b572f0e 100644 --- a/src/test/regress/expected/insert_conflict.out +++ b/src/test/regress/expected/insert_conflict.out @@ -245,7 +245,6 @@ ERROR: there is no unique or exclusion constraint matching the ON CONFLICT spec insert into insertconflicttest values (6, 'Passionfruit') on conflict (lower(fruit)) do update set fruit = excluded.fruit; ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification -- Check the target relation can be aliased -insert into insertconflicttest values (6, 'Passionfruits') on conflict (key) do update set fruit = excluded.fruit; insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = excluded.fruit; -- ok, no reference to target table insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = ict.fruit; -- ok, alias insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = insertconflicttest.fruit; -- error, references aliased away name diff --git a/src/test/regress/sql/insert_conflict.sql b/src/test/regress/sql/insert_conflict.sql index a0bdd7f536ee9..7dd5032212839 100644 --- a/src/test/regress/sql/insert_conflict.sql +++ b/src/test/regress/sql/insert_conflict.sql @@ -102,7 +102,6 @@ insert into insertconflicttest values (5, 'Lemon') on conflict (fruit) do update insert into insertconflicttest values (6, 'Passionfruit') on conflict (lower(fruit)) do update set fruit = excluded.fruit; -- Check the target relation can be aliased -insert into insertconflicttest values (6, 'Passionfruits') on conflict (key) do update set fruit = excluded.fruit; insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = excluded.fruit; -- ok, no reference to target table insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = ict.fruit; -- ok, alias insert into insertconflicttest AS ict values (6, 'Passionfruit') on conflict (key) do update set fruit = insertconflicttest.fruit; -- error, references aliased away name From 8bc8dd81ed215130ab88f12e8ea736d042692630 Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Wed, 15 Jul 2015 17:08:46 +0300 Subject: [PATCH 054/442] Mention table_rewrite as valid event trigger tag This was forgotten in 618c9430a8. --- doc/src/sgml/event-trigger.sgml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/src/sgml/event-trigger.sgml b/doc/src/sgml/event-trigger.sgml index b6cbb1bc249dc..3ed14f08c0f02 100644 --- a/doc/src/sgml/event-trigger.sgml +++ b/doc/src/sgml/event-trigger.sgml @@ -935,7 +935,7 @@ typedef struct EventTriggerData Describes the event for which the function is called, one of "ddl_command_start", "ddl_command_end", - "sql_drop". + "sql_drop", "table_rewrite". See for the meaning of these events. From 21a101848b269e4fff9ccd3a5b5f777911399091 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 15 Jul 2015 21:00:26 -0400 Subject: [PATCH 055/442] MinGW: Link ltree_plpython with plpython. The MSVC build system already did this, and building against Python 3 requires it. Back-patch to 9.5, where the module was introduced. 
--- contrib/ltree_plpython/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/contrib/ltree_plpython/Makefile b/contrib/ltree_plpython/Makefile index 64ca1275f1cb1..0eeb9b83eb3b8 100644 --- a/contrib/ltree_plpython/Makefile +++ b/contrib/ltree_plpython/Makefile @@ -25,7 +25,7 @@ endif ifeq ($(PORTNAME), win32) # This means we need an in-tree build on Windows, not a pgxs build -SHLIB_LINK += $(wildcard ../../src/pl/plpython/libpython*.a) +SHLIB_LINK += $(wildcard ../../src/pl/plpython/libpython*.a) $(wildcard ../../src/pl/plpython/libplpython*.a) endif REGRESS_OPTS += --load-extension=ltree From c2b824e34e2ba9a26e914a41f4dd53f27304dc70 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 15 Jul 2015 21:00:26 -0400 Subject: [PATCH 056/442] AIX: Link the postgres executable with -Wl,-brtllib. This allows PostgreSQL modules and their dependencies to have undefined symbols, resolved at runtime. Perl module shared objects rely on that in Perl 5.8.0 and later. This fixes the crash when PL/PerlU loads such modules, as the hstore_plperl test suite does. Module authors can link using -Wl,-G to permit undefined symbols; by default, linking will fail as it has. Back-patch to 9.0 (all supported versions). --- src/backend/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/Makefile b/src/backend/Makefile index 4f0ea3f60c3a2..98b978f3da475 100644 --- a/src/backend/Makefile +++ b/src/backend/Makefile @@ -88,7 +88,7 @@ endif # win32 ifeq ($(PORTNAME), aix) postgres: $(POSTGRES_IMP) - $(CC) $(CFLAGS) $(LDFLAGS) $(LDFLAGS_EX) $(call expand_subsys,$(OBJS)) -Wl,-bE:$(top_builddir)/src/backend/$(POSTGRES_IMP) $(LIBS) -o $@ + $(CC) $(CFLAGS) $(LDFLAGS) $(LDFLAGS_EX) $(call expand_subsys,$(OBJS)) -Wl,-bE:$(top_builddir)/src/backend/$(POSTGRES_IMP) $(LIBS) -Wl,-brtllib -o $@ $(POSTGRES_IMP): $(OBJS) $(LD) $(LDREL) $(LDOUT) SUBSYS.o $(call expand_subsys,$^) From 525a6a0d4580f52c13f0c9b7c9d82a4f96ef92fa Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 15 Jul 2015 21:00:26 -0400 Subject: [PATCH 057/442] AIX: Link TRANSFORM modules with their dependencies. The result closely resembles linking of these modules for the "win32" port. Augment the $(exports_file) header so the file is also usable as an import file. Unfortunately, relocating an AIX installation will now require adding $(pkglibdir) to LD_LIBRARY_PATH. Back-patch to 9.5, where the modules were introduced. --- contrib/hstore_plperl/Makefile | 7 ++++++- contrib/hstore_plpython/Makefile | 7 ++++++- contrib/ltree_plpython/Makefile | 7 ++++++- src/Makefile.shlib | 3 ++- 4 files changed, 20 insertions(+), 4 deletions(-) diff --git a/contrib/hstore_plperl/Makefile b/contrib/hstore_plperl/Makefile index 19a8ab4493c6a..d789b99375af0 100644 --- a/contrib/hstore_plperl/Makefile +++ b/contrib/hstore_plperl/Makefile @@ -23,10 +23,15 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif +# In configurations that forbid undefined symbols in libraries, link with each +# dependency. This does preclude pgxs builds. 
+ifeq ($(PORTNAME), aix) +rpathdir = $(pkglibdir):$(perl_archlibexp)/CORE +SHLIB_LINK += ../hstore/libhstore.exp $(perl_embed_ldflags) +endif ifeq ($(PORTNAME), win32) # these settings are the same as for plperl override CPPFLAGS += -DPLPERL_HAVE_UID_GID -Wno-comment -# This means we need an in-tree build on Windows, not a pgxs build SHLIB_LINK += ../hstore/libhstore.a $(wildcard ../../src/pl/plperl/libperl*.a) endif diff --git a/contrib/hstore_plpython/Makefile b/contrib/hstore_plpython/Makefile index 6ee434bafa804..395fc7375368f 100644 --- a/contrib/hstore_plpython/Makefile +++ b/contrib/hstore_plpython/Makefile @@ -23,8 +23,13 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif +# In configurations that forbid undefined symbols in libraries, link with each +# dependency. This does preclude pgxs builds. +ifeq ($(PORTNAME), aix) +rpathdir = $(pkglibdir):$(python_libdir) +SHLIB_LINK += ../hstore/libhstore.exp $(python_libspec) $(python_additional_libs) $(wildcard ../../src/pl/plpython/libplpython*.exp) +endif ifeq ($(PORTNAME), win32) -# This means we need an in-tree build on Windows, not a pgxs build SHLIB_LINK += ../hstore/libhstore.a $(wildcard ../../src/pl/plpython/libpython*.a) $(wildcard ../../src/pl/plpython/libplpython*.a) endif diff --git a/contrib/ltree_plpython/Makefile b/contrib/ltree_plpython/Makefile index 0eeb9b83eb3b8..20b0dcfbc5872 100644 --- a/contrib/ltree_plpython/Makefile +++ b/contrib/ltree_plpython/Makefile @@ -23,8 +23,13 @@ include $(top_builddir)/src/Makefile.global include $(top_srcdir)/contrib/contrib-global.mk endif +# In configurations that forbid undefined symbols in libraries, link with each +# dependency. This does preclude pgxs builds. +ifeq ($(PORTNAME), aix) +rpathdir = $(pkglibdir):$(python_libdir) +SHLIB_LINK += $(python_libspec) $(python_additional_libs) $(wildcard ../../src/pl/plpython/libplpython*.exp) +endif ifeq ($(PORTNAME), win32) -# This means we need an in-tree build on Windows, not a pgxs build SHLIB_LINK += $(wildcard ../../src/pl/plpython/libpython*.a) $(wildcard ../../src/pl/plpython/libplpython*.a) endif diff --git a/src/Makefile.shlib b/src/Makefile.shlib index 50c3805eec02f..86db52fe5e1f5 100644 --- a/src/Makefile.shlib +++ b/src/Makefile.shlib @@ -111,6 +111,7 @@ ifeq ($(PORTNAME), aix) shlib = lib$(NAME)$(DLSUFFIX).$(SO_MAJOR_VERSION) endif haslibarule = yes + # $(exports_file) is also usable as an import file exports_file = lib$(NAME).exp endif @@ -341,7 +342,7 @@ $(shlib) $(stlib): $(OBJS) | $(SHLIB_PREREQS) rm -f $(stlib) $(LINK.static) $(stlib) $^ $(RANLIB) $(stlib) - $(MKLDEXPORT) $(stlib) >$(exports_file) + $(MKLDEXPORT) $(stlib) $(shlib) >$(exports_file) $(COMPILER) -o $(shlib) $(stlib) -Wl,-bE:$(exports_file) $(LDFLAGS) $(LDFLAGS_SL) $(SHLIB_LINK) rm -f $(stlib) $(AR) $(AROPT) $(stlib) $(shlib) From 34a6c6172e99cb12dd9f079111231052510b78be Mon Sep 17 00:00:00 2001 From: Magnus Hagander Date: Thu, 16 Jul 2015 10:28:44 +0300 Subject: [PATCH 058/442] Fix copy/past error in comment David Christensen --- src/backend/utils/init/miscinit.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index 2b53c19fb9744..acc4752015b32 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -199,8 +199,8 @@ InitPostmasterChild(void) /* * If possible, make this process a group leader, so that the postmaster * can signal any child processes too. 
Not all processes will have - * children, but for consistency we , but for consistency we make all - * postmaster child processes do this. + * children, but for consistency we make all postmaster child processes do + * this. */ #ifdef HAVE_SETSID if (setsid() < 0) From 095b8e158b064b67239cf7030dba8a3c83c11c85 Mon Sep 17 00:00:00 2001 From: Magnus Hagander Date: Thu, 16 Jul 2015 10:31:58 +0300 Subject: [PATCH 059/442] Fix spelling error David Rowley --- src/backend/optimizer/plan/createplan.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index dc2dcbf93f7a1..8d15c8ede90f9 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -4430,7 +4430,7 @@ make_sort_from_groupcols(PlannerInfo *root, TargetEntry *tle = get_tle_by_resno(sub_tlist, grpColIdx[numsortkeys]); if (!tle) - elog(ERROR, "could not retrive tle for sort-from-groupcols"); + elog(ERROR, "could not retrieve tle for sort-from-groupcols"); sortColIdx[numsortkeys] = tle->resno; sortOperators[numsortkeys] = grpcl->sortop; From fd415ffc9cca3d938d21b24c8513e409af7b751c Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 16 Jul 2015 22:57:46 -0400 Subject: [PATCH 060/442] Fix a low-probability crash in our qsort implementation. It's standard for quicksort implementations, after having partitioned the input into two subgroups, to recurse to process the smaller partition and then handle the larger partition by iterating. This method guarantees that no more than log2(N) levels of recursion can be needed. However, Bentley and McIlroy argued that checking to see which partition is smaller isn't worth the cycles, and so their code doesn't do that but just always recurses on the left partition. In most cases that's fine; but with worst-case input we might need O(N) levels of recursion, and that means that qsort could be driven to stack overflow. Such an overflow seems to be the only explanation for today's report from Yiqing Jin of a SIGSEGV in med3_tuple while creating an index of a couple billion entries with a very large maintenance_work_mem setting. Therefore, let's spend the few additional cycles and lines of code needed to choose the smaller partition for recursion. Also, fix up the qsort code so that it properly uses size_t not int for some intermediate values representing numbers of items. This would only be a live risk when sorting more than INT_MAX bytes (in qsort/qsort_arg) or tuples (in qsort_tuple), which I believe would never happen with any caller in the current core code --- but perhaps it could happen with call sites in third-party modules? In any case, this is trouble waiting to happen, and the corrected code is probably if anything shorter and faster than before, since it removes sign-extension steps that had to happen when converting between int and size_t. In passing, move a couple of CHECK_FOR_INTERRUPTS() calls so that it's not necessary to preserve the value of "r" across them, and prettify the output of gen_qsort_tuple.pl a little. Back-patch to all supported branches. The odds of hitting this issue are probably higher in 9.4 and up than before, due to the new ability to allocate sort workspaces exceeding 1GB, but there's no good reason to believe that it's impossible to crash older branches this way. 
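To make the recursion-bounding shape easier to see in isolation, here is a minimal standalone quicksort written in the same spirit: recurse on whichever partition is smaller, iterate on the larger one, and keep all element counts in size_t. This is only an illustrative sketch under simplified assumptions (Lomuto partitioning of a plain int array; sketch_qsort, swap_int and partition_last are invented names), not the Bentley and McIlroy derived code that this patch actually modifies.

#include <stddef.h>

static void
swap_int(int *a, int *b)
{
    int     t = *a;

    *a = *b;
    *b = t;
}

/* Lomuto partition around the last element; returns the pivot's final slot */
static size_t
partition_last(int *a, size_t n)
{
    int     pivot = a[n - 1];
    size_t  i = 0;
    size_t  j;

    for (j = 0; j + 1 < n; j++)
    {
        if (a[j] < pivot)
        {
            swap_int(&a[i], &a[j]);
            i++;
        }
    }
    swap_int(&a[i], &a[n - 1]);
    return i;
}

static void
sketch_qsort(int *a, size_t n)
{
loop:
    if (n < 2)
        return;

    {
        size_t  p = partition_last(a, n);
        size_t  d1 = p;          /* elements strictly left of the pivot */
        size_t  d2 = n - p - 1;  /* elements strictly right of the pivot */

        if (d1 <= d2)
        {
            /* Recurse on the smaller (left) partition ... */
            sketch_qsort(a, d1);
            /* ... and iterate on the larger (right) one to save stack */
            a += p + 1;
            n = d2;
            goto loop;
        }
        else
        {
            /* Recurse on the smaller (right) partition ... */
            sketch_qsort(a + p + 1, d2);
            /* ... and iterate on the larger (left) one */
            n = d1;
            goto loop;
        }
    }
}

Because each recursive call receives at most half of the remaining range, recursion depth stays within log2(N) even on adversarial input, while the larger half is consumed by the goto loop; keeping the counts in size_t also sidesteps the int overflow hazard noted above for inputs exceeding INT_MAX items.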
--- src/backend/utils/sort/gen_qsort_tuple.pl | 86 ++++++++++++++++------- src/port/qsort.c | 67 +++++++++++++----- src/port/qsort_arg.c | 63 ++++++++++++----- 3 files changed, 156 insertions(+), 60 deletions(-) diff --git a/src/backend/utils/sort/gen_qsort_tuple.pl b/src/backend/utils/sort/gen_qsort_tuple.pl index 18dd751b38272..6186d0a5babda 100644 --- a/src/backend/utils/sort/gen_qsort_tuple.pl +++ b/src/backend/utils/sort/gen_qsort_tuple.pl @@ -14,11 +14,13 @@ # # Modifications from vanilla NetBSD source: # Add do ... while() macro fix -# Remove __inline, _DIAGASSERTs, __P -# Remove ill-considered "swap_cnt" switch to insertion sort, -# in favor of a simple check for presorted input. -# Instead of sorting arbitrary objects, we're always sorting SortTuples -# Add CHECK_FOR_INTERRUPTS() +# Remove __inline, _DIAGASSERTs, __P +# Remove ill-considered "swap_cnt" switch to insertion sort, +# in favor of a simple check for presorted input. +# Take care to recurse on the smaller partition, to bound stack usage. +# +# Instead of sorting arbitrary objects, we're always sorting SortTuples. +# Add CHECK_FOR_INTERRUPTS(). # # CAUTION: if you change this file, see also qsort.c and qsort_arg.c # @@ -43,9 +45,11 @@ $EXTRAPARAMS = ', ssup'; $CMPPARAMS = ', ssup'; print <<'EOM'; + #define cmp_ssup(a, b, ssup) \ ApplySortComparator((a)->datum1, (a)->isnull1, \ (b)->datum1, (b)->isnull1, ssup) + EOM emit_qsort_implementation(); @@ -53,7 +57,8 @@ sub emit_qsort_boilerplate { print <<'EOM'; /* - * autogenerated by src/backend/utils/sort/gen_qsort_tuple.pl, do not edit + * autogenerated by src/backend/utils/sort/gen_qsort_tuple.pl, do not edit! + * * This file is included by tuplesort.c, rather than compiled separately. */ @@ -78,7 +83,7 @@ sub emit_qsort_boilerplate * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE + * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) @@ -92,8 +97,16 @@ sub emit_qsort_boilerplate * Qsort routine based on J. L. Bentley and M. D. McIlroy, * "Engineering a sort function", * Software--Practice and Experience 23 (1993) 1249-1265. + * * We have modified their original by adding a check for already-sorted input, * which seems to be a win per discussions on pgsql-hackers around 2006-03-21. + * + * Also, we recurse on the smaller partition and iterate on the larger one, + * which ensures we cannot recurse more than log(N) levels (since the + * partition recursed to is surely no more than half of the input). Bentley + * and McIlroy explicitly rejected doing this on the grounds that it's "not + * worth the effort", but we have seen crashes in the field due to stack + * overrun, so that judgment seems wrong. 
*/ static void @@ -114,7 +127,8 @@ sub emit_qsort_boilerplate *(b) = t; \ } while (0); -#define vecswap(a, b, n) if ((n) > 0) swapfunc((a), (b), (size_t)(n)) +#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n) + EOM } @@ -141,8 +155,9 @@ sub emit_qsort_implementation *pl, *pm, *pn; - int d, - r, + size_t d1, + d2; + int r, presorted; loop: @@ -173,7 +188,8 @@ sub emit_qsort_implementation pn = a + (n - 1); if (n > 40) { - d = (n / 8); + size_t d = (n / 8); + pl = med3_$SUFFIX(pl, pl + d, pl + 2 * d$EXTRAPARAMS); pm = med3_$SUFFIX(pm - d, pm, pm + d$EXTRAPARAMS); pn = med3_$SUFFIX(pn - 2 * d, pn - d, pn$EXTRAPARAMS); @@ -187,23 +203,23 @@ sub emit_qsort_implementation { while (pb <= pc && (r = cmp_$SUFFIX(pb, a$CMPPARAMS)) <= 0) { - CHECK_FOR_INTERRUPTS(); if (r == 0) { swap(pa, pb); pa++; } pb++; + CHECK_FOR_INTERRUPTS(); } while (pb <= pc && (r = cmp_$SUFFIX(pc, a$CMPPARAMS)) >= 0) { - CHECK_FOR_INTERRUPTS(); if (r == 0) { swap(pc, pd); pd--; } pc--; + CHECK_FOR_INTERRUPTS(); } if (pb > pc) break; @@ -212,21 +228,39 @@ sub emit_qsort_implementation pc--; } pn = a + n; - r = Min(pa - a, pb - pa); - vecswap(a, pb - r, r); - r = Min(pd - pc, pn - pd - 1); - vecswap(pb, pn - r, r); - if ((r = pb - pa) > 1) - qsort_$SUFFIX(a, r$EXTRAPARAMS); - if ((r = pd - pc) > 1) + d1 = Min(pa - a, pb - pa); + vecswap(a, pb - d1, d1); + d1 = Min(pd - pc, pn - pd - 1); + vecswap(pb, pn - d1, d1); + d1 = pb - pa; + d2 = pd - pc; + if (d1 <= d2) { - /* Iterate rather than recurse to save stack space */ - a = pn - r; - n = r; - goto loop; + /* Recurse on left partition, then iterate on right partition */ + if (d1 > 1) + qsort_$SUFFIX(a, d1$EXTRAPARAMS); + if (d2 > 1) + { + /* Iterate rather than recurse to save stack space */ + /* qsort_$SUFFIX(pn - d2, d2$EXTRAPARAMS); */ + a = pn - d2; + n = d2; + goto loop; + } + } + else + { + /* Recurse on right partition, then iterate on left partition */ + if (d2 > 1) + qsort_$SUFFIX(pn - d2, d2$EXTRAPARAMS); + if (d1 > 1) + { + /* Iterate rather than recurse to save stack space */ + /* qsort_$SUFFIX(a, d1$EXTRAPARAMS); */ + n = d1; + goto loop; + } } -/* qsort_$SUFFIX(pn - r, r$EXTRAPARAMS);*/ } - EOM } diff --git a/src/port/qsort.c b/src/port/qsort.c index fa35b1b15387d..1a8ee08c8beea 100644 --- a/src/port/qsort.c +++ b/src/port/qsort.c @@ -6,6 +6,7 @@ * Remove __inline, _DIAGASSERTs, __P * Remove ill-considered "swap_cnt" switch to insertion sort, * in favor of a simple check for presorted input. + * Take care to recurse on the smaller partition, to bound stack usage. * * CAUTION: if you change this file, see also qsort_arg.c, gen_qsort_tuple.pl * @@ -54,9 +55,18 @@ static void swapfunc(char *, char *, size_t, int); * Qsort routine based on J. L. Bentley and M. D. McIlroy, * "Engineering a sort function", * Software--Practice and Experience 23 (1993) 1249-1265. + * * We have modified their original by adding a check for already-sorted input, * which seems to be a win per discussions on pgsql-hackers around 2006-03-21. + * + * Also, we recurse on the smaller partition and iterate on the larger one, + * which ensures we cannot recurse more than log(N) levels (since the + * partition recursed to is surely no more than half of the input). Bentley + * and McIlroy explicitly rejected doing this on the grounds that it's "not + * worth the effort", but we have seen crashes in the field due to stack + * overrun, so that judgment seems wrong. 
*/ + #define swapcode(TYPE, parmi, parmj, n) \ do { \ size_t i = (n) / sizeof (TYPE); \ @@ -89,7 +99,7 @@ swapfunc(char *a, char *b, size_t n, int swaptype) } else \ swapfunc(a, b, es, swaptype) -#define vecswap(a, b, n) if ((n) > 0) swapfunc((a), (b), (size_t)(n), swaptype) +#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype) static char * med3(char *a, char *b, char *c, int (*cmp) (const void *, const void *)) @@ -109,8 +119,9 @@ pg_qsort(void *a, size_t n, size_t es, int (*cmp) (const void *, const void *)) *pl, *pm, *pn; - int d, - r, + size_t d1, + d2; + int r, swaptype, presorted; @@ -141,7 +152,8 @@ loop:SWAPINIT(a, es); pn = (char *) a + (n - 1) * es; if (n > 40) { - d = (n / 8) * es; + size_t d = (n / 8) * es; + pl = med3(pl, pl + d, pl + 2 * d, cmp); pm = med3(pm - d, pm, pm + d, cmp); pn = med3(pn - 2 * d, pn - d, pn, cmp); @@ -178,27 +190,46 @@ loop:SWAPINIT(a, es); pc -= es; } pn = (char *) a + n * es; - r = Min(pa - (char *) a, pb - pa); - vecswap(a, pb - r, r); - r = Min(pd - pc, pn - pd - es); - vecswap(pb, pn - r, r); - if ((r = pb - pa) > es) - qsort(a, r / es, es, cmp); - if ((r = pd - pc) > es) + d1 = Min(pa - (char *) a, pb - pa); + vecswap(a, pb - d1, d1); + d1 = Min(pd - pc, pn - pd - es); + vecswap(pb, pn - d1, d1); + d1 = pb - pa; + d2 = pd - pc; + if (d1 <= d2) { - /* Iterate rather than recurse to save stack space */ - a = pn - r; - n = r / es; - goto loop; + /* Recurse on left partition, then iterate on right partition */ + if (d1 > es) + pg_qsort(a, d1 / es, es, cmp); + if (d2 > es) + { + /* Iterate rather than recurse to save stack space */ + /* pg_qsort(pn - d2, d2 / es, es, cmp); */ + a = pn - d2; + n = d2 / es; + goto loop; + } + } + else + { + /* Recurse on right partition, then iterate on left partition */ + if (d2 > es) + pg_qsort(pn - d2, d2 / es, es, cmp); + if (d1 > es) + { + /* Iterate rather than recurse to save stack space */ + /* pg_qsort(a, d1 / es, es, cmp); */ + n = d1 / es; + goto loop; + } } -/* qsort(pn - r, r / es, es, cmp);*/ } /* - * qsort wrapper for strcmp. + * qsort comparator wrapper for strcmp. */ int pg_qsort_strcmp(const void *a, const void *b) { - return strcmp(*(char *const *) a, *(char *const *) b); + return strcmp(*(const char *const *) a, *(const char *const *) b); } diff --git a/src/port/qsort_arg.c b/src/port/qsort_arg.c index c0aee733be5f6..24acd2cd4e4a4 100644 --- a/src/port/qsort_arg.c +++ b/src/port/qsort_arg.c @@ -6,6 +6,7 @@ * Remove __inline, _DIAGASSERTs, __P * Remove ill-considered "swap_cnt" switch to insertion sort, * in favor of a simple check for presorted input. + * Take care to recurse on the smaller partition, to bound stack usage. * * CAUTION: if you change this file, see also qsort.c, gen_qsort_tuple.pl * @@ -54,9 +55,18 @@ static void swapfunc(char *, char *, size_t, int); * Qsort routine based on J. L. Bentley and M. D. McIlroy, * "Engineering a sort function", * Software--Practice and Experience 23 (1993) 1249-1265. + * * We have modified their original by adding a check for already-sorted input, * which seems to be a win per discussions on pgsql-hackers around 2006-03-21. + * + * Also, we recurse on the smaller partition and iterate on the larger one, + * which ensures we cannot recurse more than log(N) levels (since the + * partition recursed to is surely no more than half of the input). Bentley + * and McIlroy explicitly rejected doing this on the grounds that it's "not + * worth the effort", but we have seen crashes in the field due to stack + * overrun, so that judgment seems wrong. 
*/ + #define swapcode(TYPE, parmi, parmj, n) \ do { \ size_t i = (n) / sizeof (TYPE); \ @@ -89,7 +99,7 @@ swapfunc(char *a, char *b, size_t n, int swaptype) } else \ swapfunc(a, b, es, swaptype) -#define vecswap(a, b, n) if ((n) > 0) swapfunc((a), (b), (size_t)(n), swaptype) +#define vecswap(a, b, n) if ((n) > 0) swapfunc(a, b, n, swaptype) static char * med3(char *a, char *b, char *c, qsort_arg_comparator cmp, void *arg) @@ -109,8 +119,9 @@ qsort_arg(void *a, size_t n, size_t es, qsort_arg_comparator cmp, void *arg) *pl, *pm, *pn; - int d, - r, + size_t d1, + d2; + int r, swaptype, presorted; @@ -141,7 +152,8 @@ loop:SWAPINIT(a, es); pn = (char *) a + (n - 1) * es; if (n > 40) { - d = (n / 8) * es; + size_t d = (n / 8) * es; + pl = med3(pl, pl + d, pl + 2 * d, cmp, arg); pm = med3(pm - d, pm, pm + d, cmp, arg); pn = med3(pn - 2 * d, pn - d, pn, cmp, arg); @@ -178,18 +190,37 @@ loop:SWAPINIT(a, es); pc -= es; } pn = (char *) a + n * es; - r = Min(pa - (char *) a, pb - pa); - vecswap(a, pb - r, r); - r = Min(pd - pc, pn - pd - es); - vecswap(pb, pn - r, r); - if ((r = pb - pa) > es) - qsort_arg(a, r / es, es, cmp, arg); - if ((r = pd - pc) > es) + d1 = Min(pa - (char *) a, pb - pa); + vecswap(a, pb - d1, d1); + d1 = Min(pd - pc, pn - pd - es); + vecswap(pb, pn - d1, d1); + d1 = pb - pa; + d2 = pd - pc; + if (d1 <= d2) { - /* Iterate rather than recurse to save stack space */ - a = pn - r; - n = r / es; - goto loop; + /* Recurse on left partition, then iterate on right partition */ + if (d1 > es) + qsort_arg(a, d1 / es, es, cmp, arg); + if (d2 > es) + { + /* Iterate rather than recurse to save stack space */ + /* qsort_arg(pn - d2, d2 / es, es, cmp, arg); */ + a = pn - d2; + n = d2 / es; + goto loop; + } + } + else + { + /* Recurse on right partition, then iterate on left partition */ + if (d2 > es) + qsort_arg(pn - d2, d2 / es, es, cmp, arg); + if (d1 > es) + { + /* Iterate rather than recurse to save stack space */ + /* qsort_arg(a, d1 / es, es, cmp, arg); */ + n = d1 / es; + goto loop; + } } -/* qsort_arg(pn - r, r / es, es, cmp, arg);*/ } From eb3b93b534e153b0e71ce17a2f48126e3a772167 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Fri, 17 Jul 2015 03:01:14 -0400 Subject: [PATCH 061/442] AIX: Test the -qlonglong option before use. xlc provides "long long" unconditionally at C99-compatible language levels, and this option provokes a warning. The warning interferes with "configure" tests that fail in response to any warning. Notably, before commit 85a2a8903f7e9151793308d0638621003aded5ae, it interfered with the test for -qnoansialias. Back-patch to 9.0 (all supported versions). --- configure | 35 +++++++++++++++++++++++++++++++++++ configure.in | 1 + src/template/aix | 2 +- 3 files changed, 37 insertions(+), 1 deletion(-) diff --git a/configure b/configure index 38cec0fe70c11..d417b507b0c9b 100755 --- a/configure +++ b/configure @@ -4876,6 +4876,41 @@ if test x"$pgac_cv_prog_cc_cflags__qnoansialias" = x"yes"; then CFLAGS="$CFLAGS -qnoansialias" fi + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC supports -qlonglong" >&5 +$as_echo_n "checking whether $CC supports -qlonglong... " >&6; } +if ${pgac_cv_prog_cc_cflags__qlonglong+:} false; then : + $as_echo_n "(cached) " >&6 +else + pgac_save_CFLAGS=$CFLAGS +CFLAGS="$pgac_save_CFLAGS -qlonglong" +ac_save_c_werror_flag=$ac_c_werror_flag +ac_c_werror_flag=yes +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + pgac_cv_prog_cc_cflags__qlonglong=yes +else + pgac_cv_prog_cc_cflags__qlonglong=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_c_werror_flag=$ac_save_c_werror_flag +CFLAGS="$pgac_save_CFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $pgac_cv_prog_cc_cflags__qlonglong" >&5 +$as_echo "$pgac_cv_prog_cc_cflags__qlonglong" >&6; } +if test x"$pgac_cv_prog_cc_cflags__qlonglong" = x"yes"; then + CFLAGS="$CFLAGS -qlonglong" +fi + elif test "$PORTNAME" = "hpux"; then # On some versions of HP-UX, libm functions do not set errno by default. # Fix that by using +Olibmerrno if the compiler recognizes it. diff --git a/configure.in b/configure.in index 143e667ce27eb..99882414478aa 100644 --- a/configure.in +++ b/configure.in @@ -461,6 +461,7 @@ elif test "$ICC" = yes; then elif test "$PORTNAME" = "aix"; then # AIX's xlc has to have strict aliasing turned off too PGAC_PROG_CC_CFLAGS_OPT([-qnoansialias]) + PGAC_PROG_CC_CFLAGS_OPT([-qlonglong]) elif test "$PORTNAME" = "hpux"; then # On some versions of HP-UX, libm functions do not set errno by default. # Fix that by using +Olibmerrno if the compiler recognizes it. diff --git a/src/template/aix b/src/template/aix index 04c97e7bd14e4..b566ff129df60 100644 --- a/src/template/aix +++ b/src/template/aix @@ -7,7 +7,7 @@ if test "$GCC" != yes ; then CFLAGS="-O -qmaxmem=16384 -qsrcmsg" ;; *) - CFLAGS="-O2 -qmaxmem=16384 -qsrcmsg -qlonglong" + CFLAGS="-O2 -qmaxmem=16384 -qsrcmsg" ;; esac fi From 9a5f369adc734e0a8d45192d1b790a6849a391dd Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 17 Jul 2015 15:53:10 -0400 Subject: [PATCH 062/442] Repair mishandling of cached cast-expression trees in plpgsql. In commit 1345cc67bbb014209714af32b5681b1e11eaf964, I introduced caching of expressions representing type-cast operations into plpgsql. However, I supposed that I could cache both the expression trees and the evaluation state trees derived from them for the life of the session. This doesn't work, because we execute the expressions in plpgsql's simple_eval_estate, which has an ecxt_per_query_memory that is only transaction-lifespan. Therefore we can end up putting pointers into the evaluation state tree that point to transaction-lifespan memory; in particular this happens if the cast expression calls a SQL-language function, as reported by Geoff Winkless. The minimum-risk fix seems to be to treat the state trees the same way we do for "simple expression" trees in plpgsql, ie create them in the simple_eval_estate's ecxt_per_query_memory, which means recreating them once per transaction. Since I had to introduce bookkeeping overhead for that anyway, I bought back some of the added cost by sharing the read-only expression trees across all functions in the session, instead of using a per-function table as originally. The simple-expression bookkeeping takes care of the recursive-usage risk that I was concerned about avoiding before. At some point we should take a harder look at how all this works, and see if we can't reduce the amount of tree reinitialization needed. But that won't happen for 9.5. 
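The bookkeeping pattern this relies on (cache the compiled form for the life of the session, but rebuild the executable form once per transaction and whenever the entry is already busy) can be sketched in isolation as follows. This is a deliberately simplified, hypothetical model: cache_entry, txid_t, current_txid() and build_exec_state() are invented stand-ins. The real code keys a session-wide hash table on (srctype, dsttype, srctypmod, dsttypmod), builds its ExprState trees in the shared simple_eval_estate, and compares against MyProc->lxid.

#include <stdbool.h>
#include <stdint.h>

typedef uint64_t txid_t;        /* stand-in for LocalTransactionId */

typedef struct cache_entry
{
    void       *plan;           /* compiled form, survives for the session */
    void       *exec_state;     /* executable form, valid in one txn only */
    bool        in_use;         /* true while exec_state is being executed */
    txid_t      built_in_txid;  /* transaction in which exec_state was built */
} cache_entry;

/* Toy stand-ins for the surrounding system; purely illustrative. */
static txid_t
current_txid(void)
{
    static txid_t counter = 1;

    return counter;
}

static void *
build_exec_state(void *plan)
{
    /* the real code allocates this in transaction-lifespan memory */
    return plan;
}

/*
 * Return an entry whose exec_state is safe to run in the current
 * transaction, rebuilding it if it is stale or already busy (for example,
 * a recursive use of the same cast higher up the call stack).
 */
cache_entry *
validate_entry(cache_entry *entry)
{
    txid_t      cur = current_txid();

    if (entry->built_in_txid != cur || entry->in_use)
    {
        entry->exec_state = build_exec_state(entry->plan);
        entry->in_use = false;
        entry->built_in_txid = cur;
    }
    return entry;
}

Callers set in_use for the duration of execution (cast_in_use in the patch) and clear it afterwards, so a recursive invocation that reaches the same entry mid-execution abandons the busy tree and builds a fresh one rather than re-entering it, at worst leaking a little intra-transaction memory, as the patch's comments note.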
--- src/pl/plpgsql/src/pl_exec.c | 290 +++++++++++++++----------- src/pl/plpgsql/src/plpgsql.h | 4 - src/test/regress/expected/plpgsql.out | 62 ++++++ src/test/regress/sql/plpgsql.sql | 30 +++ 4 files changed, 262 insertions(+), 124 deletions(-) diff --git a/src/pl/plpgsql/src/pl_exec.c b/src/pl/plpgsql/src/pl_exec.c index 79dd6a22fcee4..e7ba0f1250e1b 100644 --- a/src/pl/plpgsql/src/pl_exec.c +++ b/src/pl/plpgsql/src/pl_exec.c @@ -53,21 +53,6 @@ typedef struct bool *freevals; /* which arguments are pfree-able */ } PreparedParamsData; -typedef struct -{ - /* NB: we assume this struct contains no padding bytes */ - Oid srctype; /* source type for cast */ - Oid dsttype; /* destination type for cast */ - int32 srctypmod; /* source typmod for cast */ - int32 dsttypmod; /* destination typmod for cast */ -} plpgsql_CastHashKey; - -typedef struct -{ - plpgsql_CastHashKey key; /* hash key --- MUST BE FIRST */ - ExprState *cast_exprstate; /* cast expression, or NULL if no-op cast */ -} plpgsql_CastHashEntry; - /* * All plpgsql function executions within a single transaction share the same * executor EState for evaluating "simple" expressions. Each function call @@ -104,6 +89,38 @@ typedef struct SimpleEcontextStackEntry static EState *shared_simple_eval_estate = NULL; static SimpleEcontextStackEntry *simple_econtext_stack = NULL; +/* + * We use a session-wide hash table for caching cast information. + * + * Once built, the compiled expression trees (cast_expr fields) survive for + * the life of the session. At some point it might be worth invalidating + * those after pg_cast changes, but for the moment we don't bother. + * + * The evaluation state trees (cast_exprstate) are managed in the same way as + * simple expressions (i.e., we assume cast expressions are always simple). 
+ */ +typedef struct /* lookup key for cast info */ +{ + /* NB: we assume this struct contains no padding bytes */ + Oid srctype; /* source type for cast */ + Oid dsttype; /* destination type for cast */ + int32 srctypmod; /* source typmod for cast */ + int32 dsttypmod; /* destination typmod for cast */ +} plpgsql_CastHashKey; + +typedef struct /* cast_hash table entry */ +{ + plpgsql_CastHashKey key; /* hash key --- MUST BE FIRST */ + Expr *cast_expr; /* cast expression, or NULL if no-op cast */ + /* The ExprState tree is valid only when cast_lxid matches current LXID */ + ExprState *cast_exprstate; /* expression's eval tree */ + bool cast_in_use; /* true while we're executing eval tree */ + LocalTransactionId cast_lxid; +} plpgsql_CastHashEntry; + +static MemoryContext cast_hash_context = NULL; +static HTAB *cast_hash = NULL; + /************************************************************ * Local function forward declarations ************************************************************/ @@ -236,8 +253,9 @@ static Datum exec_cast_value(PLpgSQL_execstate *estate, Datum value, bool *isnull, Oid valtype, int32 valtypmod, Oid reqtype, int32 reqtypmod); -static ExprState *get_cast_expression(PLpgSQL_execstate *estate, - Oid srctype, int32 srctypmod, Oid dsttype, int32 dsttypmod); +static plpgsql_CastHashEntry *get_cast_hashentry(PLpgSQL_execstate *estate, + Oid srctype, int32 srctypmod, + Oid dsttype, int32 dsttypmod); static void exec_init_tuple_store(PLpgSQL_execstate *estate); static void exec_set_found(PLpgSQL_execstate *estate, bool state); static void plpgsql_create_econtext(PLpgSQL_execstate *estate); @@ -5946,12 +5964,12 @@ exec_cast_value(PLpgSQL_execstate *estate, if (valtype != reqtype || (valtypmod != reqtypmod && reqtypmod != -1)) { - ExprState *cast_expr; + plpgsql_CastHashEntry *cast_entry; - cast_expr = get_cast_expression(estate, + cast_entry = get_cast_hashentry(estate, valtype, valtypmod, reqtype, reqtypmod); - if (cast_expr) + if (cast_entry) { ExprContext *econtext = estate->eval_econtext; MemoryContext oldcontext; @@ -5961,7 +5979,12 @@ exec_cast_value(PLpgSQL_execstate *estate, econtext->caseValue_datum = value; econtext->caseValue_isNull = *isnull; - value = ExecEvalExpr(cast_expr, econtext, isnull, NULL); + cast_entry->cast_in_use = true; + + value = ExecEvalExpr(cast_entry->cast_exprstate, econtext, + isnull, NULL); + + cast_entry->cast_in_use = false; MemoryContextSwitchTo(oldcontext); } @@ -5971,46 +5994,44 @@ exec_cast_value(PLpgSQL_execstate *estate, } /* ---------- - * get_cast_expression Look up how to perform a type cast - * - * Returns an expression evaluation tree based on a CaseTestExpr input, - * or NULL if the cast is a mere no-op relabeling. + * get_cast_hashentry Look up how to perform a type cast * - * We cache the results of the lookup in a per-function hash table. - * It's tempting to consider using a session-wide hash table instead, - * but that introduces some corner-case questions that probably aren't - * worth dealing with; in particular that re-entrant use of an evaluation - * tree might occur. That would also set in stone the assumption that - * collation isn't important to a cast function. + * Returns a plpgsql_CastHashEntry if an expression has to be evaluated, + * or NULL if the cast is a mere no-op relabeling. If there's work to be + * done, the cast_exprstate field contains an expression evaluation tree + * based on a CaseTestExpr input, and the cast_in_use field should be set + * TRUE while executing it. 
* ---------- */ -static ExprState * -get_cast_expression(PLpgSQL_execstate *estate, - Oid srctype, int32 srctypmod, Oid dsttype, int32 dsttypmod) +static plpgsql_CastHashEntry * +get_cast_hashentry(PLpgSQL_execstate *estate, + Oid srctype, int32 srctypmod, + Oid dsttype, int32 dsttypmod) { - HTAB *cast_hash = estate->func->cast_hash; plpgsql_CastHashKey cast_key; plpgsql_CastHashEntry *cast_entry; bool found; - CaseTestExpr *placeholder; - Node *cast_expr; - ExprState *cast_exprstate; + LocalTransactionId curlxid; MemoryContext oldcontext; - /* Create the cast-info hash table if we didn't already */ + /* Create the session-wide cast-info hash table if we didn't already */ if (cast_hash == NULL) { HASHCTL ctl; + cast_hash_context = AllocSetContextCreate(TopMemoryContext, + "PLpgSQL cast info", + ALLOCSET_DEFAULT_MINSIZE, + ALLOCSET_DEFAULT_INITSIZE, + ALLOCSET_DEFAULT_MAXSIZE); memset(&ctl, 0, sizeof(ctl)); ctl.keysize = sizeof(plpgsql_CastHashKey); ctl.entrysize = sizeof(plpgsql_CastHashEntry); - ctl.hcxt = estate->func->fn_cxt; + ctl.hcxt = cast_hash_context; cast_hash = hash_create("PLpgSQL cast cache", 16, /* start small and extend */ &ctl, HASH_ELEM | HASH_BLOBS | HASH_CONTEXT); - estate->func->cast_hash = cast_hash; } /* Look for existing entry */ @@ -6021,102 +6042,131 @@ get_cast_expression(PLpgSQL_execstate *estate, cast_entry = (plpgsql_CastHashEntry *) hash_search(cast_hash, (void *) &cast_key, HASH_FIND, NULL); - if (cast_entry) - return cast_entry->cast_exprstate; - /* Construct expression tree for coercion in function's context */ - oldcontext = MemoryContextSwitchTo(estate->func->fn_cxt); + if (cast_entry == NULL) + { + /* We've not looked up this coercion before */ + Node *cast_expr; + CaseTestExpr *placeholder; - /* - * We use a CaseTestExpr as the base of the coercion tree, since it's very - * cheap to insert the source value for that. - */ - placeholder = makeNode(CaseTestExpr); - placeholder->typeId = srctype; - placeholder->typeMod = srctypmod; - placeholder->collation = get_typcollation(srctype); - if (OidIsValid(estate->func->fn_input_collation) && - OidIsValid(placeholder->collation)) - placeholder->collation = estate->func->fn_input_collation; + /* + * Since we could easily fail (no such coercion), construct a + * temporary coercion expression tree in a short-lived context, then + * if successful copy it to cast_hash_context. + */ + oldcontext = MemoryContextSwitchTo(estate->eval_econtext->ecxt_per_tuple_memory); - /* - * Apply coercion. We use ASSIGNMENT coercion because that's the closest - * match to plpgsql's historical behavior; in particular, EXPLICIT - * coercion would allow silent truncation to a destination - * varchar/bpchar's length, which we do not want. - * - * If source type is UNKNOWN, coerce_to_target_type will fail (it only - * expects to see that for Const input nodes), so don't call it; we'll - * apply CoerceViaIO instead. Likewise, it doesn't currently work for - * coercing RECORD to some other type, so skip for that too. - */ - if (srctype == UNKNOWNOID || srctype == RECORDOID) - cast_expr = NULL; - else - cast_expr = coerce_to_target_type(NULL, - (Node *) placeholder, srctype, - dsttype, dsttypmod, - COERCION_ASSIGNMENT, - COERCE_IMPLICIT_CAST, - -1); + /* + * We use a CaseTestExpr as the base of the coercion tree, since it's + * very cheap to insert the source value for that. 
+ */ + placeholder = makeNode(CaseTestExpr); + placeholder->typeId = srctype; + placeholder->typeMod = srctypmod; + placeholder->collation = get_typcollation(srctype); - /* - * If there's no cast path according to the parser, fall back to using an - * I/O coercion; this is semantically dubious but matches plpgsql's - * historical behavior. We would need something of the sort for UNKNOWN - * literals in any case. - */ - if (cast_expr == NULL) - { - CoerceViaIO *iocoerce = makeNode(CoerceViaIO); - - iocoerce->arg = (Expr *) placeholder; - iocoerce->resulttype = dsttype; - iocoerce->resultcollid = InvalidOid; - iocoerce->coerceformat = COERCE_IMPLICIT_CAST; - iocoerce->location = -1; - cast_expr = (Node *) iocoerce; - if (dsttypmod != -1) + /* + * Apply coercion. We use ASSIGNMENT coercion because that's the + * closest match to plpgsql's historical behavior; in particular, + * EXPLICIT coercion would allow silent truncation to a destination + * varchar/bpchar's length, which we do not want. + * + * If source type is UNKNOWN, coerce_to_target_type will fail (it only + * expects to see that for Const input nodes), so don't call it; we'll + * apply CoerceViaIO instead. Likewise, it doesn't currently work for + * coercing RECORD to some other type, so skip for that too. + */ + if (srctype == UNKNOWNOID || srctype == RECORDOID) + cast_expr = NULL; + else cast_expr = coerce_to_target_type(NULL, - cast_expr, dsttype, + (Node *) placeholder, srctype, dsttype, dsttypmod, COERCION_ASSIGNMENT, COERCE_IMPLICIT_CAST, -1); - } - /* Note: we don't bother labeling the expression tree with collation */ + /* + * If there's no cast path according to the parser, fall back to using + * an I/O coercion; this is semantically dubious but matches plpgsql's + * historical behavior. We would need something of the sort for + * UNKNOWN literals in any case. + */ + if (cast_expr == NULL) + { + CoerceViaIO *iocoerce = makeNode(CoerceViaIO); + + iocoerce->arg = (Expr *) placeholder; + iocoerce->resulttype = dsttype; + iocoerce->resultcollid = InvalidOid; + iocoerce->coerceformat = COERCE_IMPLICIT_CAST; + iocoerce->location = -1; + cast_expr = (Node *) iocoerce; + if (dsttypmod != -1) + cast_expr = coerce_to_target_type(NULL, + cast_expr, dsttype, + dsttype, dsttypmod, + COERCION_ASSIGNMENT, + COERCE_IMPLICIT_CAST, + -1); + } + + /* Note: we don't bother labeling the expression tree with collation */ - /* Detect whether we have a no-op (RelabelType) coercion */ - if (IsA(cast_expr, RelabelType) && - ((RelabelType *) cast_expr)->arg == (Expr *) placeholder) - cast_expr = NULL; + /* Detect whether we have a no-op (RelabelType) coercion */ + if (IsA(cast_expr, RelabelType) && + ((RelabelType *) cast_expr)->arg == (Expr *) placeholder) + cast_expr = NULL; - if (cast_expr) - { - /* ExecInitExpr assumes we've planned the expression */ - cast_expr = (Node *) expression_planner((Expr *) cast_expr); - /* Create an expression eval state tree for it */ - cast_exprstate = ExecInitExpr((Expr *) cast_expr, NULL); + if (cast_expr) + { + /* ExecInitExpr assumes we've planned the expression */ + cast_expr = (Node *) expression_planner((Expr *) cast_expr); + + /* Now copy the tree into cast_hash_context */ + MemoryContextSwitchTo(cast_hash_context); + + cast_expr = copyObject(cast_expr); + } + + MemoryContextSwitchTo(oldcontext); + + /* Now we can fill in a hashtable entry. 
*/ + cast_entry = (plpgsql_CastHashEntry *) hash_search(cast_hash, + (void *) &cast_key, + HASH_ENTER, &found); + Assert(!found); /* wasn't there a moment ago */ + cast_entry->cast_expr = (Expr *) cast_expr; + cast_entry->cast_exprstate = NULL; + cast_entry->cast_in_use = false; + cast_entry->cast_lxid = InvalidLocalTransactionId; } - else - cast_exprstate = NULL; - MemoryContextSwitchTo(oldcontext); + /* Done if we have determined that this is a no-op cast. */ + if (cast_entry->cast_expr == NULL) + return NULL; /* - * Now fill in a hashtable entry. If we fail anywhere up to/including - * this step, we've only leaked some memory in the function context, which - * isn't great but isn't disastrous either. - */ - cast_entry = (plpgsql_CastHashEntry *) hash_search(cast_hash, - (void *) &cast_key, - HASH_ENTER, &found); - Assert(!found); /* wasn't there a moment ago */ - - cast_entry->cast_exprstate = cast_exprstate; + * Prepare the expression for execution, if it's not been done already in + * the current transaction; also, if it's marked busy in the current + * transaction, abandon that expression tree and build a new one, so as to + * avoid potential problems with recursive cast expressions and failed + * executions. (We will leak some memory intra-transaction if that + * happens a lot, but we don't expect it to.) It's okay to update the + * hash table with the new tree because all plpgsql functions within a + * given transaction share the same simple_eval_estate. + */ + curlxid = MyProc->lxid; + if (cast_entry->cast_lxid != curlxid || cast_entry->cast_in_use) + { + oldcontext = MemoryContextSwitchTo(estate->simple_eval_estate->es_query_cxt); + cast_entry->cast_exprstate = ExecInitExpr(cast_entry->cast_expr, NULL); + cast_entry->cast_in_use = false; + cast_entry->cast_lxid = curlxid; + MemoryContextSwitchTo(oldcontext); + } - return cast_exprstate; + return cast_entry; } /* ---------- diff --git a/src/pl/plpgsql/src/plpgsql.h b/src/pl/plpgsql/src/plpgsql.h index 93c2504641fbd..3502f210060b4 100644 --- a/src/pl/plpgsql/src/plpgsql.h +++ b/src/pl/plpgsql/src/plpgsql.h @@ -22,7 +22,6 @@ #include "commands/event_trigger.h" #include "commands/trigger.h" #include "executor/spi.h" -#include "utils/hsearch.h" /********************************************************************** * Definitions @@ -756,9 +755,6 @@ typedef struct PLpgSQL_function PLpgSQL_datum **datums; PLpgSQL_stmt_block *action; - /* table for performing casts needed in this function */ - HTAB *cast_hash; - /* these fields change when the function is used */ struct PLpgSQL_execstate *cur_estate; unsigned long use_count; diff --git a/src/test/regress/expected/plpgsql.out b/src/test/regress/expected/plpgsql.out index 7ce5415f053a0..31182db705e58 100644 --- a/src/test/regress/expected/plpgsql.out +++ b/src/test/regress/expected/plpgsql.out @@ -4702,6 +4702,68 @@ select error2('public.stuffs'); rollback; drop function error2(p_name_table text); drop function error1(text); +-- Test for proper handling of cast-expression caching +create function sql_to_date(integer) returns date as $$ +select $1::text::date +$$ language sql immutable strict; +create cast (integer as date) with function sql_to_date(integer) as assignment; +create function cast_invoker(integer) returns date as $$ +begin + return $1; +end$$ language plpgsql; +select cast_invoker(20150717); + cast_invoker +-------------- + 07-17-2015 +(1 row) + +select cast_invoker(20150718); -- second call crashed in pre-release 9.5 + cast_invoker +-------------- + 07-18-2015 +(1 row) + 
+begin; +select cast_invoker(20150717); + cast_invoker +-------------- + 07-17-2015 +(1 row) + +select cast_invoker(20150718); + cast_invoker +-------------- + 07-18-2015 +(1 row) + +savepoint s1; +select cast_invoker(20150718); + cast_invoker +-------------- + 07-18-2015 +(1 row) + +select cast_invoker(-1); -- fails +ERROR: invalid input syntax for type date: "-1" +CONTEXT: SQL function "sql_to_date" statement 1 +PL/pgSQL function cast_invoker(integer) while casting return value to function's return type +rollback to savepoint s1; +select cast_invoker(20150719); + cast_invoker +-------------- + 07-19-2015 +(1 row) + +select cast_invoker(20150720); + cast_invoker +-------------- + 07-20-2015 +(1 row) + +commit; +drop function cast_invoker(integer); +drop function sql_to_date(integer) cascade; +NOTICE: drop cascades to cast from integer to date -- Test for consistent reporting of error context create function fail() returns int language plpgsql as $$ begin diff --git a/src/test/regress/sql/plpgsql.sql b/src/test/regress/sql/plpgsql.sql index aaf3e8479fef5..b697c9a935e34 100644 --- a/src/test/regress/sql/plpgsql.sql +++ b/src/test/regress/sql/plpgsql.sql @@ -3806,6 +3806,36 @@ rollback; drop function error2(p_name_table text); drop function error1(text); +-- Test for proper handling of cast-expression caching + +create function sql_to_date(integer) returns date as $$ +select $1::text::date +$$ language sql immutable strict; + +create cast (integer as date) with function sql_to_date(integer) as assignment; + +create function cast_invoker(integer) returns date as $$ +begin + return $1; +end$$ language plpgsql; + +select cast_invoker(20150717); +select cast_invoker(20150718); -- second call crashed in pre-release 9.5 + +begin; +select cast_invoker(20150717); +select cast_invoker(20150718); +savepoint s1; +select cast_invoker(20150718); +select cast_invoker(-1); -- fails +rollback to savepoint s1; +select cast_invoker(20150719); +select cast_invoker(20150720); +commit; + +drop function cast_invoker(integer); +drop function sql_to_date(integer) cascade; + -- Test for consistent reporting of error context create function fail() returns int language plpgsql as $$ From 89ddd29bbd70c31652c6e7a179473753b89a3cac Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Fri, 17 Jul 2015 20:56:13 -0400 Subject: [PATCH 063/442] Support JSON negative array subscripts everywhere Previously, there was an inconsistency across json/jsonb operators that operate on datums containing JSON arrays -- only some operators supported negative array count-from-the-end subscripting. Specifically, only a new-to-9.5 jsonb deletion operator had support (the new "jsonb - integer" operator). This inconsistency seemed likely to be counter-intuitive to users. To fix, allow all places where the user can supply an integer subscript to accept a negative subscript value, including path-orientated operators and functions, as well as other extraction operators. This will need to be called out as an incompatibility in the 9.5 release notes, since it's possible that users are relying on certain established extraction operators changed here yielding NULL in the event of a negative subscript. For the json type, this requires adding a way of cheaply getting the total JSON array element count ahead of time when parsing arrays with a negative subscript involved, necessitating an ad-hoc lex and parse. This is followed by a "conversion" from a negative subscript to its equivalent positive-wise value using the count. 
From there on, it's as if a positive-wise value was originally provided. Note that there is still a minor inconsistency here across jsonb deletion operators. Unlike the aforementioned new "-" deletion operator that accepts an integer on its right hand side, the new "#-" path orientated deletion variant does not throw an error when it appears like an array subscript (input that could be recognized by as an integer literal) is being used on an object, which is wrong-headed. The reason for not being stricter is that it could be the case that an object pair happens to have a key value that looks like an integer; in general, these two possibilities are impossible to differentiate with rhs path text[] argument elements. However, we still don't allow the "#-" path-orientated deletion operator to perform array-style subscripting. Rather, we just return the original left operand value in the event of a negative subscript (which seems analogous to how the established "jsonb/json #> text[]" path-orientated operator may yield NULL in the event of an invalid subscript). In passing, make SetArrayPath() stricter about not accepting cases where there is trailing non-numeric garbage bytes rather than a clean NUL byte. This means, for example, that strings like "10e10" are now not accepted as an array subscript of 10 by some new-to-9.5 path-orientated jsonb operators (e.g. the new #- operator). Finally, remove dead code for jsonb subscript deletion; arguably, this should have been done in commit b81c7b409. Peter Geoghegan and Andrew Dunstan --- doc/src/sgml/func.sgml | 16 +++-- src/backend/utils/adt/json.c | 39 ++++++++++ src/backend/utils/adt/jsonfuncs.c | 100 +++++++++++++++++++------- src/include/utils/jsonapi.h | 7 ++ src/test/regress/expected/json.out | 14 ++++ src/test/regress/expected/json_1.out | 14 ++++ src/test/regress/expected/jsonb.out | 30 ++++++++ src/test/regress/expected/jsonb_1.out | 30 ++++++++ src/test/regress/sql/json.sql | 5 ++ src/test/regress/sql/jsonb.sql | 5 ++ 10 files changed, 231 insertions(+), 29 deletions(-) diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index 99923f46bcaf2..ef50fa581135b 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -10177,7 +10177,8 @@ table2-mapping -> int - Get JSON array element (indexed from zero) + Get JSON array element (indexed from zero, negative + integers count from the end) '[{"a":"foo"},{"b":"bar"},{"c":"baz"}]'::json->2 {"c":"baz"} @@ -10230,7 +10231,10 @@ table2-mapping returning text, which coerce the value to text. The field/element/path extraction operators return NULL, rather than failing, if the JSON input does not have the right structure to match - the request; for example if no such element exists. + the request; for example if no such element exists. The + field/element/path extraction operators that accept integer JSON + array subscripts all support negative subscripting from the end of + arrays. @@ -10318,7 +10322,8 @@ table2-mapping #- text[] - Delete the field or element with specified path + Delete the field or element with specified path (for + JSON arrays, negative integers count from the end) '["a", {"b":1}]'::jsonb #- '{1,b}' @@ -10858,6 +10863,9 @@ table2-mapping create_missing is true ( default is true) and the item designated by path does not exist. + As with the path orientated operators, negative integers that + appear in path count from the end + of JSON arrays. 
jsonb_set('[{"f1":1,"f2":null},2,null,3]', '{0,f1}','[2,3,4]', false) jsonb_set('[{"f1":1,"f2":null},2]', '{0,f3}','[2,3,4]') @@ -10872,7 +10880,7 @@ table2-mapping text Returns from_json - as indented json text. + as indented JSON text. jsonb_pretty('[{"f1":1,"f2":null},2,null,3]') diff --git a/src/backend/utils/adt/json.c b/src/backend/utils/adt/json.c index 26d384336933c..8d0434767aafe 100644 --- a/src/backend/utils/adt/json.c +++ b/src/backend/utils/adt/json.c @@ -340,6 +340,45 @@ pg_parse_json(JsonLexContext *lex, JsonSemAction *sem) } +/* + * json_count_array_elements + * + * Returns number of array elements in lex context at start of array token + * until end of array token at same nesting level. + * + * Designed to be called from array_start routines. + */ +int +json_count_array_elements(JsonLexContext *lex) +{ + JsonLexContext copylex; + int count; + + /* + * It's safe to do this with a shallow copy because the lexical routines + * don't scribble on the input. They do scribble on the other pointers etc, + * so doing this with a copy makes that safe. + */ + memcpy(©lex, lex, sizeof(JsonLexContext)); + copylex.strval = NULL; /* not interested in values here */ + copylex.lex_level++; + + count = 0; + lex_expect(JSON_PARSE_ARRAY_START, ©lex, JSON_TOKEN_ARRAY_START); + if (lex_peek(©lex) != JSON_TOKEN_ARRAY_END) + { + do + { + count++; + parse_array_element(©lex, &nullSemAction); + } + while (lex_accept(©lex, JSON_TOKEN_COMMA, NULL)); + } + lex_expect(JSON_PARSE_ARRAY_NEXT, ©lex, JSON_TOKEN_ARRAY_END); + + return count; +} + /* * Recursive Descent parse routines. There is one for each structural * element in a json document: diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 13d5b7af2f47d..424280b929eee 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -597,6 +597,17 @@ jsonb_array_element(PG_FUNCTION_ARGS) if (!JB_ROOT_IS_ARRAY(jb)) PG_RETURN_NULL(); + /* Handle negative subscript */ + if (element < 0) + { + uint32 nelements = JB_ROOT_COUNT(jb); + + if (-element > nelements) + PG_RETURN_NULL(); + else + element += nelements; + } + v = getIthJsonbValueFromContainer(&jb->root, element); if (v != NULL) PG_RETURN_JSONB(JsonbValueToJsonb(v)); @@ -629,6 +640,17 @@ jsonb_array_element_text(PG_FUNCTION_ARGS) if (!JB_ROOT_IS_ARRAY(jb)) PG_RETURN_NULL(); + /* Handle negative subscript */ + if (element < 0) + { + uint32 nelements = JB_ROOT_COUNT(jb); + + if (-element > nelements) + PG_RETURN_NULL(); + else + element += nelements; + } + v = getIthJsonbValueFromContainer(&jb->root, element); if (v != NULL) { @@ -719,7 +741,7 @@ get_path_all(FunctionCallInfo fcinfo, bool as_text) /* * we have no idea at this stage what structure the document is so * just convert anything in the path that we can to an integer and set - * all the other integers to -1 which will never match. + * all the other integers to INT_MIN which will never match. 
*/ if (*tpath[i] != '\0') { @@ -728,13 +750,13 @@ get_path_all(FunctionCallInfo fcinfo, bool as_text) errno = 0; ind = strtol(tpath[i], &endptr, 10); - if (*endptr == '\0' && errno == 0 && ind <= INT_MAX && ind >= 0) + if (*endptr == '\0' && errno == 0 && ind <= INT_MAX && ind >= INT_MIN) ipath[i] = (int) ind; else - ipath[i] = -1; + ipath[i] = INT_MIN; } else - ipath[i] = -1; + ipath[i] = INT_MIN; } result = get_worker(json, tpath, ipath, npath, as_text); @@ -752,14 +774,15 @@ get_path_all(FunctionCallInfo fcinfo, bool as_text) * * json: JSON object (in text form) * tpath[]: field name(s) to extract - * ipath[]: array index(es) (zero-based) to extract + * ipath[]: array index(es) (zero-based) to extract, accepts negatives * npath: length of tpath[] and/or ipath[] * normalize_results: true to de-escape string and null scalars * * tpath can be NULL, or any one tpath[] entry can be NULL, if an object * field is not to be matched at that nesting level. Similarly, ipath can - * be NULL, or any one ipath[] entry can be -1, if an array element is not - * to be matched at that nesting level. + * be NULL, or any one ipath[] entry can be INT_MIN if an array element is + * not to be matched at that nesting level (a json datum should never be + * large enough to have -INT_MIN elements due to MaxAllocSize restriction). */ static text * get_worker(text *json, @@ -964,6 +987,17 @@ get_array_start(void *state) */ _state->result_start = _state->lex->token_start; } + + /* INT_MIN value is reserved to represent invalid subscript */ + if (_state->path_indexes[lex_level] < 0 && + _state->path_indexes[lex_level] != INT_MIN) + { + /* Negative subscript -- convert to positive-wise subscript */ + int nelements = json_count_array_elements(_state->lex); + + if (-_state->path_indexes[lex_level] <= nelements) + _state->path_indexes[lex_level] += nelements; + } } static void @@ -1209,9 +1243,30 @@ get_jsonb_path_all(FunctionCallInfo fcinfo, bool as_text) errno = 0; lindex = strtol(indextext, &endptr, 10); if (endptr == indextext || *endptr != '\0' || errno != 0 || - lindex > INT_MAX || lindex < 0) + lindex > INT_MAX || lindex < INT_MIN) PG_RETURN_NULL(); - index = (uint32) lindex; + + if (lindex >= 0) + { + index = (uint32) lindex; + } + else + { + /* Handle negative subscript */ + uint32 nelements; + + /* Container must be array, but make sure */ + if ((container->header & JB_FARRAY) == 0) + elog(ERROR, "not a jsonb array"); + + nelements = container->header & JB_CMASK; + + if (-lindex > nelements) + PG_RETURN_NULL(); + else + index = nelements + lindex; + } + jbvp = getIthJsonbValueFromContainer(container, index); } else @@ -3411,10 +3466,8 @@ jsonb_delete_idx(PG_FUNCTION_ARGS) it = JsonbIteratorInit(&in->root); r = JsonbIteratorNext(&it, &v, false); - if (r == WJB_BEGIN_ARRAY) - n = v.val.array.nElems; - else - n = v.val.object.nPairs; + Assert (r == WJB_BEGIN_ARRAY); + n = v.val.array.nElems; if (idx < 0) { @@ -3431,14 +3484,10 @@ jsonb_delete_idx(PG_FUNCTION_ARGS) while ((r = JsonbIteratorNext(&it, &v, true)) != 0) { - if (r == WJB_ELEM || r == WJB_KEY) + if (r == WJB_ELEM) { if (i++ == idx) - { - if (r == WJB_KEY) - JsonbIteratorNext(&it, &v, true); /* skip value */ continue; - } } res = pushJsonbValue(&state, r, r < WJB_BEGIN_ARRAY ? &v : NULL); @@ -3657,7 +3706,7 @@ IteratorConcat(JsonbIterator **it1, JsonbIterator **it2, * If newval is null, the element is to be removed. * * If create is true, we create the new value if the key or array index - * does not exist. 
All path elemnts before the last must already exist + * does not exist. All path elements before the last must already exist * whether or not create is true, or nothing is done. */ static JsonbValue * @@ -3818,7 +3867,8 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, errno = 0; lindex = strtol(c, &badp, 10); - if (errno != 0 || badp == c || lindex > INT_MAX || lindex < INT_MIN) + if (errno != 0 || badp == c || *badp != '\0' || lindex > INT_MAX || + lindex < INT_MIN) idx = nelems; else idx = lindex; @@ -3829,7 +3879,7 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, if (idx < 0) { if (-idx > nelems) - idx = -1; + idx = INT_MIN; else idx = nelems + idx; } @@ -3838,12 +3888,12 @@ setPathArray(JsonbIterator **it, Datum *path_elems, bool *path_nulls, idx = nelems; /* - * if we're creating, and idx == -1, we prepend the new value to the array - * also if the array is empty - in which case we don't really care what - * the idx value is + * if we're creating, and idx == INT_MIN, we prepend the new value to the + * array also if the array is empty - in which case we don't really care + * what the idx value is */ - if ((idx == -1 || nelems == 0) && create && (level == path_len - 1)) + if ((idx == INT_MIN || nelems == 0) && create && (level == path_len - 1)) { Assert(newval != NULL); addJsonbToParseState(st, newval); diff --git a/src/include/utils/jsonapi.h b/src/include/utils/jsonapi.h index 296d20af838a6..55cfb791fa777 100644 --- a/src/include/utils/jsonapi.h +++ b/src/include/utils/jsonapi.h @@ -103,6 +103,13 @@ typedef struct JsonSemAction */ extern void pg_parse_json(JsonLexContext *lex, JsonSemAction *sem); +/* + * json_count_array_elements performs a fast secondary parse to determine the + * number of elements in passed array lex context. It should be called from an + * array_start action. + */ +extern int json_count_array_elements(JsonLexContext *lex); + /* * constructors for JsonLexContext, with or without strval element. * If supplied, the strval element will contain a de-escaped version of diff --git a/src/test/regress/expected/json.out b/src/test/regress/expected/json.out index 3942c3bee9106..43ca67dddfa31 100644 --- a/src/test/regress/expected/json.out +++ b/src/test/regress/expected/json.out @@ -569,6 +569,14 @@ WHERE json_type = 'array'; "two" (1 row) +SELECT test_json -> -1 +FROM test_json +WHERE json_type = 'array'; + ?column? +---------- + {"f1":9} +(1 row) + SELECT test_json -> 2 FROM test_json WHERE json_type = 'object'; @@ -698,6 +706,12 @@ select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 1; (1 row) +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> -1; + ?column? +---------- + +(1 row) + select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 'z'; ?column? ---------- diff --git a/src/test/regress/expected/json_1.out b/src/test/regress/expected/json_1.out index 38f15262883b0..155f414ea4cb6 100644 --- a/src/test/regress/expected/json_1.out +++ b/src/test/regress/expected/json_1.out @@ -569,6 +569,14 @@ WHERE json_type = 'array'; "two" (1 row) +SELECT test_json -> -1 +FROM test_json +WHERE json_type = 'array'; + ?column? +---------- + {"f1":9} +(1 row) + SELECT test_json -> 2 FROM test_json WHERE json_type = 'object'; @@ -698,6 +706,12 @@ select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 1; (1 row) +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> -1; + ?column? +---------- + +(1 row) + select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 'z'; ?column? 
---------- diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out index 4416d52611f08..0ccc0f7a7957f 100644 --- a/src/test/regress/expected/jsonb.out +++ b/src/test/regress/expected/jsonb.out @@ -2590,6 +2590,18 @@ SELECT '["a","b","c",[1,2],null]'::jsonb -> 5; SELECT '["a","b","c",[1,2],null]'::jsonb -> -1; ?column? ---------- + null +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> -5; + ?column? +---------- + "a" +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> -6; + ?column? +---------- (1 row) @@ -2639,6 +2651,18 @@ SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,3}'; SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-1}'; ?column? ---------- + 3 +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-3}'; + ?column? +---------- + 1 +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-4}'; + ?column? +---------- (1 row) @@ -3121,6 +3145,12 @@ select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{ {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} (1 row) +select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1e}'; -- invalid array subscript + ?column? +--------------------------------------------------------------------- + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} +(1 row) + select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{d,1,0}'; ?column? ------------------------------------------------------------------ diff --git a/src/test/regress/expected/jsonb_1.out b/src/test/regress/expected/jsonb_1.out index 6d67655cf6aad..7b23a99357414 100644 --- a/src/test/regress/expected/jsonb_1.out +++ b/src/test/regress/expected/jsonb_1.out @@ -2590,6 +2590,18 @@ SELECT '["a","b","c",[1,2],null]'::jsonb -> 5; SELECT '["a","b","c",[1,2],null]'::jsonb -> -1; ?column? ---------- + null +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> -5; + ?column? +---------- + "a" +(1 row) + +SELECT '["a","b","c",[1,2],null]'::jsonb -> -6; + ?column? +---------- (1 row) @@ -2639,6 +2651,18 @@ SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,3}'; SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-1}'; ?column? ---------- + 3 +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-3}'; + ?column? +---------- + 1 +(1 row) + +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-4}'; + ?column? +---------- (1 row) @@ -3121,6 +3145,12 @@ select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{ {"a": 1, "b": [1], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} (1 row) +select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1e}'; -- invalid array subscript + ?column? +--------------------------------------------------------------------- + {"a": 1, "b": [1, 2], "c": {"1": 2}, "d": {"1": [2, 3]}, "n": null} +(1 row) + select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{d,1,0}'; ?column? 
------------------------------------------------------------------ diff --git a/src/test/regress/sql/json.sql b/src/test/regress/sql/json.sql index 53832a01fa18f..8c3b73f5b3ed3 100644 --- a/src/test/regress/sql/json.sql +++ b/src/test/regress/sql/json.sql @@ -184,6 +184,10 @@ SELECT test_json -> 2 FROM test_json WHERE json_type = 'array'; +SELECT test_json -> -1 +FROM test_json +WHERE json_type = 'array'; + SELECT test_json -> 2 FROM test_json WHERE json_type = 'object'; @@ -241,6 +245,7 @@ where json_type = 'array'; select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::text; select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> null::int; select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 1; +select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> -1; select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> 'z'; select '{"a": [{"b": "c"}, {"b": "cc"}]}'::json -> ''; select '[{"b": "c"}, {"b": "cc"}]'::json -> 1; diff --git a/src/test/regress/sql/jsonb.sql b/src/test/regress/sql/jsonb.sql index febdeeb7978a0..3d2d8abfc1d93 100644 --- a/src/test/regress/sql/jsonb.sql +++ b/src/test/regress/sql/jsonb.sql @@ -642,6 +642,8 @@ SELECT '["a","b","c",[1,2],null]'::jsonb -> 3 -> 1; SELECT '["a","b","c",[1,2],null]'::jsonb -> 4; SELECT '["a","b","c",[1,2],null]'::jsonb -> 5; SELECT '["a","b","c",[1,2],null]'::jsonb -> -1; +SELECT '["a","b","c",[1,2],null]'::jsonb -> -5; +SELECT '["a","b","c",[1,2],null]'::jsonb -> -6; --nested path extraction SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{0}'; @@ -652,6 +654,8 @@ SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,1}'; SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,2}'; SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,3}'; SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-1}'; +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-3}'; +SELECT '{"a":"b","c":[1,2,3]}'::jsonb #> '{c,-4}'; SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{0}'; SELECT '[0,1,2,[3,4],{"5":"five"}]'::jsonb #> '{3}'; @@ -757,6 +761,7 @@ select jsonb_delete_path('{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2, select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{n}'; select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1}'; +select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{b,-1e}'; -- invalid array subscript select '{"n":null, "a":1, "b":[1,2], "c":{"1":2}, "d":{"1":[2,3]}}'::jsonb #- '{d,1,0}'; From 0beef5af3a4821155251d8d445b9ba1296381645 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Fri, 17 Jul 2015 21:08:03 -0400 Subject: [PATCH 064/442] Release note compatibility item Note that json and jsonb extraction operators no longer consider a negative subscript to be invalid. --- doc/src/sgml/release-9.5.sgml | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/doc/src/sgml/release-9.5.sgml b/doc/src/sgml/release-9.5.sgml index a1010b29b87ce..dff1ba9ae0bbf 100644 --- a/doc/src/sgml/release-9.5.sgml +++ b/doc/src/sgml/release-9.5.sgml @@ -141,6 +141,17 @@ + + + Allow json and jsonb extraction operators to + accept negative subscripts, which count from the end of JSON + arrays. Historically, these operators yielded NULL + in the event of a negative subscript, because negative + subscripts were considered invalid. (Peter Geoghegan, Andrew + Dunstan) + + + From d27fad73e8de2f3f9b68096926735d53d87e7f6a Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Sat, 18 Jul 2015 10:09:04 -0400 Subject: [PATCH 065/442] Enable transforms modules to build and test on Cygwin. 
This still doesn't work correctly with Python 3, but I am committing this so we can get Cygwin buildfarm members building with Python 2. --- contrib/hstore_plperl/Makefile | 4 ++++ contrib/hstore_plpython/Makefile | 5 +++++ contrib/ltree_plpython/Makefile | 5 +++++ 3 files changed, 14 insertions(+) diff --git a/contrib/hstore_plperl/Makefile b/contrib/hstore_plperl/Makefile index d789b99375af0..8f7b171bcd611 100644 --- a/contrib/hstore_plperl/Makefile +++ b/contrib/hstore_plperl/Makefile @@ -35,6 +35,10 @@ override CPPFLAGS += -DPLPERL_HAVE_UID_GID -Wno-comment SHLIB_LINK += ../hstore/libhstore.a $(wildcard ../../src/pl/plperl/libperl*.a) endif +ifeq ($(PORTNAME), cygwin) +SHLIB_LINK += -L../hstore -l hstore $(perl_embed_ldflags) +endif + # As with plperl we need to make sure that the CORE directory is included # last, probably because it sometimes contains some header files with names # that clash with some of ours, or with some that we include, notably on diff --git a/contrib/hstore_plpython/Makefile b/contrib/hstore_plpython/Makefile index 395fc7375368f..2de00a2c43c9c 100644 --- a/contrib/hstore_plpython/Makefile +++ b/contrib/hstore_plpython/Makefile @@ -33,6 +33,11 @@ ifeq ($(PORTNAME), win32) SHLIB_LINK += ../hstore/libhstore.a $(wildcard ../../src/pl/plpython/libpython*.a) $(wildcard ../../src/pl/plpython/libplpython*.a) endif +ifeq ($(PORTNAME), cygwin) +SHLIB_LINK += -L../hstore -lhstore -L../../src/pl/plpython \ + -lplpython$(python_majorversion) $(python_libspec) +endif + REGRESS_OPTS += --load-extension=hstore ifeq ($(python_majorversion),2) REGRESS_OPTS += --load-extension=plpythonu --load-extension=hstore_plpythonu diff --git a/contrib/ltree_plpython/Makefile b/contrib/ltree_plpython/Makefile index 20b0dcfbc5872..7eacb40115944 100644 --- a/contrib/ltree_plpython/Makefile +++ b/contrib/ltree_plpython/Makefile @@ -33,6 +33,11 @@ ifeq ($(PORTNAME), win32) SHLIB_LINK += $(wildcard ../../src/pl/plpython/libpython*.a) $(wildcard ../../src/pl/plpython/libplpython*.a) endif +ifeq ($(PORTNAME), cygwin) +SHLIB_LINK += -L../ltree -lltree -L../../src/pl/plpython \ + -lplpython$(python_majorversion) $(python_libspec) +endif + REGRESS_OPTS += --load-extension=ltree ifeq ($(python_majorversion),2) REGRESS_OPTS += --load-extension=plpythonu --load-extension=ltree_plpythonu From fd735e976cab3a95374c710ff5d2865102e3145a Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 18 Jul 2015 11:47:13 -0400 Subject: [PATCH 066/442] Make WaitLatchOrSocket's timeout detection more robust. In the previous coding, timeout would be noticed and reported only when poll() or socket() returned zero (or the equivalent behavior on Windows). Ordinarily that should work well enough, but it seems conceivable that we could get into a state where poll() always returns a nonzero value --- for example, if it is noticing a condition on one of the file descriptors that we do not think is reason to exit the loop. If that happened, we'd be in a busy-wait loop that would fail to terminate even when the timeout expires. We can make this more robust at essentially no cost, by deciding to exit of our own accord if we compute a zero or negative time-remaining-to-wait. Previously the code noted this but just clamped the time-remaining to zero, expecting that we'd detect timeout on the next loop iteration. Back-patch to 9.2. While 9.1 had a version of WaitLatchOrSocket, it was primitive compared to later versions, and did not guarantee reliable detection of timeouts anyway. 
(Essentially, this is a refinement of commit 3e7fdcffd6f77187, which was back-patched only as far as 9.2.) --- src/backend/port/unix_latch.c | 20 +++++++++++++------- src/backend/port/win32_latch.c | 9 ++++++--- 2 files changed, 19 insertions(+), 10 deletions(-) diff --git a/src/backend/port/unix_latch.c b/src/backend/port/unix_latch.c index 147e22cee4eee..90ec4f81d9e2b 100644 --- a/src/backend/port/unix_latch.c +++ b/src/backend/port/unix_latch.c @@ -460,7 +460,8 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock, result |= WL_SOCKET_WRITEABLE; } if ((wakeEvents & WL_POSTMASTER_DEATH) && - FD_ISSET(postmaster_alive_fds[POSTMASTER_FD_WATCH], &input_mask)) + FD_ISSET(postmaster_alive_fds[POSTMASTER_FD_WATCH], + &input_mask)) { /* * According to the select(2) man page on Linux, select(2) may @@ -479,17 +480,22 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock, #endif /* HAVE_POLL */ /* If we're not done, update cur_timeout for next iteration */ - if (result == 0 && cur_timeout >= 0) + if (result == 0 && (wakeEvents & WL_TIMEOUT)) { INSTR_TIME_SET_CURRENT(cur_time); INSTR_TIME_SUBTRACT(cur_time, start_time); cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time); - if (cur_timeout < 0) - cur_timeout = 0; - + if (cur_timeout <= 0) + { + /* Timeout has expired, no need to continue looping */ + result |= WL_TIMEOUT; + } #ifndef HAVE_POLL - tv.tv_sec = cur_timeout / 1000L; - tv.tv_usec = (cur_timeout % 1000L) * 1000L; + else + { + tv.tv_sec = cur_timeout / 1000L; + tv.tv_usec = (cur_timeout % 1000L) * 1000L; + } #endif } } while (result == 0); diff --git a/src/backend/port/win32_latch.c b/src/backend/port/win32_latch.c index ee9526245fd48..0e3aaeec69eae 100644 --- a/src/backend/port/win32_latch.c +++ b/src/backend/port/win32_latch.c @@ -265,13 +265,16 @@ WaitLatchOrSocket(volatile Latch *latch, int wakeEvents, pgsocket sock, elog(ERROR, "unexpected return code from WaitForMultipleObjects(): %lu", rc); /* If we're not done, update cur_timeout for next iteration */ - if (result == 0 && cur_timeout != INFINITE) + if (result == 0 && (wakeEvents & WL_TIMEOUT)) { INSTR_TIME_SET_CURRENT(cur_time); INSTR_TIME_SUBTRACT(cur_time, start_time); cur_timeout = timeout - (long) INSTR_TIME_GET_MILLISEC(cur_time); - if (cur_timeout < 0) - cur_timeout = 0; + if (cur_timeout <= 0) + { + /* Timeout has expired, no need to continue looping */ + result |= WL_TIMEOUT; + } } } while (result == 0); From 0ed1e572b644e064e480e14d2f6afe03d11638a7 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Sun, 19 Jul 2015 13:19:38 -0400 Subject: [PATCH 067/442] Remove dead code. Defect noticed by Coverity. --- src/backend/utils/adt/jsonfuncs.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 424280b929eee..17e787b60a2ed 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -3480,7 +3480,7 @@ jsonb_delete_idx(PG_FUNCTION_ARGS) if (idx >= n) PG_RETURN_JSONB(in); - pushJsonbValue(&state, r, r < WJB_BEGIN_ARRAY ? &v : NULL); + pushJsonbValue(&state, r, NULL); while ((r = JsonbIteratorNext(&it, &v, true)) != 0) { From e66e31958fc5f5346394099a6481a7949cc1f02a Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 20 Jul 2015 10:19:22 +0300 Subject: [PATCH 068/442] Handle AT_ReAddComment in test_ddl_deparse, and add a catch-all default. 
In the passing, also move AT_ReAddComment to more logical position in the enum, after all the Constraint-related subcommands. This fixes a compiler warning, added by commit e42375fc. Backpatch to 9.5, like that patch. --- src/include/nodes/parsenodes.h | 2 +- src/test/modules/test_ddl_deparse/test_ddl_deparse.c | 5 +++++ 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index a567c50da7279..cd7b19eac5a17 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -1474,7 +1474,6 @@ typedef enum AlterTableType AT_AddConstraint, /* add constraint */ AT_AddConstraintRecurse, /* internal to commands/tablecmds.c */ AT_ReAddConstraint, /* internal to commands/tablecmds.c */ - AT_ReAddComment, /* internal to commands/tablecmds.c */ AT_AlterConstraint, /* alter constraint */ AT_ValidateConstraint, /* validate constraint */ AT_ValidateConstraintRecurse, /* internal to commands/tablecmds.c */ @@ -1483,6 +1482,7 @@ typedef enum AlterTableType AT_AddIndexConstraint, /* add constraint using existing index */ AT_DropConstraint, /* drop constraint */ AT_DropConstraintRecurse, /* internal to commands/tablecmds.c */ + AT_ReAddComment, /* internal to commands/tablecmds.c */ AT_AlterColumnType, /* alter column type */ AT_AlterColumnGenericOptions, /* alter column OPTIONS (...) */ AT_ChangeOwner, /* change owner */ diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c index 44a5cb0277e13..a216e422e9f0d 100644 --- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c +++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c @@ -151,6 +151,9 @@ get_altertable_subcmdtypes(PG_FUNCTION_ARGS) case AT_DropConstraintRecurse: strtype = "DROP CONSTRAINT (and recurse)"; break; + case AT_ReAddComment: + strtype = "(re) ADD COMMENT"; + break; case AT_AlterColumnType: strtype = "ALTER COLUMN SET TYPE"; break; @@ -253,6 +256,8 @@ get_altertable_subcmdtypes(PG_FUNCTION_ARGS) case AT_GenericOptions: strtype = "SET OPTIONS"; break; + default: + strtype = "unrecognized"; } astate = From f1f3434f210450af2e3dab08fbc05a9edd0b67a4 Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Mon, 20 Jul 2015 11:20:40 +0200 Subject: [PATCH 069/442] Add some comments to test_ddl_deparse and a README Per comments from Heikki Linnakangas. Backpatch to 9.5, where this module was introduced. --- src/test/modules/test_ddl_deparse/README | 8 ++++++ .../test_ddl_deparse/test_ddl_deparse.c | 26 +++++++++++++++++++ 2 files changed, 34 insertions(+) create mode 100644 src/test/modules/test_ddl_deparse/README diff --git a/src/test/modules/test_ddl_deparse/README b/src/test/modules/test_ddl_deparse/README new file mode 100644 index 0000000000000..f02640731e8c2 --- /dev/null +++ b/src/test/modules/test_ddl_deparse/README @@ -0,0 +1,8 @@ +test_ddl_deparse is an example of how to use the pg_ddl_command datatype. +It is not intended to do anything useful on its own; rather, it is a +demonstration of how to use the datatype, and to provide some unit tests for +it. + +The functions in this extension are intended to be able to process some +part of the struct and produce some readable output, preferrably handling +all possible cases so that SQL test code can be written. 
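(For illustration only: a minimal sketch of how a module like this is typically exercised from SQL. The trigger and function names below, and the SQL-level signatures of the module's functions, are assumptions on my part rather than text from this patch; pg_event_trigger_ddl_commands() exposes each command as a pg_ddl_command value that the module's inspection functions can consume.)

    -- hypothetical wrapper; assumes the test_ddl_deparse functions are installed
    CREATE FUNCTION notice_ddl() RETURNS event_trigger
    LANGUAGE plpgsql AS $$
    DECLARE
        r record;
    BEGIN
        FOR r IN SELECT * FROM pg_event_trigger_ddl_commands()
        LOOP
            RAISE NOTICE 'tag: %  type: %',
                get_command_tag(r.command), get_command_type(r.command);
        END LOOP;
    END;
    $$;

    CREATE EVENT TRIGGER notice_ddl_trigger ON ddl_command_end
        EXECUTE PROCEDURE notice_ddl();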
diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c index a216e422e9f0d..5ae17f866ed8e 100644 --- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c +++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c @@ -1,3 +1,13 @@ +/*---------------------------------------------------------------------- + * test_ddl_deparse.c + * Support functions for the test_ddl_deparse module + * + * Copyright (C) 2014-2015, PostgreSQL Global Development Group + * + * IDENTIFICATION + * src/test/modules/test_ddl_deparse/test_ddl_deparse.c + *---------------------------------------------------------------------- + */ #include "postgres.h" #include "catalog/pg_type.h" @@ -11,6 +21,10 @@ PG_FUNCTION_INFO_V1(get_command_type); PG_FUNCTION_INFO_V1(get_command_tag); PG_FUNCTION_INFO_V1(get_altertable_subcmdtypes); +/* + * Return the textual representation of the struct type used to represent a + * command in struct CollectedCommand format. + */ Datum get_command_type(PG_FUNCTION_ARGS) { @@ -48,6 +62,10 @@ get_command_type(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(type)); } +/* + * Return the command tag corresponding to a parse node contained in a + * CollectedCommand struct. + */ Datum get_command_tag(PG_FUNCTION_ARGS) { @@ -59,6 +77,10 @@ get_command_tag(PG_FUNCTION_ARGS) PG_RETURN_TEXT_P(cstring_to_text(CreateCommandTag(cmd->parsetree))); } +/* + * Return a text array representation of the subcommands of an ALTER TABLE + * command. + */ Datum get_altertable_subcmdtypes(PG_FUNCTION_ARGS) { @@ -130,6 +152,9 @@ get_altertable_subcmdtypes(PG_FUNCTION_ARGS) case AT_ReAddConstraint: strtype = "(re) ADD CONSTRAINT"; break; + case AT_ReAddComment: + strtype = "(re) ADD COMMENT"; + break; case AT_AlterConstraint: strtype = "ALTER CONSTRAINT"; break; @@ -258,6 +283,7 @@ get_altertable_subcmdtypes(PG_FUNCTION_ARGS) break; default: strtype = "unrecognized"; + break; } astate = From b2f01a731647b84d7c88d2028e8dc7be5599740f Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Mon, 20 Jul 2015 11:59:31 +0200 Subject: [PATCH 070/442] Fix mis-merge in previous commit --- src/test/modules/test_ddl_deparse/test_ddl_deparse.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c index 5ae17f866ed8e..e2dc4b5c768ce 100644 --- a/src/test/modules/test_ddl_deparse/test_ddl_deparse.c +++ b/src/test/modules/test_ddl_deparse/test_ddl_deparse.c @@ -152,9 +152,6 @@ get_altertable_subcmdtypes(PG_FUNCTION_ARGS) case AT_ReAddConstraint: strtype = "(re) ADD CONSTRAINT"; break; - case AT_ReAddComment: - strtype = "(re) ADD COMMENT"; - break; case AT_AlterConstraint: strtype = "ALTER CONSTRAINT"; break; From 38b03caebc5de44704567d8422f256c3e66b4784 Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Mon, 20 Jul 2015 12:16:40 +0200 Subject: [PATCH 071/442] Improve BRIN documentation somewhat This removes some info about support procedures being used, which was obsoleted by commit db5f98ab4f, as well as add some more documentation on how to create new opclasses using the Minmax infrastructure. (Hopefully we can get something similar for Inclusion as well.) In passing, fix some obsolete mentions of "mmtuples" in source code comments. Backpatch to 9.5, where BRIN was introduced. 
--- doc/src/sgml/brin.sgml | 81 ++++++++++++++++++++++++++--- src/backend/access/brin/brin.c | 2 +- src/backend/access/brin/brin_xlog.c | 6 +-- 3 files changed, 77 insertions(+), 12 deletions(-) diff --git a/doc/src/sgml/brin.sgml b/doc/src/sgml/brin.sgml index cdfe5dec565b8..c8c3de72e3354 100644 --- a/doc/src/sgml/brin.sgml +++ b/doc/src/sgml/brin.sgml @@ -529,14 +529,79 @@ typedef struct BrinOpcInfo - To implement these methods in a generic way, the operator class - defines its own internal support functions. - (For instance, the min/max operator classes implement - support functions for the four inequality operators for their data type.) - Additionally, the operator class must supply appropriate - operator entries, - to enable the optimizer to use the index when those operators are - used in queries. + The core distribution includes support for two types of operator classes: + minmax and inclusion. Operator class definitions using them are shipped for + in-core data types as appropriate. Additional operator classes can be + defined by the user for other datatypes using equivalent definitions, + without having to write any source code; appropriate catalog entries being + declared is enough. Note that assumptions about the semantics of operator + strategies are embedded in the support procedures's source code. + + + Operator classes that implement completely different semantics are also + possible, provided implementations of the four main support procedures + described above are written. Note that backwards compatibility across major + releases is not guaranteed: for example, additional support procedures might + be required in later releases. + + + + To write an operator class for a datatype that implements a totally + ordered set, it is possible to use the Minmax support procedures + alongside the corresponding operators, as shown in + . + All operator class members (procedures and operators) are mandatory. + + + + Procedure and Support Numbers for Minmax Operator Classes + + + + Operator class member + Object + + + + + Support Procedure 1 + function brin_minmax_opcinfo() + + + Support Procedure 2 + function brin_minmax_add_value() + + + Support Procedure 3 + function brin_minmax_consistent() + + + Support Procedure 4 + function brin_minmax_union() + + + Operator Strategy 1 + operator less-than + + + Operator Strategy 2 + operator less-than-or-equal-to + + + Operator Strategy 3 + operator equal-to + + + Operator Strategy 4 + operator greater-than-or-equal-to + + + Operator Strategy 5 + operator greater-than + + + +
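(As a concrete, hypothetical illustration of the catalog-only approach described above: for an ordered data type that already has the five comparison operators, a Minmax operator class could be declared roughly as below. The type name "mytype" is invented, and the support-function argument lists are assumptions based on the in-core minmax procedures, not text from this patch.)

    CREATE OPERATOR CLASS mytype_minmax_ops
    DEFAULT FOR TYPE mytype USING brin AS
        -- the four Minmax support procedures shipped in core
        FUNCTION 1  brin_minmax_opcinfo(internal),
        FUNCTION 2  brin_minmax_add_value(internal, internal, internal, internal),
        FUNCTION 3  brin_minmax_consistent(internal, internal, internal),
        FUNCTION 4  brin_minmax_union(internal, internal, internal),
        -- the type's own comparison operators, by strategy number
        OPERATOR 1  < ,
        OPERATOR 2  <= ,
        OPERATOR 3  = ,
        OPERATOR 4  >= ,
        OPERATOR 5  > ;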
diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index ff18b220c2b8f..268a55e71f93a 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -688,7 +688,7 @@ brinbuildempty(PG_FUNCTION_ARGS) * * XXX we could mark item tuples as "dirty" (when a minimum or maximum heap * tuple is deleted), meaning the need to re-run summarization on the affected - * range. Need to an extra flag in mmtuples for that. + * range. Need to an extra flag in brintuples for that. */ Datum brinbulkdelete(PG_FUNCTION_ARGS) diff --git a/src/backend/access/brin/brin_xlog.c b/src/backend/access/brin/brin_xlog.c index 09001552920b5..760f0daf024df 100644 --- a/src/backend/access/brin/brin_xlog.c +++ b/src/backend/access/brin/brin_xlog.c @@ -180,11 +180,11 @@ brin_xlog_samepage_update(XLogReaderState *record) if (action == BLK_NEEDS_REDO) { Size tuplen; - BrinTuple *mmtuple; + BrinTuple *brintuple; Page page; OffsetNumber offnum; - mmtuple = (BrinTuple *) XLogRecGetBlockData(record, 0, &tuplen); + brintuple = (BrinTuple *) XLogRecGetBlockData(record, 0, &tuplen); page = (Page) BufferGetPage(buffer); @@ -193,7 +193,7 @@ brin_xlog_samepage_update(XLogReaderState *record) elog(PANIC, "brin_xlog_samepage_update: invalid max offset number"); PageIndexDeleteNoCompact(page, &offnum, 1); - offnum = PageAddItem(page, (Item) mmtuple, tuplen, offnum, true, false); + offnum = PageAddItem(page, (Item) brintuple, tuplen, offnum, true, false); if (offnum == InvalidOffsetNumber) elog(PANIC, "brin_xlog_samepage_update: failed to add tuple"); From b0b6f8d71f03463854576b30c1b01e5d772076d8 Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Mon, 20 Jul 2015 14:18:08 +0200 Subject: [PATCH 072/442] Fix (some of) pltcl memory usage MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit As reported by Bill Parker, PL/Tcl did not validate some malloc() calls against NULL return. Fix by using palloc() in a new long-lived memory context instead. This allows us to simplify error handling too, by simply deleting the memory context instead of doing retail frees. There's still a lot that could be done to improve PL/Tcl's memory handling ... This is pretty ancient, so backpatch all the way back. Author: Michael Paquier and Ãlvaro Herrera Discussion: https://www.postgresql.org/message-id/CAFrbyQwyLDYXfBOhPfoBGqnvuZO_Y90YgqFM11T2jvnxjLFmqw@mail.gmail.com --- src/pl/tcl/pltcl.c | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c index d6b72f7d68623..48a3206da1ce8 100644 --- a/src/pl/tcl/pltcl.c +++ b/src/pl/tcl/pltcl.c @@ -2104,6 +2104,7 @@ static int pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp, int argc, CONST84 char *argv[]) { + volatile MemoryContext plan_cxt = NULL; int nargs; CONST84 char **args; pltcl_query_desc *qdesc; @@ -2132,13 +2133,24 @@ pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp, /************************************************************ * Allocate the new querydesc structure + * + * struct qdesc and subsidiary data all live in plan_cxt. Note that if the + * function is recompiled for whatever reason, permanent memory leaks + * occur. FIXME someday. 
************************************************************/ - qdesc = (pltcl_query_desc *) malloc(sizeof(pltcl_query_desc)); + plan_cxt = AllocSetContextCreate(TopMemoryContext, + "PL/TCL spi_prepare query", + ALLOCSET_SMALL_MINSIZE, + ALLOCSET_SMALL_INITSIZE, + ALLOCSET_SMALL_MAXSIZE); + MemoryContextSwitchTo(plan_cxt); + qdesc = (pltcl_query_desc *) palloc0(sizeof(pltcl_query_desc)); snprintf(qdesc->qname, sizeof(qdesc->qname), "%p", qdesc); qdesc->nargs = nargs; - qdesc->argtypes = (Oid *) malloc(nargs * sizeof(Oid)); - qdesc->arginfuncs = (FmgrInfo *) malloc(nargs * sizeof(FmgrInfo)); - qdesc->argtypioparams = (Oid *) malloc(nargs * sizeof(Oid)); + qdesc->argtypes = (Oid *) palloc(nargs * sizeof(Oid)); + qdesc->arginfuncs = (FmgrInfo *) palloc(nargs * sizeof(FmgrInfo)); + qdesc->argtypioparams = (Oid *) palloc(nargs * sizeof(Oid)); + MemoryContextSwitchTo(oldcontext); /************************************************************ * Execute the prepare inside a sub-transaction, so we can cope with @@ -2166,7 +2178,7 @@ pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp, getTypeInputInfo(typId, &typInput, &typIOParam); qdesc->argtypes[i] = typId; - perm_fmgr_info(typInput, &(qdesc->arginfuncs[i])); + fmgr_info_cxt(typInput, &(qdesc->arginfuncs[i]), plan_cxt); qdesc->argtypioparams[i] = typIOParam; } @@ -2193,10 +2205,7 @@ pltcl_SPI_prepare(ClientData cdata, Tcl_Interp *interp, { pltcl_subtrans_abort(interp, oldcontext, oldowner); - free(qdesc->argtypes); - free(qdesc->arginfuncs); - free(qdesc->argtypioparams); - free(qdesc); + MemoryContextDelete(plan_cxt); ckfree((char *) args); return TCL_ERROR; From 691c32f69a7efd6af9cda100c7e5ebf3b0c1937c Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Mon, 20 Jul 2015 15:37:17 +0200 Subject: [PATCH 073/442] Improve tab-completion for DROP POLICY MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Backpatch to 9.5. Author: Pavel StÄ›hule --- src/bin/psql/tab-complete.c | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index 4fd1dba6515fc..b2d627f471b82 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -742,6 +742,19 @@ static const SchemaQuery Query_for_list_of_matviews = { " FROM pg_catalog.pg_tablesample_method "\ " WHERE substring(pg_catalog.quote_ident(tsmname),1,%d)='%s'" +#define Query_for_list_of_policies \ +" SELECT pg_catalog.quote_ident(polname) "\ +" FROM pg_catalog.pg_policy " \ +" WHERE substring(pg_catalog.quote_ident(polname),1,%d)='%s'" + +#define Query_for_list_of_tables_for_policy \ +"SELECT pg_catalog.quote_ident(relname) "\ +" FROM pg_catalog.pg_class"\ +" WHERE (%d = pg_catalog.length('%s'))"\ +" AND oid IN "\ +" (SELECT polrelid FROM pg_catalog.pg_policy "\ +" WHERE pg_catalog.quote_ident(polname)='%s')" + /* * This is a list of all "things" in Pgsql, which can show up after CREATE or * DROP; and there is also a query to get a list of them. 
@@ -2891,15 +2904,26 @@ psql_completion(const char *text, int start, int end) COMPLETE_WITH_QUERY(Query_for_list_of_event_triggers); } + /* DROP POLICY */ + else if (pg_strcasecmp(prev2_wd, "DROP") == 0 && + pg_strcasecmp(prev_wd, "POLICY") == 0) + { + COMPLETE_WITH_QUERY(Query_for_list_of_policies); + } /* DROP POLICY ON */ else if (pg_strcasecmp(prev3_wd, "DROP") == 0 && pg_strcasecmp(prev2_wd, "POLICY") == 0) + { COMPLETE_WITH_CONST("ON"); + } /* DROP POLICY ON */ else if (pg_strcasecmp(prev4_wd, "DROP") == 0 && pg_strcasecmp(prev3_wd, "POLICY") == 0 && pg_strcasecmp(prev_wd, "ON") == 0) - COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL); + { + completion_info_charp = prev2_wd; + COMPLETE_WITH_QUERY(Query_for_list_of_tables_for_policy); + } /* DROP RULE */ else if (pg_strcasecmp(prev3_wd, "DROP") == 0 && From 869eb8416255da99fe5ba1f6d98e52a41999d30e Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Mon, 20 Jul 2015 18:47:15 +0200 Subject: [PATCH 074/442] Don't handle PUBLIC/NONE separately Since those role specifiers are checked in the grammar, there's no need for the old checks to remain in place after 31eae6028ec. Remove them. Backpatch to 9.5. Noted and patch by Jeevan Chalke --- src/backend/commands/user.c | 14 -------------- 1 file changed, 14 deletions(-) diff --git a/src/backend/commands/user.c b/src/backend/commands/user.c index 5b20994028db9..afbf2763be600 100644 --- a/src/backend/commands/user.c +++ b/src/backend/commands/user.c @@ -311,13 +311,6 @@ CreateRole(CreateRoleStmt *stmt) errmsg("permission denied to create role"))); } - if (strcmp(stmt->role, "public") == 0 || - strcmp(stmt->role, "none") == 0) - ereport(ERROR, - (errcode(ERRCODE_RESERVED_NAME), - errmsg("role name \"%s\" is reserved", - stmt->role))); - /* * Check the pg_authid relation to be certain the role doesn't already * exist. @@ -1159,13 +1152,6 @@ RenameRole(const char *oldname, const char *newname) (errcode(ERRCODE_DUPLICATE_OBJECT), errmsg("role \"%s\" already exists", newname))); - if (strcmp(newname, "public") == 0 || - strcmp(newname, "none") == 0) - ereport(ERROR, - (errcode(ERRCODE_RESERVED_NAME), - errmsg("role name \"%s\" is reserved", - newname))); - /* * createrole is enough privilege unless you want to mess with a superuser */ From e015c3e51f76a05cc026c8323c51a373172adaa3 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 20 Jul 2015 16:02:28 +0300 Subject: [PATCH 075/442] Sanity-check that a page zeroed by redo routine is marked with WILL_INIT. There was already a sanity-check in the other direction: if a page was marked with WILL_INIT, it had to be initialized by the redo routine. It's not strictly necessary for correctness that a page is marked with WILL_INIT if it's going to be initialized at redo, but it's a missed optimization if nothing else. Fix a few instances of this issue in SP-GiST, where a block in WAL record was not marked with WILL_INIT, but was in fact always initialized at redo. We were creating a full-page image of the page unnecessarily in those cases. Backpatch to 9.5, where the new WILL_INIT flag was added. 
--- src/backend/access/spgist/spgdoinsert.c | 31 ++++++++++++++++++++----- src/backend/access/transam/xlogutils.c | 19 ++++++++++----- 2 files changed, 38 insertions(+), 12 deletions(-) diff --git a/src/backend/access/spgist/spgdoinsert.c b/src/backend/access/spgist/spgdoinsert.c index b3fda13fa0662..db5d962b885c2 100644 --- a/src/backend/access/spgist/spgdoinsert.c +++ b/src/backend/access/spgist/spgdoinsert.c @@ -291,12 +291,16 @@ addLeafTuple(Relation index, SpGistState *state, SpGistLeafTuple leafTuple, if (RelationNeedsWAL(index)) { XLogRecPtr recptr; + int flags; XLogBeginInsert(); XLogRegisterData((char *) &xlrec, sizeof(xlrec)); XLogRegisterData((char *) leafTuple, leafTuple->size); - XLogRegisterBuffer(0, current->buffer, REGBUF_STANDARD); + flags = REGBUF_STANDARD; + if (xlrec.newPage) + flags |= REGBUF_WILL_INIT; + XLogRegisterBuffer(0, current->buffer, flags); if (xlrec.offnumParent != InvalidOffsetNumber) XLogRegisterBuffer(1, parent->buffer, REGBUF_STANDARD); @@ -1348,12 +1352,16 @@ doPickSplit(Relation index, SpGistState *state, XLogRegisterData((char *) innerTuple, innerTuple->size); XLogRegisterData(leafdata, leafptr - leafdata); - flags = REGBUF_STANDARD; - if (xlrec.initSrc) - flags |= REGBUF_WILL_INIT; + /* Old leaf page */ if (BufferIsValid(saveCurrent.buffer)) + { + flags = REGBUF_STANDARD; + if (xlrec.initSrc) + flags |= REGBUF_WILL_INIT; XLogRegisterBuffer(0, saveCurrent.buffer, flags); + } + /* New leaf page */ if (BufferIsValid(newLeafBuffer)) { flags = REGBUF_STANDARD; @@ -1361,7 +1369,14 @@ doPickSplit(Relation index, SpGistState *state, flags |= REGBUF_WILL_INIT; XLogRegisterBuffer(1, newLeafBuffer, flags); } - XLogRegisterBuffer(2, current->buffer, REGBUF_STANDARD); + + /* Inner page */ + flags = REGBUF_STANDARD; + if (xlrec.initInner) + flags |= REGBUF_WILL_INIT; + XLogRegisterBuffer(2, current->buffer, flags); + + /* Parent page, if different from inner page */ if (parent->buffer != InvalidBuffer) { if (parent->buffer != current->buffer) @@ -1631,13 +1646,17 @@ spgAddNodeAction(Relation index, SpGistState *state, if (RelationNeedsWAL(index)) { XLogRecPtr recptr; + int flags; XLogBeginInsert(); /* orig page */ XLogRegisterBuffer(0, saveCurrent.buffer, REGBUF_STANDARD); /* new page */ - XLogRegisterBuffer(1, current->buffer, REGBUF_STANDARD); + flags = REGBUF_STANDARD; + if (xlrec.newPage) + flags |= REGBUF_WILL_INIT; + XLogRegisterBuffer(1, current->buffer, flags); /* parent page (if different from orig and new) */ if (xlrec.parentBlk == 2) XLogRegisterBuffer(2, parent->buffer, REGBUF_STANDARD); diff --git a/src/backend/access/transam/xlogutils.c b/src/backend/access/transam/xlogutils.c index fa98b8294e827..a5003c3b92277 100644 --- a/src/backend/access/transam/xlogutils.c +++ b/src/backend/access/transam/xlogutils.c @@ -328,6 +328,8 @@ XLogReadBufferForRedoExtended(XLogReaderState *record, ForkNumber forknum; BlockNumber blkno; Page page; + bool zeromode; + bool willinit; if (!XLogRecGetBlockTag(record, block_id, &rnode, &forknum, &blkno)) { @@ -335,6 +337,17 @@ XLogReadBufferForRedoExtended(XLogReaderState *record, elog(PANIC, "failed to locate backup block with ID %d", block_id); } + /* + * Make sure that if the block is marked with WILL_INIT, the caller is + * going to initialize it. And vice versa. 
+ */ + zeromode = (mode == RBM_ZERO_AND_LOCK || mode == RBM_ZERO_AND_CLEANUP_LOCK); + willinit = (record->blocks[block_id].flags & BKPBLOCK_WILL_INIT) != 0; + if (willinit && !zeromode) + elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine"); + if (!willinit && zeromode) + elog(PANIC, "block to be initialized in redo routine must be marked with WILL_INIT flag in the WAL record"); + /* If it's a full-page image, restore it. */ if (XLogRecHasBlockImage(record, block_id)) { @@ -359,12 +372,6 @@ XLogReadBufferForRedoExtended(XLogReaderState *record, } else { - if ((record->blocks[block_id].flags & BKPBLOCK_WILL_INIT) != 0 && - mode != RBM_ZERO_AND_LOCK && mode != RBM_ZERO_AND_CLEANUP_LOCK) - { - elog(PANIC, "block with WILL_INIT flag in WAL record must be zeroed by redo routine"); - } - *buf = XLogReadBufferExtended(rnode, forknum, blkno, mode); if (BufferIsValid(*buf)) { From d6ec181cf14a1d3f3d8ca9400a404b9828776ca3 Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Tue, 21 Jul 2015 13:20:53 +0200 Subject: [PATCH 076/442] Fix omission of OCLASS_TRANSFORM in object_classes[] This was forgotten in cac76582053e (and its fixup ad89a5d115). Since it seems way too easy to miss this, this commit also introduces a mechanism to enforce that the array is consistent with the enum. Problem reported independently by Robert Haas and Jaimin Pan. Patches proposed by Jaimin Pan, Jim Nasby, Michael Paquier and myself, though I didn't use any of these and instead went with a cleaner approach suggested by Tom Lane. Backpatch to 9.5. Discussion: https://www.postgresql.org/message-id/CA+Tgmoa6SgDaxW_n_7SEhwBAc=mniYga+obUj5fmw4rU9_mLvA@mail.gmail.com https://www.postgresql.org/message-id/29788.1437411581@sss.pgh.pa.us --- src/backend/catalog/dependency.c | 11 +++++++++-- src/backend/commands/event_trigger.c | 9 --------- src/include/catalog/dependency.h | 7 ++++--- 3 files changed, 13 insertions(+), 14 deletions(-) diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index c1212e9075a21..5d7c441739cec 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -126,7 +126,7 @@ typedef struct * This constant table maps ObjectClasses to the corresponding catalog OIDs. * See also getObjectClass(). */ -static const Oid object_classes[MAX_OCLASS] = { +static const Oid object_classes[] = { RelationRelationId, /* OCLASS_CLASS */ ProcedureRelationId, /* OCLASS_PROC */ TypeRelationId, /* OCLASS_TYPE */ @@ -158,7 +158,8 @@ static const Oid object_classes[MAX_OCLASS] = { DefaultAclRelationId, /* OCLASS_DEFACL */ ExtensionRelationId, /* OCLASS_EXTENSION */ EventTriggerRelationId, /* OCLASS_EVENT_TRIGGER */ - PolicyRelationId /* OCLASS_POLICY */ + PolicyRelationId, /* OCLASS_POLICY */ + TransformRelationId /* OCLASS_TRANSFORM */ }; @@ -2037,6 +2038,12 @@ add_object_address(ObjectClass oclass, Oid objectId, int32 subId, { ObjectAddress *item; + /* + * Make sure object_classes is kept up to date with the ObjectClass enum. 
+ */ + StaticAssertStmt(lengthof(object_classes) == LAST_OCLASS + 1, + "object_classes[] must cover all ObjectClasses"); + /* enlarge array if needed */ if (addrs->numrefs >= addrs->maxrefs) { diff --git a/src/backend/commands/event_trigger.c b/src/backend/commands/event_trigger.c index bf40881037cbc..3d1cb0b8e3057 100644 --- a/src/backend/commands/event_trigger.c +++ b/src/backend/commands/event_trigger.c @@ -1168,15 +1168,6 @@ EventTriggerSupportsObjectClass(ObjectClass objclass) case OCLASS_EXTENSION: case OCLASS_POLICY: return true; - - case MAX_OCLASS: - - /* - * This shouldn't ever happen, but we keep the case to avoid a - * compiler warning without a "default" clause in the switch. - */ - Assert(false); - break; } return true; diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h index 5da18c25444f8..aa3f3d90a18e6 100644 --- a/src/include/catalog/dependency.h +++ b/src/include/catalog/dependency.h @@ -112,7 +112,7 @@ typedef struct ObjectAddresses ObjectAddresses; /* * This enum covers all system catalogs whose OIDs can appear in - * pg_depend.classId or pg_shdepend.classId. + * pg_depend.classId or pg_shdepend.classId. Keep object_classes[] in sync. */ typedef enum ObjectClass { @@ -148,10 +148,11 @@ typedef enum ObjectClass OCLASS_EXTENSION, /* pg_extension */ OCLASS_EVENT_TRIGGER, /* pg_event_trigger */ OCLASS_POLICY, /* pg_policy */ - OCLASS_TRANSFORM, /* pg_transform */ - MAX_OCLASS /* MUST BE LAST */ + OCLASS_TRANSFORM /* pg_transform */ } ObjectClass; +#define LAST_OCLASS OCLASS_TRANSFORM + /* in dependency.c */ From 29f171c81a0d064ab556417374b8809a8ebe2c08 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Tue, 21 Jul 2015 09:53:16 -0400 Subject: [PATCH 077/442] Fix location of output logs of pg_regress initdb.log and postmaster.log were moved to within the temporary instance path by commit dcae5fa. This directory now gets removed at the end of the run of pg_regress when there are no failures found, which makes analysis of after-run issues difficult in some cases, and reduces the output verbosity of the buildfarm after a run. Fix by Michael Paquier Backpatch to 9.5 --- src/test/regress/pg_regress.c | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/src/test/regress/pg_regress.c b/src/test/regress/pg_regress.c index ed8c369e5cb57..dd65ab5a949a7 100644 --- a/src/test/regress/pg_regress.c +++ b/src/test/regress/pg_regress.c @@ -2207,7 +2207,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc make_directory(temp_instance); /* and a directory for log files */ - snprintf(buf, sizeof(buf), "%s/log", temp_instance); + snprintf(buf, sizeof(buf), "%s/log", outputdir); if (!directory_exists(buf)) make_directory(buf); @@ -2220,10 +2220,10 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc temp_instance, debug ? " --debug" : "", nolocale ? " --no-locale" : "", - temp_instance); + outputdir); if (system(buf)) { - fprintf(stderr, _("\n%s: initdb failed\nExamine %s/log/initdb.log for the reason.\nCommand was: %s\n"), progname, temp_instance, buf); + fprintf(stderr, _("\n%s: initdb failed\nExamine %s/log/initdb.log for the reason.\nCommand was: %s\n"), progname, outputdir, buf); exit(2); } @@ -2324,7 +2324,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc bindir ? "/" : "", temp_instance, debug ? " -d 5" : "", hostname ? hostname : "", sockdir ? 
sockdir : "", - temp_instance); + outputdir); postmaster_pid = spawn_process(buf); if (postmaster_pid == INVALID_PID) { @@ -2353,7 +2353,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc if (WaitForSingleObject(postmaster_pid, 0) == WAIT_OBJECT_0) #endif { - fprintf(stderr, _("\n%s: postmaster failed\nExamine %s/log/postmaster.log for the reason\n"), progname, temp_instance); + fprintf(stderr, _("\n%s: postmaster failed\nExamine %s/log/postmaster.log for the reason\n"), progname, outputdir); exit(2); } @@ -2361,7 +2361,7 @@ regression_main(int argc, char *argv[], init_function ifunc, test_function tfunc } if (i >= 60) { - fprintf(stderr, _("\n%s: postmaster did not respond within 60 seconds\nExamine %s/log/postmaster.log for the reason\n"), progname, temp_instance); + fprintf(stderr, _("\n%s: postmaster did not respond within 60 seconds\nExamine %s/log/postmaster.log for the reason\n"), progname, outputdir); /* * If we get here, the postmaster is probably wedged somewhere in From 35ac618a7c4602b792160ae0d77b6dfb289f517e Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 21 Jul 2015 13:38:24 -0400 Subject: [PATCH 078/442] Fix some oversights in BRIN patch. Remove HeapScanDescData.rs_initblock, which wasn't being used for anything in the final version of the patch. Fix IndexBuildHeapScan so that it supports syncscan again; the patch broke synchronous scanning for index builds by forcing rs_startblk to zero even when the caller did not care about that and had asked for syncscan. Add some commentary and usage defenses to heap_setscanlimits(). Fix heapam so that asking for rs_numblocks == 0 does what you would reasonably expect. As coded it amounted to requesting a whole-table scan, because those "--x <= 0" tests on an unsigned variable would behave surprisingly. --- src/backend/access/heap/heapam.c | 30 ++++++++++++++++++++---------- src/backend/catalog/index.c | 12 ++++++++++-- src/include/access/relscan.h | 4 ++-- 3 files changed, 32 insertions(+), 14 deletions(-) diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 86a2e6bae6abd..6f4ff2718fed8 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -277,7 +277,6 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) scan->rs_startblock = 0; } - scan->rs_initblock = 0; scan->rs_numblocks = InvalidBlockNumber; scan->rs_inited = false; scan->rs_ctup.t_data = NULL; @@ -302,11 +301,22 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) pgstat_count_heap_scan(scan->rs_rd); } +/* + * heap_setscanlimits - restrict range of a heapscan + * + * startBlk is the page to start at + * numBlks is number of pages to scan (InvalidBlockNumber means "all") + */ void heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk, BlockNumber numBlks) { + Assert(!scan->rs_inited); /* else too late to change */ + Assert(!scan->rs_syncscan); /* else rs_startblock is significant */ + + /* Check startBlk is valid (but allow case of zero blocks...) 
*/ + Assert(startBlk == 0 || startBlk < scan->rs_nblocks); + scan->rs_startblock = startBlk; - scan->rs_initblock = startBlk; scan->rs_numblocks = numBlks; } @@ -477,7 +487,7 @@ heapgettup(HeapScanDesc scan, /* * return null immediately if relation is empty */ - if (scan->rs_nblocks == 0) + if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0) { Assert(!BufferIsValid(scan->rs_cbuf)); tuple->t_data = NULL; @@ -511,7 +521,7 @@ heapgettup(HeapScanDesc scan, /* * return null immediately if relation is empty */ - if (scan->rs_nblocks == 0) + if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0) { Assert(!BufferIsValid(scan->rs_cbuf)); tuple->t_data = NULL; @@ -651,7 +661,7 @@ heapgettup(HeapScanDesc scan, if (backward) { finished = (page == scan->rs_startblock) || - (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks <= 0 : false); + (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false); if (page == 0) page = scan->rs_nblocks; page--; @@ -662,7 +672,7 @@ heapgettup(HeapScanDesc scan, if (page >= scan->rs_nblocks) page = 0; finished = (page == scan->rs_startblock) || - (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks <= 0 : false); + (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false); /* * Report our new scan position for synchronization purposes. We @@ -754,7 +764,7 @@ heapgettup_pagemode(HeapScanDesc scan, /* * return null immediately if relation is empty */ - if (scan->rs_nblocks == 0) + if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0) { Assert(!BufferIsValid(scan->rs_cbuf)); tuple->t_data = NULL; @@ -785,7 +795,7 @@ heapgettup_pagemode(HeapScanDesc scan, /* * return null immediately if relation is empty */ - if (scan->rs_nblocks == 0) + if (scan->rs_nblocks == 0 || scan->rs_numblocks == 0) { Assert(!BufferIsValid(scan->rs_cbuf)); tuple->t_data = NULL; @@ -914,7 +924,7 @@ heapgettup_pagemode(HeapScanDesc scan, if (backward) { finished = (page == scan->rs_startblock) || - (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks <= 0 : false); + (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false); if (page == 0) page = scan->rs_nblocks; page--; @@ -925,7 +935,7 @@ heapgettup_pagemode(HeapScanDesc scan, if (page >= scan->rs_nblocks) page = 0; finished = (page == scan->rs_startblock) || - (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks <= 0 : false); + (scan->rs_numblocks != InvalidBlockNumber ? --scan->rs_numblocks == 0 : false); /* * Report our new scan position for synchronization purposes. We diff --git a/src/backend/catalog/index.c b/src/backend/catalog/index.c index 4246554d19d21..69f35c9330979 100644 --- a/src/backend/catalog/index.c +++ b/src/backend/catalog/index.c @@ -2168,7 +2168,8 @@ IndexBuildHeapScan(Relation heapRelation, /* * As above, except that instead of scanning the complete heap, only the given * number of blocks are scanned. Scan to end-of-rel can be signalled by - * passing InvalidBlockNumber as numblocks. + * passing InvalidBlockNumber as numblocks. Note that restricting the range + * to scan cannot be done when requesting syncscan. */ double IndexBuildHeapRangeScan(Relation heapRelation, @@ -2251,7 +2252,14 @@ IndexBuildHeapRangeScan(Relation heapRelation, allow_sync); /* syncscan OK? 
*/ /* set our scan endpoints */ - heap_setscanlimits(scan, start_blockno, numblocks); + if (!allow_sync) + heap_setscanlimits(scan, start_blockno, numblocks); + else + { + /* syncscan can only be requested on whole relation */ + Assert(start_blockno == 0); + Assert(numblocks == InvalidBlockNumber); + } reltuples = 0; diff --git a/src/include/access/relscan.h b/src/include/access/relscan.h index f2482e99d6c56..6e6231971fdca 100644 --- a/src/include/access/relscan.h +++ b/src/include/access/relscan.h @@ -38,8 +38,8 @@ typedef struct HeapScanDescData /* state set up at initscan time */ BlockNumber rs_nblocks; /* total number of blocks in rel */ BlockNumber rs_startblock; /* block # to start at */ - BlockNumber rs_initblock; /* block # to consider initial of rel */ - BlockNumber rs_numblocks; /* number of blocks to scan */ + BlockNumber rs_numblocks; /* max number of blocks to scan */ + /* rs_numblocks is usually InvalidBlockNumber, meaning "scan whole rel" */ BufferAccessStrategy rs_strategy; /* access strategy for reads */ bool rs_syncscan; /* report location to syncscan logic? */ From 41ae3b74d987d5d42f2c432812285c7d12d6f4c1 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 21 Jul 2015 20:03:58 -0400 Subject: [PATCH 079/442] Fix add_rte_to_flat_rtable() for recent feature additions. The TABLESAMPLE and row security patches each overlooked this function, though their errors of omission were opposite: RLS failed to zero out the securityQuals field, leading to wasteful copying of useless expression trees in finished plans, while TABLESAMPLE neglected to add a comment saying that it intentionally *isn't* deleting the tablesample subtree. There probably should be a similar comment about ctename, too. Back-patch as appropriate. --- src/backend/optimizer/plan/setrefs.c | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 46d84d39a00eb..258e541754aa1 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -372,10 +372,11 @@ flatten_rtes_walker(Node *node, PlannerGlobal *glob) * * In the flat rangetable, we zero out substructure pointers that are not * needed by the executor; this reduces the storage space and copying cost - * for cached plans. We keep only the alias and eref Alias fields, which are - * needed by EXPLAIN, and the selectedCols, insertedCols and updatedCols - * bitmaps, which are needed for executor-startup permissions checking and for - * trigger event checking. + * for cached plans. We keep only the tablesample field (which we'd otherwise + * have to put in the plan tree, anyway); the ctename, alias and eref Alias + * fields, which are needed by EXPLAIN; and the selectedCols, insertedCols and + * updatedCols bitmaps, which are needed for executor-startup permissions + * checking and for trigger event checking. 
*/ static void add_rte_to_flat_rtable(PlannerGlobal *glob, RangeTblEntry *rte) @@ -395,6 +396,7 @@ add_rte_to_flat_rtable(PlannerGlobal *glob, RangeTblEntry *rte) newrte->ctecoltypes = NIL; newrte->ctecoltypmods = NIL; newrte->ctecolcollations = NIL; + newrte->securityQuals = NIL; glob->finalrtable = lappend(glob->finalrtable, newrte); @@ -1199,7 +1201,7 @@ set_customscan_references(PlannerInfo *root, } /* Adjust child plan-nodes recursively, if needed */ - foreach (lc, cscan->custom_plans) + foreach(lc, cscan->custom_plans) { lfirst(lc) = set_plan_refs(root, (Plan *) lfirst(lc), rtoffset); } From a9b3a22aa18345451a20696fe272b6e02f5a2bbb Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 23 Jul 2015 01:30:07 +0300 Subject: [PATCH 080/442] Fix off-by-one error in calculating subtrans/multixact truncation point. If there were no subtransactions (or multixacts) active, we would calculate the oldestxid == next xid. That's correct, but if next XID happens to be on the next pg_subtrans (pg_multixact) page, the page does not exist yet, and SimpleLruTruncate will produce an "apparent wraparound" warning. The warning is harmless in this case, but looks very alarming to users. Backpatch to all supported versions. Patch and analysis by Thomas Munro. --- src/backend/access/transam/multixact.c | 13 ++++++++++--- src/backend/access/transam/subtrans.c | 7 ++++++- 2 files changed, 16 insertions(+), 4 deletions(-) diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index 377d0842bdd9d..1933a87d6568e 100644 --- a/src/backend/access/transam/multixact.c +++ b/src/backend/access/transam/multixact.c @@ -173,6 +173,8 @@ #define MULTIXACT_MEMBER_DANGER_THRESHOLD \ (MaxMultiXactOffset - MaxMultiXactOffset / 4) +#define PreviousMultiXactId(xid) \ + ((xid) == FirstMultiXactId ? MaxMultiXactId : (xid) - 1) /* * Links to shared-memory data structures for MultiXact control @@ -3057,10 +3059,15 @@ TruncateMultiXact(void) SlruScanDirectory(MultiXactMemberCtl, SlruScanDirCbRemoveMembers, &range); - /* Now we can truncate MultiXactOffset */ + /* + * Now we can truncate MultiXactOffset. We step back one multixact to + * avoid passing a cutoff page that hasn't been created yet in the rare + * case that oldestMXact would be the first item on a page and oldestMXact + * == nextMXact. In that case, if we didn't subtract one, we'd trigger + * SimpleLruTruncate's wraparound detection. + */ SimpleLruTruncate(MultiXactOffsetCtl, - MultiXactIdToOffsetPage(oldestMXact)); - + MultiXactIdToOffsetPage(PreviousMultiXactId(oldestMXact))); /* * Now, and only now, we can advance the stop point for multixact members. diff --git a/src/backend/access/transam/subtrans.c b/src/backend/access/transam/subtrans.c index 4bc24d9bbcbc4..6b70982322780 100644 --- a/src/backend/access/transam/subtrans.c +++ b/src/backend/access/transam/subtrans.c @@ -340,8 +340,13 @@ TruncateSUBTRANS(TransactionId oldestXact) /* * The cutoff point is the start of the segment containing oldestXact. We - * pass the *page* containing oldestXact to SimpleLruTruncate. + * pass the *page* containing oldestXact to SimpleLruTruncate. We step + * back one transaction to avoid passing a cutoff page that hasn't been + * created yet in the rare case that oldestXact would be the first item on + * a page and oldestXact == next XID. In that case, if we didn't subtract + * one, we'd trigger SimpleLruTruncate's wraparound detection. 
*/ + TransactionIdRetreat(oldestXact); cutoffPage = TransactionIdToPage(oldestXact); SimpleLruTruncate(SubTransCtl, cutoffPage); From fbf8dc21738749470f73f91a95ac01912c9deb10 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Thu, 23 Jul 2015 09:44:20 -0400 Subject: [PATCH 081/442] Redirect install output of make check into a log file dbf2ec1a changed make check so that the installation logs get directed to stdout and stderr. Per discussion on -hackers, this patch restores saving it to a file. It is now saved in /tmp_install/log, which is created once per invocation of any make target doing regression tests. Along the way, add a missing /log/ entry to test_ddl_deparse's .gitignore. Michael Paquier. --- src/Makefile.global.in | 5 +++-- src/test/modules/test_ddl_deparse/.gitignore | 1 + 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/src/Makefile.global.in b/src/Makefile.global.in index 8eab178ebd382..46331194a3a43 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -307,9 +307,10 @@ temp-install: ifndef NO_TEMP_INSTALL ifeq ($(MAKELEVEL),0) rm -rf '$(abs_top_builddir)'/tmp_install - $(MAKE) -C '$(top_builddir)' DESTDIR='$(abs_top_builddir)'/tmp_install install + $(MKDIR_P) '$(abs_top_builddir)'/tmp_install/log + $(MAKE) -C '$(top_builddir)' DESTDIR='$(abs_top_builddir)'/tmp_install install >'$(abs_top_builddir)'/tmp_install/log/install.log 2>&1 endif - $(if $(EXTRA_INSTALL),for extra in $(EXTRA_INSTALL); do $(MAKE) -C '$(top_builddir)'/$$extra DESTDIR='$(abs_top_builddir)'/tmp_install install || exit; done) + $(if $(EXTRA_INSTALL),for extra in $(EXTRA_INSTALL); do $(MAKE) -C '$(top_builddir)'/$$extra DESTDIR='$(abs_top_builddir)'/tmp_install install >>'$(abs_top_builddir)'/tmp_install/log/install.log || exit; done) endif PROVE = @PROVE@ diff --git a/src/test/modules/test_ddl_deparse/.gitignore b/src/test/modules/test_ddl_deparse/.gitignore index 6628455c0ad8b..3337b3d294433 100644 --- a/src/test/modules/test_ddl_deparse/.gitignore +++ b/src/test/modules/test_ddl_deparse/.gitignore @@ -1 +1,2 @@ +/log/ /results/ From bb0203f26fa5f09fe2689a9db4bc632c1435edec Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Fri, 24 Jul 2015 11:48:53 +0200 Subject: [PATCH 082/442] Fix bug around assignment expressions containing indirections. Handling of assigned-to expressions with indirection (e.g. set f1[1] = 3) was broken for ON CONFLICT DO UPDATE. The problem was that ParseState was consulted to determine if an INSERT-appropriate or UPDATE-appropriate behavior should be used when transforming expressions with indirections. When the wrong path was taken the old row was substituted with NULL, leading to wrong results.. To fix remove p_is_update and only use p_is_insert to decide how to transform the assignment expression, and uset p_is_insert while parsing the on conflict statement. This isn't particularly pretty, but it's not any worse than before. 
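A minimal standalone sketch of the flag handling described above (the struct, field, and function names below are invented for illustration and are not the actual PostgreSQL parser code): a single p_is_insert-style flag is enough, provided it is cleared after the INSERT expressions have been transformed and before the DO UPDATE SET list is processed.

#include <stdbool.h>
#include <stdio.h>

/* Illustrative only: not the real ParseState, just enough to show the idea. */
typedef struct
{
	bool		p_is_insert;	/* true while INSERT expressions are parsed */
} FakeParseState;

/*
 * For an assignment with indirection such as "SET f1[1] = x", the transform
 * needs a source for the not-assigned-to parts of the column: there is no
 * pre-existing row for INSERT, while UPDATE-style assignments must start
 * from the existing column value.
 */
static const char *
old_value_source(const FakeParseState *pstate)
{
	return pstate->p_is_insert
		? "NULL (no pre-existing row)"
		: "existing value of the target column";
}

int
main(void)
{
	FakeParseState pstate = {true};

	/* INSERT target list and VALUES/SELECT expressions */
	printf("INSERT expressions use: %s\n", old_value_source(&pstate));

	/*
	 * All INSERT expressions are done; the ON CONFLICT DO UPDATE SET list
	 * must now behave like an UPDATE, so clear the one flag rather than
	 * maintaining a second p_is_update flag that can get out of sync.
	 */
	pstate.p_is_insert = false;
	printf("DO UPDATE SET expressions use: %s\n", old_value_source(&pstate));

	return 0;
}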
Author: Peter Geoghegan, slightly edited by me Discussion: CAM3SWZS8RPvA=KFxADZWw3wAHnnbxMxDzkEC6fNaFc7zSm411w@mail.gmail.com Backpatch: 9.5, where the feature was introduced --- src/backend/parser/analyze.c | 8 +++++++- src/include/parser/parse_node.h | 1 - src/test/regress/expected/arrays.out | 21 +++++++++++++++++++++ src/test/regress/sql/arrays.sql | 13 +++++++++++++ 4 files changed, 41 insertions(+), 2 deletions(-) diff --git a/src/backend/parser/analyze.c b/src/backend/parser/analyze.c index fc463faa6be64..a0dfbf900a924 100644 --- a/src/backend/parser/analyze.c +++ b/src/backend/parser/analyze.c @@ -891,6 +891,12 @@ transformOnConflictClause(ParseState *pstate, /* Process DO UPDATE */ if (onConflictClause->action == ONCONFLICT_UPDATE) { + /* + * All INSERT expressions have been parsed, get ready for potentially + * existing SET statements that need to be processed like an UPDATE. + */ + pstate->p_is_insert = false; + exclRte = addRangeTableEntryForRelation(pstate, pstate->p_target_relation, makeAlias("excluded", NIL), @@ -1999,7 +2005,7 @@ transformUpdateStmt(ParseState *pstate, UpdateStmt *stmt) Node *qual; qry->commandType = CMD_UPDATE; - pstate->p_is_update = true; + pstate->p_is_insert = false; /* process the WITH clause independently of all else */ if (stmt->withClause) diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h index 3103b71594471..7ecaffc0dc37d 100644 --- a/src/include/parser/parse_node.h +++ b/src/include/parser/parse_node.h @@ -152,7 +152,6 @@ struct ParseState bool p_hasSubLinks; bool p_hasModifyingCTE; bool p_is_insert; - bool p_is_update; bool p_locked_from_parent; Relation p_target_relation; RangeTblEntry *p_target_rangetblentry; diff --git a/src/test/regress/expected/arrays.out b/src/test/regress/expected/arrays.out index 5f1532f2371e9..73fb5a248b47c 100644 --- a/src/test/regress/expected/arrays.out +++ b/src/test/regress/expected/arrays.out @@ -1116,6 +1116,27 @@ select * from arr_tbl where f1 >= '{1,2,3}' and f1 < '{1,5,3}'; {1,2,10} (2 rows) +-- test ON CONFLICT DO UPDATE with arrays +create temp table arr_pk_tbl (pk int4 primary key, f1 int[]); +insert into arr_pk_tbl values (1, '{1,2,3}'); +insert into arr_pk_tbl values (1, '{3,4,5}') on conflict (pk) + do update set f1[1] = excluded.f1[1], f1[3] = excluded.f1[3] + returning pk, f1; + pk | f1 +----+--------- + 1 | {3,2,5} +(1 row) + +insert into arr_pk_tbl(pk, f1[1:2]) values (1, '{6,7,8}') on conflict (pk) + do update set f1[1] = excluded.f1[1], + f1[2] = excluded.f1[2], + f1[3] = excluded.f1[3] + returning pk, f1; + pk | f1 +----+------------ + 1 | {6,7,NULL} +(1 row) + -- note: if above selects don't produce the expected tuple order, -- then you didn't get an indexscan plan, and something is busted. 
reset enable_seqscan; diff --git a/src/test/regress/sql/arrays.sql b/src/test/regress/sql/arrays.sql index 562134b2863ae..b1dd65144050b 100644 --- a/src/test/regress/sql/arrays.sql +++ b/src/test/regress/sql/arrays.sql @@ -306,6 +306,19 @@ set enable_seqscan to off; set enable_bitmapscan to off; select * from arr_tbl where f1 > '{1,2,3}' and f1 <= '{1,5,3}'; select * from arr_tbl where f1 >= '{1,2,3}' and f1 < '{1,5,3}'; + +-- test ON CONFLICT DO UPDATE with arrays +create temp table arr_pk_tbl (pk int4 primary key, f1 int[]); +insert into arr_pk_tbl values (1, '{1,2,3}'); +insert into arr_pk_tbl values (1, '{3,4,5}') on conflict (pk) + do update set f1[1] = excluded.f1[1], f1[3] = excluded.f1[3] + returning pk, f1; +insert into arr_pk_tbl(pk, f1[1:2]) values (1, '{6,7,8}') on conflict (pk) + do update set f1[1] = excluded.f1[1], + f1[2] = excluded.f1[2], + f1[3] = excluded.f1[3] + returning pk, f1; + -- note: if above selects don't produce the expected tuple order, -- then you didn't get an indexscan plan, and something is busted. reset enable_seqscan; From 016f28ad3dbf3bec14319cf2a49925b0063251aa Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Fri, 24 Jul 2015 09:40:46 -0400 Subject: [PATCH 083/442] Fix treatment of nulls in jsonb_agg and jsonb_object_agg The wrong is_null flag was being passed to datum_to_json. Also, null object key values are not permitted, and this was not being checked for. Add regression tests covering these cases, and also add those tests to the json set, even though it was doing the right thing. Fixes bug #13514, initially diagnosed by Tom Lane. --- src/backend/utils/adt/jsonb.c | 12 +++++++++--- src/test/regress/expected/json.out | 21 ++++++++++++++++++++- src/test/regress/expected/json_1.out | 21 ++++++++++++++++++++- src/test/regress/expected/jsonb.out | 19 ++++++++++++++++++- src/test/regress/expected/jsonb_1.out | 19 ++++++++++++++++++- src/test/regress/sql/json.sql | 13 +++++++++++-- src/test/regress/sql/jsonb.sql | 12 +++++++++++- 7 files changed, 107 insertions(+), 10 deletions(-) diff --git a/src/backend/utils/adt/jsonb.c b/src/backend/utils/adt/jsonb.c index e68972221ab7c..154bc3626c94f 100644 --- a/src/backend/utils/adt/jsonb.c +++ b/src/backend/utils/adt/jsonb.c @@ -705,6 +705,7 @@ datum_to_jsonb(Datum val, bool is_null, JsonbInState *result, if (is_null) { + Assert(!key_scalar); jb.type = jbvNull; } else if (key_scalar && @@ -1606,7 +1607,7 @@ jsonb_agg_transfn(PG_FUNCTION_ARGS) memset(&elem, 0, sizeof(JsonbInState)); - datum_to_jsonb(val, false, &elem, tcategory, outfuncoid, false); + datum_to_jsonb(val, PG_ARGISNULL(1), &elem, tcategory, outfuncoid, false); jbelem = JsonbValueToJsonb(elem.res); @@ -1752,7 +1753,12 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("could not determine input data type"))); - val = PG_ARGISNULL(1) ? 
(Datum) 0 : PG_GETARG_DATUM(1); + if (PG_ARGISNULL(1)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("field name must not be null"))); + + val = PG_GETARG_DATUM(1); jsonb_categorize_type(val_type, &tcategory, &outfuncoid); @@ -1777,7 +1783,7 @@ jsonb_object_agg_transfn(PG_FUNCTION_ARGS) memset(&elem, 0, sizeof(JsonbInState)); - datum_to_jsonb(val, false, &elem, tcategory, outfuncoid, false); + datum_to_jsonb(val, PG_ARGISNULL(2), &elem, tcategory, outfuncoid, false); jbval = JsonbValueToJsonb(elem.res); diff --git a/src/test/regress/expected/json.out b/src/test/regress/expected/json.out index 43ca67dddfa31..eb6b26b241e8f 100644 --- a/src/test/regress/expected/json.out +++ b/src/test/regress/expected/json.out @@ -465,7 +465,7 @@ SELECT json_agg(q) {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}] (1 row) -SELECT json_agg(q) +SELECT json_agg(q ORDER BY x, y) FROM rows q; json_agg ----------------------- @@ -474,6 +474,16 @@ SELECT json_agg(q) {"x":3,"y":"txt3"}] (1 row) +UPDATE rows SET x = NULL WHERE x = 1; +SELECT json_agg(q ORDER BY x NULLS FIRST, y) + FROM rows q; + json_agg +-------------------------- + [{"x":null,"y":"txt1"}, + + {"x":2,"y":"txt2"}, + + {"x":3,"y":"txt3"}] +(1 row) + -- non-numeric output SELECT row_to_json(q) FROM (SELECT 'NaN'::float8 AS "float8field") q; @@ -1574,6 +1584,15 @@ FROM foo; {"turbines" : { "847001" : {"name" : "t15", "type" : "GE1043"}, "847002" : {"name" : "t16", "type" : "GE1043"}, "847003" : {"name" : "sub-alpha", "type" : "GESS90"} }} (1 row) +SELECT json_object_agg(name, type) FROM foo; + json_object_agg +---------------------------------------------------------------- + { "t15" : "GE1043", "t16" : "GE1043", "sub-alpha" : "GESS90" } +(1 row) + +INSERT INTO foo VALUES (999999, NULL, 'bar'); +SELECT json_object_agg(name, type) FROM foo; +ERROR: field name must not be null -- json_object -- one dimension SELECT json_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); diff --git a/src/test/regress/expected/json_1.out b/src/test/regress/expected/json_1.out index 155f414ea4cb6..48543e8c35623 100644 --- a/src/test/regress/expected/json_1.out +++ b/src/test/regress/expected/json_1.out @@ -465,7 +465,7 @@ SELECT json_agg(q) {"b":"a2","c":5,"z":[{"f1":2,"f2":[1,2,3]},{"f1":5,"f2":[4,5,6]}]}] (1 row) -SELECT json_agg(q) +SELECT json_agg(q ORDER BY x, y) FROM rows q; json_agg ----------------------- @@ -474,6 +474,16 @@ SELECT json_agg(q) {"x":3,"y":"txt3"}] (1 row) +UPDATE rows SET x = NULL WHERE x = 1; +SELECT json_agg(q ORDER BY x NULLS FIRST, y) + FROM rows q; + json_agg +-------------------------- + [{"x":null,"y":"txt1"}, + + {"x":2,"y":"txt2"}, + + {"x":3,"y":"txt3"}] +(1 row) + -- non-numeric output SELECT row_to_json(q) FROM (SELECT 'NaN'::float8 AS "float8field") q; @@ -1570,6 +1580,15 @@ FROM foo; {"turbines" : { "847001" : {"name" : "t15", "type" : "GE1043"}, "847002" : {"name" : "t16", "type" : "GE1043"}, "847003" : {"name" : "sub-alpha", "type" : "GESS90"} }} (1 row) +SELECT json_object_agg(name, type) FROM foo; + json_object_agg +---------------------------------------------------------------- + { "t15" : "GE1043", "t16" : "GE1043", "sub-alpha" : "GESS90" } +(1 row) + +INSERT INTO foo VALUES (999999, NULL, 'bar'); +SELECT json_object_agg(name, type) FROM foo; +ERROR: field name must not be null -- json_object -- one dimension SELECT json_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); diff --git a/src/test/regress/expected/jsonb.out b/src/test/regress/expected/jsonb.out index 0ccc0f7a7957f..17656d4413aed 100644 --- 
a/src/test/regress/expected/jsonb.out +++ b/src/test/regress/expected/jsonb.out @@ -369,13 +369,21 @@ SELECT jsonb_agg(q) [{"b": "a1", "c": 4, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a1", "c": 5, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 4, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 5, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}] (1 row) -SELECT jsonb_agg(q) +SELECT jsonb_agg(q ORDER BY x, y) FROM rows q; jsonb_agg ----------------------------------------------------------------------- [{"x": 1, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] (1 row) +UPDATE rows SET x = NULL WHERE x = 1; +SELECT jsonb_agg(q ORDER BY x NULLS FIRST, y) + FROM rows q; + jsonb_agg +-------------------------------------------------------------------------- + [{"x": null, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] +(1 row) + -- jsonb extraction functions CREATE TEMP TABLE test_jsonb ( json_type text, @@ -1393,6 +1401,15 @@ FROM foo; {"turbines": {"847001": {"name": "t15", "type": "GE1043"}, "847002": {"name": "t16", "type": "GE1043"}, "847003": {"name": "sub-alpha", "type": "GESS90"}}} (1 row) +SELECT jsonb_object_agg(name, type) FROM foo; + jsonb_object_agg +----------------------------------------------------------- + {"t15": "GE1043", "t16": "GE1043", "sub-alpha": "GESS90"} +(1 row) + +INSERT INTO foo VALUES (999999, NULL, 'bar'); +SELECT jsonb_object_agg(name, type) FROM foo; +ERROR: field name must not be null -- jsonb_object -- one dimension SELECT jsonb_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); diff --git a/src/test/regress/expected/jsonb_1.out b/src/test/regress/expected/jsonb_1.out index 7b23a99357414..86b1162ac2946 100644 --- a/src/test/regress/expected/jsonb_1.out +++ b/src/test/regress/expected/jsonb_1.out @@ -369,13 +369,21 @@ SELECT jsonb_agg(q) [{"b": "a1", "c": 4, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a1", "c": 5, "z": [{"f1": 1, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 4, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 4, "f2": [4, 5, 6]}]}, {"b": "a2", "c": 5, "z": [{"f1": 2, "f2": [1, 2, 3]}, {"f1": 5, "f2": [4, 5, 6]}]}] (1 row) -SELECT jsonb_agg(q) +SELECT jsonb_agg(q ORDER BY x, y) FROM rows q; jsonb_agg ----------------------------------------------------------------------- [{"x": 1, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] (1 row) +UPDATE rows SET x = NULL WHERE x = 1; +SELECT jsonb_agg(q ORDER BY x NULLS FIRST, y) + FROM rows q; + jsonb_agg +-------------------------------------------------------------------------- + [{"x": null, "y": "txt1"}, {"x": 2, "y": "txt2"}, {"x": 3, "y": "txt3"}] +(1 row) + -- jsonb extraction functions CREATE TEMP TABLE test_jsonb ( json_type text, @@ -1393,6 +1401,15 @@ FROM foo; {"turbines": {"847001": {"name": "t15", "type": "GE1043"}, "847002": {"name": "t16", "type": "GE1043"}, "847003": {"name": "sub-alpha", "type": "GESS90"}}} (1 row) +SELECT jsonb_object_agg(name, type) FROM foo; + jsonb_object_agg +----------------------------------------------------------- + {"t15": "GE1043", "t16": "GE1043", "sub-alpha": "GESS90"} +(1 row) + +INSERT INTO foo VALUES (999999, NULL, 'bar'); +SELECT jsonb_object_agg(name, type) FROM foo; +ERROR: field name must not be null -- jsonb_object -- one dimension SELECT jsonb_object('{a,1,b,2,3,NULL,"d e f","a b c"}'); diff --git a/src/test/regress/sql/json.sql b/src/test/regress/sql/json.sql index 
8c3b73f5b3ed3..f631480f9676a 100644 --- a/src/test/regress/sql/json.sql +++ b/src/test/regress/sql/json.sql @@ -126,7 +126,12 @@ SELECT json_agg(q) FROM generate_series(1,2) x, generate_series(4,5) y) q; -SELECT json_agg(q) +SELECT json_agg(q ORDER BY x, y) + FROM rows q; + +UPDATE rows SET x = NULL WHERE x = 1; + +SELECT json_agg(q ORDER BY x NULLS FIRST, y) FROM rows q; -- non-numeric output @@ -442,7 +447,6 @@ SELECT json_build_object( 'd', json_build_object('e',array[9,8,7]::int[], 'f', (select row_to_json(r) from ( select relkind, oid::regclass as name from pg_class where relname = 'pg_class') r))); - -- empty objects/arrays SELECT json_build_array(); @@ -468,6 +472,11 @@ INSERT INTO foo VALUES (847003,'sub-alpha','GESS90'); SELECT json_build_object('turbines',json_object_agg(serial_num,json_build_object('name',name,'type',type))) FROM foo; +SELECT json_object_agg(name, type) FROM foo; + +INSERT INTO foo VALUES (999999, NULL, 'bar'); +SELECT json_object_agg(name, type) FROM foo; + -- json_object -- one dimension diff --git a/src/test/regress/sql/jsonb.sql b/src/test/regress/sql/jsonb.sql index 3d2d8abfc1d93..83ed4ebd93f86 100644 --- a/src/test/regress/sql/jsonb.sql +++ b/src/test/regress/sql/jsonb.sql @@ -93,7 +93,12 @@ SELECT jsonb_agg(q) FROM generate_series(1,2) x, generate_series(4,5) y) q; -SELECT jsonb_agg(q) +SELECT jsonb_agg(q ORDER BY x, y) + FROM rows q; + +UPDATE rows SET x = NULL WHERE x = 1; + +SELECT jsonb_agg(q ORDER BY x NULLS FIRST, y) FROM rows q; -- jsonb extraction functions @@ -334,6 +339,11 @@ INSERT INTO foo VALUES (847003,'sub-alpha','GESS90'); SELECT jsonb_build_object('turbines',jsonb_object_agg(serial_num,jsonb_build_object('name',name,'type',type))) FROM foo; +SELECT jsonb_object_agg(name, type) FROM foo; + +INSERT INTO foo VALUES (999999, NULL, 'bar'); +SELECT jsonb_object_agg(name, type) FROM foo; + -- jsonb_object -- one dimension From 7d4240d6cd91d83d263a45501cc2f44fb1d0a537 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Fri, 24 Jul 2015 12:56:25 -0700 Subject: [PATCH 084/442] Make RLS work with UPDATE ... WHERE CURRENT OF UPDATE ... WHERE CURRENT OF would not work in conjunction with RLS. Arrange to allow the CURRENT OF expression to be pushed down. Issue noted by Peter Geoghegan. Patch by Dean Rasheed. Back patch to 9.5 where RLS was introduced. --- src/backend/optimizer/path/allpaths.c | 40 ++++++++ src/backend/optimizer/util/clauses.c | 10 ++ src/test/regress/expected/rowsecurity.out | 107 ++++++++++++++++++++++ src/test/regress/sql/rowsecurity.sql | 49 ++++++++++ 4 files changed, 206 insertions(+) diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 0b831891fcb42..888eeac515184 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -2177,6 +2177,46 @@ subquery_push_qual(Query *subquery, RangeTblEntry *rte, Index rti, Node *qual) recurse_push_qual(subquery->setOperations, subquery, rte, rti, qual); } + else if (IsA(qual, CurrentOfExpr)) + { + /* + * This is possible when a WHERE CURRENT OF expression is applied to a + * table with row-level security. In that case, the subquery should + * contain precisely one rtable entry for the table, and we can safely + * push the expression down into the subquery. This will cause a TID + * scan subquery plan to be generated allowing the target relation to + * be updated. 
+ * + * Someday we might also be able to use a WHERE CURRENT OF expression + * on a view, but currently the rewriter prevents that, so we should + * never see any other case here, but generate sane error messages in + * case it does somehow happen. + */ + if (subquery->rtable == NIL) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("WHERE CURRENT OF is not supported on a view with no underlying relation"))); + + if (list_length(subquery->rtable) > 1) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("WHERE CURRENT OF is not supported on a view with more than one underlying relation"))); + + if (subquery->hasAggs || subquery->groupClause || subquery->groupingSets || subquery->havingQual) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("WHERE CURRENT OF is not supported on a view with grouping or aggregation"))); + + /* + * Adjust the CURRENT OF expression to refer to the underlying table + * in the subquery, and attach it to the subquery's WHERE clause. + */ + qual = copyObject(qual); + ((CurrentOfExpr *) qual)->cvarno = 1; + + subquery->jointree->quals = + make_and_qual(subquery->jointree->quals, qual); + } else { /* diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index d40083d396ea2..0137e0ecfced2 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -1492,6 +1492,16 @@ contain_leaked_vars_walker(Node *node, void *context) } break; + case T_CurrentOfExpr: + + /* + * WHERE CURRENT OF doesn't contain function calls. Moreover, it + * is important that this can be pushed down into a + * security_barrier view, since the planner must always generate + * a TID scan when CURRENT OF is present -- c.f. cost_tidscan. + */ + return false; + default: /* diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index eabfd932de9b2..414299a694114 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -2729,6 +2729,113 @@ COPY copy_t FROM STDIN; --fail - permission denied. 
ERROR: permission denied for relation copy_t RESET SESSION AUTHORIZATION; DROP TABLE copy_t; +-- Check WHERE CURRENT OF +SET SESSION AUTHORIZATION rls_regress_user0; +CREATE TABLE current_check (currentid int, payload text, rlsuser text); +GRANT ALL ON current_check TO PUBLIC; +INSERT INTO current_check VALUES + (1, 'abc', 'rls_regress_user1'), + (2, 'bcd', 'rls_regress_user1'), + (3, 'cde', 'rls_regress_user1'), + (4, 'def', 'rls_regress_user1'); +CREATE POLICY p1 ON current_check FOR SELECT USING (currentid % 2 = 0); +CREATE POLICY p2 ON current_check FOR DELETE USING (currentid = 4 AND rlsuser = current_user); +CREATE POLICY p3 ON current_check FOR UPDATE USING (currentid = 4) WITH CHECK (rlsuser = current_user); +ALTER TABLE current_check ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION rls_regress_user1; +-- Can SELECT even rows +SELECT * FROM current_check; + currentid | payload | rlsuser +-----------+---------+------------------- + 2 | bcd | rls_regress_user1 + 4 | def | rls_regress_user1 +(2 rows) + +-- Cannot UPDATE row 2 +UPDATE current_check SET payload = payload || '_new' WHERE currentid = 2 RETURNING *; + currentid | payload | rlsuser +-----------+---------+--------- +(0 rows) + +BEGIN; +DECLARE current_check_cursor SCROLL CURSOR FOR SELECT * FROM current_check; +-- Returns rows that can be seen according to SELECT policy, like plain SELECT +-- above (even rows) +FETCH ABSOLUTE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+------------------- + 2 | bcd | rls_regress_user1 +(1 row) + +-- Still cannot UPDATE row 2 through cursor +UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+--------- +(0 rows) + +-- Can update row 4 through cursor, which is the next visible row +FETCH RELATIVE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+------------------- + 4 | def | rls_regress_user1 +(1 row) + +UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+------------------- + 4 | def_new | rls_regress_user1 +(1 row) + +SELECT * FROM current_check; + currentid | payload | rlsuser +-----------+---------+------------------- + 2 | bcd | rls_regress_user1 + 4 | def_new | rls_regress_user1 +(2 rows) + +-- Plan should be a subquery TID scan +EXPLAIN (COSTS OFF) UPDATE current_check SET payload = payload WHERE CURRENT OF current_check_cursor; + QUERY PLAN +--------------------------------------------------------------- + Update on current_check current_check_1 + -> Subquery Scan on current_check + -> LockRows + -> Tid Scan on current_check current_check_2 + TID Cond: CURRENT OF current_check_cursor + Filter: (currentid = 4) +(6 rows) + +-- Similarly can only delete row 4 +FETCH ABSOLUTE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+------------------- + 2 | bcd | rls_regress_user1 +(1 row) + +DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+--------- +(0 rows) + +FETCH RELATIVE 1 FROM current_check_cursor; + currentid | payload | rlsuser +-----------+---------+------------------- + 4 | def | rls_regress_user1 +(1 row) + +DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; + currentid | payload | rlsuser +-----------+---------+------------------- + 4 | def_new | 
rls_regress_user1 +(1 row) + +SELECT * FROM current_check; + currentid | payload | rlsuser +-----------+---------+------------------- + 2 | bcd | rls_regress_user1 +(1 row) + +COMMIT; -- -- Collation support -- diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index 782824acfdae7..039070b85b733 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -1087,6 +1087,55 @@ COPY copy_t FROM STDIN; --fail - permission denied. RESET SESSION AUTHORIZATION; DROP TABLE copy_t; +-- Check WHERE CURRENT OF +SET SESSION AUTHORIZATION rls_regress_user0; + +CREATE TABLE current_check (currentid int, payload text, rlsuser text); +GRANT ALL ON current_check TO PUBLIC; + +INSERT INTO current_check VALUES + (1, 'abc', 'rls_regress_user1'), + (2, 'bcd', 'rls_regress_user1'), + (3, 'cde', 'rls_regress_user1'), + (4, 'def', 'rls_regress_user1'); + +CREATE POLICY p1 ON current_check FOR SELECT USING (currentid % 2 = 0); +CREATE POLICY p2 ON current_check FOR DELETE USING (currentid = 4 AND rlsuser = current_user); +CREATE POLICY p3 ON current_check FOR UPDATE USING (currentid = 4) WITH CHECK (rlsuser = current_user); + +ALTER TABLE current_check ENABLE ROW LEVEL SECURITY; + +SET SESSION AUTHORIZATION rls_regress_user1; + +-- Can SELECT even rows +SELECT * FROM current_check; + +-- Cannot UPDATE row 2 +UPDATE current_check SET payload = payload || '_new' WHERE currentid = 2 RETURNING *; + +BEGIN; + +DECLARE current_check_cursor SCROLL CURSOR FOR SELECT * FROM current_check; +-- Returns rows that can be seen according to SELECT policy, like plain SELECT +-- above (even rows) +FETCH ABSOLUTE 1 FROM current_check_cursor; +-- Still cannot UPDATE row 2 through cursor +UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; +-- Can update row 4 through cursor, which is the next visible row +FETCH RELATIVE 1 FROM current_check_cursor; +UPDATE current_check SET payload = payload || '_new' WHERE CURRENT OF current_check_cursor RETURNING *; +SELECT * FROM current_check; +-- Plan should be a subquery TID scan +EXPLAIN (COSTS OFF) UPDATE current_check SET payload = payload WHERE CURRENT OF current_check_cursor; +-- Similarly can only delete row 4 +FETCH ABSOLUTE 1 FROM current_check_cursor; +DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; +FETCH RELATIVE 1 FROM current_check_cursor; +DELETE FROM current_check WHERE CURRENT OF current_check_cursor RETURNING *; +SELECT * FROM current_check; + +COMMIT; + -- -- Collation support -- From 6fcb337fa507723d6940ed8e5658d3da1fac6195 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 25 Jul 2015 14:39:00 -0400 Subject: [PATCH 085/442] Redesign tablesample method API, and do extensive code review. The original implementation of TABLESAMPLE modeled the tablesample method API on index access methods, which wasn't a good choice because, without specialized DDL commands, there's no way to build an extension that can implement a TSM. (Raw inserts into system catalogs are not an acceptable thing to do, because we can't undo them during DROP EXTENSION, nor will pg_upgrade behave sanely.) Instead adopt an API more like procedural language handlers or foreign data wrappers, wherein the only SQL-level support object needed is a single handler function identified by having a special return type. This lets us get rid of the supporting catalog altogether, so that no custom DDL support is needed for the feature. 
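A minimal standalone sketch of the handler-function style adopted here (the type and function names below are invented and do not match the real TsmRoutine API): the extension exposes a single SQL-visible entry point whose result is a table of callbacks, so no supporting catalog row is needed and DROP EXTENSION can clean it up like any other function.

#include <stdio.h>

/* Illustrative only: a made-up callback table, not the real TsmRoutine node. */
typedef struct DemoSampleRoutine
{
	void		(*begin_scan) (unsigned int seed);
	int			(*next_block) (void);
} DemoSampleRoutine;

static void
demo_begin_scan(unsigned int seed)
{
	printf("begin sample scan, seed = %u\n", seed);
}

static int
demo_next_block(void)
{
	return -1;					/* no blocks selected in this stub */
}

/* The single entry point, analogous to an FDW or PL handler function. */
static DemoSampleRoutine *
demo_sample_handler(void)
{
	static DemoSampleRoutine routine = {demo_begin_scan, demo_next_block};

	return &routine;
}

int
main(void)
{
	DemoSampleRoutine *tsm = demo_sample_handler();

	tsm->begin_scan(42);
	printf("next block: %d\n", tsm->next_block());
	return 0;
}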
Adjust the API so that it can support non-constant tablesample arguments (the original coding assumed we could evaluate the argument expressions at ExecInitSampleScan time, which is undesirable even if it weren't outright unsafe), and discourage sampling methods from looking at invisible tuples. Make sure that the BERNOULLI and SYSTEM methods are genuinely repeatable within and across queries, as required by the SQL standard, and deal more honestly with methods that can't support that requirement. Make a full code-review pass over the tablesample additions, and fix assorted bugs, omissions, infelicities, and cosmetic issues (such as failure to put the added code stanzas in a consistent ordering). Improve EXPLAIN's output of tablesample plans, too. Back-patch to 9.5 so that we don't have to support the original API in production. --- .../pg_stat_statements/pg_stat_statements.c | 10 + contrib/tsm_system_rows/Makefile | 4 +- .../expected/tsm_system_rows.out | 96 +++- .../tsm_system_rows/sql/tsm_system_rows.sql | 41 +- .../tsm_system_rows/tsm_system_rows--1.0.sql | 43 +- contrib/tsm_system_rows/tsm_system_rows.c | 445 ++++++++++------- .../tsm_system_rows/tsm_system_rows.control | 2 +- contrib/tsm_system_time/Makefile | 4 +- .../expected/tsm_system_time.out | 138 ++++-- .../tsm_system_time/sql/tsm_system_time.sql | 53 +- .../tsm_system_time/tsm_system_time--1.0.sql | 38 +- contrib/tsm_system_time/tsm_system_time.c | 453 ++++++++++-------- .../tsm_system_time/tsm_system_time.control | 2 +- doc/src/sgml/catalogs.sgml | 120 ----- doc/src/sgml/datatype.sgml | 11 +- doc/src/sgml/postgres.sgml | 2 +- doc/src/sgml/ref/select.sgml | 126 ++--- doc/src/sgml/tablesample-method.sgml | 322 +++++++++---- doc/src/sgml/tsm-system-rows.sgml | 39 +- doc/src/sgml/tsm-system-time.sgml | 42 +- src/backend/access/heap/heapam.c | 61 ++- src/backend/access/tablesample/Makefile | 6 +- src/backend/access/tablesample/bernoulli.c | 326 +++++++------ src/backend/access/tablesample/system.c | 312 +++++++----- src/backend/access/tablesample/tablesample.c | 355 +------------- src/backend/catalog/Makefile | 5 +- src/backend/catalog/dependency.c | 8 + src/backend/commands/explain.c | 107 ++++- src/backend/executor/execAmi.c | 7 +- src/backend/executor/nodeSamplescan.c | 437 +++++++++++++++-- src/backend/nodes/copyfuncs.c | 115 +++-- src/backend/nodes/equalfuncs.c | 64 ++- src/backend/nodes/nodeFuncs.c | 75 +-- src/backend/nodes/outfuncs.c | 88 ++-- src/backend/nodes/readfuncs.c | 61 +-- src/backend/optimizer/path/allpaths.c | 100 +++- src/backend/optimizer/path/costsize.c | 57 +-- src/backend/optimizer/plan/createplan.c | 34 +- src/backend/optimizer/plan/initsplan.c | 4 +- src/backend/optimizer/plan/planner.c | 19 +- src/backend/optimizer/plan/setrefs.c | 18 +- src/backend/optimizer/plan/subselect.c | 7 +- src/backend/optimizer/prep/prepjointree.c | 13 +- src/backend/optimizer/util/pathnode.c | 8 +- src/backend/parser/gram.y | 27 +- src/backend/parser/parse_clause.c | 190 +++++--- src/backend/parser/parse_func.c | 144 ------ src/backend/rewrite/rewriteHandler.c | 4 + src/backend/utils/adt/pseudotypes.c | 27 ++ src/backend/utils/adt/ruleutils.c | 94 ++-- src/backend/utils/cache/lsyscache.c | 27 -- src/backend/utils/cache/syscache.c | 23 - src/backend/utils/errcodes.txt | 2 + src/backend/utils/misc/sampling.c | 2 +- src/bin/psql/tab-complete.c | 10 +- src/include/access/heapam.h | 4 +- src/include/access/tablesample.h | 61 --- src/include/access/tsmapi.h | 81 ++++ src/include/catalog/catversion.h | 2 +- 
src/include/catalog/indexing.h | 5 - src/include/catalog/pg_proc.h | 37 +- src/include/catalog/pg_tablesample_method.h | 81 ---- src/include/catalog/pg_type.h | 2 + src/include/executor/nodeSamplescan.h | 2 +- src/include/nodes/execnodes.h | 15 +- src/include/nodes/nodes.h | 9 +- src/include/nodes/parsenodes.h | 59 ++- src/include/nodes/plannodes.h | 7 +- src/include/optimizer/cost.h | 3 +- src/include/parser/parse_func.h | 5 - src/include/port.h | 4 - src/include/utils/builtins.h | 8 + src/include/utils/lsyscache.h | 1 - src/include/utils/syscache.h | 2 - src/port/erand48.c | 3 + src/test/regress/expected/rowsecurity.out | 24 +- src/test/regress/expected/rules.out | 4 + src/test/regress/expected/sanity_check.out | 1 - src/test/regress/expected/tablesample.out | 284 +++++++---- src/test/regress/output/misc.source | 5 +- src/test/regress/serial_schedule | 2 +- src/test/regress/sql/rowsecurity.sql | 8 +- src/test/regress/sql/tablesample.sql | 90 ++-- 83 files changed, 3116 insertions(+), 2521 deletions(-) delete mode 100644 src/include/access/tablesample.h create mode 100644 src/include/access/tsmapi.h delete mode 100644 src/include/catalog/pg_tablesample_method.h diff --git a/contrib/pg_stat_statements/pg_stat_statements.c b/contrib/pg_stat_statements/pg_stat_statements.c index 0eb991cdf0e86..59b8a2e2b3d9c 100644 --- a/contrib/pg_stat_statements/pg_stat_statements.c +++ b/contrib/pg_stat_statements/pg_stat_statements.c @@ -2297,6 +2297,7 @@ JumbleRangeTable(pgssJumbleState *jstate, List *rtable) { case RTE_RELATION: APP_JUMB(rte->relid); + JumbleExpr(jstate, (Node *) rte->tablesample); break; case RTE_SUBQUERY: JumbleQuery(jstate, rte->subquery); @@ -2767,6 +2768,15 @@ JumbleExpr(pgssJumbleState *jstate, Node *node) JumbleExpr(jstate, rtfunc->funcexpr); } break; + case T_TableSampleClause: + { + TableSampleClause *tsc = (TableSampleClause *) node; + + APP_JUMB(tsc->tsmhandler); + JumbleExpr(jstate, (Node *) tsc->args); + JumbleExpr(jstate, (Node *) tsc->repeatable); + } + break; default: /* Only a warning, since we can stumble along anyway */ elog(WARNING, "unrecognized node type: %d", diff --git a/contrib/tsm_system_rows/Makefile b/contrib/tsm_system_rows/Makefile index 700ab276db2e9..609af463c5c24 100644 --- a/contrib/tsm_system_rows/Makefile +++ b/contrib/tsm_system_rows/Makefile @@ -1,8 +1,8 @@ -# src/test/modules/tsm_system_rows/Makefile +# contrib/tsm_system_rows/Makefile MODULE_big = tsm_system_rows OBJS = tsm_system_rows.o $(WIN32RES) -PGFILEDESC = "tsm_system_rows - SYSTEM TABLESAMPLE method which accepts number of rows as a limit" +PGFILEDESC = "tsm_system_rows - TABLESAMPLE method which accepts number of rows as a limit" EXTENSION = tsm_system_rows DATA = tsm_system_rows--1.0.sql diff --git a/contrib/tsm_system_rows/expected/tsm_system_rows.out b/contrib/tsm_system_rows/expected/tsm_system_rows.out index 7e0f72b02b7df..87b4a8fc64bd2 100644 --- a/contrib/tsm_system_rows/expected/tsm_system_rows.out +++ b/contrib/tsm_system_rows/expected/tsm_system_rows.out @@ -1,31 +1,83 @@ CREATE EXTENSION tsm_system_rows; -CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages -INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) FROM generate_series(0, 30) s(i) ORDER BY i; +CREATE TABLE test_tablesample (id int, name text); +INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) + FROM generate_series(0, 30) s(i); ANALYZE test_tablesample; -SELECT count(*) FROM test_tablesample 
TABLESAMPLE system_rows (1000); +SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (0); + count +------- + 0 +(1 row) + +SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (1); + count +------- + 1 +(1 row) + +SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (10); + count +------- + 10 +(1 row) + +SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (100); count ------- 31 (1 row) -SELECT id FROM test_tablesample TABLESAMPLE system_rows (8) REPEATABLE (5432); - id ----- - 7 - 14 - 21 - 28 - 4 - 11 - 18 - 25 -(8 rows) - -EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE system_rows (20) REPEATABLE (10); - QUERY PLAN ------------------------------------------------------------------------------------ - Sample Scan (system_rows) on test_tablesample (cost=0.00..80.20 rows=20 width=4) +-- bad parameters should get through planning, but not execution: +EXPLAIN (COSTS OFF) +SELECT id FROM test_tablesample TABLESAMPLE system_rows (-1); + QUERY PLAN +---------------------------------------- + Sample Scan on test_tablesample + Sampling: system_rows ('-1'::bigint) +(2 rows) + +SELECT id FROM test_tablesample TABLESAMPLE system_rows (-1); +ERROR: sample size must not be negative +-- fail, this method is not repeatable: +SELECT * FROM test_tablesample TABLESAMPLE system_rows (10) REPEATABLE (0); +ERROR: tablesample method system_rows does not support REPEATABLE +LINE 1: SELECT * FROM test_tablesample TABLESAMPLE system_rows (10) ... + ^ +-- but a join should be allowed: +EXPLAIN (COSTS OFF) +SELECT * FROM + (VALUES (0),(10),(100)) v(nrows), + LATERAL (SELECT count(*) FROM test_tablesample + TABLESAMPLE system_rows (nrows)) ss; + QUERY PLAN +---------------------------------------------------------- + Nested Loop + -> Values Scan on "*VALUES*" + -> Aggregate + -> Sample Scan on test_tablesample + Sampling: system_rows ("*VALUES*".column1) +(5 rows) + +SELECT * FROM + (VALUES (0),(10),(100)) v(nrows), + LATERAL (SELECT count(*) FROM test_tablesample + TABLESAMPLE system_rows (nrows)) ss; + nrows | count +-------+------- + 0 | 0 + 10 | 10 + 100 | 31 +(3 rows) + +CREATE VIEW vv AS + SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (20); +SELECT * FROM vv; + count +------- + 20 (1 row) --- done -DROP TABLE test_tablesample CASCADE; +DROP EXTENSION tsm_system_rows; -- fail, view depends on extension +ERROR: cannot drop extension tsm_system_rows because other objects depend on it +DETAIL: view vv depends on function system_rows(internal) +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
diff --git a/contrib/tsm_system_rows/sql/tsm_system_rows.sql b/contrib/tsm_system_rows/sql/tsm_system_rows.sql index bd812220ed98d..e3ab4204eea5a 100644 --- a/contrib/tsm_system_rows/sql/tsm_system_rows.sql +++ b/contrib/tsm_system_rows/sql/tsm_system_rows.sql @@ -1,14 +1,39 @@ CREATE EXTENSION tsm_system_rows; -CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages - -INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) FROM generate_series(0, 30) s(i) ORDER BY i; +CREATE TABLE test_tablesample (id int, name text); +INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) + FROM generate_series(0, 30) s(i); ANALYZE test_tablesample; -SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (1000); -SELECT id FROM test_tablesample TABLESAMPLE system_rows (8) REPEATABLE (5432); +SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (0); +SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (1); +SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (10); +SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (100); + +-- bad parameters should get through planning, but not execution: +EXPLAIN (COSTS OFF) +SELECT id FROM test_tablesample TABLESAMPLE system_rows (-1); + +SELECT id FROM test_tablesample TABLESAMPLE system_rows (-1); + +-- fail, this method is not repeatable: +SELECT * FROM test_tablesample TABLESAMPLE system_rows (10) REPEATABLE (0); + +-- but a join should be allowed: +EXPLAIN (COSTS OFF) +SELECT * FROM + (VALUES (0),(10),(100)) v(nrows), + LATERAL (SELECT count(*) FROM test_tablesample + TABLESAMPLE system_rows (nrows)) ss; + +SELECT * FROM + (VALUES (0),(10),(100)) v(nrows), + LATERAL (SELECT count(*) FROM test_tablesample + TABLESAMPLE system_rows (nrows)) ss; + +CREATE VIEW vv AS + SELECT count(*) FROM test_tablesample TABLESAMPLE system_rows (20); -EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE system_rows (20) REPEATABLE (10); +SELECT * FROM vv; --- done -DROP TABLE test_tablesample CASCADE; +DROP EXTENSION tsm_system_rows; -- fail, view depends on extension diff --git a/contrib/tsm_system_rows/tsm_system_rows--1.0.sql b/contrib/tsm_system_rows/tsm_system_rows--1.0.sql index 1a29c584b5a83..de508ed72675f 100644 --- a/contrib/tsm_system_rows/tsm_system_rows--1.0.sql +++ b/contrib/tsm_system_rows/tsm_system_rows--1.0.sql @@ -1,44 +1,9 @@ -/* src/test/modules/tablesample/tsm_system_rows--1.0.sql */ +/* contrib/tsm_system_rows/tsm_system_rows--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION tsm_system_rows" to load this file. 
\quit -CREATE FUNCTION tsm_system_rows_init(internal, int4, int4) -RETURNS void -AS 'MODULE_PATHNAME' +CREATE FUNCTION system_rows(internal) +RETURNS tsm_handler +AS 'MODULE_PATHNAME', 'tsm_system_rows_handler' LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_rows_nextblock(internal) -RETURNS int4 -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_rows_nexttuple(internal, int4, int2) -RETURNS int2 -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_rows_examinetuple(internal, int4, internal, bool) -RETURNS bool -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_rows_end(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_rows_reset(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_rows_cost(internal, internal, internal, internal, internal, internal, internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -INSERT INTO pg_tablesample_method VALUES('system_rows', false, true, - 'tsm_system_rows_init', 'tsm_system_rows_nextblock', - 'tsm_system_rows_nexttuple', 'tsm_system_rows_examinetuple', - 'tsm_system_rows_end', 'tsm_system_rows_reset', 'tsm_system_rows_cost'); diff --git a/contrib/tsm_system_rows/tsm_system_rows.c b/contrib/tsm_system_rows/tsm_system_rows.c index e325eaff49897..f251e3e5e06dd 100644 --- a/contrib/tsm_system_rows/tsm_system_rows.c +++ b/contrib/tsm_system_rows/tsm_system_rows.c @@ -1,240 +1,356 @@ /*------------------------------------------------------------------------- * * tsm_system_rows.c - * interface routines for system_rows tablesample method + * support routines for SYSTEM_ROWS tablesample method * + * The desire here is to produce a random sample with a given number of rows + * (or the whole relation, if that is fewer rows). We use a block-sampling + * approach. To ensure that the whole relation will be visited if necessary, + * we start at a randomly chosen block and then advance with a stride that + * is randomly chosen but is relatively prime to the relation's nblocks. * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Because of the dependence on nblocks, this method cannot be repeatable + * across queries. (Even if the user hasn't explicitly changed the relation, + * maintenance activities such as autovacuum might change nblocks.) However, + * we can at least make it repeatable across scans, by determining the + * sampling pattern only once on the first scan. This means that rescans + * won't visit blocks added after the first scan, but that is fine since + * such blocks shouldn't contain any visible tuples anyway. 
+ * + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * contrib/tsm_system_rows_rowlimit/tsm_system_rows.c + * contrib/tsm_system_rows/tsm_system_rows.c * *------------------------------------------------------------------------- */ #include "postgres.h" -#include "fmgr.h" - -#include "access/tablesample.h" #include "access/relscan.h" +#include "access/tsmapi.h" +#include "catalog/pg_type.h" #include "miscadmin.h" -#include "nodes/execnodes.h" -#include "nodes/relation.h" #include "optimizer/clauses.h" -#include "storage/bufmgr.h" +#include "optimizer/cost.h" #include "utils/sampling.h" PG_MODULE_MAGIC; -/* - * State - */ +PG_FUNCTION_INFO_V1(tsm_system_rows_handler); + + +/* Private state */ typedef struct { - SamplerRandomState randstate; uint32 seed; /* random seed */ - BlockNumber nblocks; /* number of block in relation */ - int32 ntuples; /* number of tuples to return */ - int32 donetuples; /* tuples already returned */ + int64 ntuples; /* number of tuples to return */ + int64 donetuples; /* number of tuples already returned */ OffsetNumber lt; /* last tuple returned from current block */ - BlockNumber step; /* step size */ + BlockNumber doneblocks; /* number of already-scanned blocks */ BlockNumber lb; /* last block visited */ - BlockNumber doneblocks; /* number of already returned blocks */ -} SystemSamplerData; - - -PG_FUNCTION_INFO_V1(tsm_system_rows_init); -PG_FUNCTION_INFO_V1(tsm_system_rows_nextblock); -PG_FUNCTION_INFO_V1(tsm_system_rows_nexttuple); -PG_FUNCTION_INFO_V1(tsm_system_rows_examinetuple); -PG_FUNCTION_INFO_V1(tsm_system_rows_end); -PG_FUNCTION_INFO_V1(tsm_system_rows_reset); -PG_FUNCTION_INFO_V1(tsm_system_rows_cost); - + /* these three values are not changed during a rescan: */ + BlockNumber nblocks; /* number of blocks in relation */ + BlockNumber firstblock; /* first block to sample from */ + BlockNumber step; /* step size, or 0 if not set yet */ +} SystemRowsSamplerData; + +static void system_rows_samplescangetsamplesize(PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples); +static void system_rows_initsamplescan(SampleScanState *node, + int eflags); +static void system_rows_beginsamplescan(SampleScanState *node, + Datum *params, + int nparams, + uint32 seed); +static BlockNumber system_rows_nextsampleblock(SampleScanState *node); +static OffsetNumber system_rows_nextsampletuple(SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset); +static bool SampleOffsetVisible(OffsetNumber tupoffset, HeapScanDesc scan); static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate); + /* - * Initializes the state. + * Create a TsmRoutine descriptor for the SYSTEM_ROWS method. */ Datum -tsm_system_rows_init(PG_FUNCTION_ARGS) +tsm_system_rows_handler(PG_FUNCTION_ARGS) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - uint32 seed = PG_GETARG_UINT32(1); - int32 ntuples = PG_ARGISNULL(2) ? 
-1 : PG_GETARG_INT32(2); - HeapScanDesc scan = tsdesc->heapScan; - SystemSamplerData *sampler; + TsmRoutine *tsm = makeNode(TsmRoutine); - if (ntuples < 1) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("invalid sample size"), - errhint("Sample size must be positive integer value."))); + tsm->parameterTypes = list_make1_oid(INT8OID); - sampler = palloc0(sizeof(SystemSamplerData)); + /* See notes at head of file */ + tsm->repeatable_across_queries = false; + tsm->repeatable_across_scans = true; - /* Remember initial values for reinit */ - sampler->seed = seed; - sampler->nblocks = scan->rs_nblocks; - sampler->ntuples = ntuples; - sampler->donetuples = 0; - sampler->lt = InvalidOffsetNumber; - sampler->doneblocks = 0; - - sampler_random_init_state(sampler->seed, sampler->randstate); - - /* Find relative prime as step size for linear probing. */ - sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate); - - /* - * Randomize start position so that blocks close to step size don't have - * higher probability of being chosen on very short scan. - */ - sampler->lb = sampler_random_fract(sampler->randstate) * - (sampler->nblocks / sampler->step); + tsm->SampleScanGetSampleSize = system_rows_samplescangetsamplesize; + tsm->InitSampleScan = system_rows_initsamplescan; + tsm->BeginSampleScan = system_rows_beginsamplescan; + tsm->NextSampleBlock = system_rows_nextsampleblock; + tsm->NextSampleTuple = system_rows_nextsampletuple; + tsm->EndSampleScan = NULL; - tsdesc->tsmdata = (void *) sampler; - - PG_RETURN_VOID(); + PG_RETURN_POINTER(tsm); } /* - * Get next block number or InvalidBlockNumber when we're done. - * - * Uses linear probing algorithm for picking next block. + * Sample size estimation. */ -Datum -tsm_system_rows_nextblock(PG_FUNCTION_ARGS) +static void +system_rows_samplescangetsamplesize(PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; + Node *limitnode; + int64 ntuples; + double npages; - sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks; - sampler->doneblocks++; + /* Try to extract an estimate for the limit rowcount */ + limitnode = (Node *) linitial(paramexprs); + limitnode = estimate_expression_value(root, limitnode); - /* All blocks have been read, we're done */ - if (sampler->doneblocks > sampler->nblocks || - sampler->donetuples >= sampler->ntuples) - PG_RETURN_UINT32(InvalidBlockNumber); + if (IsA(limitnode, Const) && + !((Const *) limitnode)->constisnull) + { + ntuples = DatumGetInt64(((Const *) limitnode)->constvalue); + if (ntuples < 0) + { + /* Default ntuples if the value is bogus */ + ntuples = 1000; + } + } + else + { + /* Default ntuples if we didn't obtain a non-null Const */ + ntuples = 1000; + } - PG_RETURN_UINT32(sampler->lb); -} + /* Clamp to the estimated relation size */ + if (ntuples > baserel->tuples) + ntuples = (int64) baserel->tuples; + ntuples = clamp_row_est(ntuples); -/* - * Get next tuple offset in current block or InvalidOffsetNumber if we are done - * with this block. 
- */ -Datum -tsm_system_rows_nexttuple(PG_FUNCTION_ARGS) -{ - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - OffsetNumber maxoffset = PG_GETARG_UINT16(2); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; - OffsetNumber tupoffset = sampler->lt; + if (baserel->tuples > 0 && baserel->pages > 0) + { + /* Estimate number of pages visited based on tuple density */ + double density = baserel->tuples / (double) baserel->pages; - if (tupoffset == InvalidOffsetNumber) - tupoffset = FirstOffsetNumber; + npages = ntuples / density; + } else - tupoffset++; - - if (tupoffset > maxoffset || - sampler->donetuples >= sampler->ntuples) - tupoffset = InvalidOffsetNumber; + { + /* For lack of data, assume one tuple per page */ + npages = ntuples; + } - sampler->lt = tupoffset; + /* Clamp to sane value */ + npages = clamp_row_est(Min((double) baserel->pages, npages)); - PG_RETURN_UINT16(tupoffset); + *pages = npages; + *tuples = ntuples; } /* - * Examine tuple and decide if it should be returned. + * Initialize during executor setup. */ -Datum -tsm_system_rows_examinetuple(PG_FUNCTION_ARGS) +static void +system_rows_initsamplescan(SampleScanState *node, int eflags) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - bool visible = PG_GETARG_BOOL(3); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; - - if (!visible) - PG_RETURN_BOOL(false); - - sampler->donetuples++; - - PG_RETURN_BOOL(true); + node->tsm_state = palloc0(sizeof(SystemRowsSamplerData)); + /* Note the above leaves tsm_state->step equal to zero */ } /* - * Cleanup method. + * Examine parameters and prepare for a sample scan. */ -Datum -tsm_system_rows_end(PG_FUNCTION_ARGS) +static void +system_rows_beginsamplescan(SampleScanState *node, + Datum *params, + int nparams, + uint32 seed) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); + SystemRowsSamplerData *sampler = (SystemRowsSamplerData *) node->tsm_state; + int64 ntuples = DatumGetInt64(params[0]); + + if (ntuples < 0) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT), + errmsg("sample size must not be negative"))); - pfree(tsdesc->tsmdata); + sampler->seed = seed; + sampler->ntuples = ntuples; + sampler->donetuples = 0; + sampler->lt = InvalidOffsetNumber; + sampler->doneblocks = 0; + /* lb will be initialized during first NextSampleBlock call */ + /* we intentionally do not change nblocks/firstblock/step here */ - PG_RETURN_VOID(); + /* + * We *must* use pagemode visibility checking in this module, so force + * that even though it's currently default. + */ + node->use_pagemode = true; } /* - * Reset state (called by ReScan). + * Select next block to sample. + * + * Uses linear probing algorithm for picking next block. */ -Datum -tsm_system_rows_reset(PG_FUNCTION_ARGS) +static BlockNumber +system_rows_nextsampleblock(SampleScanState *node) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; + SystemRowsSamplerData *sampler = (SystemRowsSamplerData *) node->tsm_state; + HeapScanDesc scan = node->ss.ss_currentScanDesc; - sampler->lt = InvalidOffsetNumber; - sampler->donetuples = 0; - sampler->doneblocks = 0; + /* First call within scan? */ + if (sampler->doneblocks == 0) + { + /* First scan within query? 
*/ + if (sampler->step == 0) + { + /* Initialize now that we have scan descriptor */ + SamplerRandomState randstate; + + /* If relation is empty, there's nothing to scan */ + if (scan->rs_nblocks == 0) + return InvalidBlockNumber; + + /* We only need an RNG during this setup step */ + sampler_random_init_state(sampler->seed, randstate); + + /* Compute nblocks/firstblock/step only once per query */ + sampler->nblocks = scan->rs_nblocks; - sampler_random_init_state(sampler->seed, sampler->randstate); - sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate); - sampler->lb = sampler_random_fract(sampler->randstate) * (sampler->nblocks / sampler->step); + /* Choose random starting block within the relation */ + /* (Actually this is the predecessor of the first block visited) */ + sampler->firstblock = sampler_random_fract(randstate) * + sampler->nblocks; + + /* Find relative prime as step size for linear probing */ + sampler->step = random_relative_prime(sampler->nblocks, randstate); + } + + /* Reinitialize lb */ + sampler->lb = sampler->firstblock; + } + + /* If we've read all blocks or returned all needed tuples, we're done */ + if (++sampler->doneblocks > sampler->nblocks || + sampler->donetuples >= sampler->ntuples) + return InvalidBlockNumber; + + /* + * It's probably impossible for scan->rs_nblocks to decrease between scans + * within a query; but just in case, loop until we select a block number + * less than scan->rs_nblocks. We don't care if scan->rs_nblocks has + * increased since the first scan. + */ + do + { + /* Advance lb, using uint64 arithmetic to forestall overflow */ + sampler->lb = ((uint64) sampler->lb + sampler->step) % sampler->nblocks; + } while (sampler->lb >= scan->rs_nblocks); - PG_RETURN_VOID(); + return sampler->lb; } /* - * Costing function. + * Select next sampled tuple in current block. + * + * In block sampling, we just want to sample all the tuples in each selected + * block. + * + * When we reach end of the block, return InvalidOffsetNumber which tells + * SampleScan to go to next block. */ -Datum -tsm_system_rows_cost(PG_FUNCTION_ARGS) +static OffsetNumber +system_rows_nextsampletuple(SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset) { - PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0); - Path *path = (Path *) PG_GETARG_POINTER(1); - RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2); - List *args = (List *) PG_GETARG_POINTER(3); - BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4); - double *tuples = (double *) PG_GETARG_POINTER(5); - Node *limitnode; - int32 ntuples; + SystemRowsSamplerData *sampler = (SystemRowsSamplerData *) node->tsm_state; + HeapScanDesc scan = node->ss.ss_currentScanDesc; + OffsetNumber tupoffset = sampler->lt; - limitnode = linitial(args); - limitnode = estimate_expression_value(root, limitnode); + /* Quit if we've returned all needed tuples */ + if (sampler->donetuples >= sampler->ntuples) + return InvalidOffsetNumber; - if (IsA(limitnode, RelabelType)) - limitnode = (Node *) ((RelabelType *) limitnode)->arg; + /* + * Because we should only count visible tuples as being returned, we need + * to search for a visible tuple rather than just let the core code do it. + */ - if (IsA(limitnode, Const)) - ntuples = DatumGetInt32(((Const *) limitnode)->constvalue); - else + /* We rely on the data accumulated in pagemode access */ + Assert(scan->rs_pageatatime); + for (;;) { - /* Default ntuples if the estimation didn't return Const. 
*/ - ntuples = 1000; + /* Advance to next possible offset on page */ + if (tupoffset == InvalidOffsetNumber) + tupoffset = FirstOffsetNumber; + else + tupoffset++; + + /* Done? */ + if (tupoffset > maxoffset) + { + tupoffset = InvalidOffsetNumber; + break; + } + + /* Found a candidate? */ + if (SampleOffsetVisible(tupoffset, scan)) + { + sampler->donetuples++; + break; + } } - *pages = Min(baserel->pages, ntuples); - *tuples = ntuples; - path->rows = *tuples; + sampler->lt = tupoffset; - PG_RETURN_VOID(); + return tupoffset; } +/* + * Check if tuple offset is visible + * + * In pageatatime mode, heapgetpage() already did visibility checks, + * so just look at the info it left in rs_vistuples[]. + */ +static bool +SampleOffsetVisible(OffsetNumber tupoffset, HeapScanDesc scan) +{ + int start = 0, + end = scan->rs_ntuples - 1; + + while (start <= end) + { + int mid = (start + end) / 2; + OffsetNumber curoffset = scan->rs_vistuples[mid]; + + if (tupoffset == curoffset) + return true; + else if (tupoffset < curoffset) + end = mid - 1; + else + start = mid + 1; + } + + return false; +} +/* + * Compute greatest common divisor of two uint32's. + */ static uint32 gcd(uint32 a, uint32 b) { @@ -250,22 +366,29 @@ gcd(uint32 a, uint32 b) return b; } +/* + * Pick a random value less than and relatively prime to n, if possible + * (else return 1). + */ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate) { - /* Pick random starting number, with some limits on what it can be. */ - uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4, - t; + uint32 r; + + /* Safety check to avoid infinite loop or zero result for small n. */ + if (n <= 1) + return 1; /* * This should only take 2 or 3 iterations as the probability of 2 numbers - * being relatively prime is ~61%. + * being relatively prime is ~61%; but just in case, we'll include a + * CHECK_FOR_INTERRUPTS in the loop. 
*/ - while ((t = gcd(r, n)) > 1) + do { CHECK_FOR_INTERRUPTS(); - r /= t; - } + r = (uint32) (sampler_random_fract(randstate) * n); + } while (r == 0 || gcd(r, n) > 1); return r; } diff --git a/contrib/tsm_system_rows/tsm_system_rows.control b/contrib/tsm_system_rows/tsm_system_rows.control index 84ea7adb49a26..4bd0232f97215 100644 --- a/contrib/tsm_system_rows/tsm_system_rows.control +++ b/contrib/tsm_system_rows/tsm_system_rows.control @@ -1,5 +1,5 @@ # tsm_system_rows extension -comment = 'SYSTEM TABLESAMPLE method which accepts number rows as a limit' +comment = 'TABLESAMPLE method which accepts number of rows as a limit' default_version = '1.0' module_pathname = '$libdir/tsm_system_rows' relocatable = true diff --git a/contrib/tsm_system_time/Makefile b/contrib/tsm_system_time/Makefile index c42c1c6bb61f2..168becf54e2ff 100644 --- a/contrib/tsm_system_time/Makefile +++ b/contrib/tsm_system_time/Makefile @@ -1,8 +1,8 @@ -# src/test/modules/tsm_system_time/Makefile +# contrib/tsm_system_time/Makefile MODULE_big = tsm_system_time OBJS = tsm_system_time.o $(WIN32RES) -PGFILEDESC = "tsm_system_time - SYSTEM TABLESAMPLE method which accepts number rows of as a limit" +PGFILEDESC = "tsm_system_time - TABLESAMPLE method which accepts time in milliseconds as a limit" EXTENSION = tsm_system_time DATA = tsm_system_time--1.0.sql diff --git a/contrib/tsm_system_time/expected/tsm_system_time.out b/contrib/tsm_system_time/expected/tsm_system_time.out index 32ad03c4bdcef..ac44f30be9038 100644 --- a/contrib/tsm_system_time/expected/tsm_system_time.out +++ b/contrib/tsm_system_time/expected/tsm_system_time.out @@ -1,54 +1,100 @@ CREATE EXTENSION tsm_system_time; -CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages -INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) FROM generate_series(0, 30) s(i) ORDER BY i; +CREATE TABLE test_tablesample (id int, name text); +INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) + FROM generate_series(0, 30) s(i); ANALYZE test_tablesample; -SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (1000); +-- It's a bit tricky to test SYSTEM_TIME in a platform-independent way. +-- We can test the zero-time corner case ... +SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (0); count ------- - 31 + 0 (1 row) -SELECT id FROM test_tablesample TABLESAMPLE system_time (1000) REPEATABLE (5432); - id ----- - 7 - 14 - 21 - 28 - 4 - 11 - 18 - 25 - 1 - 8 - 15 - 22 - 29 - 5 - 12 - 19 - 26 - 2 - 9 - 16 - 23 - 30 - 6 - 13 - 20 - 27 - 3 - 10 - 17 - 24 - 0 -(31 rows) - -EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE system_time (100) REPEATABLE (10); - QUERY PLAN ------------------------------------------------------------------------------------- - Sample Scan (system_time) on test_tablesample (cost=0.00..100.25 rows=25 width=4) +-- ... 
and we assume that this will finish before running out of time: +SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (100000); + count +------- + 31 (1 row) --- done -DROP TABLE test_tablesample CASCADE; +-- bad parameters should get through planning, but not execution: +EXPLAIN (COSTS OFF) +SELECT id FROM test_tablesample TABLESAMPLE system_time (-1); + QUERY PLAN +-------------------------------------------------- + Sample Scan on test_tablesample + Sampling: system_time ('-1'::double precision) +(2 rows) + +SELECT id FROM test_tablesample TABLESAMPLE system_time (-1); +ERROR: sample collection time must not be negative +-- fail, this method is not repeatable: +SELECT * FROM test_tablesample TABLESAMPLE system_time (10) REPEATABLE (0); +ERROR: tablesample method system_time does not support REPEATABLE +LINE 1: SELECT * FROM test_tablesample TABLESAMPLE system_time (10) ... + ^ +-- since it's not repeatable, we expect a Materialize node in these plans: +EXPLAIN (COSTS OFF) +SELECT * FROM + (VALUES (0),(100000)) v(time), + LATERAL (SELECT COUNT(*) FROM test_tablesample + TABLESAMPLE system_time (100000)) ss; + QUERY PLAN +------------------------------------------------------------------------ + Nested Loop + -> Aggregate + -> Materialize + -> Sample Scan on test_tablesample + Sampling: system_time ('100000'::double precision) + -> Values Scan on "*VALUES*" +(6 rows) + +SELECT * FROM + (VALUES (0),(100000)) v(time), + LATERAL (SELECT COUNT(*) FROM test_tablesample + TABLESAMPLE system_time (100000)) ss; + time | count +--------+------- + 0 | 31 + 100000 | 31 +(2 rows) + +EXPLAIN (COSTS OFF) +SELECT * FROM + (VALUES (0),(100000)) v(time), + LATERAL (SELECT COUNT(*) FROM test_tablesample + TABLESAMPLE system_time (time)) ss; + QUERY PLAN +---------------------------------------------------------------- + Nested Loop + -> Values Scan on "*VALUES*" + -> Aggregate + -> Materialize + -> Sample Scan on test_tablesample + Sampling: system_time ("*VALUES*".column1) +(6 rows) + +SELECT * FROM + (VALUES (0),(100000)) v(time), + LATERAL (SELECT COUNT(*) FROM test_tablesample + TABLESAMPLE system_time (time)) ss; + time | count +--------+------- + 0 | 0 + 100000 | 31 +(2 rows) + +CREATE VIEW vv AS + SELECT * FROM test_tablesample TABLESAMPLE system_time (20); +EXPLAIN (COSTS OFF) SELECT * FROM vv; + QUERY PLAN +-------------------------------------------------- + Sample Scan on test_tablesample + Sampling: system_time ('20'::double precision) +(2 rows) + +DROP EXTENSION tsm_system_time; -- fail, view depends on extension +ERROR: cannot drop extension tsm_system_time because other objects depend on it +DETAIL: view vv depends on function system_time(internal) +HINT: Use DROP ... CASCADE to drop the dependent objects too. 
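[Editor's illustration, not part of the patch] The tsm_system_rows rewrite above, and the tsm_system_time rewrite later in this patch, both pick blocks by linear probing: a random starting block plus a stride chosen to be relatively prime to the relation's block count, which guarantees that every block is visited exactly once before the walk wraps around. The following standalone C sketch is purely illustrative (toy block count, plain rand() standing in for sampler_random_fract(), and a gcd_u32 helper invented for the example); it demonstrates that property outside the server.

    /*
     * Demonstration of the linear-probing block order used by tsm_system_rows
     * and tsm_system_time: with gcd(step, nblocks) == 1, the sequence
     * (start + i*step) mod nblocks enumerates every block exactly once.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include <stdint.h>
    #include <time.h>

    static uint32_t
    gcd_u32(uint32_t a, uint32_t b)
    {
        while (b != 0)
        {
            uint32_t    t = a % b;

            a = b;
            b = t;
        }
        return a;
    }

    int
    main(void)
    {
        uint32_t    nblocks = 31;   /* toy relation size in blocks */
        uint32_t    step;
        uint32_t    lb;
        uint32_t    i;

        srand((unsigned int) time(NULL));

        /* pick a nonzero step relatively prime to nblocks (usually 2-3 tries) */
        do
        {
            step = (uint32_t) rand() % nblocks;
        } while (step == 0 || gcd_u32(step, nblocks) != 1);

        /* random predecessor of the first block actually visited */
        lb = (uint32_t) rand() % nblocks;

        printf("step = %u, blocks in visit order:\n", step);
        for (i = 0; i < nblocks; i++)
        {
            lb = (lb + step) % nblocks;
            printf("%u ", lb);
        }
        printf("\n");   /* each block number 0..nblocks-1 appears exactly once */
        return 0;
    }

This is also why the rewritten random_relative_prime() retries until it finds a value that is nonzero and coprime to n: any other stride would revisit some blocks and skip others.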
diff --git a/contrib/tsm_system_time/sql/tsm_system_time.sql b/contrib/tsm_system_time/sql/tsm_system_time.sql index 68dbbf98afd2d..117de163d8505 100644 --- a/contrib/tsm_system_time/sql/tsm_system_time.sql +++ b/contrib/tsm_system_time/sql/tsm_system_time.sql @@ -1,14 +1,51 @@ CREATE EXTENSION tsm_system_time; -CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages - -INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) FROM generate_series(0, 30) s(i) ORDER BY i; +CREATE TABLE test_tablesample (id int, name text); +INSERT INTO test_tablesample SELECT i, repeat(i::text, 1000) + FROM generate_series(0, 30) s(i); ANALYZE test_tablesample; -SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (1000); -SELECT id FROM test_tablesample TABLESAMPLE system_time (1000) REPEATABLE (5432); +-- It's a bit tricky to test SYSTEM_TIME in a platform-independent way. +-- We can test the zero-time corner case ... +SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (0); +-- ... and we assume that this will finish before running out of time: +SELECT count(*) FROM test_tablesample TABLESAMPLE system_time (100000); + +-- bad parameters should get through planning, but not execution: +EXPLAIN (COSTS OFF) +SELECT id FROM test_tablesample TABLESAMPLE system_time (-1); + +SELECT id FROM test_tablesample TABLESAMPLE system_time (-1); + +-- fail, this method is not repeatable: +SELECT * FROM test_tablesample TABLESAMPLE system_time (10) REPEATABLE (0); + +-- since it's not repeatable, we expect a Materialize node in these plans: +EXPLAIN (COSTS OFF) +SELECT * FROM + (VALUES (0),(100000)) v(time), + LATERAL (SELECT COUNT(*) FROM test_tablesample + TABLESAMPLE system_time (100000)) ss; + +SELECT * FROM + (VALUES (0),(100000)) v(time), + LATERAL (SELECT COUNT(*) FROM test_tablesample + TABLESAMPLE system_time (100000)) ss; + +EXPLAIN (COSTS OFF) +SELECT * FROM + (VALUES (0),(100000)) v(time), + LATERAL (SELECT COUNT(*) FROM test_tablesample + TABLESAMPLE system_time (time)) ss; + +SELECT * FROM + (VALUES (0),(100000)) v(time), + LATERAL (SELECT COUNT(*) FROM test_tablesample + TABLESAMPLE system_time (time)) ss; + +CREATE VIEW vv AS + SELECT * FROM test_tablesample TABLESAMPLE system_time (20); -EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE system_time (100) REPEATABLE (10); +EXPLAIN (COSTS OFF) SELECT * FROM vv; --- done -DROP TABLE test_tablesample CASCADE; +DROP EXTENSION tsm_system_time; -- fail, view depends on extension diff --git a/contrib/tsm_system_time/tsm_system_time--1.0.sql b/contrib/tsm_system_time/tsm_system_time--1.0.sql index 1f390d6ed7aca..c59d2e84efdab 100644 --- a/contrib/tsm_system_time/tsm_system_time--1.0.sql +++ b/contrib/tsm_system_time/tsm_system_time--1.0.sql @@ -1,39 +1,9 @@ -/* src/test/modules/tablesample/tsm_system_time--1.0.sql */ +/* contrib/tsm_system_time/tsm_system_time--1.0.sql */ -- complain if script is sourced in psql, rather than via CREATE EXTENSION \echo Use "CREATE EXTENSION tsm_system_time" to load this file. 
\quit -CREATE FUNCTION tsm_system_time_init(internal, int4, int4) -RETURNS void -AS 'MODULE_PATHNAME' +CREATE FUNCTION system_time(internal) +RETURNS tsm_handler +AS 'MODULE_PATHNAME', 'tsm_system_time_handler' LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_time_nextblock(internal) -RETURNS int4 -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_time_nexttuple(internal, int4, int2) -RETURNS int2 -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_time_end(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_time_reset(internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -CREATE FUNCTION tsm_system_time_cost(internal, internal, internal, internal, internal, internal, internal) -RETURNS void -AS 'MODULE_PATHNAME' -LANGUAGE C STRICT; - -INSERT INTO pg_tablesample_method VALUES('system_time', false, true, - 'tsm_system_time_init', 'tsm_system_time_nextblock', - 'tsm_system_time_nexttuple', '-', 'tsm_system_time_end', - 'tsm_system_time_reset', 'tsm_system_time_cost'); diff --git a/contrib/tsm_system_time/tsm_system_time.c b/contrib/tsm_system_time/tsm_system_time.c index 7708fc0761748..83f1455c5fa24 100644 --- a/contrib/tsm_system_time/tsm_system_time.c +++ b/contrib/tsm_system_time/tsm_system_time.c @@ -1,286 +1,320 @@ /*------------------------------------------------------------------------- * * tsm_system_time.c - * interface routines for system_time tablesample method + * support routines for SYSTEM_TIME tablesample method * + * The desire here is to produce a random sample with as many rows as possible + * in no more than the specified amount of time. We use a block-sampling + * approach. To ensure that the whole relation will be visited if necessary, + * we start at a randomly chosen block and then advance with a stride that + * is randomly chosen but is relatively prime to the relation's nblocks. * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Because of the time dependence, this method is necessarily unrepeatable. + * However, we do what we can to reduce surprising behavior by selecting + * the sampling pattern just once per query, much as in tsm_system_rows. 
+ * + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * contrib/tsm_system_time_rowlimit/tsm_system_time.c + * contrib/tsm_system_time/tsm_system_time.c * *------------------------------------------------------------------------- */ #include "postgres.h" -#include "fmgr.h" +#ifdef _MSC_VER +#include /* for _isnan */ +#endif +#include -#include "access/tablesample.h" #include "access/relscan.h" +#include "access/tsmapi.h" +#include "catalog/pg_type.h" #include "miscadmin.h" -#include "nodes/execnodes.h" -#include "nodes/relation.h" #include "optimizer/clauses.h" -#include "storage/bufmgr.h" +#include "optimizer/cost.h" #include "utils/sampling.h" #include "utils/spccache.h" -#include "utils/timestamp.h" PG_MODULE_MAGIC; -/* - * State - */ +PG_FUNCTION_INFO_V1(tsm_system_time_handler); + + +/* Private state */ typedef struct { - SamplerRandomState randstate; uint32 seed; /* random seed */ - BlockNumber nblocks; /* number of block in relation */ - int32 time; /* time limit for sampling */ - TimestampTz start_time; /* start time of sampling */ - TimestampTz end_time; /* end time of sampling */ + double millis; /* time limit for sampling */ + instr_time start_time; /* scan start time */ OffsetNumber lt; /* last tuple returned from current block */ - BlockNumber step; /* step size */ + BlockNumber doneblocks; /* number of already-scanned blocks */ BlockNumber lb; /* last block visited */ - BlockNumber estblocks; /* estimated number of returned blocks - * (moving) */ - BlockNumber doneblocks; /* number of already returned blocks */ -} SystemSamplerData; - - -PG_FUNCTION_INFO_V1(tsm_system_time_init); -PG_FUNCTION_INFO_V1(tsm_system_time_nextblock); -PG_FUNCTION_INFO_V1(tsm_system_time_nexttuple); -PG_FUNCTION_INFO_V1(tsm_system_time_end); -PG_FUNCTION_INFO_V1(tsm_system_time_reset); -PG_FUNCTION_INFO_V1(tsm_system_time_cost); - + /* these three values are not changed during a rescan: */ + BlockNumber nblocks; /* number of blocks in relation */ + BlockNumber firstblock; /* first block to sample from */ + BlockNumber step; /* step size, or 0 if not set yet */ +} SystemTimeSamplerData; + +static void system_time_samplescangetsamplesize(PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples); +static void system_time_initsamplescan(SampleScanState *node, + int eflags); +static void system_time_beginsamplescan(SampleScanState *node, + Datum *params, + int nparams, + uint32 seed); +static BlockNumber system_time_nextsampleblock(SampleScanState *node); +static OffsetNumber system_time_nextsampletuple(SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset); static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate); + /* - * Initializes the state. + * Create a TsmRoutine descriptor for the SYSTEM_TIME method. */ Datum -tsm_system_time_init(PG_FUNCTION_ARGS) +tsm_system_time_handler(PG_FUNCTION_ARGS) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - uint32 seed = PG_GETARG_UINT32(1); - int32 time = PG_ARGISNULL(2) ? 
-1 : PG_GETARG_INT32(2); - HeapScanDesc scan = tsdesc->heapScan; - SystemSamplerData *sampler; - - if (time < 1) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("invalid time limit"), - errhint("Time limit must be positive integer value."))); + TsmRoutine *tsm = makeNode(TsmRoutine); - sampler = palloc0(sizeof(SystemSamplerData)); + tsm->parameterTypes = list_make1_oid(FLOAT8OID); - /* Remember initial values for reinit */ - sampler->seed = seed; - sampler->nblocks = scan->rs_nblocks; - sampler->lt = InvalidOffsetNumber; - sampler->estblocks = 2; - sampler->doneblocks = 0; - sampler->time = time; - sampler->start_time = GetCurrentTimestamp(); - sampler->end_time = TimestampTzPlusMilliseconds(sampler->start_time, - sampler->time); + /* See notes at head of file */ + tsm->repeatable_across_queries = false; + tsm->repeatable_across_scans = false; - sampler_random_init_state(sampler->seed, sampler->randstate); + tsm->SampleScanGetSampleSize = system_time_samplescangetsamplesize; + tsm->InitSampleScan = system_time_initsamplescan; + tsm->BeginSampleScan = system_time_beginsamplescan; + tsm->NextSampleBlock = system_time_nextsampleblock; + tsm->NextSampleTuple = system_time_nextsampletuple; + tsm->EndSampleScan = NULL; - /* Find relative prime as step size for linear probing. */ - sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate); - - /* - * Randomize start position so that blocks close to step size don't have - * higher probability of being chosen on very short scan. - */ - sampler->lb = sampler_random_fract(sampler->randstate) * (sampler->nblocks / sampler->step); - - tsdesc->tsmdata = (void *) sampler; - - PG_RETURN_VOID(); + PG_RETURN_POINTER(tsm); } /* - * Get next block number or InvalidBlockNumber when we're done. - * - * Uses linear probing algorithm for picking next block. + * Sample size estimation. */ -Datum -tsm_system_time_nextblock(PG_FUNCTION_ARGS) +static void +system_time_samplescangetsamplesize(PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; - - sampler->lb = (sampler->lb + sampler->step) % sampler->nblocks; - sampler->doneblocks++; + Node *limitnode; + double millis; + double spc_random_page_cost; + double npages; + double ntuples; - /* All blocks have been read, we're done */ - if (sampler->doneblocks > sampler->nblocks) - PG_RETURN_UINT32(InvalidBlockNumber); + /* Try to extract an estimate for the limit time spec */ + limitnode = (Node *) linitial(paramexprs); + limitnode = estimate_expression_value(root, limitnode); - /* - * Update the estimations for time limit at least 10 times per estimated - * number of returned blocks to handle variations in block read speed. 
- */ - if (sampler->doneblocks % Max(sampler->estblocks / 10, 1) == 0) + if (IsA(limitnode, Const) && + !((Const *) limitnode)->constisnull) + { + millis = DatumGetFloat8(((Const *) limitnode)->constvalue); + if (millis < 0 || isnan(millis)) + { + /* Default millis if the value is bogus */ + millis = 1000; + } + } + else { - TimestampTz now = GetCurrentTimestamp(); - long secs; - int usecs; - int usecs_remaining; - int time_per_block; + /* Default millis if we didn't obtain a non-null Const */ + millis = 1000; + } - TimestampDifference(sampler->start_time, now, &secs, &usecs); - usecs += (int) secs *1000000; + /* Get the planner's idea of cost per page read */ + get_tablespace_page_costs(baserel->reltablespace, + &spc_random_page_cost, + NULL); - time_per_block = usecs / sampler->doneblocks; + /* + * Estimate the number of pages we can read by assuming that the cost + * figure is expressed in milliseconds. This is completely, unmistakably + * bogus, but we have to do something to produce an estimate and there's + * no better answer. + */ + if (spc_random_page_cost > 0) + npages = millis / spc_random_page_cost; + else + npages = millis; /* even more bogus, but whatcha gonna do? */ - /* No time left, end. */ - TimestampDifference(now, sampler->end_time, &secs, &usecs); - if (secs <= 0 && usecs <= 0) - PG_RETURN_UINT32(InvalidBlockNumber); + /* Clamp to sane value */ + npages = clamp_row_est(Min((double) baserel->pages, npages)); - /* Remaining microseconds */ - usecs_remaining = usecs + (int) secs *1000000; + if (baserel->tuples > 0 && baserel->pages > 0) + { + /* Estimate number of tuples returned based on tuple density */ + double density = baserel->tuples / (double) baserel->pages; - /* Recalculate estimated returned number of blocks */ - if (time_per_block < usecs_remaining && time_per_block > 0) - sampler->estblocks = sampler->time * time_per_block; + ntuples = npages * density; } - - PG_RETURN_UINT32(sampler->lb); -} - -/* - * Get next tuple offset in current block or InvalidOffsetNumber if we are done - * with this block. - */ -Datum -tsm_system_time_nexttuple(PG_FUNCTION_ARGS) -{ - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - OffsetNumber maxoffset = PG_GETARG_UINT16(2); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; - OffsetNumber tupoffset = sampler->lt; - - if (tupoffset == InvalidOffsetNumber) - tupoffset = FirstOffsetNumber; else - tupoffset++; - - if (tupoffset > maxoffset) - tupoffset = InvalidOffsetNumber; + { + /* For lack of data, assume one tuple per page */ + ntuples = npages; + } - sampler->lt = tupoffset; + /* Clamp to the estimated relation size */ + ntuples = clamp_row_est(Min(baserel->tuples, ntuples)); - PG_RETURN_UINT16(tupoffset); + *pages = npages; + *tuples = ntuples; } /* - * Cleanup method. + * Initialize during executor setup. */ -Datum -tsm_system_time_end(PG_FUNCTION_ARGS) +static void +system_time_initsamplescan(SampleScanState *node, int eflags) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - - pfree(tsdesc->tsmdata); - - PG_RETURN_VOID(); + node->tsm_state = palloc0(sizeof(SystemTimeSamplerData)); + /* Note the above leaves tsm_state->step equal to zero */ } /* - * Reset state (called by ReScan). + * Examine parameters and prepare for a sample scan. 
*/ -Datum -tsm_system_time_reset(PG_FUNCTION_ARGS) +static void +system_time_beginsamplescan(SampleScanState *node, + Datum *params, + int nparams, + uint32 seed) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; + SystemTimeSamplerData *sampler = (SystemTimeSamplerData *) node->tsm_state; + double millis = DatumGetFloat8(params[0]); + + if (millis < 0 || isnan(millis)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT), + errmsg("sample collection time must not be negative"))); + sampler->seed = seed; + sampler->millis = millis; sampler->lt = InvalidOffsetNumber; - sampler->start_time = GetCurrentTimestamp(); - sampler->end_time = TimestampTzPlusMilliseconds(sampler->start_time, - sampler->time); - sampler->estblocks = 2; sampler->doneblocks = 0; - - sampler_random_init_state(sampler->seed, sampler->randstate); - sampler->step = random_relative_prime(sampler->nblocks, sampler->randstate); - sampler->lb = sampler_random_fract(sampler->randstate) * (sampler->nblocks / sampler->step); - - PG_RETURN_VOID(); + /* start_time, lb will be initialized during first NextSampleBlock call */ + /* we intentionally do not change nblocks/firstblock/step here */ } /* - * Costing function. + * Select next block to sample. + * + * Uses linear probing algorithm for picking next block. */ -Datum -tsm_system_time_cost(PG_FUNCTION_ARGS) +static BlockNumber +system_time_nextsampleblock(SampleScanState *node) { - PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0); - Path *path = (Path *) PG_GETARG_POINTER(1); - RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2); - List *args = (List *) PG_GETARG_POINTER(3); - BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4); - double *tuples = (double *) PG_GETARG_POINTER(5); - Node *limitnode; - int32 time; - BlockNumber relpages; - double reltuples; - double density; - double spc_random_page_cost; - - limitnode = linitial(args); - limitnode = estimate_expression_value(root, limitnode); - - if (IsA(limitnode, RelabelType)) - limitnode = (Node *) ((RelabelType *) limitnode)->arg; + SystemTimeSamplerData *sampler = (SystemTimeSamplerData *) node->tsm_state; + HeapScanDesc scan = node->ss.ss_currentScanDesc; + instr_time cur_time; - if (IsA(limitnode, Const)) - time = DatumGetInt32(((Const *) limitnode)->constvalue); - else + /* First call within scan? */ + if (sampler->doneblocks == 0) { - /* Default time (1s) if the estimation didn't return Const. */ - time = 1000; + /* First scan within query? 
*/ + if (sampler->step == 0) + { + /* Initialize now that we have scan descriptor */ + SamplerRandomState randstate; + + /* If relation is empty, there's nothing to scan */ + if (scan->rs_nblocks == 0) + return InvalidBlockNumber; + + /* We only need an RNG during this setup step */ + sampler_random_init_state(sampler->seed, randstate); + + /* Compute nblocks/firstblock/step only once per query */ + sampler->nblocks = scan->rs_nblocks; + + /* Choose random starting block within the relation */ + /* (Actually this is the predecessor of the first block visited) */ + sampler->firstblock = sampler_random_fract(randstate) * + sampler->nblocks; + + /* Find relative prime as step size for linear probing */ + sampler->step = random_relative_prime(sampler->nblocks, randstate); + } + + /* Reinitialize lb and start_time */ + sampler->lb = sampler->firstblock; + INSTR_TIME_SET_CURRENT(sampler->start_time); } - relpages = baserel->pages; - reltuples = baserel->tuples; + /* If we've read all blocks in relation, we're done */ + if (++sampler->doneblocks > sampler->nblocks) + return InvalidBlockNumber; - /* estimate the tuple density */ - if (relpages > 0) - density = reltuples / (double) relpages; - else - density = (BLCKSZ - SizeOfPageHeaderData) / baserel->width; + /* If we've used up all the allotted time, we're done */ + INSTR_TIME_SET_CURRENT(cur_time); + INSTR_TIME_SUBTRACT(cur_time, sampler->start_time); + if (INSTR_TIME_GET_MILLISEC(cur_time) >= sampler->millis) + return InvalidBlockNumber; /* - * We equal random page cost value to number of ms it takes to read the - * random page here which is far from accurate but we don't have anything - * better to base our predicted page reads. + * It's probably impossible for scan->rs_nblocks to decrease between scans + * within a query; but just in case, loop until we select a block number + * less than scan->rs_nblocks. We don't care if scan->rs_nblocks has + * increased since the first scan. */ - get_tablespace_page_costs(baserel->reltablespace, - &spc_random_page_cost, - NULL); + do + { + /* Advance lb, using uint64 arithmetic to forestall overflow */ + sampler->lb = ((uint64) sampler->lb + sampler->step) % sampler->nblocks; + } while (sampler->lb >= scan->rs_nblocks); - /* - * Assumption here is that we'll never read less than 1% of table pages, - * this is here mainly because it is much less bad to overestimate than - * underestimate and using just spc_random_page_cost will probably lead to - * underestimations in general. - */ - *pages = Min(baserel->pages, Max(time / spc_random_page_cost, baserel->pages / 100)); - *tuples = rint(density * (double) *pages * path->rows / baserel->tuples); - path->rows = *tuples; + return sampler->lb; +} + +/* + * Select next sampled tuple in current block. + * + * In block sampling, we just want to sample all the tuples in each selected + * block. + * + * When we reach end of the block, return InvalidOffsetNumber which tells + * SampleScan to go to next block. + */ +static OffsetNumber +system_time_nextsampletuple(SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset) +{ + SystemTimeSamplerData *sampler = (SystemTimeSamplerData *) node->tsm_state; + OffsetNumber tupoffset = sampler->lt; + + /* Advance to next possible offset on page */ + if (tupoffset == InvalidOffsetNumber) + tupoffset = FirstOffsetNumber; + else + tupoffset++; + + /* Done? 
*/ + if (tupoffset > maxoffset) + tupoffset = InvalidOffsetNumber; + + sampler->lt = tupoffset; - PG_RETURN_VOID(); + return tupoffset; } +/* + * Compute greatest common divisor of two uint32's. + */ static uint32 gcd(uint32 a, uint32 b) { @@ -296,22 +330,29 @@ gcd(uint32 a, uint32 b) return b; } +/* + * Pick a random value less than and relatively prime to n, if possible + * (else return 1). + */ static uint32 random_relative_prime(uint32 n, SamplerRandomState randstate) { - /* Pick random starting number, with some limits on what it can be. */ - uint32 r = (uint32) sampler_random_fract(randstate) * n / 2 + n / 4, - t; + uint32 r; + + /* Safety check to avoid infinite loop or zero result for small n. */ + if (n <= 1) + return 1; /* * This should only take 2 or 3 iterations as the probability of 2 numbers - * being relatively prime is ~61%. + * being relatively prime is ~61%; but just in case, we'll include a + * CHECK_FOR_INTERRUPTS in the loop. */ - while ((t = gcd(r, n)) > 1) + do { CHECK_FOR_INTERRUPTS(); - r /= t; - } + r = (uint32) (sampler_random_fract(randstate) * n); + } while (r == 0 || gcd(r, n) > 1); return r; } diff --git a/contrib/tsm_system_time/tsm_system_time.control b/contrib/tsm_system_time/tsm_system_time.control index ebcee19d23a0d..c247987c66d14 100644 --- a/contrib/tsm_system_time/tsm_system_time.control +++ b/contrib/tsm_system_time/tsm_system_time.control @@ -1,5 +1,5 @@ # tsm_system_time extension -comment = 'SYSTEM TABLESAMPLE method which accepts time in milliseconds as a limit' +comment = 'TABLESAMPLE method which accepts time in milliseconds as a limit' default_version = '1.0' module_pathname = '$libdir/tsm_system_time' relocatable = true diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml index 2c2190f13d373..9096ee5d517de 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -278,11 +278,6 @@ planner statistics - - pg_tablesample_method - table sampling methods - - pg_tablespace tablespaces within this database cluster @@ -6132,121 +6127,6 @@ - - <structname>pg_tabesample_method</structname> - - - pg_am - - - - The catalog pg_tablesample_method stores - information about table sampling methods which can be used in - TABLESAMPLE clause of a SELECT - statement. - - -
- <structname>pg_tablesample_method</> Columns - - - - - Name - Type - References - Description - - - - - - oid - oid - - Row identifier (hidden attribute; must be explicitly selected) - - - - tsmname - name - - Name of the sampling method - - - - tsmseqscan - bool - - If true, the sampling method scans the whole table sequentially. - - - - - tsmpagemode - bool - - If true, the sampling method always reads the pages completely. - - - - - tsminit - regproc - pg_proc.oid - Initialize the sampling scan function - - - - tsmnextblock - regproc - pg_proc.oid - Get next block number function - - - - tsmnexttuple - regproc - pg_proc.oid - Get next tuple offset function - - - - tsmexaminetuple - regproc - pg_proc.oid - Function which examines the tuple contents and decides if to - return it, or zero if none - - - - tsmend - regproc - pg_proc.oid - End the sampling scan function - - - - tsmreset - regproc - pg_proc.oid - Restart the state of sampling scan function - - - - tsmcost - regproc - pg_proc.oid - Costing function - - - - -
- - - - <structname>pg_tablespace</structname> diff --git a/doc/src/sgml/datatype.sgml b/doc/src/sgml/datatype.sgml index 9d5ce953f1728..a56f327fa3cd4 100644 --- a/doc/src/sgml/datatype.sgml +++ b/doc/src/sgml/datatype.sgml @@ -4321,7 +4321,7 @@ SET xmloption TO { DOCUMENT | CONTENT }; an object identifier. There are also several alias types for oid: regproc, regprocedure, regoper, regoperator, regclass, - regtype, regrole, regnamespace, + regtype, regrole, regnamespace, regconfig, and regdictionary. shows an overview. @@ -4597,6 +4597,10 @@ SELECT * FROM pg_attribute fdw_handler + + tsm_handler + + cstring @@ -4691,6 +4695,11 @@ SELECT * FROM pg_attribute A foreign-data wrapper handler is declared to return fdw_handler. + + tsm_handler + A tablesample method handler is declared to return tsm_handler. + + record Identifies a function returning an unspecified row type. diff --git a/doc/src/sgml/postgres.sgml b/doc/src/sgml/postgres.sgml index d1703e9c01ff8..7e82cdc3b124b 100644 --- a/doc/src/sgml/postgres.sgml +++ b/doc/src/sgml/postgres.sgml @@ -243,6 +243,7 @@ &nls; &plhandler; &fdwhandler; + &tablesample-method; &custom-scan; &geqo; &indexam; @@ -250,7 +251,6 @@ &spgist; &gin; &brin; - &tablesample-method; &storage; &bki; &planstats; diff --git a/doc/src/sgml/ref/select.sgml b/doc/src/sgml/ref/select.sgml index 632d7935cb41f..44810f4909c06 100644 --- a/doc/src/sgml/ref/select.sgml +++ b/doc/src/sgml/ref/select.sgml @@ -49,7 +49,8 @@ SELECT [ ALL | DISTINCT [ ON ( expressionwhere from_item can be one of: - [ ONLY ] table_name [ * ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ] [ TABLESAMPLE sampling_method ( argument [, ...] ) [ REPEATABLE ( seed ) ] ] + [ ONLY ] table_name [ * ] [ [ AS ] alias [ ( column_alias [, ...] ) ] ] + [ TABLESAMPLE sampling_method ( argument [, ...] ) [ REPEATABLE ( seed ) ] ] [ LATERAL ] ( select ) [ AS ] alias [ ( column_alias [, ...] ) ] with_query_name [ [ AS ] alias [ ( column_alias [, ...] ) ] ] [ LATERAL ] function_name ( [ argument [, ...] ] ) @@ -325,50 +326,6 @@ TABLE [ ONLY ] table_name [ * ] - - TABLESAMPLE sampling_method ( argument [, ...] ) [ REPEATABLE ( seed ) ] - - - Table sample clause after - table_name indicates that - a sampling_method should - be used to retrieve subset of rows in the table. - The sampling_method can be - any sampling method installed in the database. There are currently two - sampling methods available in the standard - PostgreSQL distribution: - - - SYSTEM - - - BERNOULLI - - - Both of these sampling methods currently accept only single argument - which is the percent (floating point from 0 to 100) of the rows to - be returned. - The SYSTEM sampling method does block level - sampling with each block having the same chance of being selected and - returns all rows from each selected block. - The BERNOULLI scans whole table and returns - individual rows with equal probability. Additional sampling methods - may be installed in the database via extensions. - - - The optional parameter REPEATABLE uses the seed - parameter, which can be a number or expression producing a number, as - a random seed for sampling. Note that subsequent commands may return - different results even if same REPEATABLE clause was - specified. This happens because DML statements and - maintenance operations such as VACUUM may affect physical - distribution of data. The setseed() function will not - affect the sampling result when the REPEATABLE - parameter is used. 
- - - - alias @@ -387,6 +344,61 @@ TABLE [ ONLY ] table_name [ * ] + + TABLESAMPLE sampling_method ( argument [, ...] ) [ REPEATABLE ( seed ) ] + + + A TABLESAMPLE clause after + a table_name indicates that the + specified sampling_method + should be used to retrieve a subset of the rows in that table. + This sampling precedes the application of any other filters such + as WHERE clauses. + The standard PostgreSQL distribution + includes two sampling methods, BERNOULLI + and SYSTEM, and other sampling methods can be + installed in the database via extensions. + + + + The BERNOULLI and SYSTEM sampling methods + each accept a single argument + which is the fraction of the table to sample, expressed as a + percentage between 0 and 100. This argument can be + any real-valued expression. (Other sampling methods might + accept more or different arguments.) These two methods each return + a randomly-chosen sample of the table that will contain + approximately the specified percentage of the table's rows. + The BERNOULLI method scans the whole table and + selects or ignores individual rows independently with the specified + probability. + The SYSTEM method does block-level sampling with + each block having the specified chance of being selected; all rows + in each selected block are returned. + The SYSTEM method is significantly faster than + the BERNOULLI method when small sampling + percentages are specified, but it may return a less-random sample of + the table as a result of clustering effects. + + + + The optional REPEATABLE clause specifies + a seed number or expression to use + for generating random numbers within the sampling method. The seed + value can be any non-null floating-point value. Two queries that + specify the same seed and argument + values will select the same sample of the table, if the table has + not been changed meanwhile. But different seed values will usually + produce different samples. + If REPEATABLE is not given then a new random + sample is selected for each query. + Note that some add-on sampling methods do not + accept REPEATABLE, and will always produce new + samples on each use. + + + + select @@ -1870,6 +1882,16 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; + + <literal>TABLESAMPLE</literal> Clause Restrictions + + + The TABLESAMPLE clause is currently accepted only on + regular tables and materialized views. According to the SQL standard + it should be possible to apply it to any FROM item. + + + Function Calls in <literal>FROM</literal> @@ -1993,19 +2015,5 @@ SELECT distributors.* WHERE distributors.name = 'Westward'; - - <literal>TABLESAMPLE</literal> clause - - - The TABLESAMPLE clause is currently accepted only on physical - relations and materialized views. - - - - Additional modules allow you to install custom sampling methods and use - them instead of the SQL standard methods. - - - diff --git a/doc/src/sgml/tablesample-method.sgml b/doc/src/sgml/tablesample-method.sgml index 48eb7fe84ea93..22f8bbe19aa4b 100644 --- a/doc/src/sgml/tablesample-method.sgml +++ b/doc/src/sgml/tablesample-method.sgml @@ -1,139 +1,301 @@ - Writing A TABLESAMPLE Sampling Method + Writing A Table Sampling Method - tablesample method + table sampling method + + + + TABLESAMPLE method - The TABLESAMPLE clause implementation in - PostgreSQL supports creating a custom sampling methods. - These methods control what sample of the table will be returned when the - TABLESAMPLE clause is used. 
+ PostgreSQL's implementation of the TABLESAMPLE + clause supports custom table sampling methods, in addition to + the BERNOULLI and SYSTEM methods that are required + by the SQL standard. The sampling method determines which rows of the + table will be selected when the TABLESAMPLE clause is used. - - Tablesample Method Functions + + At the SQL level, a table sampling method is represented by a single SQL + function, typically implemented in C, having the signature + +method_name(internal) RETURNS tsm_handler + + The name of the function is the same method name appearing in the + TABLESAMPLE clause. The internal argument is a dummy + (always having value zero) that simply serves to prevent this function from + being called directly from a SQL command. + The result of the function must be a palloc'd struct of + type TsmRoutine, which contains pointers to support functions for + the sampling method. These support functions are plain C functions and + are not visible or callable at the SQL level. The support functions are + described in . + + + + In addition to function pointers, the TsmRoutine struct must + provide these additional fields: + + + + + List *parameterTypes + + + This is an OID list containing the data type OIDs of the parameter(s) + that will be accepted by the TABLESAMPLE clause when this + sampling method is used. For example, for the built-in methods, this + list contains a single item with value FLOAT4OID, which + represents the sampling percentage. Custom sampling methods can have + more or different parameters. + + + + + + bool repeatable_across_queries + + + If true, the sampling method can deliver identical samples + across successive queries, if the same parameters + and REPEATABLE seed value are supplied each time and the + table contents have not changed. When this is false, + the REPEATABLE clause is not accepted for use with the + sampling method. + + + + + + bool repeatable_across_scans + + + If true, the sampling method can deliver identical samples + across successive scans in the same query (assuming unchanging + parameters, seed value, and snapshot). + When this is false, the planner will not select plans that + would require scanning the sampled table more than once, since that + might result in inconsistent query output. + + + + + + + The TsmRoutine struct type is declared + in src/include/access/tsmapi.h, which see for additional + details. + + + + The table sampling methods included in the standard distribution are good + references when trying to write your own. Look into + the src/backend/access/tablesample subdirectory of the source + tree for the built-in sampling methods, and into the contrib + subdirectory for add-on methods. + + + + Sampling Method Support Functions - The tablesample method must provide following set of functions: + The TSM handler function returns a palloc'd TsmRoutine struct + containing pointers to the support functions described below. Most of + the functions are required, but some are optional, and those pointers can + be NULL. void -tsm_init (TableSampleDesc *desc, - uint32 seed, ...); +SampleScanGetSampleSize (PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples); - Initialize the tablesample scan. The function is called at the beginning - of each relation scan. + + This function is called during planning. It must estimate the number of + relation pages that will be read during a sample scan, and the number of + tuples that will be selected by the scan. 
(For example, these might be + determined by estimating the sampling fraction, and then multiplying + the baserel->pages and baserel->tuples + numbers by that, being sure to round the results to integral values.) + The paramexprs list holds the expression(s) that are + parameters to the TABLESAMPLE clause. It is recommended to + use estimate_expression_value() to try to reduce these + expressions to constants, if their values are needed for estimation + purposes; but the function must provide size estimates even if they cannot + be reduced, and it should not fail even if the values appear invalid + (remember that they're only estimates of what the run-time values will be). + The pages and tuples parameters are outputs. + - Note that the first two parameters are required but you can specify - additional parameters which then will be used by the TABLESAMPLE - clause to determine the required user input in the query itself. - This means that if your function will specify additional float4 parameter - named percent, the user will have to call the tablesample method with - expression which evaluates (or can be coerced) to float4. - For example this definition: -tsm_init (TableSampleDesc *desc, - uint32 seed, float4 pct); - -Will lead to SQL call like this: - -... TABLESAMPLE yourmethod(0.5) ... +void +InitSampleScan (SampleScanState *node, + int eflags); + + Initialize for execution of a SampleScan plan node. + This is called during executor startup. + It should perform any initialization needed before processing can start. + The SampleScanState node has already been created, but + its tsm_state field is NULL. + The InitSampleScan function can palloc whatever internal + state data is needed by the sampling method, and store a pointer to + it in node->tsm_state. + Information about the table to scan is accessible through other fields + of the SampleScanState node (but note that the + node->ss.ss_currentScanDesc scan descriptor is not set + up yet). + eflags contains flag bits describing the executor's + operating mode for this plan node. - -BlockNumber -tsm_nextblock (TableSampleDesc *desc); - - Returns the block number of next page to be scanned. InvalidBlockNumber - should be returned if the sampling has reached end of the relation. + When (eflags & EXEC_FLAG_EXPLAIN_ONLY) is true, + the scan will not actually be performed, so this function should only do + the minimum required to make the node state valid for EXPLAIN + and EndSampleScan. - -OffsetNumber -tsm_nexttuple (TableSampleDesc *desc, BlockNumber blockno, - OffsetNumber maxoffset); - - Return next tuple offset for the current page. InvalidOffsetNumber should - be returned if the sampling has reached end of the page. + This function can be omitted (set the pointer to NULL), in which case + BeginSampleScan must perform all initialization needed + by the sampling method. void -tsm_end (TableSampleDesc *desc); +BeginSampleScan (SampleScanState *node, + Datum *params, + int nparams, + uint32 seed); - The scan has finished, cleanup any left over state. + + Begin execution of a sampling scan. + This is called just before the first attempt to fetch a tuple, and + may be called again if the scan needs to be restarted. + Information about the table to scan is accessible through fields + of the SampleScanState node (but note that the + node->ss.ss_currentScanDesc scan descriptor is not set + up yet). + The params array, of length nparams, contains the + values of the parameters supplied in the TABLESAMPLE clause. 
+ These will have the number and types specified in the sampling + method's parameterTypes list, and have been checked + to not be null. + seed contains a seed to use for any random numbers generated + within the sampling method; it is either a hash derived from the + REPEATABLE value if one was given, or the result + of random() if not. - -void -tsm_reset (TableSampleDesc *desc); - - The scan needs to rescan the relation again, reset any tablesample method - state. + This function may adjust the fields node->use_bulkread + and node->use_pagemode. + If node->use_bulkread is true, which it is by + default, the scan will use a buffer access strategy that encourages + recycling buffers after use. It might be reasonable to set this + to false if the scan will visit only a small fraction of the + table's pages. + If node->use_pagemode is true, which it is by + default, the scan will perform visibility checking in a single pass for + all tuples on each visited page. It might be reasonable to set this + to false if the scan will select only a small fraction of the + tuples on each visited page. That will result in fewer tuple visibility + checks being performed, though each one will be more expensive because it + will require more locking. + + + + If the sampling method is + marked repeatable_across_scans, it must be able to + select the same set of tuples during a rescan as it did originally, that is + a fresh call of BeginSampleScan must lead to selecting the + same tuples as before (if the TABLESAMPLE parameters + and seed don't change). -void -tsm_cost (PlannerInfo *root, Path *path, RelOptInfo *baserel, - List *args, BlockNumber *pages, double *tuples); +BlockNumber +NextSampleBlock (SampleScanState *node); - This function is used by optimizer to decide best plan and is also used - for output of EXPLAIN. + + Returns the block number of the next page to be scanned, or + InvalidBlockNumber if no pages remain to be scanned. - There is one more function which tablesampling method can implement in order - to gain more fine grained control over sampling. This function is optional: + This function can be omitted (set the pointer to NULL), in which case + the core code will perform a sequential scan of the entire relation. + Such a scan can use synchronized scanning, so that the sampling method + cannot assume that the relation pages are visited in the same order on + each scan. -bool -tsm_examinetuple (TableSampleDesc *desc, BlockNumber blockno, - HeapTuple tuple, bool visible); +OffsetNumber +NextSampleTuple (SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset); - Function that enables the sampling method to examine contents of the tuple - (for example to collect some internal statistics). The return value of this - function is used to determine if the tuple should be returned to client. - Note that this function will receive even invisible tuples but it is not - allowed to return true for such tuple (if it does, - PostgreSQL will raise an error). + + Returns the offset number of the next tuple to be sampled on the + specified page, or InvalidOffsetNumber if no tuples remain to + be sampled. maxoffset is the largest offset number in use + on the page. + + + NextSampleTuple is not explicitly told which of the offset + numbers in the range 1 .. maxoffset actually contain valid + tuples. This is not normally a problem since the core code ignores + requests to sample missing or invisible tuples; that should not result in + any bias in the sample. 
However, if necessary, the function can + examine node->ss.ss_currentScanDesc->rs_vistuples[] + to identify which tuples are valid and visible. (This + requires node->use_pagemode to be true.) + + + + + + NextSampleTuple must not assume + that blockno is the same page number returned by the most + recent NextSampleBlock call. It was returned by some + previous NextSampleBlock call, but the core code is allowed + to call NextSampleBlock in advance of actually scanning + pages, so as to support prefetching. It is OK to assume that once + sampling of a given page begins, successive NextSampleTuple + calls all refer to the same page until InvalidOffsetNumber is + returned. + + + - As you can see most of the tablesample method interfaces get the - TableSampleDesc as a first parameter. This structure holds - state of the current scan and also provides storage for the tablesample - method's state. It is defined as following: -typedef struct TableSampleDesc { - HeapScanDesc heapScan; - TupleDesc tupDesc; - - void *tsmdata; -} TableSampleDesc; +void +EndSampleScan (SampleScanState *node); - Where heapScan is the descriptor of the physical table scan. - It's possible to get table size info from it. The tupDesc - represents the tuple descriptor of the tuples returned by the scan and passed - to the tsm_examinetuple() interface. The tsmdata - can be used by tablesample method itself to store any state info it might - need during the scan. If used by the method, it should be pfreed - in tsm_end() function. + + End the scan and release resources. It is normally not important + to release palloc'd memory, but any externally-visible resources + should be cleaned up. + This function can be omitted (set the pointer to NULL) in the common + case where no such resources exist. + diff --git a/doc/src/sgml/tsm-system-rows.sgml b/doc/src/sgml/tsm-system-rows.sgml index 0c2f1779c9ad8..93aa5366649bd 100644 --- a/doc/src/sgml/tsm-system-rows.sgml +++ b/doc/src/sgml/tsm-system-rows.sgml @@ -8,24 +8,37 @@ - The tsm_system_rows module provides the tablesample method - SYSTEM_ROWS, which can be used inside the - TABLESAMPLE clause of a SELECT. + The tsm_system_rows module provides the table sampling method + SYSTEM_ROWS, which can be used in + the TABLESAMPLE clause of a + command. - This tablesample method uses a linear probing algorithm to read sample - of a table and uses actual number of rows as limit (unlike the - SYSTEM tablesample method which limits by percentage - of a table). + This table sampling method accepts a single integer argument that is the + maximum number of rows to read. The resulting sample will always contain + exactly that many rows, unless the table does not contain enough rows, in + which case the whole table is selected. + + + + Like the built-in SYSTEM sampling + method, SYSTEM_ROWS performs block-level sampling, so + that the sample is not completely random but may be subject to clustering + effects, especially if only a small number of rows are requested. + + + + SYSTEM_ROWS does not support + the REPEATABLE clause. Examples - Here is an example of selecting sample of a table with - SYSTEM_ROWS. First install the extension: + Here is an example of selecting a sample of a table with + SYSTEM_ROWS. 
First install the extension: @@ -33,8 +46,7 @@ CREATE EXTENSION tsm_system_rows; - Then you can use it in SELECT command same way as other - tablesample methods: + Then you can use it in a SELECT command, for instance: SELECT * FROM my_table TABLESAMPLE SYSTEM_ROWS(100); @@ -42,8 +54,9 @@ SELECT * FROM my_table TABLESAMPLE SYSTEM_ROWS(100); - The above command will return a sample of 100 rows from the table my_table - (less if the table does not have 100 visible rows). + This command will return a sample of 100 rows from the + table my_table (unless the table does not have 100 + visible rows, in which case all its rows are returned). diff --git a/doc/src/sgml/tsm-system-time.sgml b/doc/src/sgml/tsm-system-time.sgml index 2343ab16d4f2b..3f8ff1a026f2e 100644 --- a/doc/src/sgml/tsm-system-time.sgml +++ b/doc/src/sgml/tsm-system-time.sgml @@ -8,25 +8,39 @@ - The tsm_system_time module provides the tablesample method - SYSTEM_TIME, which can be used inside the - TABLESAMPLE clause of a SELECT. + The tsm_system_time module provides the table sampling method + SYSTEM_TIME, which can be used in + the TABLESAMPLE clause of a + command. - This tablesample method uses a linear probing algorithm to read sample - of a table and uses time in milliseconds as limit (unlike the - SYSTEM tablesample method which limits by percentage - of a table). This gives you some control over the length of execution - of your query. + This table sampling method accepts a single floating-point argument that + is the maximum number of milliseconds to spend reading the table. This + gives you direct control over how long the query takes, at the price that + the size of the sample becomes hard to predict. The resulting sample will + contain as many rows as could be read in the specified time, unless the + whole table has been read first. + + + + Like the built-in SYSTEM sampling + method, SYSTEM_TIME performs block-level sampling, so + that the sample is not completely random but may be subject to clustering + effects, especially if only a small number of rows are selected. + + + + SYSTEM_TIME does not support + the REPEATABLE clause. Examples - Here is an example of selecting sample of a table with - SYSTEM_TIME. First install the extension: + Here is an example of selecting a sample of a table with + SYSTEM_TIME. First install the extension: @@ -34,8 +48,7 @@ CREATE EXTENSION tsm_system_time; - Then you can use it in a SELECT command the same way as - other tablesample methods: + Then you can use it in a SELECT command, for instance: SELECT * FROM my_table TABLESAMPLE SYSTEM_TIME(1000); @@ -43,8 +56,9 @@ SELECT * FROM my_table TABLESAMPLE SYSTEM_TIME(1000); - The above command will return as large a sample of my_table as it can read in - 1 second (or less if it reads whole table faster). + This command will return as large a sample of my_table as + it can read in 1 second (1000 milliseconds). Of course, if the whole + table can be read in under 1 second, all its rows will be returned. 
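
As a concrete illustration of the sampling-method API documented above, here is a minimal sketch of a handler for a hypothetical EVERY_NTH method that returns every step'th line pointer on each visited page. This is an editorial example, not part of the patch series: the names tsm_every_nth_handler, NthSamplerData, and the crude 10% row estimate are illustrative assumptions, while the TsmRoutine fields, callback signatures, and SampleScanState fields are taken from the documentation and from the bernoulli.c and system.c implementations shown in this patch.

#include "postgres.h"

#include "access/tsmapi.h"
#include "catalog/pg_type.h"
#include "fmgr.h"
#include "optimizer/cost.h"

PG_MODULE_MAGIC;

/* Private per-scan state, kept in node->tsm_state */
typedef struct
{
	int32		step;			/* return every step'th line pointer */
	OffsetNumber lt;			/* last offset returned from current block */
} NthSamplerData;

/*
 * Crude planner estimate: every page is visited; guess 10% of the tuples.
 * A real method would inspect paramexprs, as bernoulli.c does above.
 */
static void
nth_samplescangetsamplesize(PlannerInfo *root, RelOptInfo *baserel,
							List *paramexprs, BlockNumber *pages,
							double *tuples)
{
	*pages = baserel->pages;
	*tuples = clamp_row_est(baserel->tuples * 0.1);
}

/* Allocate private state during executor setup */
static void
nth_initsamplescan(SampleScanState *node, int eflags)
{
	node->tsm_state = palloc0(sizeof(NthSamplerData));
}

/* Check the parameter and reset per-scan state */
static void
nth_beginsamplescan(SampleScanState *node, Datum *params, int nparams,
					uint32 seed)
{
	NthSamplerData *sampler = (NthSamplerData *) node->tsm_state;
	int32		step = DatumGetInt32(params[0]);

	if (step <= 0)
		ereport(ERROR,
				(errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT),
				 errmsg("step size must be positive")));

	sampler->step = step;
	sampler->lt = InvalidOffsetNumber;

	/* we select few tuples per page, so per-tuple visibility checks suffice */
	node->use_pagemode = false;
}

/*
 * Return offsets 1, 1+step, 1+2*step, ... on every visited page.
 * The core code silently skips offsets that are missing or invisible.
 */
static OffsetNumber
nth_nextsampletuple(SampleScanState *node, BlockNumber blockno,
					OffsetNumber maxoffset)
{
	NthSamplerData *sampler = (NthSamplerData *) node->tsm_state;
	unsigned int next;

	if (sampler->lt == InvalidOffsetNumber)
		next = FirstOffsetNumber;
	else
		next = (unsigned int) sampler->lt + (unsigned int) sampler->step;

	if (next > maxoffset)
		sampler->lt = InvalidOffsetNumber;	/* tell core to fetch next page */
	else
		sampler->lt = (OffsetNumber) next;

	return sampler->lt;
}

PG_FUNCTION_INFO_V1(tsm_every_nth_handler);

Datum
tsm_every_nth_handler(PG_FUNCTION_ARGS)
{
	TsmRoutine *tsm = makeNode(TsmRoutine);

	tsm->parameterTypes = list_make1_oid(INT4OID);
	tsm->repeatable_across_queries = true;	/* selection is deterministic */
	tsm->repeatable_across_scans = true;
	tsm->SampleScanGetSampleSize = nth_samplescangetsamplesize;
	tsm->InitSampleScan = nth_initsamplescan;
	tsm->BeginSampleScan = nth_beginsamplescan;
	tsm->NextSampleBlock = NULL;	/* let core scan all pages sequentially */
	tsm->NextSampleTuple = nth_nextsampletuple;
	tsm->EndSampleScan = NULL;		/* nothing beyond palloc'd memory to free */

	PG_RETURN_POINTER(tsm);
}

Like tsm_bernoulli_handler and tsm_system_handler above, the handler only fills in a TsmRoutine. Leaving NextSampleBlock NULL means the core code visits every page, possibly using synchronized scanning, which is harmless here because the per-page selection does not depend on visit order; leaving EndSampleScan NULL is acceptable because the only resource held is palloc'd memory. How the method name is exposed at the SQL level is outside the scope of this sketch.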
diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 6f4ff2718fed8..050efdc4806a7 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -80,8 +80,11 @@ bool synchronize_seqscans = true; static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, - bool allow_strat, bool allow_sync, bool allow_pagemode, - bool is_bitmapscan, bool is_samplescan, + bool allow_strat, + bool allow_sync, + bool allow_pagemode, + bool is_bitmapscan, + bool is_samplescan, bool temp_snap); static HeapTuple heap_prepare_insert(Relation relation, HeapTuple tup, TransactionId xid, CommandId cid, int options); @@ -207,7 +210,7 @@ static const int MultiXactStatusLock[MaxMultiXactStatus + 1] = * ---------------- */ static void -initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) +initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock) { bool allow_strat; bool allow_sync; @@ -257,12 +260,12 @@ initscan(HeapScanDesc scan, ScanKey key, bool is_rescan) scan->rs_strategy = NULL; } - if (is_rescan) + if (keep_startblock) { /* - * If rescan, keep the previous startblock setting so that rewinding a - * cursor doesn't generate surprising results. Reset the syncscan - * setting, though. + * When rescanning, we want to keep the previous startblock setting, + * so that rewinding a cursor doesn't generate surprising results. + * Reset the active syncscan setting, though. */ scan->rs_syncscan = (allow_sync && synchronize_seqscans); } @@ -1313,6 +1316,10 @@ heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode, /* ---------------- * heap_beginscan - begin relation scan * + * heap_beginscan is the "standard" case. + * + * heap_beginscan_catalog differs in setting up its own temporary snapshot. + * * heap_beginscan_strat offers an extended API that lets the caller control * whether a nondefault buffer access strategy can be used, and whether * syncscan can be chosen (possibly resulting in the scan not starting from @@ -1323,8 +1330,11 @@ heap_openrv_extended(const RangeVar *relation, LOCKMODE lockmode, * really quite unlike a standard seqscan, there is just enough commonality * to make it worth using the same data structure. * - * heap_beginscan_samplingscan is alternate entry point for setting up a - * HeapScanDesc for a TABLESAMPLE scan. + * heap_beginscan_sampling is an alternative entry point for setting up a + * HeapScanDesc for a TABLESAMPLE scan. As with bitmap scans, it's worth + * using the same data structure although the behavior is rather different. + * In addition to the options offered by heap_beginscan_strat, this call + * also allows control of whether page-mode visibility checking is used. 
* ---------------- */ HeapScanDesc @@ -1366,18 +1376,22 @@ heap_beginscan_bm(Relation relation, Snapshot snapshot, HeapScanDesc heap_beginscan_sampling(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, - bool allow_strat, bool allow_pagemode) + bool allow_strat, bool allow_sync, bool allow_pagemode) { return heap_beginscan_internal(relation, snapshot, nkeys, key, - allow_strat, false, allow_pagemode, + allow_strat, allow_sync, allow_pagemode, false, true, false); } static HeapScanDesc heap_beginscan_internal(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, - bool allow_strat, bool allow_sync, bool allow_pagemode, - bool is_bitmapscan, bool is_samplescan, bool temp_snap) + bool allow_strat, + bool allow_sync, + bool allow_pagemode, + bool is_bitmapscan, + bool is_samplescan, + bool temp_snap) { HeapScanDesc scan; @@ -1461,6 +1475,27 @@ heap_rescan(HeapScanDesc scan, initscan(scan, key, true); } +/* ---------------- + * heap_rescan_set_params - restart a relation scan after changing params + * + * This call allows changing the buffer strategy, syncscan, and pagemode + * options before starting a fresh scan. Note that although the actual use + * of syncscan might change (effectively, enabling or disabling reporting), + * the previously selected startblock will be kept. + * ---------------- + */ +void +heap_rescan_set_params(HeapScanDesc scan, ScanKey key, + bool allow_strat, bool allow_sync, bool allow_pagemode) +{ + /* adjust parameters */ + scan->rs_allow_strat = allow_strat; + scan->rs_allow_sync = allow_sync; + scan->rs_pageatatime = allow_pagemode && IsMVCCSnapshot(scan->rs_snapshot); + /* ... and rescan */ + heap_rescan(scan, key); +} + /* ---------------- * heap_endscan - end relation scan * diff --git a/src/backend/access/tablesample/Makefile b/src/backend/access/tablesample/Makefile index 46eeb59f9c468..68d9ab281472d 100644 --- a/src/backend/access/tablesample/Makefile +++ b/src/backend/access/tablesample/Makefile @@ -1,10 +1,10 @@ #------------------------------------------------------------------------- # # Makefile-- -# Makefile for utils/tablesample +# Makefile for access/tablesample # # IDENTIFICATION -# src/backend/utils/tablesample/Makefile +# src/backend/access/tablesample/Makefile # #------------------------------------------------------------------------- @@ -12,6 +12,6 @@ subdir = src/backend/access/tablesample top_builddir = ../../../.. include $(top_builddir)/src/Makefile.global -OBJS = tablesample.o system.o bernoulli.o +OBJS = bernoulli.o system.o tablesample.o include $(top_srcdir)/src/backend/common.mk diff --git a/src/backend/access/tablesample/bernoulli.c b/src/backend/access/tablesample/bernoulli.c index 0a539008221a5..cf88f95e757b1 100644 --- a/src/backend/access/tablesample/bernoulli.c +++ b/src/backend/access/tablesample/bernoulli.c @@ -1,233 +1,231 @@ /*------------------------------------------------------------------------- * * bernoulli.c - * interface routines for BERNOULLI tablesample method + * support routines for BERNOULLI tablesample method * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * To ensure repeatability of samples, it is necessary that selection of a + * given tuple be history-independent; otherwise syncscanning would break + * repeatability, to say nothing of logically-irrelevant maintenance such + * as physical extension or shortening of the relation. 
+ * + * To achieve that, we proceed by hashing each candidate TID together with + * the active seed, and then selecting it if the hash is less than the + * cutoff value computed from the selection probability by BeginSampleScan. + * + * + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * src/backend/utils/tablesample/bernoulli.c + * src/backend/access/tablesample/bernoulli.c * *------------------------------------------------------------------------- */ #include "postgres.h" -#include "fmgr.h" +#ifdef _MSC_VER +#include /* for _isnan */ +#endif +#include -#include "access/tablesample.h" -#include "access/relscan.h" -#include "nodes/execnodes.h" -#include "nodes/relation.h" +#include "access/hash.h" +#include "access/tsmapi.h" +#include "catalog/pg_type.h" #include "optimizer/clauses.h" -#include "storage/bufmgr.h" -#include "utils/sampling.h" +#include "optimizer/cost.h" +#include "utils/builtins.h" -/* tsdesc */ +/* Private state */ typedef struct { + uint64 cutoff; /* select tuples with hash less than this */ uint32 seed; /* random seed */ - BlockNumber startblock; /* starting block, we use ths for syncscan - * support */ - BlockNumber nblocks; /* number of blocks */ - BlockNumber blockno; /* current block */ - float4 probability; /* probabilty that tuple will be returned - * (0.0-1.0) */ OffsetNumber lt; /* last tuple returned from current block */ - SamplerRandomState randstate; /* random generator tsdesc */ } BernoulliSamplerData; + +static void bernoulli_samplescangetsamplesize(PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples); +static void bernoulli_initsamplescan(SampleScanState *node, + int eflags); +static void bernoulli_beginsamplescan(SampleScanState *node, + Datum *params, + int nparams, + uint32 seed); +static OffsetNumber bernoulli_nextsampletuple(SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset); + + /* - * Initialize the state. + * Create a TsmRoutine descriptor for the BERNOULLI method. */ Datum -tsm_bernoulli_init(PG_FUNCTION_ARGS) +tsm_bernoulli_handler(PG_FUNCTION_ARGS) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - uint32 seed = PG_GETARG_UINT32(1); - float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2); - HeapScanDesc scan = tsdesc->heapScan; - BernoulliSamplerData *sampler; + TsmRoutine *tsm = makeNode(TsmRoutine); + + tsm->parameterTypes = list_make1_oid(FLOAT4OID); + tsm->repeatable_across_queries = true; + tsm->repeatable_across_scans = true; + tsm->SampleScanGetSampleSize = bernoulli_samplescangetsamplesize; + tsm->InitSampleScan = bernoulli_initsamplescan; + tsm->BeginSampleScan = bernoulli_beginsamplescan; + tsm->NextSampleBlock = NULL; + tsm->NextSampleTuple = bernoulli_nextsampletuple; + tsm->EndSampleScan = NULL; + + PG_RETURN_POINTER(tsm); +} - if (percent < 0 || percent > 100) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("invalid sample size"), - errhint("Sample size must be numeric value between 0 and 100 (inclusive)."))); +/* + * Sample size estimation. 
+ */ +static void +bernoulli_samplescangetsamplesize(PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples) +{ + Node *pctnode; + float4 samplefract; - sampler = palloc0(sizeof(BernoulliSamplerData)); + /* Try to extract an estimate for the sample percentage */ + pctnode = (Node *) linitial(paramexprs); + pctnode = estimate_expression_value(root, pctnode); - /* Remember initial values for reinit */ - sampler->seed = seed; - sampler->startblock = scan->rs_startblock; - sampler->nblocks = scan->rs_nblocks; - sampler->blockno = InvalidBlockNumber; - sampler->probability = percent / 100; - sampler->lt = InvalidOffsetNumber; - sampler_random_init_state(sampler->seed, sampler->randstate); + if (IsA(pctnode, Const) && + !((Const *) pctnode)->constisnull) + { + samplefract = DatumGetFloat4(((Const *) pctnode)->constvalue); + if (samplefract >= 0 && samplefract <= 100 && !isnan(samplefract)) + samplefract /= 100.0f; + else + { + /* Default samplefract if the value is bogus */ + samplefract = 0.1f; + } + } + else + { + /* Default samplefract if we didn't obtain a non-null Const */ + samplefract = 0.1f; + } + + /* We'll visit all pages of the baserel */ + *pages = baserel->pages; - tsdesc->tsmdata = (void *) sampler; + *tuples = clamp_row_est(baserel->tuples * samplefract); +} - PG_RETURN_VOID(); +/* + * Initialize during executor setup. + */ +static void +bernoulli_initsamplescan(SampleScanState *node, int eflags) +{ + node->tsm_state = palloc0(sizeof(BernoulliSamplerData)); } /* - * Get next block number to read or InvalidBlockNumber if we are at the - * end of the relation. + * Examine parameters and prepare for a sample scan. */ -Datum -tsm_bernoulli_nextblock(PG_FUNCTION_ARGS) +static void +bernoulli_beginsamplescan(SampleScanState *node, + Datum *params, + int nparams, + uint32 seed) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata; + BernoulliSamplerData *sampler = (BernoulliSamplerData *) node->tsm_state; + double percent = DatumGetFloat4(params[0]); + + if (percent < 0 || percent > 100 || isnan(percent)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT), + errmsg("sample percentage must be between 0 and 100"))); /* - * Bernoulli sampling scans all blocks on the table and supports syncscan - * so loop from startblock to startblock instead of from 0 to nblocks. + * The cutoff is sample probability times (PG_UINT32_MAX + 1); we have to + * store that as a uint64, of course. Note that this gives strictly + * correct behavior at the limits of zero or one probability. */ - if (sampler->blockno == InvalidBlockNumber) - sampler->blockno = sampler->startblock; - else - { - sampler->blockno++; - - if (sampler->blockno >= sampler->nblocks) - sampler->blockno = 0; - - if (sampler->blockno == sampler->startblock) - PG_RETURN_UINT32(InvalidBlockNumber); - } + sampler->cutoff = rint(((double) PG_UINT32_MAX + 1) * percent / 100); + sampler->seed = seed; + sampler->lt = InvalidOffsetNumber; - PG_RETURN_UINT32(sampler->blockno); + /* + * Use bulkread, since we're scanning all pages. But pagemode visibility + * checking is a win only at larger sampling fractions. The 25% cutoff + * here is based on very limited experimentation. + */ + node->use_bulkread = true; + node->use_pagemode = (percent >= 25); } /* - * Get next tuple from current block. - * - * This method implements the main logic in bernoulli sampling. 
- * The algorithm simply generates new random number (in 0.0-1.0 range) and if - * it falls within user specified probability (in the same range) return the - * tuple offset. - * - * It is ok here to return tuple offset without knowing if tuple is visible - * and not check it via examinetuple. The reason for that is that we do the - * coinflip (random number generation) for every tuple in the table. Since all - * tuples have same probability of being returned the visible and invisible - * tuples will be returned in same ratio as they have in the actual table. - * This means that there is no skew towards either visible or invisible tuples - * and the number of visible tuples returned from the executor node should - * match the fraction of visible tuples which was specified by user. + * Select next sampled tuple in current block. * - * This is faster than doing the coinflip in examinetuple because we don't - * have to do visibility checks on uninteresting tuples. + * It is OK here to return an offset without knowing if the tuple is visible + * (or even exists). The reason is that we do the coinflip for every tuple + * offset in the table. Since all tuples have the same probability of being + * returned, it doesn't matter if we do extra coinflips for invisible tuples. * - * If we reach end of the block return InvalidOffsetNumber which tells + * When we reach end of the block, return InvalidOffsetNumber which tells * SampleScan to go to next block. */ -Datum -tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS) +static OffsetNumber +bernoulli_nextsampletuple(SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - OffsetNumber maxoffset = PG_GETARG_UINT16(2); - BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata; + BernoulliSamplerData *sampler = (BernoulliSamplerData *) node->tsm_state; OffsetNumber tupoffset = sampler->lt; - float4 probability = sampler->probability; + uint32 hashinput[3]; + /* Advance to first/next tuple in block */ if (tupoffset == InvalidOffsetNumber) tupoffset = FirstOffsetNumber; else tupoffset++; /* - * Loop over tuple offsets until the random generator returns value that - * is within the probability of returning the tuple or until we reach end - * of the block. + * We compute the hash by applying hash_any to an array of 3 uint32's + * containing the block, offset, and seed. This is efficient to set up, + * and with the current implementation of hash_any, it gives + * machine-independent results, which is a nice property for regression + * testing. * - * (This is our implementation of bernoulli trial) + * These words in the hash input are the same throughout the block: */ - while (sampler_random_fract(sampler->randstate) > probability) + hashinput[0] = blockno; + hashinput[2] = sampler->seed; + + /* + * Loop over tuple offsets until finding suitable TID or reaching end of + * block. + */ + for (; tupoffset <= maxoffset; tupoffset++) { - tupoffset++; + uint32 hash; - if (tupoffset > maxoffset) + hashinput[1] = tupoffset; + + hash = DatumGetUInt32(hash_any((const unsigned char *) hashinput, + (int) sizeof(hashinput))); + if (hash < sampler->cutoff) break; } if (tupoffset > maxoffset) - /* Tell SampleScan that we want next block. */ tupoffset = InvalidOffsetNumber; sampler->lt = tupoffset; - PG_RETURN_UINT16(tupoffset); -} - -/* - * Cleanup method. 
- */ -Datum -tsm_bernoulli_end(PG_FUNCTION_ARGS) -{ - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - - pfree(tsdesc->tsmdata); - - PG_RETURN_VOID(); -} - -/* - * Reset tsdesc (called by ReScan). - */ -Datum -tsm_bernoulli_reset(PG_FUNCTION_ARGS) -{ - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - BernoulliSamplerData *sampler = (BernoulliSamplerData *) tsdesc->tsmdata; - - sampler->blockno = InvalidBlockNumber; - sampler->lt = InvalidOffsetNumber; - sampler_random_init_state(sampler->seed, sampler->randstate); - - PG_RETURN_VOID(); -} - -/* - * Costing function. - */ -Datum -tsm_bernoulli_cost(PG_FUNCTION_ARGS) -{ - PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0); - Path *path = (Path *) PG_GETARG_POINTER(1); - RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2); - List *args = (List *) PG_GETARG_POINTER(3); - BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4); - double *tuples = (double *) PG_GETARG_POINTER(5); - Node *pctnode; - float4 samplesize; - - *pages = baserel->pages; - - pctnode = linitial(args); - pctnode = estimate_expression_value(root, pctnode); - - if (IsA(pctnode, RelabelType)) - pctnode = (Node *) ((RelabelType *) pctnode)->arg; - - if (IsA(pctnode, Const)) - { - samplesize = DatumGetFloat4(((Const *) pctnode)->constvalue); - samplesize /= 100.0; - } - else - { - /* Default samplesize if the estimation didn't return Const. */ - samplesize = 0.1f; - } - - *tuples = path->rows * samplesize; - path->rows = *tuples; - - PG_RETURN_VOID(); + return tupoffset; } diff --git a/src/backend/access/tablesample/system.c b/src/backend/access/tablesample/system.c index 1d834369a4bd1..43c5dab71619a 100644 --- a/src/backend/access/tablesample/system.c +++ b/src/backend/access/tablesample/system.c @@ -1,186 +1,260 @@ /*------------------------------------------------------------------------- * * system.c - * interface routines for system tablesample method + * support routines for SYSTEM tablesample method * + * To ensure repeatability of samples, it is necessary that selection of a + * given tuple be history-independent; otherwise syncscanning would break + * repeatability, to say nothing of logically-irrelevant maintenance such + * as physical extension or shortening of the relation. * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * To achieve that, we proceed by hashing each candidate block number together + * with the active seed, and then selecting it if the hash is less than the + * cutoff value computed from the selection probability by BeginSampleScan. 
+ * + * + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group + * Portions Copyright (c) 1994, Regents of the University of California * * IDENTIFICATION - * src/backend/utils/tablesample/system.c + * src/backend/access/tablesample/system.c * *------------------------------------------------------------------------- */ #include "postgres.h" -#include "fmgr.h" +#ifdef _MSC_VER +#include /* for _isnan */ +#endif +#include -#include "access/tablesample.h" +#include "access/hash.h" #include "access/relscan.h" -#include "nodes/execnodes.h" -#include "nodes/relation.h" +#include "access/tsmapi.h" +#include "catalog/pg_type.h" #include "optimizer/clauses.h" -#include "storage/bufmgr.h" -#include "utils/sampling.h" +#include "optimizer/cost.h" +#include "utils/builtins.h" -/* - * State - */ +/* Private state */ typedef struct { - BlockSamplerData bs; + uint64 cutoff; /* select blocks with hash less than this */ uint32 seed; /* random seed */ - BlockNumber nblocks; /* number of block in relation */ - int samplesize; /* number of blocks to return */ + BlockNumber nextblock; /* next block to consider sampling */ OffsetNumber lt; /* last tuple returned from current block */ } SystemSamplerData; -/* - * Initializes the state. - */ -Datum -tsm_system_init(PG_FUNCTION_ARGS) -{ - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - uint32 seed = PG_GETARG_UINT32(1); - float4 percent = PG_ARGISNULL(2) ? -1 : PG_GETARG_FLOAT4(2); - HeapScanDesc scan = tsdesc->heapScan; - SystemSamplerData *sampler; +static void system_samplescangetsamplesize(PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples); +static void system_initsamplescan(SampleScanState *node, + int eflags); +static void system_beginsamplescan(SampleScanState *node, + Datum *params, + int nparams, + uint32 seed); +static BlockNumber system_nextsampleblock(SampleScanState *node); +static OffsetNumber system_nextsampletuple(SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset); - if (percent < 0 || percent > 100) - ereport(ERROR, - (errcode(ERRCODE_NUMERIC_VALUE_OUT_OF_RANGE), - errmsg("invalid sample size"), - errhint("Sample size must be numeric value between 0 and 100 (inclusive)."))); - - sampler = palloc0(sizeof(SystemSamplerData)); - - /* Remember initial values for reinit */ - sampler->seed = seed; - sampler->nblocks = scan->rs_nblocks; - sampler->samplesize = 1 + (int) (sampler->nblocks * (percent / 100.0)); - sampler->lt = InvalidOffsetNumber; - - BlockSampler_Init(&sampler->bs, sampler->nblocks, sampler->samplesize, - sampler->seed); - - tsdesc->tsmdata = (void *) sampler; - - PG_RETURN_VOID(); -} /* - * Get next block number or InvalidBlockNumber when we're done. - * - * Uses the same logic as ANALYZE for picking the random blocks. + * Create a TsmRoutine descriptor for the SYSTEM method. 
*/ Datum -tsm_system_nextblock(PG_FUNCTION_ARGS) +tsm_system_handler(PG_FUNCTION_ARGS) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; - BlockNumber blockno; - - if (!BlockSampler_HasMore(&sampler->bs)) - PG_RETURN_UINT32(InvalidBlockNumber); - - blockno = BlockSampler_Next(&sampler->bs); - - PG_RETURN_UINT32(blockno); + TsmRoutine *tsm = makeNode(TsmRoutine); + + tsm->parameterTypes = list_make1_oid(FLOAT4OID); + tsm->repeatable_across_queries = true; + tsm->repeatable_across_scans = true; + tsm->SampleScanGetSampleSize = system_samplescangetsamplesize; + tsm->InitSampleScan = system_initsamplescan; + tsm->BeginSampleScan = system_beginsamplescan; + tsm->NextSampleBlock = system_nextsampleblock; + tsm->NextSampleTuple = system_nextsampletuple; + tsm->EndSampleScan = NULL; + + PG_RETURN_POINTER(tsm); } /* - * Get next tuple offset in current block or InvalidOffsetNumber if we are done - * with this block. + * Sample size estimation. */ -Datum -tsm_system_nexttuple(PG_FUNCTION_ARGS) +static void +system_samplescangetsamplesize(PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - OffsetNumber maxoffset = PG_GETARG_UINT16(2); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; - OffsetNumber tupoffset = sampler->lt; + Node *pctnode; + float4 samplefract; - if (tupoffset == InvalidOffsetNumber) - tupoffset = FirstOffsetNumber; - else - tupoffset++; + /* Try to extract an estimate for the sample percentage */ + pctnode = (Node *) linitial(paramexprs); + pctnode = estimate_expression_value(root, pctnode); - if (tupoffset > maxoffset) - tupoffset = InvalidOffsetNumber; + if (IsA(pctnode, Const) && + !((Const *) pctnode)->constisnull) + { + samplefract = DatumGetFloat4(((Const *) pctnode)->constvalue); + if (samplefract >= 0 && samplefract <= 100 && !isnan(samplefract)) + samplefract /= 100.0f; + else + { + /* Default samplefract if the value is bogus */ + samplefract = 0.1f; + } + } + else + { + /* Default samplefract if we didn't obtain a non-null Const */ + samplefract = 0.1f; + } - sampler->lt = tupoffset; + /* We'll visit a sample of the pages ... */ + *pages = clamp_row_est(baserel->pages * samplefract); - PG_RETURN_UINT16(tupoffset); + /* ... and hopefully get a representative number of tuples from them */ + *tuples = clamp_row_est(baserel->tuples * samplefract); } /* - * Cleanup method. + * Initialize during executor setup. */ -Datum -tsm_system_end(PG_FUNCTION_ARGS) +static void +system_initsamplescan(SampleScanState *node, int eflags) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - - pfree(tsdesc->tsmdata); - - PG_RETURN_VOID(); + node->tsm_state = palloc0(sizeof(SystemSamplerData)); } /* - * Reset state (called by ReScan). + * Examine parameters and prepare for a sample scan. 
*/ -Datum -tsm_system_reset(PG_FUNCTION_ARGS) +static void +system_beginsamplescan(SampleScanState *node, + Datum *params, + int nparams, + uint32 seed) { - TableSampleDesc *tsdesc = (TableSampleDesc *) PG_GETARG_POINTER(0); - SystemSamplerData *sampler = (SystemSamplerData *) tsdesc->tsmdata; + SystemSamplerData *sampler = (SystemSamplerData *) node->tsm_state; + double percent = DatumGetFloat4(params[0]); + if (percent < 0 || percent > 100 || isnan(percent)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT), + errmsg("sample percentage must be between 0 and 100"))); + + /* + * The cutoff is sample probability times (PG_UINT32_MAX + 1); we have to + * store that as a uint64, of course. Note that this gives strictly + * correct behavior at the limits of zero or one probability. + */ + sampler->cutoff = rint(((double) PG_UINT32_MAX + 1) * percent / 100); + sampler->seed = seed; + sampler->nextblock = 0; sampler->lt = InvalidOffsetNumber; - BlockSampler_Init(&sampler->bs, sampler->nblocks, sampler->samplesize, - sampler->seed); - PG_RETURN_VOID(); + /* + * Bulkread buffer access strategy probably makes sense unless we're + * scanning a very small fraction of the table. The 1% cutoff here is a + * guess. We should use pagemode visibility checking, since we scan all + * tuples on each selected page. + */ + node->use_bulkread = (percent >= 1); + node->use_pagemode = true; } /* - * Costing function. + * Select next block to sample. */ -Datum -tsm_system_cost(PG_FUNCTION_ARGS) +static BlockNumber +system_nextsampleblock(SampleScanState *node) { - PlannerInfo *root = (PlannerInfo *) PG_GETARG_POINTER(0); - Path *path = (Path *) PG_GETARG_POINTER(1); - RelOptInfo *baserel = (RelOptInfo *) PG_GETARG_POINTER(2); - List *args = (List *) PG_GETARG_POINTER(3); - BlockNumber *pages = (BlockNumber *) PG_GETARG_POINTER(4); - double *tuples = (double *) PG_GETARG_POINTER(5); - Node *pctnode; - float4 samplesize; + SystemSamplerData *sampler = (SystemSamplerData *) node->tsm_state; + HeapScanDesc scan = node->ss.ss_currentScanDesc; + BlockNumber nextblock = sampler->nextblock; + uint32 hashinput[2]; + + /* + * We compute the hash by applying hash_any to an array of 2 uint32's + * containing the block number and seed. This is efficient to set up, and + * with the current implementation of hash_any, it gives + * machine-independent results, which is a nice property for regression + * testing. + * + * These words in the hash input are the same throughout the block: + */ + hashinput[1] = sampler->seed; + + /* + * Loop over block numbers until finding suitable block or reaching end of + * relation. + */ + for (; nextblock < scan->rs_nblocks; nextblock++) + { + uint32 hash; - pctnode = linitial(args); - pctnode = estimate_expression_value(root, pctnode); + hashinput[0] = nextblock; - if (IsA(pctnode, RelabelType)) - pctnode = (Node *) ((RelabelType *) pctnode)->arg; + hash = DatumGetUInt32(hash_any((const unsigned char *) hashinput, + (int) sizeof(hashinput))); + if (hash < sampler->cutoff) + break; + } - if (IsA(pctnode, Const)) + if (nextblock < scan->rs_nblocks) { - samplesize = DatumGetFloat4(((Const *) pctnode)->constvalue); - samplesize /= 100.0; + /* Found a suitable block; remember where we should start next time */ + sampler->nextblock = nextblock + 1; + return nextblock; } + + /* Done, but let's reset nextblock to 0 for safety. */ + sampler->nextblock = 0; + return InvalidBlockNumber; +} + +/* + * Select next sampled tuple in current block. 
+ * + * In block sampling, we just want to sample all the tuples in each selected + * block. + * + * It is OK here to return an offset without knowing if the tuple is visible + * (or even exists); nodeSamplescan.c will deal with that. + * + * When we reach end of the block, return InvalidOffsetNumber which tells + * SampleScan to go to next block. + */ +static OffsetNumber +system_nextsampletuple(SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset) +{ + SystemSamplerData *sampler = (SystemSamplerData *) node->tsm_state; + OffsetNumber tupoffset = sampler->lt; + + /* Advance to next possible offset on page */ + if (tupoffset == InvalidOffsetNumber) + tupoffset = FirstOffsetNumber; else - { - /* Default samplesize if the estimation didn't return Const. */ - samplesize = 0.1f; - } + tupoffset++; - *pages = baserel->pages * samplesize; - *tuples = path->rows * samplesize; - path->rows = *tuples; + /* Done? */ + if (tupoffset > maxoffset) + tupoffset = InvalidOffsetNumber; + + sampler->lt = tupoffset; - PG_RETURN_VOID(); + return tupoffset; } diff --git a/src/backend/access/tablesample/tablesample.c b/src/backend/access/tablesample/tablesample.c index f21d42c8e38ca..b8ad7ced743cb 100644 --- a/src/backend/access/tablesample/tablesample.c +++ b/src/backend/access/tablesample/tablesample.c @@ -1,7 +1,7 @@ /*------------------------------------------------------------------------- * * tablesample.c - * TABLESAMPLE internal API + * Support functions for TABLESAMPLE feature * * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -10,356 +10,31 @@ * IDENTIFICATION * src/backend/access/tablesample/tablesample.c * - * TABLESAMPLE is the SQL standard clause for sampling the relations. - * - * The API is interface between the Executor and the TABLESAMPLE Methods. - * - * TABLESAMPLE Methods are implementations of actual sampling algorithms which - * can be used for returning a sample of the source relation. - * Methods don't read the table directly but are asked for block number and - * tuple offset which they want to examine (or return) and the tablesample - * interface implemented here does the reading for them. - * - * We currently only support sampling of the physical relations, but in the - * future we might extend the API to support subqueries as well. - * * ------------------------------------------------------------------------- */ #include "postgres.h" -#include "access/tablesample.h" - -#include "catalog/pg_tablesample_method.h" -#include "miscadmin.h" -#include "pgstat.h" -#include "storage/bufmgr.h" -#include "storage/predicate.h" -#include "utils/rel.h" -#include "utils/tqual.h" - - -static bool SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan); - - -/* - * Initialize the TABLESAMPLE Descriptor and the TABLESAMPLE Method. 
- */ -TableSampleDesc * -tablesample_init(SampleScanState *scanstate, TableSampleClause *tablesample) -{ - FunctionCallInfoData fcinfo; - int i; - List *args = tablesample->args; - ListCell *arg; - ExprContext *econtext = scanstate->ss.ps.ps_ExprContext; - TableSampleDesc *tsdesc = (TableSampleDesc *) palloc0(sizeof(TableSampleDesc)); - - /* Load functions */ - fmgr_info(tablesample->tsminit, &(tsdesc->tsminit)); - fmgr_info(tablesample->tsmnextblock, &(tsdesc->tsmnextblock)); - fmgr_info(tablesample->tsmnexttuple, &(tsdesc->tsmnexttuple)); - if (OidIsValid(tablesample->tsmexaminetuple)) - fmgr_info(tablesample->tsmexaminetuple, &(tsdesc->tsmexaminetuple)); - else - tsdesc->tsmexaminetuple.fn_oid = InvalidOid; - fmgr_info(tablesample->tsmreset, &(tsdesc->tsmreset)); - fmgr_info(tablesample->tsmend, &(tsdesc->tsmend)); - - InitFunctionCallInfoData(fcinfo, &tsdesc->tsminit, - list_length(args) + 2, - InvalidOid, NULL, NULL); - - tsdesc->tupDesc = scanstate->ss.ss_ScanTupleSlot->tts_tupleDescriptor; - tsdesc->heapScan = scanstate->ss.ss_currentScanDesc; - - /* First argument for init function is always TableSampleDesc */ - fcinfo.arg[0] = PointerGetDatum(tsdesc); - fcinfo.argnull[0] = false; +#include "access/tsmapi.h" - /* - * Second arg for init function is always REPEATABLE. - * - * If tablesample->repeatable is NULL then REPEATABLE clause was not - * specified, and we insert a random value as default. - * - * When specified, the expression cannot evaluate to NULL. - */ - if (tablesample->repeatable) - { - ExprState *argstate = ExecInitExpr((Expr *) tablesample->repeatable, - (PlanState *) scanstate); - - fcinfo.arg[1] = ExecEvalExpr(argstate, econtext, - &fcinfo.argnull[1], NULL); - if (fcinfo.argnull[1]) - ereport(ERROR, - (errcode(ERRCODE_NULL_VALUE_NOT_ALLOWED), - errmsg("REPEATABLE clause must be NOT NULL numeric value"))); - } - else - { - fcinfo.arg[1] = UInt32GetDatum(random()); - fcinfo.argnull[1] = false; - } - - /* Rest of the arguments come from user. */ - i = 2; - foreach(arg, args) - { - Expr *argexpr = (Expr *) lfirst(arg); - ExprState *argstate = ExecInitExpr(argexpr, (PlanState *) scanstate); - - fcinfo.arg[i] = ExecEvalExpr(argstate, econtext, - &fcinfo.argnull[i], NULL); - i++; - } - Assert(i == fcinfo.nargs); - - (void) FunctionCallInvoke(&fcinfo); - - return tsdesc; -} /* - * Get next tuple from TABLESAMPLE Method. - */ -HeapTuple -tablesample_getnext(TableSampleDesc *desc) -{ - HeapScanDesc scan = desc->heapScan; - HeapTuple tuple = &(scan->rs_ctup); - bool pagemode = scan->rs_pageatatime; - BlockNumber blockno; - Page page; - bool page_all_visible; - ItemId itemid; - OffsetNumber tupoffset, - maxoffset; - - if (!scan->rs_inited) - { - /* - * return null immediately if relation is empty - */ - if (scan->rs_nblocks == 0) - { - Assert(!BufferIsValid(scan->rs_cbuf)); - tuple->t_data = NULL; - return NULL; - } - blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock, - PointerGetDatum(desc))); - if (!BlockNumberIsValid(blockno)) - { - tuple->t_data = NULL; - return NULL; - } - - heapgetpage(scan, blockno); - scan->rs_inited = true; - } - else - { - /* continue from previously returned page/tuple */ - blockno = scan->rs_cblock; /* current page */ - } - - /* - * When pagemode is disabled, the scan will do visibility checks for each - * tuple it finds so the buffer needs to be locked. 
- */ - if (!pagemode) - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); - - page = (Page) BufferGetPage(scan->rs_cbuf); - page_all_visible = PageIsAllVisible(page); - maxoffset = PageGetMaxOffsetNumber(page); - - for (;;) - { - CHECK_FOR_INTERRUPTS(); - - tupoffset = DatumGetUInt16(FunctionCall3(&desc->tsmnexttuple, - PointerGetDatum(desc), - UInt32GetDatum(blockno), - UInt16GetDatum(maxoffset))); - - if (OffsetNumberIsValid(tupoffset)) - { - bool visible; - bool found; - - /* Skip invalid tuple pointers. */ - itemid = PageGetItemId(page, tupoffset); - if (!ItemIdIsNormal(itemid)) - continue; - - tuple->t_data = (HeapTupleHeader) PageGetItem((Page) page, itemid); - tuple->t_len = ItemIdGetLength(itemid); - ItemPointerSet(&(tuple->t_self), blockno, tupoffset); - - if (page_all_visible) - visible = true; - else - visible = SampleTupleVisible(tuple, tupoffset, scan); - - /* - * Let the sampling method examine the actual tuple and decide if - * we should return it. - * - * Note that we let it examine even invisible tuples for - * statistical purposes, but not return them since user should - * never see invisible tuples. - */ - if (OidIsValid(desc->tsmexaminetuple.fn_oid)) - { - found = DatumGetBool(FunctionCall4(&desc->tsmexaminetuple, - PointerGetDatum(desc), - UInt32GetDatum(blockno), - PointerGetDatum(tuple), - BoolGetDatum(visible))); - /* Should not happen if sampling method is well written. */ - if (found && !visible) - elog(ERROR, "Sampling method wanted to return invisible tuple"); - } - else - found = visible; - - /* Found visible tuple, return it. */ - if (found) - { - if (!pagemode) - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); - break; - } - else - { - /* Try next tuple from same page. */ - continue; - } - } - - - if (!pagemode) - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); - - blockno = DatumGetInt32(FunctionCall1(&desc->tsmnextblock, - PointerGetDatum(desc))); - - /* - * Report our new scan position for synchronization purposes. We don't - * do that when moving backwards, however. That would just mess up any - * other forward-moving scanners. - * - * Note: we do this before checking for end of scan so that the final - * state of the position hint is back at the start of the rel. That's - * not strictly necessary, but otherwise when you run the same query - * multiple times the starting position would shift a little bit - * backwards on every invocation, which is confusing. We don't - * guarantee any specific ordering in general, though. - */ - if (scan->rs_syncscan) - ss_report_location(scan->rs_rd, BlockNumberIsValid(blockno) ? - blockno : scan->rs_startblock); - - /* - * Reached end of scan. - */ - if (!BlockNumberIsValid(blockno)) - { - if (BufferIsValid(scan->rs_cbuf)) - ReleaseBuffer(scan->rs_cbuf); - scan->rs_cbuf = InvalidBuffer; - scan->rs_cblock = InvalidBlockNumber; - tuple->t_data = NULL; - scan->rs_inited = false; - return NULL; - } - - heapgetpage(scan, blockno); - - if (!pagemode) - LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); - - page = (Page) BufferGetPage(scan->rs_cbuf); - page_all_visible = PageIsAllVisible(page); - maxoffset = PageGetMaxOffsetNumber(page); - } - - pgstat_count_heap_getnext(scan->rs_rd); - - return &(scan->rs_ctup); -} - -/* - * Reset the sampling to starting state - */ -void -tablesample_reset(TableSampleDesc *desc) -{ - (void) FunctionCall1(&desc->tsmreset, PointerGetDatum(desc)); -} - -/* - * Signal the sampling method that the scan has finished. 
- */ -void -tablesample_end(TableSampleDesc *desc) -{ - (void) FunctionCall1(&desc->tsmend, PointerGetDatum(desc)); -} - -/* - * Check visibility of the tuple. + * GetTsmRoutine --- get a TsmRoutine struct by invoking the handler. + * + * This is a convenience routine that's just meant to check for errors. */ -static bool -SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan) +TsmRoutine * +GetTsmRoutine(Oid tsmhandler) { - /* - * If this scan is reading whole pages at a time, there is already - * visibility info present in rs_vistuples so we can just search it for - * the tupoffset. - */ - if (scan->rs_pageatatime) - { - int start = 0, - end = scan->rs_ntuples - 1; - - /* - * Do the binary search over rs_vistuples, it's already sorted by - * OffsetNumber so we don't need to do any sorting ourselves here. - * - * We could use bsearch() here but it's slower for integers because of - * the function call overhead and because it needs boiler plate code - * it would not save us anything code-wise anyway. - */ - while (start <= end) - { - int mid = start + (end - start) / 2; - OffsetNumber curoffset = scan->rs_vistuples[mid]; - - if (curoffset == tupoffset) - return true; - else if (curoffset > tupoffset) - end = mid - 1; - else - start = mid + 1; - } - - return false; - } - else - { - /* No pagemode, we have to check the tuple itself. */ - Snapshot snapshot = scan->rs_snapshot; - Buffer buffer = scan->rs_cbuf; + Datum datum; + TsmRoutine *routine; - bool visible = HeapTupleSatisfiesVisibility(tuple, snapshot, buffer); + datum = OidFunctionCall1(tsmhandler, PointerGetDatum(NULL)); + routine = (TsmRoutine *) DatumGetPointer(datum); - CheckForSerializableConflictOut(visible, scan->rs_rd, tuple, buffer, - snapshot); + if (routine == NULL || !IsA(routine, TsmRoutine)) + elog(ERROR, "tablesample handler function %u did not return a TsmRoutine struct", + tsmhandler); - return visible; - } + return routine; } diff --git a/src/backend/catalog/Makefile b/src/backend/catalog/Makefile index 3d1139b5ba0bf..25130ecf12480 100644 --- a/src/backend/catalog/Makefile +++ b/src/backend/catalog/Makefile @@ -40,8 +40,9 @@ POSTGRES_BKI_SRCS = $(addprefix $(top_srcdir)/src/include/catalog/,\ pg_ts_parser.h pg_ts_template.h pg_extension.h \ pg_foreign_data_wrapper.h pg_foreign_server.h pg_user_mapping.h \ pg_foreign_table.h pg_policy.h pg_replication_origin.h \ - pg_tablesample_method.h pg_default_acl.h pg_seclabel.h pg_shseclabel.h \ - pg_collation.h pg_range.h pg_transform.h toasting.h indexing.h \ + pg_default_acl.h pg_seclabel.h pg_shseclabel.h \ + pg_collation.h pg_range.h pg_transform.h \ + toasting.h indexing.h \ ) # location of Catalog.pm diff --git a/src/backend/catalog/dependency.c b/src/backend/catalog/dependency.c index 5d7c441739cec..90b1cd835f89e 100644 --- a/src/backend/catalog/dependency.c +++ b/src/backend/catalog/dependency.c @@ -1911,6 +1911,14 @@ find_expr_references_walker(Node *node, context->addrs); } } + else if (IsA(node, TableSampleClause)) + { + TableSampleClause *tsc = (TableSampleClause *) node; + + add_object_address(OCLASS_PROC, tsc->tsmhandler, 0, + context->addrs); + /* fall through to examine arguments */ + } return expression_tree_walker(node, find_expr_references_walker, (void *) context); diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index 0d1ecc2a3edbb..5d06fa4ea65c4 100644 --- a/src/backend/commands/explain.c +++ b/src/backend/commands/explain.c @@ -96,6 +96,8 @@ static void show_sort_group_keys(PlanState *planstate, const 
char *qlabel, List *ancestors, ExplainState *es); static void show_sortorder_options(StringInfo buf, Node *sortexpr, Oid sortOperator, Oid collation, bool nullsFirst); +static void show_tablesample(TableSampleClause *tsc, PlanState *planstate, + List *ancestors, ExplainState *es); static void show_sort_info(SortState *sortstate, ExplainState *es); static void show_hash_info(HashState *hashstate, ExplainState *es); static void show_tidbitmap_info(BitmapHeapScanState *planstate, @@ -116,7 +118,7 @@ static void ExplainMemberNodes(List *plans, PlanState **planstates, static void ExplainSubPlans(List *plans, List *ancestors, const char *relationship, ExplainState *es); static void ExplainCustomChildren(CustomScanState *css, - List *ancestors, ExplainState *es); + List *ancestors, ExplainState *es); static void ExplainProperty(const char *qlabel, const char *value, bool numeric, ExplainState *es); static void ExplainOpenGroup(const char *objtype, const char *labelname, @@ -730,6 +732,7 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used) switch (nodeTag(plan)) { case T_SeqScan: + case T_SampleScan: case T_IndexScan: case T_IndexOnlyScan: case T_BitmapHeapScan: @@ -739,7 +742,6 @@ ExplainPreScanNode(PlanState *planstate, Bitmapset **rels_used) case T_ValuesScan: case T_CteScan: case T_WorkTableScan: - case T_SampleScan: *rels_used = bms_add_member(*rels_used, ((Scan *) plan)->scanrelid); break; @@ -935,6 +937,9 @@ ExplainNode(PlanState *planstate, List *ancestors, case T_SeqScan: pname = sname = "Seq Scan"; break; + case T_SampleScan: + pname = sname = "Sample Scan"; + break; case T_IndexScan: pname = sname = "Index Scan"; break; @@ -976,23 +981,6 @@ ExplainNode(PlanState *planstate, List *ancestors, else pname = sname; break; - case T_SampleScan: - { - /* - * Fetch the tablesample method name from RTE. - * - * It would be nice to also show parameters, but since we - * support arbitrary expressions as parameter it might get - * quite messy. 
- */ - RangeTblEntry *rte; - - rte = rt_fetch(((SampleScan *) plan)->scanrelid, es->rtable); - custom_name = get_tablesample_method_name(rte->tablesample->tsmid); - pname = psprintf("Sample Scan (%s)", custom_name); - sname = "Sample Scan"; - } - break; case T_Material: pname = sname = "Materialize"; break; @@ -1101,6 +1089,7 @@ ExplainNode(PlanState *planstate, List *ancestors, switch (nodeTag(plan)) { case T_SeqScan: + case T_SampleScan: case T_BitmapHeapScan: case T_TidScan: case T_SubqueryScan: @@ -1115,9 +1104,6 @@ ExplainNode(PlanState *planstate, List *ancestors, if (((Scan *) plan)->scanrelid > 0) ExplainScanTarget((Scan *) plan, es); break; - case T_SampleScan: - ExplainScanTarget((Scan *) plan, es); - break; case T_IndexScan: { IndexScan *indexscan = (IndexScan *) plan; @@ -1363,12 +1349,15 @@ ExplainNode(PlanState *planstate, List *ancestors, if (es->analyze) show_tidbitmap_info((BitmapHeapScanState *) planstate, es); break; + case T_SampleScan: + show_tablesample(((SampleScan *) plan)->tablesample, + planstate, ancestors, es); + /* FALL THRU to print additional fields the same as SeqScan */ case T_SeqScan: case T_ValuesScan: case T_CteScan: case T_WorkTableScan: case T_SubqueryScan: - case T_SampleScan: show_scan_qual(plan->qual, "Filter", planstate, ancestors, es); if (plan->qual) show_instrumentation_count("Rows Removed by Filter", 1, @@ -2109,6 +2098,72 @@ show_sortorder_options(StringInfo buf, Node *sortexpr, } } +/* + * Show TABLESAMPLE properties + */ +static void +show_tablesample(TableSampleClause *tsc, PlanState *planstate, + List *ancestors, ExplainState *es) +{ + List *context; + bool useprefix; + char *method_name; + List *params = NIL; + char *repeatable; + ListCell *lc; + + /* Set up deparsing context */ + context = set_deparse_context_planstate(es->deparse_cxt, + (Node *) planstate, + ancestors); + useprefix = list_length(es->rtable) > 1; + + /* Get the tablesample method name */ + method_name = get_func_name(tsc->tsmhandler); + + /* Deparse parameter expressions */ + foreach(lc, tsc->args) + { + Node *arg = (Node *) lfirst(lc); + + params = lappend(params, + deparse_expression(arg, context, + useprefix, false)); + } + if (tsc->repeatable) + repeatable = deparse_expression((Node *) tsc->repeatable, context, + useprefix, false); + else + repeatable = NULL; + + /* Print results */ + if (es->format == EXPLAIN_FORMAT_TEXT) + { + bool first = true; + + appendStringInfoSpaces(es->str, es->indent * 2); + appendStringInfo(es->str, "Sampling: %s (", method_name); + foreach(lc, params) + { + if (!first) + appendStringInfoString(es->str, ", "); + appendStringInfoString(es->str, (const char *) lfirst(lc)); + first = false; + } + appendStringInfoChar(es->str, ')'); + if (repeatable) + appendStringInfo(es->str, " REPEATABLE (%s)", repeatable); + appendStringInfoChar(es->str, '\n'); + } + else + { + ExplainPropertyText("Sampling Method", method_name, es); + ExplainPropertyList("Sampling Parameters", params, es); + if (repeatable) + ExplainPropertyText("Repeatable Seed", repeatable, es); + } +} + /* * If it's EXPLAIN ANALYZE, show tuplesort stats for a sort node */ @@ -2366,13 +2421,13 @@ ExplainTargetRel(Plan *plan, Index rti, ExplainState *es) switch (nodeTag(plan)) { case T_SeqScan: + case T_SampleScan: case T_IndexScan: case T_IndexOnlyScan: case T_BitmapHeapScan: case T_TidScan: case T_ForeignScan: case T_CustomScan: - case T_SampleScan: case T_ModifyTable: /* Assert it's on a real relation */ Assert(rte->rtekind == RTE_RELATION); @@ -2663,9 +2718,9 @@ 
ExplainCustomChildren(CustomScanState *css, List *ancestors, ExplainState *es) { ListCell *cell; const char *label = - (list_length(css->custom_ps) != 1 ? "children" : "child"); + (list_length(css->custom_ps) != 1 ? "children" : "child"); - foreach (cell, css->custom_ps) + foreach(cell, css->custom_ps) ExplainNode((PlanState *) lfirst(cell), ancestors, label, NULL, es); } diff --git a/src/backend/executor/execAmi.c b/src/backend/executor/execAmi.c index 04073d3f9f916..93e1e9a691c50 100644 --- a/src/backend/executor/execAmi.c +++ b/src/backend/executor/execAmi.c @@ -463,6 +463,10 @@ ExecSupportsBackwardScan(Plan *node) case T_CteScan: return TargetListSupportsBackwardScan(node->targetlist); + case T_SampleScan: + /* Simplify life for tablesample methods by disallowing this */ + return false; + case T_IndexScan: return IndexSupportsBackwardScan(((IndexScan *) node)->indexid) && TargetListSupportsBackwardScan(node->targetlist); @@ -485,9 +489,6 @@ ExecSupportsBackwardScan(Plan *node) } return false; - case T_SampleScan: - return false; - case T_Material: case T_Sort: /* these don't evaluate tlist */ diff --git a/src/backend/executor/nodeSamplescan.c b/src/backend/executor/nodeSamplescan.c index 4c1c5237b7d20..dbe84b0baa868 100644 --- a/src/backend/executor/nodeSamplescan.c +++ b/src/backend/executor/nodeSamplescan.c @@ -3,7 +3,7 @@ * nodeSamplescan.c * Support routines for sample scans of relations (table sampling). * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * @@ -14,22 +14,23 @@ */ #include "postgres.h" -#include "access/tablesample.h" +#include "access/hash.h" +#include "access/relscan.h" +#include "access/tsmapi.h" #include "executor/executor.h" #include "executor/nodeSamplescan.h" #include "miscadmin.h" -#include "parser/parsetree.h" #include "pgstat.h" -#include "storage/bufmgr.h" #include "storage/predicate.h" #include "utils/rel.h" -#include "utils/syscache.h" #include "utils/tqual.h" -static void InitScanRelation(SampleScanState *node, EState *estate, - int eflags, TableSampleClause *tablesample); +static void InitScanRelation(SampleScanState *node, EState *estate, int eflags); static TupleTableSlot *SampleNext(SampleScanState *node); - +static void tablesample_init(SampleScanState *scanstate); +static HeapTuple tablesample_getnext(SampleScanState *scanstate); +static bool SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, + HeapScanDesc scan); /* ---------------------------------------------------------------- * Scan Support @@ -45,23 +46,26 @@ static TupleTableSlot *SampleNext(SampleScanState *node); static TupleTableSlot * SampleNext(SampleScanState *node) { - TupleTableSlot *slot; - TableSampleDesc *tsdesc; HeapTuple tuple; + TupleTableSlot *slot; /* - * get information from the scan state + * if this is first call within a scan, initialize */ - slot = node->ss.ss_ScanTupleSlot; - tsdesc = node->tsdesc; + if (!node->begun) + tablesample_init(node); + + /* + * get the next tuple, and store it in our result slot + */ + tuple = tablesample_getnext(node); - tuple = tablesample_getnext(tsdesc); + slot = node->ss.ss_ScanTupleSlot; if (tuple) ExecStoreTuple(tuple, /* tuple to store */ slot, /* slot to store in */ - tsdesc->heapScan->rs_cbuf, /* buffer associated - * with this tuple */ + node->ss.ss_currentScanDesc->rs_cbuf, /* tuple's buffer */ false); /* don't pfree this pointer */ else 
ExecClearTuple(slot); @@ -75,7 +79,10 @@ SampleNext(SampleScanState *node) static bool SampleRecheck(SampleScanState *node, TupleTableSlot *slot) { - /* No need to recheck for SampleScan */ + /* + * No need to recheck for SampleScan, since like SeqScan we don't pass any + * checkable keys to heap_beginscan. + */ return true; } @@ -103,8 +110,7 @@ ExecSampleScan(SampleScanState *node) * ---------------------------------------------------------------- */ static void -InitScanRelation(SampleScanState *node, EState *estate, int eflags, - TableSampleClause *tablesample) +InitScanRelation(SampleScanState *node, EState *estate, int eflags) { Relation currentRelation; @@ -113,19 +119,13 @@ InitScanRelation(SampleScanState *node, EState *estate, int eflags, * open that relation and acquire appropriate lock on it. */ currentRelation = ExecOpenScanRelation(estate, - ((SampleScan *) node->ss.ps.plan)->scanrelid, + ((SampleScan *) node->ss.ps.plan)->scan.scanrelid, eflags); node->ss.ss_currentRelation = currentRelation; - /* - * Even though we aren't going to do a conventional seqscan, it is useful - * to create a HeapScanDesc --- many of the fields in it are usable. - */ - node->ss.ss_currentScanDesc = - heap_beginscan_sampling(currentRelation, estate->es_snapshot, 0, NULL, - tablesample->tsmseqscan, - tablesample->tsmpagemode); + /* we won't set up the HeapScanDesc till later */ + node->ss.ss_currentScanDesc = NULL; /* and report the scan tuple slot's rowtype */ ExecAssignScanType(&node->ss, RelationGetDescr(currentRelation)); @@ -140,12 +140,11 @@ SampleScanState * ExecInitSampleScan(SampleScan *node, EState *estate, int eflags) { SampleScanState *scanstate; - RangeTblEntry *rte = rt_fetch(node->scanrelid, - estate->es_range_table); + TableSampleClause *tsc = node->tablesample; + TsmRoutine *tsm; Assert(outerPlan(node) == NULL); Assert(innerPlan(node) == NULL); - Assert(rte->tablesample != NULL); /* * create state structure @@ -165,10 +164,17 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags) * initialize child expressions */ scanstate->ss.ps.targetlist = (List *) - ExecInitExpr((Expr *) node->plan.targetlist, + ExecInitExpr((Expr *) node->scan.plan.targetlist, (PlanState *) scanstate); scanstate->ss.ps.qual = (List *) - ExecInitExpr((Expr *) node->plan.qual, + ExecInitExpr((Expr *) node->scan.plan.qual, + (PlanState *) scanstate); + + scanstate->args = (List *) + ExecInitExpr((Expr *) tsc->args, + (PlanState *) scanstate); + scanstate->repeatable = + ExecInitExpr(tsc->repeatable, (PlanState *) scanstate); /* @@ -180,7 +186,7 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags) /* * initialize scan relation */ - InitScanRelation(scanstate, estate, eflags, rte->tablesample); + InitScanRelation(scanstate, estate, eflags); scanstate->ss.ps.ps_TupFromTlist = false; @@ -190,7 +196,25 @@ ExecInitSampleScan(SampleScan *node, EState *estate, int eflags) ExecAssignResultTypeFromTL(&scanstate->ss.ps); ExecAssignScanProjectionInfo(&scanstate->ss); - scanstate->tsdesc = tablesample_init(scanstate, rte->tablesample); + /* + * If we don't have a REPEATABLE clause, select a random seed. We want to + * do this just once, since the seed shouldn't change over rescans. + */ + if (tsc->repeatable == NULL) + scanstate->seed = random(); + + /* + * Finally, initialize the TABLESAMPLE method handler. 
+ */ + tsm = GetTsmRoutine(tsc->tsmhandler); + scanstate->tsmroutine = tsm; + scanstate->tsm_state = NULL; + + if (tsm->InitSampleScan) + tsm->InitSampleScan(scanstate, eflags); + + /* We'll do BeginSampleScan later; we can't evaluate params yet */ + scanstate->begun = false; return scanstate; } @@ -207,7 +231,8 @@ ExecEndSampleScan(SampleScanState *node) /* * Tell sampling function that we finished the scan. */ - tablesample_end(node->tsdesc); + if (node->tsmroutine->EndSampleScan) + node->tsmroutine->EndSampleScan(node); /* * Free the exprcontext @@ -223,7 +248,8 @@ ExecEndSampleScan(SampleScanState *node) /* * close heap scan */ - heap_endscan(node->ss.ss_currentScanDesc); + if (node->ss.ss_currentScanDesc) + heap_endscan(node->ss.ss_currentScanDesc); /* * close the heap relation. @@ -231,11 +257,6 @@ ExecEndSampleScan(SampleScanState *node) ExecCloseScanRelation(node->ss.ss_currentRelation); } -/* ---------------------------------------------------------------- - * Join Support - * ---------------------------------------------------------------- - */ - /* ---------------------------------------------------------------- * ExecReScanSampleScan * @@ -246,12 +267,336 @@ ExecEndSampleScan(SampleScanState *node) void ExecReScanSampleScan(SampleScanState *node) { - heap_rescan(node->ss.ss_currentScanDesc, NULL); + /* Remember we need to do BeginSampleScan again (if we did it at all) */ + node->begun = false; + + ExecScanReScan(&node->ss); +} + + +/* + * Initialize the TABLESAMPLE method: evaluate params and call BeginSampleScan. + */ +static void +tablesample_init(SampleScanState *scanstate) +{ + TsmRoutine *tsm = scanstate->tsmroutine; + ExprContext *econtext = scanstate->ss.ps.ps_ExprContext; + Datum *params; + Datum datum; + bool isnull; + uint32 seed; + bool allow_sync; + int i; + ListCell *arg; + + params = (Datum *) palloc(list_length(scanstate->args) * sizeof(Datum)); + + i = 0; + foreach(arg, scanstate->args) + { + ExprState *argstate = (ExprState *) lfirst(arg); + + params[i] = ExecEvalExprSwitchContext(argstate, + econtext, + &isnull, + NULL); + if (isnull) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT), + errmsg("TABLESAMPLE parameter cannot be null"))); + i++; + } + + if (scanstate->repeatable) + { + datum = ExecEvalExprSwitchContext(scanstate->repeatable, + econtext, + &isnull, + NULL); + if (isnull) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLESAMPLE_REPEAT), + errmsg("TABLESAMPLE REPEATABLE parameter cannot be null"))); + + /* + * The REPEATABLE parameter has been coerced to float8 by the parser. + * The reason for using float8 at the SQL level is that it will + * produce unsurprising results both for users used to databases that + * accept only integers in the REPEATABLE clause and for those who + * might expect that REPEATABLE works like setseed() (a float in the + * range from -1 to 1). + * + * We use hashfloat8() to convert the supplied value into a suitable + * seed. For regression-testing purposes, that has the convenient + * property that REPEATABLE(0) gives a machine-independent result. 
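[Editor's note: the TsmRoutine obtained via GetTsmRoutine above drives everything else in this patch. The call sites scattered through the executor and planner (SampleScanGetSampleSize, InitSampleScan, BeginSampleScan, NextSampleBlock, NextSampleTuple, EndSampleScan, plus parameterTypes and the two repeatable_* flags) imply roughly the following callback set. This is only a sketch reconstructed from those call sites; the authoritative definition lives in access/tsmapi.h, and field order, exact types, and any omitted members here are assumptions:]

    #include <stdbool.h>

    /* Opaque stand-ins for the planner/executor types used by the callbacks. */
    struct PlannerInfo;
    struct RelOptInfo;
    struct SampleScanState;
    struct List;
    typedef unsigned int BlockNumber;
    typedef unsigned short OffsetNumber;
    typedef unsigned int uint32;
    typedef unsigned long Datum;        /* assumption: pointer-sized value type */

    typedef struct TsmRoutineSketch
    {
        struct List *parameterTypes;        /* type OIDs of the TABLESAMPLE arguments */
        bool    repeatable_across_queries;  /* REPEATABLE gives stable results across queries? */
        bool    repeatable_across_scans;    /* same seed yields the same tuples on rescan? */

        void        (*SampleScanGetSampleSize) (struct PlannerInfo *root,
                                                struct RelOptInfo *baserel,
                                                struct List *paramexprs,
                                                BlockNumber *pages, double *tuples);
        void        (*InitSampleScan) (struct SampleScanState *node, int eflags);   /* optional */
        void        (*BeginSampleScan) (struct SampleScanState *node, Datum *params,
                                        int nparams, uint32 seed);
        BlockNumber (*NextSampleBlock) (struct SampleScanState *node);              /* optional */
        OffsetNumber (*NextSampleTuple) (struct SampleScanState *node,
                                         BlockNumber blockno, OffsetNumber maxoffset);
        void        (*EndSampleScan) (struct SampleScanState *node);                /* optional */
    } TsmRoutineSketch;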
+ */ + seed = DatumGetUInt32(DirectFunctionCall1(hashfloat8, datum)); + } + else + { + /* Use the seed selected by ExecInitSampleScan */ + seed = scanstate->seed; + } + + /* Set default values for params that BeginSampleScan can adjust */ + scanstate->use_bulkread = true; + scanstate->use_pagemode = true; + + /* Let tablesample method do its thing */ + tsm->BeginSampleScan(scanstate, + params, + list_length(scanstate->args), + seed); + + /* We'll use syncscan if there's no NextSampleBlock function */ + allow_sync = (tsm->NextSampleBlock == NULL); + + /* Now we can create or reset the HeapScanDesc */ + if (scanstate->ss.ss_currentScanDesc == NULL) + { + scanstate->ss.ss_currentScanDesc = + heap_beginscan_sampling(scanstate->ss.ss_currentRelation, + scanstate->ss.ps.state->es_snapshot, + 0, NULL, + scanstate->use_bulkread, + allow_sync, + scanstate->use_pagemode); + } + else + { + heap_rescan_set_params(scanstate->ss.ss_currentScanDesc, NULL, + scanstate->use_bulkread, + allow_sync, + scanstate->use_pagemode); + } + + pfree(params); + + /* And we're initialized. */ + scanstate->begun = true; +} + +/* + * Get next tuple from TABLESAMPLE method. + * + * Note: an awful lot of this is copied-and-pasted from heapam.c. It would + * perhaps be better to refactor to share more code. + */ +static HeapTuple +tablesample_getnext(SampleScanState *scanstate) +{ + TsmRoutine *tsm = scanstate->tsmroutine; + HeapScanDesc scan = scanstate->ss.ss_currentScanDesc; + HeapTuple tuple = &(scan->rs_ctup); + Snapshot snapshot = scan->rs_snapshot; + bool pagemode = scan->rs_pageatatime; + BlockNumber blockno; + Page page; + bool all_visible; + OffsetNumber maxoffset; + + if (!scan->rs_inited) + { + /* + * return null immediately if relation is empty + */ + if (scan->rs_nblocks == 0) + { + Assert(!BufferIsValid(scan->rs_cbuf)); + tuple->t_data = NULL; + return NULL; + } + if (tsm->NextSampleBlock) + { + blockno = tsm->NextSampleBlock(scanstate); + if (!BlockNumberIsValid(blockno)) + { + tuple->t_data = NULL; + return NULL; + } + } + else + blockno = scan->rs_startblock; + Assert(blockno < scan->rs_nblocks); + heapgetpage(scan, blockno); + scan->rs_inited = true; + } + else + { + /* continue from previously returned page/tuple */ + blockno = scan->rs_cblock; /* current page */ + } /* - * Tell sampling function to reset its state for rescan. + * When not using pagemode, we must lock the buffer during tuple + * visibility checks. */ - tablesample_reset(node->tsdesc); + if (!pagemode) + LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); + + page = (Page) BufferGetPage(scan->rs_cbuf); + all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery; + maxoffset = PageGetMaxOffsetNumber(page); + + for (;;) + { + OffsetNumber tupoffset; + bool finished; + + CHECK_FOR_INTERRUPTS(); + + /* Ask the tablesample method which tuples to check on this page. */ + tupoffset = tsm->NextSampleTuple(scanstate, + blockno, + maxoffset); + + if (OffsetNumberIsValid(tupoffset)) + { + ItemId itemid; + bool visible; + + /* Skip invalid tuple pointers. 
*/ + itemid = PageGetItemId(page, tupoffset); + if (!ItemIdIsNormal(itemid)) + continue; + + tuple->t_data = (HeapTupleHeader) PageGetItem(page, itemid); + tuple->t_len = ItemIdGetLength(itemid); + ItemPointerSet(&(tuple->t_self), blockno, tupoffset); + + if (all_visible) + visible = true; + else + visible = SampleTupleVisible(tuple, tupoffset, scan); + + /* in pagemode, heapgetpage did this for us */ + if (!pagemode) + CheckForSerializableConflictOut(visible, scan->rs_rd, tuple, + scan->rs_cbuf, snapshot); + + if (visible) + { + /* Found visible tuple, return it. */ + if (!pagemode) + LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); + break; + } + else + { + /* Try next tuple from same page. */ + continue; + } + } + + /* + * if we get here, it means we've exhausted the items on this page and + * it's time to move to the next. + */ + if (!pagemode) + LockBuffer(scan->rs_cbuf, BUFFER_LOCK_UNLOCK); + + if (tsm->NextSampleBlock) + { + blockno = tsm->NextSampleBlock(scanstate); + Assert(!scan->rs_syncscan); + finished = !BlockNumberIsValid(blockno); + } + else + { + /* Without NextSampleBlock, just do a plain forward seqscan. */ + blockno++; + if (blockno >= scan->rs_nblocks) + blockno = 0; + + /* + * Report our new scan position for synchronization purposes. + * + * Note: we do this before checking for end of scan so that the + * final state of the position hint is back at the start of the + * rel. That's not strictly necessary, but otherwise when you run + * the same query multiple times the starting position would shift + * a little bit backwards on every invocation, which is confusing. + * We don't guarantee any specific ordering in general, though. + */ + if (scan->rs_syncscan) + ss_report_location(scan->rs_rd, blockno); + + finished = (blockno == scan->rs_startblock); + } + + /* + * Reached end of scan? + */ + if (finished) + { + if (BufferIsValid(scan->rs_cbuf)) + ReleaseBuffer(scan->rs_cbuf); + scan->rs_cbuf = InvalidBuffer; + scan->rs_cblock = InvalidBlockNumber; + tuple->t_data = NULL; + scan->rs_inited = false; + return NULL; + } + + Assert(blockno < scan->rs_nblocks); + heapgetpage(scan, blockno); + + /* Re-establish state for new page */ + if (!pagemode) + LockBuffer(scan->rs_cbuf, BUFFER_LOCK_SHARE); + + page = (Page) BufferGetPage(scan->rs_cbuf); + all_visible = PageIsAllVisible(page) && !snapshot->takenDuringRecovery; + maxoffset = PageGetMaxOffsetNumber(page); + } + + /* Count successfully-fetched tuples as heap fetches */ + pgstat_count_heap_getnext(scan->rs_rd); + + return &(scan->rs_ctup); +} - ExecScanReScan(&node->ss); +/* + * Check visibility of the tuple. + */ +static bool +SampleTupleVisible(HeapTuple tuple, OffsetNumber tupoffset, HeapScanDesc scan) +{ + if (scan->rs_pageatatime) + { + /* + * In pageatatime mode, heapgetpage() already did visibility checks, + * so just look at the info it left in rs_vistuples[]. + * + * We use a binary search over the known-sorted array. Note: we could + * save some effort if we insisted that NextSampleTuple select tuples + * in increasing order, but it's not clear that there would be enough + * gain to justify the restriction. + */ + int start = 0, + end = scan->rs_ntuples - 1; + + while (start <= end) + { + int mid = (start + end) / 2; + OffsetNumber curoffset = scan->rs_vistuples[mid]; + + if (tupoffset == curoffset) + return true; + else if (tupoffset < curoffset) + end = mid - 1; + else + start = mid + 1; + } + + return false; + } + else + { + /* Otherwise, we have to check the tuple individually. 
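[Editor's note: the pagemode branch above relies on rs_vistuples[] being sorted in offset order. A standalone version of that binary search, independent of the heap-scan machinery (array and names are illustrative only):]

    #include <stdbool.h>

    /* Return true if 'target' occurs in 'offsets', which must be sorted ascending. */
    static bool
    offset_is_visible(const unsigned short *offsets, int noffsets, unsigned short target)
    {
        int     start = 0;
        int     end = noffsets - 1;

        while (start <= end)
        {
            int             mid = (start + end) / 2;
            unsigned short  cur = offsets[mid];

            if (target == cur)
                return true;
            else if (target < cur)
                end = mid - 1;
            else
                start = mid + 1;
        }
        return false;
    }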
*/ + return HeapTupleSatisfiesVisibility(tuple, + scan->rs_snapshot, + scan->rs_cbuf); + } } diff --git a/src/backend/nodes/copyfuncs.c b/src/backend/nodes/copyfuncs.c index 4c363d3d39a9e..ea9bdccab8e30 100644 --- a/src/backend/nodes/copyfuncs.c +++ b/src/backend/nodes/copyfuncs.c @@ -359,6 +359,27 @@ _copySeqScan(const SeqScan *from) return newnode; } +/* + * _copySampleScan + */ +static SampleScan * +_copySampleScan(const SampleScan *from) +{ + SampleScan *newnode = makeNode(SampleScan); + + /* + * copy node superclass fields + */ + CopyScanFields((const Scan *) from, (Scan *) newnode); + + /* + * copy remainder of node + */ + COPY_NODE_FIELD(tablesample); + + return newnode; +} + /* * _copyIndexScan */ @@ -641,22 +662,6 @@ _copyCustomScan(const CustomScan *from) return newnode; } -/* - * _copySampleScan - */ -static SampleScan * -_copySampleScan(const SampleScan *from) -{ - SampleScan *newnode = makeNode(SampleScan); - - /* - * copy node superclass fields - */ - CopyScanFields((const Scan *) from, (Scan *) newnode); - - return newnode; -} - /* * CopyJoinFields * @@ -2143,6 +2148,18 @@ _copyRangeTblFunction(const RangeTblFunction *from) return newnode; } +static TableSampleClause * +_copyTableSampleClause(const TableSampleClause *from) +{ + TableSampleClause *newnode = makeNode(TableSampleClause); + + COPY_SCALAR_FIELD(tsmhandler); + COPY_NODE_FIELD(args); + COPY_NODE_FIELD(repeatable); + + return newnode; +} + static WithCheckOption * _copyWithCheckOption(const WithCheckOption *from) { @@ -2271,40 +2288,6 @@ _copyCommonTableExpr(const CommonTableExpr *from) return newnode; } -static RangeTableSample * -_copyRangeTableSample(const RangeTableSample *from) -{ - RangeTableSample *newnode = makeNode(RangeTableSample); - - COPY_NODE_FIELD(relation); - COPY_STRING_FIELD(method); - COPY_NODE_FIELD(repeatable); - COPY_NODE_FIELD(args); - - return newnode; -} - -static TableSampleClause * -_copyTableSampleClause(const TableSampleClause *from) -{ - TableSampleClause *newnode = makeNode(TableSampleClause); - - COPY_SCALAR_FIELD(tsmid); - COPY_SCALAR_FIELD(tsmseqscan); - COPY_SCALAR_FIELD(tsmpagemode); - COPY_SCALAR_FIELD(tsminit); - COPY_SCALAR_FIELD(tsmnextblock); - COPY_SCALAR_FIELD(tsmnexttuple); - COPY_SCALAR_FIELD(tsmexaminetuple); - COPY_SCALAR_FIELD(tsmend); - COPY_SCALAR_FIELD(tsmreset); - COPY_SCALAR_FIELD(tsmcost); - COPY_NODE_FIELD(repeatable); - COPY_NODE_FIELD(args); - - return newnode; -} - static A_Expr * _copyAExpr(const A_Expr *from) { @@ -2532,6 +2515,20 @@ _copyRangeFunction(const RangeFunction *from) return newnode; } +static RangeTableSample * +_copyRangeTableSample(const RangeTableSample *from) +{ + RangeTableSample *newnode = makeNode(RangeTableSample); + + COPY_NODE_FIELD(relation); + COPY_NODE_FIELD(method); + COPY_NODE_FIELD(args); + COPY_NODE_FIELD(repeatable); + COPY_LOCATION_FIELD(location); + + return newnode; +} + static TypeCast * _copyTypeCast(const TypeCast *from) { @@ -4225,6 +4222,9 @@ copyObject(const void *from) case T_SeqScan: retval = _copySeqScan(from); break; + case T_SampleScan: + retval = _copySampleScan(from); + break; case T_IndexScan: retval = _copyIndexScan(from); break; @@ -4261,9 +4261,6 @@ copyObject(const void *from) case T_CustomScan: retval = _copyCustomScan(from); break; - case T_SampleScan: - retval = _copySampleScan(from); - break; case T_Join: retval = _copyJoin(from); break; @@ -4882,6 +4879,9 @@ copyObject(const void *from) case T_RangeFunction: retval = _copyRangeFunction(from); break; + case T_RangeTableSample: + retval = 
_copyRangeTableSample(from); + break; case T_TypeName: retval = _copyTypeName(from); break; @@ -4906,6 +4906,9 @@ copyObject(const void *from) case T_RangeTblFunction: retval = _copyRangeTblFunction(from); break; + case T_TableSampleClause: + retval = _copyTableSampleClause(from); + break; case T_WithCheckOption: retval = _copyWithCheckOption(from); break; @@ -4933,12 +4936,6 @@ copyObject(const void *from) case T_CommonTableExpr: retval = _copyCommonTableExpr(from); break; - case T_RangeTableSample: - retval = _copyRangeTableSample(from); - break; - case T_TableSampleClause: - retval = _copyTableSampleClause(from); - break; case T_FuncWithArgs: retval = _copyFuncWithArgs(from); break; diff --git a/src/backend/nodes/equalfuncs.c b/src/backend/nodes/equalfuncs.c index f19251e7c415f..514c964277326 100644 --- a/src/backend/nodes/equalfuncs.c +++ b/src/backend/nodes/equalfuncs.c @@ -2280,6 +2280,18 @@ _equalRangeFunction(const RangeFunction *a, const RangeFunction *b) return true; } +static bool +_equalRangeTableSample(const RangeTableSample *a, const RangeTableSample *b) +{ + COMPARE_NODE_FIELD(relation); + COMPARE_NODE_FIELD(method); + COMPARE_NODE_FIELD(args); + COMPARE_NODE_FIELD(repeatable); + COMPARE_LOCATION_FIELD(location); + + return true; +} + static bool _equalIndexElem(const IndexElem *a, const IndexElem *b) { @@ -2418,6 +2430,16 @@ _equalRangeTblFunction(const RangeTblFunction *a, const RangeTblFunction *b) return true; } +static bool +_equalTableSampleClause(const TableSampleClause *a, const TableSampleClause *b) +{ + COMPARE_SCALAR_FIELD(tsmhandler); + COMPARE_NODE_FIELD(args); + COMPARE_NODE_FIELD(repeatable); + + return true; +} + static bool _equalWithCheckOption(const WithCheckOption *a, const WithCheckOption *b) { @@ -2528,36 +2550,6 @@ _equalCommonTableExpr(const CommonTableExpr *a, const CommonTableExpr *b) return true; } -static bool -_equalRangeTableSample(const RangeTableSample *a, const RangeTableSample *b) -{ - COMPARE_NODE_FIELD(relation); - COMPARE_STRING_FIELD(method); - COMPARE_NODE_FIELD(repeatable); - COMPARE_NODE_FIELD(args); - - return true; -} - -static bool -_equalTableSampleClause(const TableSampleClause *a, const TableSampleClause *b) -{ - COMPARE_SCALAR_FIELD(tsmid); - COMPARE_SCALAR_FIELD(tsmseqscan); - COMPARE_SCALAR_FIELD(tsmpagemode); - COMPARE_SCALAR_FIELD(tsminit); - COMPARE_SCALAR_FIELD(tsmnextblock); - COMPARE_SCALAR_FIELD(tsmnexttuple); - COMPARE_SCALAR_FIELD(tsmexaminetuple); - COMPARE_SCALAR_FIELD(tsmend); - COMPARE_SCALAR_FIELD(tsmreset); - COMPARE_SCALAR_FIELD(tsmcost); - COMPARE_NODE_FIELD(repeatable); - COMPARE_NODE_FIELD(args); - - return true; -} - static bool _equalXmlSerialize(const XmlSerialize *a, const XmlSerialize *b) { @@ -3247,6 +3239,9 @@ equal(const void *a, const void *b) case T_RangeFunction: retval = _equalRangeFunction(a, b); break; + case T_RangeTableSample: + retval = _equalRangeTableSample(a, b); + break; case T_TypeName: retval = _equalTypeName(a, b); break; @@ -3271,6 +3266,9 @@ equal(const void *a, const void *b) case T_RangeTblFunction: retval = _equalRangeTblFunction(a, b); break; + case T_TableSampleClause: + retval = _equalTableSampleClause(a, b); + break; case T_WithCheckOption: retval = _equalWithCheckOption(a, b); break; @@ -3298,12 +3296,6 @@ equal(const void *a, const void *b) case T_CommonTableExpr: retval = _equalCommonTableExpr(a, b); break; - case T_RangeTableSample: - retval = _equalRangeTableSample(a, b); - break; - case T_TableSampleClause: - retval = _equalTableSampleClause(a, b); - break; case 
T_FuncWithArgs: retval = _equalFuncWithArgs(a, b); break; diff --git a/src/backend/nodes/nodeFuncs.c b/src/backend/nodes/nodeFuncs.c index b1e3e6e489320..c517dfd9d69c6 100644 --- a/src/backend/nodes/nodeFuncs.c +++ b/src/backend/nodes/nodeFuncs.c @@ -1486,6 +1486,9 @@ exprLocation(const Node *expr) case T_WindowDef: loc = ((const WindowDef *) expr)->location; break; + case T_RangeTableSample: + loc = ((const RangeTableSample *) expr)->location; + break; case T_TypeName: loc = ((const TypeName *) expr)->location; break; @@ -1995,6 +1998,17 @@ expression_tree_walker(Node *node, return walker(((PlaceHolderInfo *) node)->ph_var, context); case T_RangeTblFunction: return walker(((RangeTblFunction *) node)->funcexpr, context); + case T_TableSampleClause: + { + TableSampleClause *tsc = (TableSampleClause *) node; + + if (expression_tree_walker((Node *) tsc->args, + walker, context)) + return true; + if (walker((Node *) tsc->repeatable, context)) + return true; + } + break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); @@ -2082,13 +2096,8 @@ range_table_walker(List *rtable, switch (rte->rtekind) { case RTE_RELATION: - if (rte->tablesample) - { - if (walker(rte->tablesample->args, context)) - return true; - if (walker(rte->tablesample->repeatable, context)) - return true; - } + if (walker(rte->tablesample, context)) + return true; break; case RTE_CTE: /* nothing to do */ @@ -2782,6 +2791,17 @@ expression_tree_mutator(Node *node, return (Node *) newnode; } break; + case T_TableSampleClause: + { + TableSampleClause *tsc = (TableSampleClause *) node; + TableSampleClause *newnode; + + FLATCOPY(newnode, tsc, TableSampleClause); + MUTATE(newnode->args, tsc->args, List *); + MUTATE(newnode->repeatable, tsc->repeatable, Expr *); + return (Node *) newnode; + } + break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); @@ -2868,20 +2888,12 @@ range_table_mutator(List *rtable, switch (rte->rtekind) { case RTE_RELATION: - if (rte->tablesample) - { - CHECKFLATCOPY(newrte->tablesample, rte->tablesample, - TableSampleClause); - MUTATE(newrte->tablesample->args, - newrte->tablesample->args, - List *); - MUTATE(newrte->tablesample->repeatable, - newrte->tablesample->repeatable, - Node *); - } + MUTATE(newrte->tablesample, rte->tablesample, + TableSampleClause *); + /* we don't bother to copy eref, aliases, etc; OK? */ break; case RTE_CTE: - /* we don't bother to copy eref, aliases, etc; OK? 
*/ + /* nothing to do */ break; case RTE_SUBQUERY: if (!(flags & QTW_IGNORE_RT_SUBQUERIES)) @@ -3316,6 +3328,19 @@ raw_expression_tree_walker(Node *node, return true; } break; + case T_RangeTableSample: + { + RangeTableSample *rts = (RangeTableSample *) node; + + if (walker(rts->relation, context)) + return true; + /* method name is deemed uninteresting */ + if (walker(rts->args, context)) + return true; + if (walker(rts->repeatable, context)) + return true; + } + break; case T_TypeName: { TypeName *tn = (TypeName *) node; @@ -3380,18 +3405,6 @@ raw_expression_tree_walker(Node *node, break; case T_CommonTableExpr: return walker(((CommonTableExpr *) node)->ctequery, context); - case T_RangeTableSample: - { - RangeTableSample *rts = (RangeTableSample *) node; - - if (walker(rts->relation, context)) - return true; - if (walker(rts->repeatable, context)) - return true; - if (walker(rts->args, context)) - return true; - } - break; default: elog(ERROR, "unrecognized node type: %d", (int) nodeTag(node)); diff --git a/src/backend/nodes/outfuncs.c b/src/backend/nodes/outfuncs.c index 87304ba9bf65d..81725d6e59a20 100644 --- a/src/backend/nodes/outfuncs.c +++ b/src/backend/nodes/outfuncs.c @@ -444,6 +444,16 @@ _outSeqScan(StringInfo str, const SeqScan *node) _outScanInfo(str, (const Scan *) node); } +static void +_outSampleScan(StringInfo str, const SampleScan *node) +{ + WRITE_NODE_TYPE("SAMPLESCAN"); + + _outScanInfo(str, (const Scan *) node); + + WRITE_NODE_FIELD(tablesample); +} + static void _outIndexScan(StringInfo str, const IndexScan *node) { @@ -591,14 +601,6 @@ _outCustomScan(StringInfo str, const CustomScan *node) node->methods->TextOutCustomScan(str, node); } -static void -_outSampleScan(StringInfo str, const SampleScan *node) -{ - WRITE_NODE_TYPE("SAMPLESCAN"); - - _outScanInfo(str, (const Scan *) node); -} - static void _outJoin(StringInfo str, const Join *node) { @@ -2478,36 +2480,6 @@ _outCommonTableExpr(StringInfo str, const CommonTableExpr *node) WRITE_NODE_FIELD(ctecolcollations); } -static void -_outRangeTableSample(StringInfo str, const RangeTableSample *node) -{ - WRITE_NODE_TYPE("RANGETABLESAMPLE"); - - WRITE_NODE_FIELD(relation); - WRITE_STRING_FIELD(method); - WRITE_NODE_FIELD(repeatable); - WRITE_NODE_FIELD(args); -} - -static void -_outTableSampleClause(StringInfo str, const TableSampleClause *node) -{ - WRITE_NODE_TYPE("TABLESAMPLECLAUSE"); - - WRITE_OID_FIELD(tsmid); - WRITE_BOOL_FIELD(tsmseqscan); - WRITE_BOOL_FIELD(tsmpagemode); - WRITE_OID_FIELD(tsminit); - WRITE_OID_FIELD(tsmnextblock); - WRITE_OID_FIELD(tsmnexttuple); - WRITE_OID_FIELD(tsmexaminetuple); - WRITE_OID_FIELD(tsmend); - WRITE_OID_FIELD(tsmreset); - WRITE_OID_FIELD(tsmcost); - WRITE_NODE_FIELD(repeatable); - WRITE_NODE_FIELD(args); -} - static void _outSetOperationStmt(StringInfo str, const SetOperationStmt *node) { @@ -2594,6 +2566,16 @@ _outRangeTblFunction(StringInfo str, const RangeTblFunction *node) WRITE_BITMAPSET_FIELD(funcparams); } +static void +_outTableSampleClause(StringInfo str, const TableSampleClause *node) +{ + WRITE_NODE_TYPE("TABLESAMPLECLAUSE"); + + WRITE_OID_FIELD(tsmhandler); + WRITE_NODE_FIELD(args); + WRITE_NODE_FIELD(repeatable); +} + static void _outAExpr(StringInfo str, const A_Expr *node) { @@ -2845,6 +2827,18 @@ _outRangeFunction(StringInfo str, const RangeFunction *node) WRITE_NODE_FIELD(coldeflist); } +static void +_outRangeTableSample(StringInfo str, const RangeTableSample *node) +{ + WRITE_NODE_TYPE("RANGETABLESAMPLE"); + + WRITE_NODE_FIELD(relation); + 
WRITE_NODE_FIELD(method); + WRITE_NODE_FIELD(args); + WRITE_NODE_FIELD(repeatable); + WRITE_LOCATION_FIELD(location); +} + static void _outConstraint(StringInfo str, const Constraint *node) { @@ -3002,6 +2996,9 @@ _outNode(StringInfo str, const void *obj) case T_SeqScan: _outSeqScan(str, obj); break; + case T_SampleScan: + _outSampleScan(str, obj); + break; case T_IndexScan: _outIndexScan(str, obj); break; @@ -3038,9 +3035,6 @@ _outNode(StringInfo str, const void *obj) case T_CustomScan: _outCustomScan(str, obj); break; - case T_SampleScan: - _outSampleScan(str, obj); - break; case T_Join: _outJoin(str, obj); break; @@ -3393,12 +3387,6 @@ _outNode(StringInfo str, const void *obj) case T_CommonTableExpr: _outCommonTableExpr(str, obj); break; - case T_RangeTableSample: - _outRangeTableSample(str, obj); - break; - case T_TableSampleClause: - _outTableSampleClause(str, obj); - break; case T_SetOperationStmt: _outSetOperationStmt(str, obj); break; @@ -3408,6 +3396,9 @@ _outNode(StringInfo str, const void *obj) case T_RangeTblFunction: _outRangeTblFunction(str, obj); break; + case T_TableSampleClause: + _outTableSampleClause(str, obj); + break; case T_A_Expr: _outAExpr(str, obj); break; @@ -3450,6 +3441,9 @@ _outNode(StringInfo str, const void *obj) case T_RangeFunction: _outRangeFunction(str, obj); break; + case T_RangeTableSample: + _outRangeTableSample(str, obj); + break; case T_Constraint: _outConstraint(str, obj); break; diff --git a/src/backend/nodes/readfuncs.c b/src/backend/nodes/readfuncs.c index f5a40fbfb44b8..71be840eac9f7 100644 --- a/src/backend/nodes/readfuncs.c +++ b/src/backend/nodes/readfuncs.c @@ -367,46 +367,6 @@ _readCommonTableExpr(void) READ_DONE(); } -/* - * _readRangeTableSample - */ -static RangeTableSample * -_readRangeTableSample(void) -{ - READ_LOCALS(RangeTableSample); - - READ_NODE_FIELD(relation); - READ_STRING_FIELD(method); - READ_NODE_FIELD(repeatable); - READ_NODE_FIELD(args); - - READ_DONE(); -} - -/* - * _readTableSampleClause - */ -static TableSampleClause * -_readTableSampleClause(void) -{ - READ_LOCALS(TableSampleClause); - - READ_OID_FIELD(tsmid); - READ_BOOL_FIELD(tsmseqscan); - READ_BOOL_FIELD(tsmpagemode); - READ_OID_FIELD(tsminit); - READ_OID_FIELD(tsmnextblock); - READ_OID_FIELD(tsmnexttuple); - READ_OID_FIELD(tsmexaminetuple); - READ_OID_FIELD(tsmend); - READ_OID_FIELD(tsmreset); - READ_OID_FIELD(tsmcost); - READ_NODE_FIELD(repeatable); - READ_NODE_FIELD(args); - - READ_DONE(); -} - /* * _readSetOperationStmt */ @@ -1391,6 +1351,21 @@ _readRangeTblFunction(void) READ_DONE(); } +/* + * _readTableSampleClause + */ +static TableSampleClause * +_readTableSampleClause(void) +{ + READ_LOCALS(TableSampleClause); + + READ_OID_FIELD(tsmhandler); + READ_NODE_FIELD(args); + READ_NODE_FIELD(repeatable); + + READ_DONE(); +} + /* * parseNodeString @@ -1426,10 +1401,6 @@ parseNodeString(void) return_value = _readRowMarkClause(); else if (MATCH("COMMONTABLEEXPR", 15)) return_value = _readCommonTableExpr(); - else if (MATCH("RANGETABLESAMPLE", 16)) - return_value = _readRangeTableSample(); - else if (MATCH("TABLESAMPLECLAUSE", 17)) - return_value = _readTableSampleClause(); else if (MATCH("SETOPERATIONSTMT", 16)) return_value = _readSetOperationStmt(); else if (MATCH("ALIAS", 5)) @@ -1528,6 +1499,8 @@ parseNodeString(void) return_value = _readRangeTblEntry(); else if (MATCH("RANGETBLFUNCTION", 16)) return_value = _readRangeTblFunction(); + else if (MATCH("TABLESAMPLECLAUSE", 17)) + return_value = _readTableSampleClause(); else if (MATCH("NOTIFY", 6)) return_value = 
_readNotifyStmt(); else if (MATCH("DECLARECURSOR", 13)) diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 888eeac515184..1590be1167508 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -18,6 +18,7 @@ #include #include "access/sysattr.h" +#include "access/tsmapi.h" #include "catalog/pg_class.h" #include "catalog/pg_operator.h" #include "foreign/fdwapi.h" @@ -390,7 +391,7 @@ set_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, } else if (rte->tablesample != NULL) { - /* Build sample scan on relation */ + /* Sampled relation */ set_tablesample_rel_pathlist(root, rel, rte); } else @@ -480,11 +481,40 @@ set_plain_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) /* * set_tablesample_rel_size - * Set size estimates for a sampled relation. + * Set size estimates for a sampled relation */ static void set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) { + TableSampleClause *tsc = rte->tablesample; + TsmRoutine *tsm; + BlockNumber pages; + double tuples; + + /* + * Test any partial indexes of rel for applicability. We must do this + * first since partial unique indexes can affect size estimates. + */ + check_partial_indexes(root, rel); + + /* + * Call the sampling method's estimation function to estimate the number + * of pages it will read and the number of tuples it will return. (Note: + * we assume the function returns sane values.) + */ + tsm = GetTsmRoutine(tsc->tsmhandler); + tsm->SampleScanGetSampleSize(root, rel, tsc->args, + &pages, &tuples); + + /* + * For the moment, because we will only consider a SampleScan path for the + * rel, it's okay to just overwrite the pages and tuples estimates for the + * whole relation. If we ever consider multiple path types for sampled + * rels, we'll need more complication. + */ + rel->pages = pages; + rel->tuples = tuples; + /* Mark rel with estimated output rows, width, etc */ set_baserel_size_estimates(root, rel); } @@ -492,8 +522,6 @@ set_tablesample_rel_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) /* * set_tablesample_rel_pathlist * Build access paths for a sampled relation - * - * There is only one possible path - sampling scan */ static void set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) @@ -502,15 +530,41 @@ set_tablesample_rel_pathlist(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry * Path *path; /* - * We don't support pushing join clauses into the quals of a seqscan, but - * it could still have required parameterization due to LATERAL refs in - * its tlist. + * We don't support pushing join clauses into the quals of a samplescan, + * but it could still have required parameterization due to LATERAL refs + * in its tlist or TABLESAMPLE arguments. */ required_outer = rel->lateral_relids; - /* We only do sample scan if it was requested */ + /* Consider sampled scan */ path = create_samplescan_path(root, rel, required_outer); - rel->pathlist = list_make1(path); + + /* + * If the sampling method does not support repeatable scans, we must avoid + * plans that would scan the rel multiple times. Ideally, we'd simply + * avoid putting the rel on the inside of a nestloop join; but adding such + * a consideration to the planner seems like a great deal of complication + * to support an uncommon usage of second-rate sampling methods. Instead, + * if there is a risk that the query might perform an unsafe join, just + * wrap the SampleScan in a Materialize node. 
We can check for joins by + * counting the membership of all_baserels (note that this correctly + * counts inheritance trees as single rels). If we're inside a subquery, + * we can't easily check whether a join might occur in the outer query, so + * just assume one is possible. + * + * GetTsmRoutine is relatively expensive compared to the other tests here, + * so check repeatable_across_scans last, even though that's a bit odd. + */ + if ((root->query_level > 1 || + bms_membership(root->all_baserels) != BMS_SINGLETON) && + !(GetTsmRoutine(rte->tablesample->tsmhandler)->repeatable_across_scans)) + { + path = (Path *) create_material_path(rel, path); + } + + add_path(rel, path); + + /* For the moment, at least, there are no other paths to consider */ } /* @@ -2450,7 +2504,33 @@ print_path(PlannerInfo *root, Path *path, int indent) switch (nodeTag(path)) { case T_Path: - ptype = "SeqScan"; + switch (path->pathtype) + { + case T_SeqScan: + ptype = "SeqScan"; + break; + case T_SampleScan: + ptype = "SampleScan"; + break; + case T_SubqueryScan: + ptype = "SubqueryScan"; + break; + case T_FunctionScan: + ptype = "FunctionScan"; + break; + case T_ValuesScan: + ptype = "ValuesScan"; + break; + case T_CteScan: + ptype = "CteScan"; + break; + case T_WorkTableScan: + ptype = "WorkTableScan"; + break; + default: + ptype = "???Path"; + break; + } break; case T_IndexPath: ptype = "IdxScan"; diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c index 0d302f66bee4c..7069f6041102e 100644 --- a/src/backend/optimizer/path/costsize.c +++ b/src/backend/optimizer/path/costsize.c @@ -74,6 +74,7 @@ #include #include "access/htup_details.h" +#include "access/tsmapi.h" #include "executor/executor.h" #include "executor/nodeHash.h" #include "miscadmin.h" @@ -223,64 +224,66 @@ cost_seqscan(Path *path, PlannerInfo *root, * cost_samplescan * Determines and returns the cost of scanning a relation using sampling. * - * From planner/optimizer perspective, we don't care all that much about cost - * itself since there is always only one scan path to consider when sampling - * scan is present, but number of rows estimation is still important. - * * 'baserel' is the relation to be scanned * 'param_info' is the ParamPathInfo if this is a parameterized path, else NULL */ void -cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel) +cost_samplescan(Path *path, PlannerInfo *root, + RelOptInfo *baserel, ParamPathInfo *param_info) { Cost startup_cost = 0; Cost run_cost = 0; + RangeTblEntry *rte; + TableSampleClause *tsc; + TsmRoutine *tsm; double spc_seq_page_cost, spc_random_page_cost, spc_page_cost; QualCost qpqual_cost; Cost cpu_per_tuple; - BlockNumber pages; - double tuples; - RangeTblEntry *rte = planner_rt_fetch(baserel->relid, root); - TableSampleClause *tablesample = rte->tablesample; - /* Should only be applied to base relations */ + /* Should only be applied to base relations with tablesample clauses */ Assert(baserel->relid > 0); - Assert(baserel->rtekind == RTE_RELATION); + rte = planner_rt_fetch(baserel->relid, root); + Assert(rte->rtekind == RTE_RELATION); + tsc = rte->tablesample; + Assert(tsc != NULL); + tsm = GetTsmRoutine(tsc->tsmhandler); /* Mark the path with the correct row estimate */ - if (path->param_info) - path->rows = path->param_info->ppi_rows; + if (param_info) + path->rows = param_info->ppi_rows; else path->rows = baserel->rows; - /* Call the sampling method's costing function. 
*/ - OidFunctionCall6(tablesample->tsmcost, PointerGetDatum(root), - PointerGetDatum(path), PointerGetDatum(baserel), - PointerGetDatum(tablesample->args), - PointerGetDatum(&pages), PointerGetDatum(&tuples)); - /* fetch estimated page cost for tablespace containing table */ get_tablespace_page_costs(baserel->reltablespace, &spc_random_page_cost, &spc_seq_page_cost); - - spc_page_cost = tablesample->tsmseqscan ? spc_seq_page_cost : - spc_random_page_cost; + /* if NextSampleBlock is used, assume random access, else sequential */ + spc_page_cost = (tsm->NextSampleBlock != NULL) ? + spc_random_page_cost : spc_seq_page_cost; /* - * disk costs + * disk costs (recall that baserel->pages has already been set to the + * number of pages the sampling method will visit) */ - run_cost += spc_page_cost * pages; + run_cost += spc_page_cost * baserel->pages; - /* CPU costs */ - get_restriction_qual_cost(root, baserel, path->param_info, &qpqual_cost); + /* + * CPU costs (recall that baserel->tuples has already been set to the + * number of tuples the sampling method will select). Note that we ignore + * execution cost of the TABLESAMPLE parameter expressions; they will be + * evaluated only once per scan, and in most usages they'll likely be + * simple constants anyway. We also don't charge anything for the + * calculations the sampling method might do internally. + */ + get_restriction_qual_cost(root, baserel, param_info, &qpqual_cost); startup_cost += qpqual_cost.startup; cpu_per_tuple = cpu_tuple_cost + qpqual_cost.per_tuple; - run_cost += cpu_per_tuple * tuples; + run_cost += cpu_per_tuple * baserel->tuples; path->startup_cost = startup_cost; path->total_cost = startup_cost + run_cost; diff --git a/src/backend/optimizer/plan/createplan.c b/src/backend/optimizer/plan/createplan.c index 8d15c8ede90f9..f461586e08c5b 100644 --- a/src/backend/optimizer/plan/createplan.c +++ b/src/backend/optimizer/plan/createplan.c @@ -102,7 +102,8 @@ static List *order_qual_clauses(PlannerInfo *root, List *clauses); static void copy_path_costsize(Plan *dest, Path *src); static void copy_plan_costsize(Plan *dest, Plan *src); static SeqScan *make_seqscan(List *qptlist, List *qpqual, Index scanrelid); -static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid); +static SampleScan *make_samplescan(List *qptlist, List *qpqual, Index scanrelid, + TableSampleClause *tsc); static IndexScan *make_indexscan(List *qptlist, List *qpqual, Index scanrelid, Oid indexid, List *indexqual, List *indexqualorig, List *indexorderby, List *indexorderbyorig, @@ -1148,7 +1149,7 @@ create_seqscan_plan(PlannerInfo *root, Path *best_path, /* * create_samplescan_plan - * Returns a samplecan plan for the base relation scanned by 'best_path' + * Returns a samplescan plan for the base relation scanned by 'best_path' * with restriction clauses 'scan_clauses' and targetlist 'tlist'. */ static SampleScan * @@ -1157,11 +1158,15 @@ create_samplescan_plan(PlannerInfo *root, Path *best_path, { SampleScan *scan_plan; Index scan_relid = best_path->parent->relid; + RangeTblEntry *rte; + TableSampleClause *tsc; - /* it should be a base rel with tablesample clause... */ + /* it should be a base rel with a tablesample clause... 
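[Editor's note: to make the reworked cost_samplescan concrete, here is a small self-contained computation following the same shape: one page cost per page the method will visit, plus per-tuple CPU and qual cost for each tuple it will return. The specific numbers (500 pages, 5000 tuples, a 0.0025 qual cost) are made-up illustrative values, not outputs of any particular plan:]

    #include <stdio.h>

    int
    main(void)
    {
        double  spc_page_cost = 1.0;        /* sequential page cost, i.e. no NextSampleBlock */
        double  cpu_tuple_cost = 0.01;
        double  qual_cost_per_tuple = 0.0025;
        double  pages = 500.0;              /* baserel->pages as set by the sampling method */
        double  tuples = 5000.0;            /* baserel->tuples as set by the sampling method */

        double  run_cost = spc_page_cost * pages
                         + (cpu_tuple_cost + qual_cost_per_tuple) * tuples;

        printf("estimated run cost: %.1f\n", run_cost);    /* 500 + 62.5 = 562.5 */
        return 0;
    }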
*/ Assert(scan_relid > 0); - Assert(best_path->parent->rtekind == RTE_RELATION); - Assert(best_path->pathtype == T_SampleScan); + rte = planner_rt_fetch(scan_relid, root); + Assert(rte->rtekind == RTE_RELATION); + tsc = rte->tablesample; + Assert(tsc != NULL); /* Sort clauses into best execution order */ scan_clauses = order_qual_clauses(root, scan_clauses); @@ -1174,13 +1179,16 @@ create_samplescan_plan(PlannerInfo *root, Path *best_path, { scan_clauses = (List *) replace_nestloop_params(root, (Node *) scan_clauses); + tsc = (TableSampleClause *) + replace_nestloop_params(root, (Node *) tsc); } scan_plan = make_samplescan(tlist, scan_clauses, - scan_relid); + scan_relid, + tsc); - copy_path_costsize(&scan_plan->plan, best_path); + copy_path_costsize(&scan_plan->scan.plan, best_path); return scan_plan; } @@ -2161,9 +2169,9 @@ create_customscan_plan(PlannerInfo *root, CustomPath *best_path, ListCell *lc; /* Recursively transform child paths. */ - foreach (lc, best_path->custom_paths) + foreach(lc, best_path->custom_paths) { - Plan *plan = create_plan_recurse(root, (Path *) lfirst(lc)); + Plan *plan = create_plan_recurse(root, (Path *) lfirst(lc)); custom_plans = lappend(custom_plans, plan); } @@ -3437,17 +3445,19 @@ make_seqscan(List *qptlist, static SampleScan * make_samplescan(List *qptlist, List *qpqual, - Index scanrelid) + Index scanrelid, + TableSampleClause *tsc) { SampleScan *node = makeNode(SampleScan); - Plan *plan = &node->plan; + Plan *plan = &node->scan.plan; /* cost should be inserted by caller */ plan->targetlist = qptlist; plan->qual = qpqual; plan->lefttree = NULL; plan->righttree = NULL; - node->scanrelid = scanrelid; + node->scan.scanrelid = scanrelid; + node->tablesample = tsc; return node; } diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c index 00b2625d342ee..701b99254db0d 100644 --- a/src/backend/optimizer/plan/initsplan.c +++ b/src/backend/optimizer/plan/initsplan.c @@ -306,7 +306,9 @@ extract_lateral_references(PlannerInfo *root, RelOptInfo *brel, Index rtindex) return; /* Fetch the appropriate variables */ - if (rte->rtekind == RTE_SUBQUERY) + if (rte->rtekind == RTE_RELATION) + vars = pull_vars_of_level((Node *) rte->tablesample, 0); + else if (rte->rtekind == RTE_SUBQUERY) vars = pull_vars_of_level((Node *) rte->subquery, 1); else if (rte->rtekind == RTE_FUNCTION) vars = pull_vars_of_level((Node *) rte->functions, 0); diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index a6ce96efc4862..b95cc95e5d9a2 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -505,14 +505,10 @@ subquery_planner(PlannerGlobal *glob, Query *parse, if (rte->rtekind == RTE_RELATION) { if (rte->tablesample) - { - rte->tablesample->args = (List *) - preprocess_expression(root, (Node *) rte->tablesample->args, - EXPRKIND_TABLESAMPLE); - rte->tablesample->repeatable = (Node *) - preprocess_expression(root, rte->tablesample->repeatable, + rte->tablesample = (TableSampleClause *) + preprocess_expression(root, + (Node *) rte->tablesample, EXPRKIND_TABLESAMPLE); - } } else if (rte->rtekind == RTE_SUBQUERY) { @@ -697,11 +693,14 @@ preprocess_expression(PlannerInfo *root, Node *expr, int kind) * If the query has any join RTEs, replace join alias variables with * base-relation variables. We must do this before sublink processing, * else sublinks expanded out from join aliases would not get processed. 
- * We can skip it in non-lateral RTE functions and VALUES lists, however, - * since they can't contain any Vars of the current query level. + * We can skip it in non-lateral RTE functions, VALUES lists, and + * TABLESAMPLE clauses, however, since they can't contain any Vars of the + * current query level. */ if (root->hasJoinRTEs && - !(kind == EXPRKIND_RTFUNC || kind == EXPRKIND_VALUES)) + !(kind == EXPRKIND_RTFUNC || + kind == EXPRKIND_VALUES || + kind == EXPRKIND_TABLESAMPLE)) expr = flatten_join_alias_vars(root, expr); /* diff --git a/src/backend/optimizer/plan/setrefs.c b/src/backend/optimizer/plan/setrefs.c index 258e541754aa1..ea185d4b4cff6 100644 --- a/src/backend/optimizer/plan/setrefs.c +++ b/src/backend/optimizer/plan/setrefs.c @@ -372,9 +372,8 @@ flatten_rtes_walker(Node *node, PlannerGlobal *glob) * * In the flat rangetable, we zero out substructure pointers that are not * needed by the executor; this reduces the storage space and copying cost - * for cached plans. We keep only the tablesample field (which we'd otherwise - * have to put in the plan tree, anyway); the ctename, alias and eref Alias - * fields, which are needed by EXPLAIN; and the selectedCols, insertedCols and + * for cached plans. We keep only the ctename, alias and eref Alias fields, + * which are needed by EXPLAIN, and the selectedCols, insertedCols and * updatedCols bitmaps, which are needed for executor-startup permissions * checking and for trigger event checking. */ @@ -388,6 +387,7 @@ add_rte_to_flat_rtable(PlannerGlobal *glob, RangeTblEntry *rte) memcpy(newrte, rte, sizeof(RangeTblEntry)); /* zap unneeded sub-structure */ + newrte->tablesample = NULL; newrte->subquery = NULL; newrte->joinaliasvars = NIL; newrte->functions = NIL; @@ -456,11 +456,13 @@ set_plan_refs(PlannerInfo *root, Plan *plan, int rtoffset) { SampleScan *splan = (SampleScan *) plan; - splan->scanrelid += rtoffset; - splan->plan.targetlist = - fix_scan_list(root, splan->plan.targetlist, rtoffset); - splan->plan.qual = - fix_scan_list(root, splan->plan.qual, rtoffset); + splan->scan.scanrelid += rtoffset; + splan->scan.plan.targetlist = + fix_scan_list(root, splan->scan.plan.targetlist, rtoffset); + splan->scan.plan.qual = + fix_scan_list(root, splan->scan.plan.qual, rtoffset); + splan->tablesample = (TableSampleClause *) + fix_scan_expr(root, (Node *) splan->tablesample, rtoffset); } break; case T_IndexScan: diff --git a/src/backend/optimizer/plan/subselect.c b/src/backend/optimizer/plan/subselect.c index 4708b87f330b6..f3038cdffda3a 100644 --- a/src/backend/optimizer/plan/subselect.c +++ b/src/backend/optimizer/plan/subselect.c @@ -2216,7 +2216,12 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, break; case T_SeqScan: + context.paramids = bms_add_members(context.paramids, scan_params); + break; + case T_SampleScan: + finalize_primnode((Node *) ((SampleScan *) plan)->tablesample, + &context); context.paramids = bms_add_members(context.paramids, scan_params); break; @@ -2384,7 +2389,7 @@ finalize_plan(PlannerInfo *root, Plan *plan, Bitmapset *valid_params, bms_add_members(context.paramids, scan_params); /* child nodes if any */ - foreach (lc, cscan->custom_plans) + foreach(lc, cscan->custom_plans) { context.paramids = bms_add_members(context.paramids, diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c index 92b0562843458..34144ccaf0fa6 100644 --- a/src/backend/optimizer/prep/prepjointree.c +++ b/src/backend/optimizer/prep/prepjointree.c @@ -1091,12 +1091,15 @@ 
pull_up_simple_subquery(PlannerInfo *root, Node *jtnode, RangeTblEntry *rte, switch (child_rte->rtekind) { + case RTE_RELATION: + if (child_rte->tablesample) + child_rte->lateral = true; + break; case RTE_SUBQUERY: case RTE_FUNCTION: case RTE_VALUES: child_rte->lateral = true; break; - case RTE_RELATION: case RTE_JOIN: case RTE_CTE: /* these can't contain any lateral references */ @@ -1909,6 +1912,13 @@ replace_vars_in_jointree(Node *jtnode, { switch (rte->rtekind) { + case RTE_RELATION: + /* shouldn't be marked LATERAL unless tablesample */ + Assert(rte->tablesample); + rte->tablesample = (TableSampleClause *) + pullup_replace_vars((Node *) rte->tablesample, + context); + break; case RTE_SUBQUERY: rte->subquery = pullup_replace_vars_subquery(rte->subquery, @@ -1924,7 +1934,6 @@ replace_vars_in_jointree(Node *jtnode, pullup_replace_vars((Node *) rte->values_lists, context); break; - case RTE_RELATION: case RTE_JOIN: case RTE_CTE: /* these shouldn't be marked LATERAL */ diff --git a/src/backend/optimizer/util/pathnode.c b/src/backend/optimizer/util/pathnode.c index f7f33bbe7721b..935bc2b9667d3 100644 --- a/src/backend/optimizer/util/pathnode.c +++ b/src/backend/optimizer/util/pathnode.c @@ -713,7 +713,7 @@ create_seqscan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer) /* * create_samplescan_path - * Like seqscan but uses sampling function while scanning. + * Creates a path node for a sampled table scan. */ Path * create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer) @@ -726,7 +726,7 @@ create_samplescan_path(PlannerInfo *root, RelOptInfo *rel, Relids required_outer required_outer); pathnode->pathkeys = NIL; /* samplescan has unordered result */ - cost_samplescan(pathnode, root, rel); + cost_samplescan(pathnode, root, rel, pathnode->param_info); return pathnode; } @@ -1773,6 +1773,8 @@ reparameterize_path(PlannerInfo *root, Path *path, { case T_SeqScan: return create_seqscan_path(root, rel, required_outer); + case T_SampleScan: + return (Path *) create_samplescan_path(root, rel, required_outer); case T_IndexScan: case T_IndexOnlyScan: { @@ -1805,8 +1807,6 @@ reparameterize_path(PlannerInfo *root, Path *path, case T_SubqueryScan: return create_subqueryscan_path(root, rel, path->pathkeys, required_outer); - case T_SampleScan: - return (Path *) create_samplescan_path(root, rel, required_outer); default: break; } diff --git a/src/backend/parser/gram.y b/src/backend/parser/gram.y index e0ff6f16a2181..4e02f7d73f24b 100644 --- a/src/backend/parser/gram.y +++ b/src/backend/parser/gram.y @@ -457,8 +457,8 @@ static Node *makeRecursiveViewSelect(char *relname, List *aliases, Node *query); %type joined_table %type relation_expr %type relation_expr_opt_alias +%type tablesample_clause opt_repeatable_clause %type target_el single_set_clause set_target insert_column_item -%type relation_expr_tablesample tablesample_clause opt_repeatable_clause %type generic_option_name %type generic_option_arg @@ -10463,9 +10463,13 @@ table_ref: relation_expr opt_alias_clause $1->alias = $2; $$ = (Node *) $1; } - | relation_expr_tablesample + | relation_expr opt_alias_clause tablesample_clause { - $$ = (Node *) $1; + RangeTableSample *n = (RangeTableSample *) $3; + $1->alias = $2; + /* relation_expr goes inside the RangeTableSample node */ + n->relation = (Node *) $1; + $$ = (Node *) n; } | func_table func_alias_clause { @@ -10792,23 +10796,18 @@ relation_expr_opt_alias: relation_expr %prec UMINUS } ; - -relation_expr_tablesample: relation_expr opt_alias_clause tablesample_clause 
- { - RangeTableSample *n = (RangeTableSample *) $3; - n->relation = $1; - n->relation->alias = $2; - $$ = (Node *) n; - } - ; - +/* + * TABLESAMPLE decoration in a FROM item + */ tablesample_clause: - TABLESAMPLE ColId '(' expr_list ')' opt_repeatable_clause + TABLESAMPLE func_name '(' expr_list ')' opt_repeatable_clause { RangeTableSample *n = makeNode(RangeTableSample); + /* n->relation will be filled in later */ n->method = $2; n->args = $4; n->repeatable = $6; + n->location = @2; $$ = (Node *) n; } ; diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index e90e1d68e3a53..4e490b23b4e27 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -18,8 +18,8 @@ #include "miscadmin.h" #include "access/heapam.h" +#include "access/tsmapi.h" #include "catalog/catalog.h" -#include "access/htup_details.h" #include "catalog/heap.h" #include "catalog/pg_constraint.h" #include "catalog/pg_type.h" @@ -43,7 +43,7 @@ #include "utils/guc.h" #include "utils/lsyscache.h" #include "utils/rel.h" -#include "utils/syscache.h" + /* Convenience macro for the most common makeNamespaceItem() case */ #define makeDefaultNSItem(rte) makeNamespaceItem(rte, true, true, false, true) @@ -63,6 +63,8 @@ static RangeTblEntry *transformRangeSubselect(ParseState *pstate, RangeSubselect *r); static RangeTblEntry *transformRangeFunction(ParseState *pstate, RangeFunction *r); +static TableSampleClause *transformRangeTableSample(ParseState *pstate, + RangeTableSample *rts); static Node *transformFromClauseItem(ParseState *pstate, Node *n, RangeTblEntry **top_rte, int *top_rti, List **namespace); @@ -423,40 +425,6 @@ transformJoinOnClause(ParseState *pstate, JoinExpr *j, List *namespace) return result; } -static RangeTblEntry * -transformTableSampleEntry(ParseState *pstate, RangeTableSample *rv) -{ - RangeTblEntry *rte = NULL; - CommonTableExpr *cte = NULL; - TableSampleClause *tablesample = NULL; - - /* if relation has an unqualified name, it might be a CTE reference */ - if (!rv->relation->schemaname) - { - Index levelsup; - - cte = scanNameSpaceForCTE(pstate, rv->relation->relname, &levelsup); - } - - /* We first need to build a range table entry */ - if (!cte) - rte = transformTableEntry(pstate, rv->relation); - - if (!rte || - (rte->relkind != RELKIND_RELATION && - rte->relkind != RELKIND_MATVIEW)) - ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), - errmsg("TABLESAMPLE clause can only be used on tables and materialized views"), - parser_errposition(pstate, rv->relation->location))); - - tablesample = ParseTableSample(pstate, rv->method, rv->repeatable, - rv->args, rv->relation->location); - rte->tablesample = tablesample; - - return rte; -} - /* * transformTableEntry --- transform a RangeVar (simple relation reference) */ @@ -748,6 +716,109 @@ transformRangeFunction(ParseState *pstate, RangeFunction *r) return rte; } +/* + * transformRangeTableSample --- transform a TABLESAMPLE clause + * + * Caller has already transformed rts->relation, we just have to validate + * the remaining fields and create a TableSampleClause node. + */ +static TableSampleClause * +transformRangeTableSample(ParseState *pstate, RangeTableSample *rts) +{ + TableSampleClause *tablesample; + Oid handlerOid; + Oid funcargtypes[1]; + TsmRoutine *tsm; + List *fargs; + ListCell *larg, + *ltyp; + + /* + * To validate the sample method name, look up the handler function, which + * has the same name, one dummy INTERNAL argument, and a result type of + * tsm_handler. 
(Note: tablesample method names are not schema-qualified + * in the SQL standard; but since they are just functions to us, we allow + * schema qualification to resolve any potential ambiguity.) + */ + funcargtypes[0] = INTERNALOID; + + handlerOid = LookupFuncName(rts->method, 1, funcargtypes, true); + + /* we want error to complain about no-such-method, not no-such-function */ + if (!OidIsValid(handlerOid)) + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("tablesample method %s does not exist", + NameListToString(rts->method)), + parser_errposition(pstate, rts->location))); + + /* check that handler has correct return type */ + if (get_func_rettype(handlerOid) != TSM_HANDLEROID) + ereport(ERROR, + (errcode(ERRCODE_WRONG_OBJECT_TYPE), + errmsg("function %s must return type \"tsm_handler\"", + NameListToString(rts->method)), + parser_errposition(pstate, rts->location))); + + /* OK, run the handler to get TsmRoutine, for argument type info */ + tsm = GetTsmRoutine(handlerOid); + + tablesample = makeNode(TableSampleClause); + tablesample->tsmhandler = handlerOid; + + /* check user provided the expected number of arguments */ + if (list_length(rts->args) != list_length(tsm->parameterTypes)) + ereport(ERROR, + (errcode(ERRCODE_INVALID_TABLESAMPLE_ARGUMENT), + errmsg_plural("tablesample method %s requires %d argument, not %d", + "tablesample method %s requires %d arguments, not %d", + list_length(tsm->parameterTypes), + NameListToString(rts->method), + list_length(tsm->parameterTypes), + list_length(rts->args)), + parser_errposition(pstate, rts->location))); + + /* + * Transform the arguments, typecasting them as needed. Note we must also + * assign collations now, because assign_query_collations() doesn't + * examine any substructure of RTEs. 
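[Editor's note: the loop that follows is a pairwise walk: verify that the user supplied exactly as many arguments as the method declares, then coerce each argument to its declared type. A standalone sketch of that shape, with the types reduced to plain scalars and the coercion replaced by a placeholder (all names hypothetical):]

    #include <stdbool.h>
    #include <stdio.h>

    /*
     * Validate the argument count against what the method declares, then
     * "coerce" each argument.  In the real code the coercion is
     * coerce_to_specific_type(); here the declared type only drives a cast.
     */
    static bool
    check_and_coerce_args(const double *args, int nargs,
                          const int *declared_types, int ndeclared,
                          double *coerced)
    {
        int     i;

        if (nargs != ndeclared)
        {
            fprintf(stderr, "tablesample method requires %d arguments, not %d\n",
                    ndeclared, nargs);
            return false;
        }

        for (i = 0; i < nargs; i++)
        {
            if (declared_types[i] == 0)             /* pretend 0 means "integer type" */
                coerced[i] = (double) (long) args[i];
            else
                coerced[i] = args[i];
        }
        return true;
    }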
+ */ + fargs = NIL; + forboth(larg, rts->args, ltyp, tsm->parameterTypes) + { + Node *arg = (Node *) lfirst(larg); + Oid argtype = lfirst_oid(ltyp); + + arg = transformExpr(pstate, arg, EXPR_KIND_FROM_FUNCTION); + arg = coerce_to_specific_type(pstate, arg, argtype, "TABLESAMPLE"); + assign_expr_collations(pstate, arg); + fargs = lappend(fargs, arg); + } + tablesample->args = fargs; + + /* Process REPEATABLE (seed) */ + if (rts->repeatable != NULL) + { + Node *arg; + + if (!tsm->repeatable_across_queries) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("tablesample method %s does not support REPEATABLE", + NameListToString(rts->method)), + parser_errposition(pstate, rts->location))); + + arg = transformExpr(pstate, rts->repeatable, EXPR_KIND_FROM_FUNCTION); + arg = coerce_to_specific_type(pstate, arg, FLOAT8OID, "REPEATABLE"); + assign_expr_collations(pstate, arg); + tablesample->repeatable = (Expr *) arg; + } + else + tablesample->repeatable = NULL; + + return tablesample; +} + /* * transformFromClauseItem - @@ -844,6 +915,33 @@ transformFromClauseItem(ParseState *pstate, Node *n, rtr->rtindex = rtindex; return (Node *) rtr; } + else if (IsA(n, RangeTableSample)) + { + /* TABLESAMPLE clause (wrapping some other valid FROM node) */ + RangeTableSample *rts = (RangeTableSample *) n; + Node *rel; + RangeTblRef *rtr; + RangeTblEntry *rte; + + /* Recursively transform the contained relation */ + rel = transformFromClauseItem(pstate, rts->relation, + top_rte, top_rti, namespace); + /* Currently, grammar could only return a RangeVar as contained rel */ + Assert(IsA(rel, RangeTblRef)); + rtr = (RangeTblRef *) rel; + rte = rt_fetch(rtr->rtindex, pstate->p_rtable); + /* We only support this on plain relations and matviews */ + if (rte->relkind != RELKIND_RELATION && + rte->relkind != RELKIND_MATVIEW) + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("TABLESAMPLE clause can only be applied to tables and materialized views"), + parser_errposition(pstate, exprLocation(rts->relation)))); + + /* Transform TABLESAMPLE details and attach to the RTE */ + rte->tablesample = transformRangeTableSample(pstate, rts); + return (Node *) rtr; + } else if (IsA(n, JoinExpr)) { /* A newfangled join expression */ @@ -1165,26 +1263,6 @@ transformFromClauseItem(ParseState *pstate, Node *n, return (Node *) j; } - else if (IsA(n, RangeTableSample)) - { - /* Tablesample reference */ - RangeTableSample *rv = (RangeTableSample *) n; - RangeTblRef *rtr; - RangeTblEntry *rte = NULL; - int rtindex; - - rte = transformTableSampleEntry(pstate, rv); - - /* assume new rte is at end */ - rtindex = list_length(pstate->p_rtable); - Assert(rte == rt_fetch(rtindex, pstate->p_rtable)); - *top_rte = rte; - *top_rti = rtindex; - *namespace = list_make1(makeDefaultNSItem(rte)); - rtr = makeNode(RangeTblRef); - rtr->rtindex = rtindex; - return (Node *) rtr; - } else elog(ERROR, "unrecognized node type: %d", (int) nodeTag(n)); return NULL; /* can't get here, keep compiler quiet */ diff --git a/src/backend/parser/parse_func.c b/src/backend/parser/parse_func.c index 430baff116527..554ca9d8c47e5 100644 --- a/src/backend/parser/parse_func.c +++ b/src/backend/parser/parse_func.c @@ -18,7 +18,6 @@ #include "catalog/pg_aggregate.h" #include "catalog/pg_proc.h" #include "catalog/pg_type.h" -#include "catalog/pg_tablesample_method.h" #include "funcapi.h" #include "lib/stringinfo.h" #include "nodes/makefuncs.h" @@ -27,7 +26,6 @@ #include "parser/parse_clause.h" #include "parser/parse_coerce.h" #include 
"parser/parse_func.h" -#include "parser/parse_expr.h" #include "parser/parse_relation.h" #include "parser/parse_target.h" #include "parser/parse_type.h" @@ -769,148 +767,6 @@ ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, } -/* - * ParseTableSample - * - * Parse TABLESAMPLE clause and process the arguments - */ -TableSampleClause * -ParseTableSample(ParseState *pstate, char *samplemethod, Node *repeatable, - List *sampleargs, int location) -{ - HeapTuple tuple; - Form_pg_tablesample_method tsm; - Form_pg_proc procform; - TableSampleClause *tablesample; - List *fargs; - ListCell *larg; - int nargs, - initnargs; - Oid init_arg_types[FUNC_MAX_ARGS]; - - /* Load the tablesample method */ - tuple = SearchSysCache1(TABLESAMPLEMETHODNAME, PointerGetDatum(samplemethod)); - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("tablesample method \"%s\" does not exist", - samplemethod), - parser_errposition(pstate, location))); - - tablesample = makeNode(TableSampleClause); - tablesample->tsmid = HeapTupleGetOid(tuple); - - tsm = (Form_pg_tablesample_method) GETSTRUCT(tuple); - - tablesample->tsmseqscan = tsm->tsmseqscan; - tablesample->tsmpagemode = tsm->tsmpagemode; - tablesample->tsminit = tsm->tsminit; - tablesample->tsmnextblock = tsm->tsmnextblock; - tablesample->tsmnexttuple = tsm->tsmnexttuple; - tablesample->tsmexaminetuple = tsm->tsmexaminetuple; - tablesample->tsmend = tsm->tsmend; - tablesample->tsmreset = tsm->tsmreset; - tablesample->tsmcost = tsm->tsmcost; - - ReleaseSysCache(tuple); - - /* Validate the parameters against init function definition. */ - tuple = SearchSysCache1(PROCOID, - ObjectIdGetDatum(tablesample->tsminit)); - - if (!HeapTupleIsValid(tuple)) /* should not happen */ - elog(ERROR, "cache lookup failed for function %u", - tablesample->tsminit); - - procform = (Form_pg_proc) GETSTRUCT(tuple); - initnargs = procform->pronargs; - Assert(initnargs >= 3); - - /* - * First parameter is used to pass the SampleScanState, second is seed - * (REPEATABLE), skip the processing for them here, just assert that the - * types are correct. - */ - Assert(procform->proargtypes.values[0] == INTERNALOID); - Assert(procform->proargtypes.values[1] == INT4OID); - initnargs -= 2; - memcpy(init_arg_types, procform->proargtypes.values + 2, - initnargs * sizeof(Oid)); - - /* Now we are done with the catalog */ - ReleaseSysCache(tuple); - - /* Process repeatable (seed) */ - if (repeatable != NULL) - { - Node *arg = repeatable; - - if (arg && IsA(arg, A_Const)) - { - A_Const *con = (A_Const *) arg; - - if (con->val.type == T_Null) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("REPEATABLE clause must be NOT NULL numeric value"), - parser_errposition(pstate, con->location))); - - } - - arg = transformExpr(pstate, arg, EXPR_KIND_FROM_FUNCTION); - arg = coerce_to_specific_type(pstate, arg, INT4OID, "REPEATABLE"); - tablesample->repeatable = arg; - } - else - tablesample->repeatable = NULL; - - /* Check user provided expected number of arguments. */ - if (list_length(sampleargs) != initnargs) - ereport(ERROR, - (errcode(ERRCODE_TOO_MANY_ARGUMENTS), - errmsg_plural("tablesample method \"%s\" expects %d argument got %d", - "tablesample method \"%s\" expects %d arguments got %d", - initnargs, - samplemethod, - initnargs, list_length(sampleargs)), - parser_errposition(pstate, location))); - - /* Transform the arguments, typecasting them as needed. 
*/ - fargs = NIL; - nargs = 0; - foreach(larg, sampleargs) - { - Node *inarg = (Node *) lfirst(larg); - Node *arg = transformExpr(pstate, inarg, EXPR_KIND_FROM_FUNCTION); - Oid argtype = exprType(arg); - - if (argtype != init_arg_types[nargs]) - { - if (!can_coerce_type(1, &argtype, &init_arg_types[nargs], - COERCION_IMPLICIT)) - ereport(ERROR, - (errcode(ERRCODE_INVALID_PARAMETER_VALUE), - errmsg("wrong parameter %d for tablesample method \"%s\"", - nargs + 1, samplemethod), - errdetail("Expected type %s got %s.", - format_type_be(init_arg_types[nargs]), - format_type_be(argtype)), - parser_errposition(pstate, exprLocation(inarg)))); - - arg = coerce_type(pstate, arg, argtype, init_arg_types[nargs], -1, - COERCION_IMPLICIT, COERCE_IMPLICIT_CAST, -1); - } - - fargs = lappend(fargs, arg); - nargs++; - } - - /* Pass the arguments down */ - tablesample->args = fargs; - - return tablesample; -} - /* func_match_argtypes() * * Given a list of candidate functions (having the right name and number diff --git a/src/backend/rewrite/rewriteHandler.c b/src/backend/rewrite/rewriteHandler.c index bbd6b77c5eab6..1734e48241ada 100644 --- a/src/backend/rewrite/rewriteHandler.c +++ b/src/backend/rewrite/rewriteHandler.c @@ -418,6 +418,10 @@ rewriteRuleAction(Query *parsetree, switch (rte->rtekind) { + case RTE_RELATION: + sub_action->hasSubLinks = + checkExprHasSubLink((Node *) rte->tablesample); + break; case RTE_FUNCTION: sub_action->hasSubLinks = checkExprHasSubLink((Node *) rte->functions); diff --git a/src/backend/utils/adt/pseudotypes.c b/src/backend/utils/adt/pseudotypes.c index 9ad460abfbdbc..5b809aa7d4996 100644 --- a/src/backend/utils/adt/pseudotypes.c +++ b/src/backend/utils/adt/pseudotypes.c @@ -373,6 +373,33 @@ fdw_handler_out(PG_FUNCTION_ARGS) } +/* + * tsm_handler_in - input routine for pseudo-type TSM_HANDLER. + */ +Datum +tsm_handler_in(PG_FUNCTION_ARGS) +{ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot accept a value of type tsm_handler"))); + + PG_RETURN_VOID(); /* keep compiler quiet */ +} + +/* + * tsm_handler_out - output routine for pseudo-type TSM_HANDLER. + */ +Datum +tsm_handler_out(PG_FUNCTION_ARGS) +{ + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot display a value of type tsm_handler"))); + + PG_RETURN_VOID(); /* keep compiler quiet */ +} + + /* * internal_in - input routine for pseudo-type INTERNAL. 
*/ diff --git a/src/backend/utils/adt/ruleutils.c b/src/backend/utils/adt/ruleutils.c index 5112cac901735..51391f6a4e0d1 100644 --- a/src/backend/utils/adt/ruleutils.c +++ b/src/backend/utils/adt/ruleutils.c @@ -32,7 +32,6 @@ #include "catalog/pg_opclass.h" #include "catalog/pg_operator.h" #include "catalog/pg_proc.h" -#include "catalog/pg_tablesample_method.h" #include "catalog/pg_trigger.h" #include "catalog/pg_type.h" #include "commands/defrem.h" @@ -349,8 +348,6 @@ static void make_ruledef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc, int prettyFlags); static void make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc, int prettyFlags, int wrapColumn); -static void get_tablesample_def(TableSampleClause *tablesample, - deparse_context *context); static void get_query_def(Query *query, StringInfo buf, List *parentnamespace, TupleDesc resultDesc, int prettyFlags, int wrapColumn, int startIndent); @@ -416,6 +413,8 @@ static void get_column_alias_list(deparse_columns *colinfo, static void get_from_clause_coldeflist(RangeTblFunction *rtfunc, deparse_columns *colinfo, deparse_context *context); +static void get_tablesample_def(TableSampleClause *tablesample, + deparse_context *context); static void get_opclass_name(Oid opclass, Oid actual_datatype, StringInfo buf); static Node *processIndirection(Node *node, deparse_context *context, @@ -4235,50 +4234,6 @@ make_viewdef(StringInfo buf, HeapTuple ruletup, TupleDesc rulettc, heap_close(ev_relation, AccessShareLock); } -/* ---------- - * get_tablesample_def - Convert TableSampleClause back to SQL - * ---------- - */ -static void -get_tablesample_def(TableSampleClause *tablesample, deparse_context *context) -{ - StringInfo buf = context->buf; - HeapTuple tuple; - Form_pg_tablesample_method tsm; - char *tsmname; - int nargs; - ListCell *l; - - /* Load the tablesample method */ - tuple = SearchSysCache1(TABLESAMPLEMETHODOID, ObjectIdGetDatum(tablesample->tsmid)); - if (!HeapTupleIsValid(tuple)) - ereport(ERROR, - (errcode(ERRCODE_UNDEFINED_OBJECT), - errmsg("cache lookup failed for tablesample method %u", - tablesample->tsmid))); - - tsm = (Form_pg_tablesample_method) GETSTRUCT(tuple); - tsmname = NameStr(tsm->tsmname); - appendStringInfo(buf, " TABLESAMPLE %s (", quote_identifier(tsmname)); - - ReleaseSysCache(tuple); - - nargs = 0; - foreach(l, tablesample->args) - { - if (nargs++ > 0) - appendStringInfoString(buf, ", "); - get_rule_expr((Node *) lfirst(l), context, true); - } - appendStringInfoChar(buf, ')'); - - if (tablesample->repeatable != NULL) - { - appendStringInfoString(buf, " REPEATABLE ("); - get_rule_expr(tablesample->repeatable, context, true); - appendStringInfoChar(buf, ')'); - } -} /* ---------- * get_query_def - Parse back one query parsetree @@ -8781,9 +8736,6 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) only_marker(rte), generate_relation_name(rte->relid, context->namespaces)); - - if (rte->tablesample) - get_tablesample_def(rte->tablesample, context); break; case RTE_SUBQUERY: /* Subquery RTE */ @@ -8963,6 +8915,10 @@ get_from_clause_item(Node *jtnode, Query *query, deparse_context *context) /* Else print column aliases as needed */ get_column_alias_list(colinfo, context); } + + /* Tablesample clause must go after any alias */ + if (rte->rtekind == RTE_RELATION && rte->tablesample) + get_tablesample_def(rte->tablesample, context); } else if (IsA(jtnode, JoinExpr)) { @@ -9162,6 +9118,44 @@ get_from_clause_coldeflist(RangeTblFunction *rtfunc, appendStringInfoChar(buf, ')'); } 
+/* + * get_tablesample_def - print a TableSampleClause + */ +static void +get_tablesample_def(TableSampleClause *tablesample, deparse_context *context) +{ + StringInfo buf = context->buf; + Oid argtypes[1]; + int nargs; + ListCell *l; + + /* + * We should qualify the handler's function name if it wouldn't be + * resolved by lookup in the current search path. + */ + argtypes[0] = INTERNALOID; + appendStringInfo(buf, " TABLESAMPLE %s (", + generate_function_name(tablesample->tsmhandler, 1, + NIL, argtypes, + false, NULL, EXPR_KIND_NONE)); + + nargs = 0; + foreach(l, tablesample->args) + { + if (nargs++ > 0) + appendStringInfoString(buf, ", "); + get_rule_expr((Node *) lfirst(l), context, false); + } + appendStringInfoChar(buf, ')'); + + if (tablesample->repeatable != NULL) + { + appendStringInfoString(buf, " REPEATABLE ("); + get_rule_expr((Node *) tablesample->repeatable, context, false); + appendStringInfoChar(buf, ')'); + } +} + /* * get_opclass_name - fetch name of an index operator class * diff --git a/src/backend/utils/cache/lsyscache.c b/src/backend/utils/cache/lsyscache.c index 7b32247d34eae..1dc293297d93e 100644 --- a/src/backend/utils/cache/lsyscache.c +++ b/src/backend/utils/cache/lsyscache.c @@ -32,7 +32,6 @@ #include "catalog/pg_range.h" #include "catalog/pg_statistic.h" #include "catalog/pg_transform.h" -#include "catalog/pg_tablesample_method.h" #include "catalog/pg_type.h" #include "miscadmin.h" #include "nodes/makefuncs.h" @@ -2997,29 +2996,3 @@ get_range_subtype(Oid rangeOid) else return InvalidOid; } - -/* ---------- PG_TABLESAMPLE_METHOD CACHE ---------- */ - -/* - * get_tablesample_method_name - given a tablesample method OID, - * look up the name or NULL if not found - */ -char * -get_tablesample_method_name(Oid tsmid) -{ - HeapTuple tuple; - - tuple = SearchSysCache1(TABLESAMPLEMETHODOID, ObjectIdGetDatum(tsmid)); - if (HeapTupleIsValid(tuple)) - { - Form_pg_tablesample_method tup = - (Form_pg_tablesample_method) GETSTRUCT(tuple); - char *result; - - result = pstrdup(NameStr(tup->tsmname)); - ReleaseSysCache(tuple); - return result; - } - else - return NULL; -} diff --git a/src/backend/utils/cache/syscache.c b/src/backend/utils/cache/syscache.c index b6333e362f018..efce7b9a3d13b 100644 --- a/src/backend/utils/cache/syscache.c +++ b/src/backend/utils/cache/syscache.c @@ -56,7 +56,6 @@ #include "catalog/pg_shseclabel.h" #include "catalog/pg_replication_origin.h" #include "catalog/pg_statistic.h" -#include "catalog/pg_tablesample_method.h" #include "catalog/pg_tablespace.h" #include "catalog/pg_transform.h" #include "catalog/pg_ts_config.h" @@ -667,28 +666,6 @@ static const struct cachedesc cacheinfo[] = { }, 128 }, - {TableSampleMethodRelationId, /* TABLESAMPLEMETHODNAME */ - TableSampleMethodNameIndexId, - 1, - { - Anum_pg_tablesample_method_tsmname, - 0, - 0, - 0, - }, - 2 - }, - {TableSampleMethodRelationId, /* TABLESAMPLEMETHODOID */ - TableSampleMethodOidIndexId, - 1, - { - ObjectIdAttributeNumber, - 0, - 0, - 0, - }, - 2 - }, {TableSpaceRelationId, /* TABLESPACEOID */ TablespaceOidIndexId, 1, diff --git a/src/backend/utils/errcodes.txt b/src/backend/utils/errcodes.txt index 6cc3ed96c447b..7b97d45a53a12 100644 --- a/src/backend/utils/errcodes.txt +++ b/src/backend/utils/errcodes.txt @@ -177,6 +177,8 @@ Section: Class 22 - Data Exception 2201B E ERRCODE_INVALID_REGULAR_EXPRESSION invalid_regular_expression 2201W E ERRCODE_INVALID_ROW_COUNT_IN_LIMIT_CLAUSE invalid_row_count_in_limit_clause 2201X E ERRCODE_INVALID_ROW_COUNT_IN_RESULT_OFFSET_CLAUSE 
invalid_row_count_in_result_offset_clause +2202H E ERRCODE_INVALID_TABLESAMPLE_ARGUMENT invalid_tablesample_argument +2202G E ERRCODE_INVALID_TABLESAMPLE_REPEAT invalid_tablesample_repeat 22009 E ERRCODE_INVALID_TIME_ZONE_DISPLACEMENT_VALUE invalid_time_zone_displacement_value 2200C E ERRCODE_INVALID_USE_OF_ESCAPE_CHARACTER invalid_use_of_escape_character 2200G E ERRCODE_MOST_SPECIFIC_TYPE_MISMATCH most_specific_type_mismatch diff --git a/src/backend/utils/misc/sampling.c b/src/backend/utils/misc/sampling.c index 6191f7973441b..4142e01123f79 100644 --- a/src/backend/utils/misc/sampling.c +++ b/src/backend/utils/misc/sampling.c @@ -228,7 +228,7 @@ reservoir_get_next_S(ReservoirState rs, double t, int n) void sampler_random_init_state(long seed, SamplerRandomState randstate) { - randstate[0] = RAND48_SEED_0; + randstate[0] = 0x330e; /* same as pg_erand48, but could be anything */ randstate[1] = (unsigned short) seed; randstate[2] = (unsigned short) (seed >> 16); } diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c index b2d627f471b82..4369d89e81247 100644 --- a/src/bin/psql/tab-complete.c +++ b/src/bin/psql/tab-complete.c @@ -738,13 +738,15 @@ static const SchemaQuery Query_for_list_of_matviews = { " WHERE substring(pg_catalog.quote_ident(evtname),1,%d)='%s'" #define Query_for_list_of_tablesample_methods \ -" SELECT pg_catalog.quote_ident(tsmname) "\ -" FROM pg_catalog.pg_tablesample_method "\ -" WHERE substring(pg_catalog.quote_ident(tsmname),1,%d)='%s'" +" SELECT pg_catalog.quote_ident(proname) "\ +" FROM pg_catalog.pg_proc "\ +" WHERE prorettype = 'pg_catalog.tsm_handler'::pg_catalog.regtype AND "\ +" proargtypes[0] = 'pg_catalog.internal'::pg_catalog.regtype AND "\ +" substring(pg_catalog.quote_ident(proname),1,%d)='%s'" #define Query_for_list_of_policies \ " SELECT pg_catalog.quote_ident(polname) "\ -" FROM pg_catalog.pg_policy " \ +" FROM pg_catalog.pg_policy "\ " WHERE substring(pg_catalog.quote_ident(polname),1,%d)='%s'" #define Query_for_list_of_tables_for_policy \ diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index 31139cbd0ccc7..75e6b72f9e020 100644 --- a/src/include/access/heapam.h +++ b/src/include/access/heapam.h @@ -116,11 +116,13 @@ extern HeapScanDesc heap_beginscan_bm(Relation relation, Snapshot snapshot, int nkeys, ScanKey key); extern HeapScanDesc heap_beginscan_sampling(Relation relation, Snapshot snapshot, int nkeys, ScanKey key, - bool allow_strat, bool allow_pagemode); + bool allow_strat, bool allow_sync, bool allow_pagemode); extern void heap_setscanlimits(HeapScanDesc scan, BlockNumber startBlk, BlockNumber endBlk); extern void heapgetpage(HeapScanDesc scan, BlockNumber page); extern void heap_rescan(HeapScanDesc scan, ScanKey key); +extern void heap_rescan_set_params(HeapScanDesc scan, ScanKey key, + bool allow_strat, bool allow_sync, bool allow_pagemode); extern void heap_endscan(HeapScanDesc scan); extern HeapTuple heap_getnext(HeapScanDesc scan, ScanDirection direction); diff --git a/src/include/access/tablesample.h b/src/include/access/tablesample.h deleted file mode 100644 index a02e93d32223d..0000000000000 --- a/src/include/access/tablesample.h +++ /dev/null @@ -1,61 +0,0 @@ -/*------------------------------------------------------------------------- - * - * tablesample.h - * Public header file for TABLESAMPLE clause interface - * - * - * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * 
src/include/access/tablesample.h - * - *------------------------------------------------------------------------- - */ -#ifndef TABLESAMPLE_H -#define TABLESAMPLE_H - -#include "access/relscan.h" -#include "executor/executor.h" - -typedef struct TableSampleDesc -{ - HeapScanDesc heapScan; - TupleDesc tupDesc; /* Mostly useful for tsmexaminetuple */ - - void *tsmdata; /* private method data */ - - /* These point to he function of the TABLESAMPLE Method. */ - FmgrInfo tsminit; - FmgrInfo tsmnextblock; - FmgrInfo tsmnexttuple; - FmgrInfo tsmexaminetuple; - FmgrInfo tsmreset; - FmgrInfo tsmend; -} TableSampleDesc; - - -extern TableSampleDesc *tablesample_init(SampleScanState *scanstate, - TableSampleClause *tablesample); -extern HeapTuple tablesample_getnext(TableSampleDesc *desc); -extern void tablesample_reset(TableSampleDesc *desc); -extern void tablesample_end(TableSampleDesc *desc); -extern HeapTuple tablesample_source_getnext(TableSampleDesc *desc); -extern HeapTuple tablesample_source_gettup(TableSampleDesc *desc, ItemPointer tid, - bool *visible); - -extern Datum tsm_system_init(PG_FUNCTION_ARGS); -extern Datum tsm_system_nextblock(PG_FUNCTION_ARGS); -extern Datum tsm_system_nexttuple(PG_FUNCTION_ARGS); -extern Datum tsm_system_end(PG_FUNCTION_ARGS); -extern Datum tsm_system_reset(PG_FUNCTION_ARGS); -extern Datum tsm_system_cost(PG_FUNCTION_ARGS); - -extern Datum tsm_bernoulli_init(PG_FUNCTION_ARGS); -extern Datum tsm_bernoulli_nextblock(PG_FUNCTION_ARGS); -extern Datum tsm_bernoulli_nexttuple(PG_FUNCTION_ARGS); -extern Datum tsm_bernoulli_end(PG_FUNCTION_ARGS); -extern Datum tsm_bernoulli_reset(PG_FUNCTION_ARGS); -extern Datum tsm_bernoulli_cost(PG_FUNCTION_ARGS); - - -#endif diff --git a/src/include/access/tsmapi.h b/src/include/access/tsmapi.h new file mode 100644 index 0000000000000..4b59ffabd6e11 --- /dev/null +++ b/src/include/access/tsmapi.h @@ -0,0 +1,81 @@ +/*------------------------------------------------------------------------- + * + * tsmapi.h + * API for tablesample methods + * + * Copyright (c) 2015, PostgreSQL Global Development Group + * + * src/include/access/tsmapi.h + * + *------------------------------------------------------------------------- + */ +#ifndef TSMAPI_H +#define TSMAPI_H + +#include "nodes/execnodes.h" +#include "nodes/relation.h" + + +/* + * Callback function signatures --- see tablesample-method.sgml for more info. + */ + +typedef void (*SampleScanGetSampleSize_function) (PlannerInfo *root, + RelOptInfo *baserel, + List *paramexprs, + BlockNumber *pages, + double *tuples); + +typedef void (*InitSampleScan_function) (SampleScanState *node, + int eflags); + +typedef void (*BeginSampleScan_function) (SampleScanState *node, + Datum *params, + int nparams, + uint32 seed); + +typedef BlockNumber (*NextSampleBlock_function) (SampleScanState *node); + +typedef OffsetNumber (*NextSampleTuple_function) (SampleScanState *node, + BlockNumber blockno, + OffsetNumber maxoffset); + +typedef void (*EndSampleScan_function) (SampleScanState *node); + +/* + * TsmRoutine is the struct returned by a tablesample method's handler + * function. It provides pointers to the callback functions needed by the + * planner and executor, as well as additional information about the method. + * + * More function pointers are likely to be added in the future. + * Therefore it's recommended that the handler initialize the struct with + * makeNode(TsmRoutine) so that all fields are set to NULL. This will + * ensure that no fields are accidentally left undefined. 
+ */ +typedef struct TsmRoutine +{ + NodeTag type; + + /* List of datatype OIDs for the arguments of the TABLESAMPLE clause */ + List *parameterTypes; + + /* Can method produce repeatable samples across, or even within, queries? */ + bool repeatable_across_queries; + bool repeatable_across_scans; + + /* Functions for planning a SampleScan on a physical table */ + SampleScanGetSampleSize_function SampleScanGetSampleSize; + + /* Functions for executing a SampleScan on a physical table */ + InitSampleScan_function InitSampleScan; /* can be NULL */ + BeginSampleScan_function BeginSampleScan; + NextSampleBlock_function NextSampleBlock; /* can be NULL */ + NextSampleTuple_function NextSampleTuple; + EndSampleScan_function EndSampleScan; /* can be NULL */ +} TsmRoutine; + + +/* Functions in access/tablesample/tablesample.c */ +extern TsmRoutine *GetTsmRoutine(Oid tsmhandler); + +#endif /* TSMAPI_H */ diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 965a53cbfda46..349dd2531028a 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 201506282 +#define CATALOG_VERSION_NO 201507251 #endif diff --git a/src/include/catalog/indexing.h b/src/include/catalog/indexing.h index 748aadde94598..c38958d6c5e26 100644 --- a/src/include/catalog/indexing.h +++ b/src/include/catalog/indexing.h @@ -316,11 +316,6 @@ DECLARE_UNIQUE_INDEX(pg_replication_origin_roiident_index, 6001, on pg_replicati DECLARE_UNIQUE_INDEX(pg_replication_origin_roname_index, 6002, on pg_replication_origin using btree(roname text_pattern_ops)); #define ReplicationOriginNameIndex 6002 -DECLARE_UNIQUE_INDEX(pg_tablesample_method_name_index, 3331, on pg_tablesample_method using btree(tsmname name_ops)); -#define TableSampleMethodNameIndexId 3331 -DECLARE_UNIQUE_INDEX(pg_tablesample_method_oid_index, 3332, on pg_tablesample_method using btree(oid oid_ops)); -#define TableSampleMethodOidIndexId 3332 - /* last step of initialization script: build the indexes declared above */ BUILD_INDICES diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index be3a8fba1bed4..be55666dd076b 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -3732,6 +3732,16 @@ DATA(insert OID = 3116 ( fdw_handler_in PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 DESCR("I/O"); DATA(insert OID = 3117 ( fdw_handler_out PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "3115" _null_ _null_ _null_ _null_ _null_ fdw_handler_out _null_ _null_ _null_ )); DESCR("I/O"); +DATA(insert OID = 3311 ( tsm_handler_in PGNSP PGUID 12 1 0 0 0 f f f f f f i 1 0 3310 "2275" _null_ _null_ _null_ _null_ _null_ tsm_handler_in _null_ _null_ _null_ )); +DESCR("I/O"); +DATA(insert OID = 3312 ( tsm_handler_out PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 2275 "3310" _null_ _null_ _null_ _null_ _null_ tsm_handler_out _null_ _null_ _null_ )); +DESCR("I/O"); + +/* tablesample method handlers */ +DATA(insert OID = 3313 ( bernoulli PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 3310 "2281" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_handler _null_ _null_ _null_ )); +DESCR("BERNOULLI tablesample method handler"); +DATA(insert OID = 3314 ( system PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 3310 "2281" _null_ _null_ _null_ _null_ _null_ tsm_system_handler _null_ _null_ _null_ )); +DESCR("SYSTEM tablesample method handler"); /* cryptographic */ DATA(insert OID = 2311 ( md5 PGNSP PGUID 12 1 0 0 0 f f f f t f i 1 0 25 "25" _null_ _null_ _null_ _null_ 
_null_ md5_text _null_ _null_ _null_ )); @@ -5315,33 +5325,6 @@ DESCR("get an individual replication origin's replication progress"); DATA(insert OID = 6014 ( pg_show_replication_origin_status PGNSP PGUID 12 1 100 0 0 f f f f f t v 0 0 2249 "" "{26,25,3220,3220}" "{o,o,o,o}" "{local_id, external_id, remote_lsn, local_lsn}" _null_ _null_ pg_show_replication_origin_status _null_ _null_ _null_ )); DESCR("get progress for all replication origins"); -/* tablesample */ -DATA(insert OID = 3335 ( tsm_system_init PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_ _null_ tsm_system_init _null_ _null_ _null_ )); -DESCR("tsm_system_init(internal)"); -DATA(insert OID = 3336 ( tsm_system_nextblock PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 23 "2281 16" _null_ _null_ _null_ _null_ _null_ tsm_system_nextblock _null_ _null_ _null_ )); -DESCR("tsm_system_nextblock(internal)"); -DATA(insert OID = 3337 ( tsm_system_nexttuple PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_ _null_ tsm_system_nexttuple _null_ _null_ _null_ )); -DESCR("tsm_system_nexttuple(internal)"); -DATA(insert OID = 3338 ( tsm_system_end PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ tsm_system_end _null_ _null_ _null_ )); -DESCR("tsm_system_end(internal)"); -DATA(insert OID = 3339 ( tsm_system_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ tsm_system_reset _null_ _null_ _null_ )); -DESCR("tsm_system_reset(internal)"); -DATA(insert OID = 3340 ( tsm_system_cost PGNSP PGUID 12 1 0 0 0 f f f f t f v 7 0 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ tsm_system_cost _null_ _null_ _null_ )); -DESCR("tsm_system_cost(internal)"); - -DATA(insert OID = 3341 ( tsm_bernoulli_init PGNSP PGUID 12 1 0 0 0 f f f f t f v 3 0 2278 "2281 23 700" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_init _null_ _null_ _null_ )); -DESCR("tsm_bernoulli_init(internal)"); -DATA(insert OID = 3342 ( tsm_bernoulli_nextblock PGNSP PGUID 12 1 0 0 0 f f f f t f v 2 0 23 "2281 16" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_nextblock _null_ _null_ _null_ )); -DESCR("tsm_bernoulli_nextblock(internal)"); -DATA(insert OID = 3343 ( tsm_bernoulli_nexttuple PGNSP PGUID 12 1 0 0 0 f f f f t f v 4 0 21 "2281 23 21 16" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_nexttuple _null_ _null_ _null_ )); -DESCR("tsm_bernoulli_nexttuple(internal)"); -DATA(insert OID = 3344 ( tsm_bernoulli_end PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_end _null_ _null_ _null_ )); -DESCR("tsm_bernoulli_end(internal)"); -DATA(insert OID = 3345 ( tsm_bernoulli_reset PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 2278 "2281" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_reset _null_ _null_ _null_ )); -DESCR("tsm_bernoulli_reset(internal)"); -DATA(insert OID = 3346 ( tsm_bernoulli_cost PGNSP PGUID 12 1 0 0 0 f f f f t f v 7 0 2278 "2281 2281 2281 2281 2281 2281 2281" _null_ _null_ _null_ _null_ _null_ tsm_bernoulli_cost _null_ _null_ _null_ )); -DESCR("tsm_bernoulli_cost(internal)"); - /* * Symbolic values for provolatile column: these indicate whether the result * of a function is dependent *only* on the values of its explicit arguments, diff --git a/src/include/catalog/pg_tablesample_method.h b/src/include/catalog/pg_tablesample_method.h deleted file mode 100644 index b422414d08016..0000000000000 --- a/src/include/catalog/pg_tablesample_method.h +++ /dev/null 
@@ -1,81 +0,0 @@ -/*------------------------------------------------------------------------- - * - * pg_tablesample_method.h - * definition of the table scan methods. - * - * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group - * Portions Copyright (c) 1994, Regents of the University of California - * - * src/include/catalog/pg_tablesample_method.h - * - * - *------------------------------------------------------------------------- - */ -#ifndef PG_TABLESAMPLE_METHOD_H -#define PG_TABLESAMPLE_METHOD_H - -#include "catalog/genbki.h" -#include "catalog/objectaddress.h" - -/* ---------------- - * pg_tablesample_method definition. cpp turns this into - * typedef struct FormData_pg_tablesample_method - * ---------------- - */ -#define TableSampleMethodRelationId 3330 - -CATALOG(pg_tablesample_method,3330) -{ - NameData tsmname; /* tablesample method name */ - bool tsmseqscan; /* does this method scan whole table - * sequentially? */ - bool tsmpagemode; /* does this method scan page at a time? */ - regproc tsminit; /* init scan function */ - regproc tsmnextblock; /* function returning next block to sample or - * InvalidBlockOffset if finished */ - regproc tsmnexttuple; /* function returning next tuple offset from - * current block or InvalidOffsetNumber if end - * of the block was reacher */ - regproc tsmexaminetuple;/* optional function which can examine tuple - * contents and decide if tuple should be - * returned or not */ - regproc tsmend; /* end scan function */ - regproc tsmreset; /* reset state - used by rescan */ - regproc tsmcost; /* costing function */ -} FormData_pg_tablesample_method; - -/* ---------------- - * Form_pg_tablesample_method corresponds to a pointer to a tuple with - * the format of pg_tablesample_method relation. 
- * ---------------- - */ -typedef FormData_pg_tablesample_method *Form_pg_tablesample_method; - -/* ---------------- - * compiler constants for pg_tablesample_method - * ---------------- - */ -#define Natts_pg_tablesample_method 10 -#define Anum_pg_tablesample_method_tsmname 1 -#define Anum_pg_tablesample_method_tsmseqscan 2 -#define Anum_pg_tablesample_method_tsmpagemode 3 -#define Anum_pg_tablesample_method_tsminit 4 -#define Anum_pg_tablesample_method_tsmnextblock 5 -#define Anum_pg_tablesample_method_tsmnexttuple 6 -#define Anum_pg_tablesample_method_tsmexaminetuple 7 -#define Anum_pg_tablesample_method_tsmend 8 -#define Anum_pg_tablesample_method_tsmreset 9 -#define Anum_pg_tablesample_method_tsmcost 10 - -/* ---------------- - * initial contents of pg_tablesample_method - * ---------------- - */ - -DATA(insert OID = 3333 ( system false true tsm_system_init tsm_system_nextblock tsm_system_nexttuple - tsm_system_end tsm_system_reset tsm_system_cost )); -DESCR("SYSTEM table sampling method"); -DATA(insert OID = 3334 ( bernoulli true false tsm_bernoulli_init tsm_bernoulli_nextblock tsm_bernoulli_nexttuple - tsm_bernoulli_end tsm_bernoulli_reset tsm_bernoulli_cost )); -DESCR("BERNOULLI table sampling method"); - -#endif /* PG_TABLESAMPLE_METHOD_H */ diff --git a/src/include/catalog/pg_type.h b/src/include/catalog/pg_type.h index da123f6c4957e..7dc95c8d2c651 100644 --- a/src/include/catalog/pg_type.h +++ b/src/include/catalog/pg_type.h @@ -694,6 +694,8 @@ DATA(insert OID = 3500 ( anyenum PGNSP PGUID 4 t p P f t \054 0 0 0 anyenum_in #define ANYENUMOID 3500 DATA(insert OID = 3115 ( fdw_handler PGNSP PGUID 4 t p P f t \054 0 0 0 fdw_handler_in fdw_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); #define FDW_HANDLEROID 3115 +DATA(insert OID = 3310 ( tsm_handler PGNSP PGUID 4 t p P f t \054 0 0 0 tsm_handler_in tsm_handler_out - - - - - i p f 0 -1 0 0 _null_ _null_ _null_ )); +#define TSM_HANDLEROID 3310 DATA(insert OID = 3831 ( anyrange PGNSP PGUID -1 f p P f t \054 0 0 0 anyrange_in anyrange_out - - - - - d x f 0 -1 0 0 _null_ _null_ _null_ )); #define ANYRANGEOID 3831 diff --git a/src/include/executor/nodeSamplescan.h b/src/include/executor/nodeSamplescan.h index 4b769daec8b91..a0cc6ce467a9f 100644 --- a/src/include/executor/nodeSamplescan.h +++ b/src/include/executor/nodeSamplescan.h @@ -4,7 +4,7 @@ * * * - * Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group + * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California * * src/include/executor/nodeSamplescan.h diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 541ee18735685..303fc3c1c77dc 100644 --- a/src/include/nodes/execnodes.h +++ b/src/include/nodes/execnodes.h @@ -1257,13 +1257,22 @@ typedef struct ScanState */ typedef ScanState SeqScanState; -/* - * SampleScan +/* ---------------- + * SampleScanState information + * ---------------- */ typedef struct SampleScanState { ScanState ss; - struct TableSampleDesc *tsdesc; + List *args; /* expr states for TABLESAMPLE params */ + ExprState *repeatable; /* expr state for REPEATABLE expr */ + /* use struct pointer to avoid including tsmapi.h here */ + struct TsmRoutine *tsmroutine; /* descriptor for tablesample method */ + void *tsm_state; /* tablesample method can keep state here */ + bool use_bulkread; /* use bulkread buffer access strategy? */ + bool use_pagemode; /* use page-at-a-time visibility checking? 
*/ + bool begun; /* false means need to call BeginSampleScan */ + uint32 seed; /* random seed */ } SampleScanState; /* diff --git a/src/include/nodes/nodes.h b/src/include/nodes/nodes.h index 290cdb3058517..f4db217379e24 100644 --- a/src/include/nodes/nodes.h +++ b/src/include/nodes/nodes.h @@ -51,6 +51,7 @@ typedef enum NodeTag T_BitmapOr, T_Scan, T_SeqScan, + T_SampleScan, T_IndexScan, T_IndexOnlyScan, T_BitmapIndexScan, @@ -61,7 +62,6 @@ typedef enum NodeTag T_ValuesScan, T_CteScan, T_WorkTableScan, - T_SampleScan, T_ForeignScan, T_CustomScan, T_Join, @@ -399,6 +399,7 @@ typedef enum NodeTag T_WindowDef, T_RangeSubselect, T_RangeFunction, + T_RangeTableSample, T_TypeName, T_ColumnDef, T_IndexElem, @@ -406,6 +407,7 @@ typedef enum NodeTag T_DefElem, T_RangeTblEntry, T_RangeTblFunction, + T_TableSampleClause, T_WithCheckOption, T_SortGroupClause, T_GroupingSet, @@ -424,8 +426,6 @@ typedef enum NodeTag T_OnConflictClause, T_CommonTableExpr, T_RoleSpec, - T_RangeTableSample, - T_TableSampleClause, /* * TAGS FOR REPLICATION GRAMMAR PARSE NODES (replnodes.h) @@ -451,7 +451,8 @@ typedef enum NodeTag T_WindowObjectData, /* private in nodeWindowAgg.c */ T_TIDBitmap, /* in nodes/tidbitmap.h */ T_InlineCodeBlock, /* in nodes/parsenodes.h */ - T_FdwRoutine /* in foreign/fdwapi.h */ + T_FdwRoutine, /* in foreign/fdwapi.h */ + T_TsmRoutine /* in access/tsmapi.h */ } NodeTag; /* diff --git a/src/include/nodes/parsenodes.h b/src/include/nodes/parsenodes.h index cd7b19eac5a17..c5fa78b71c672 100644 --- a/src/include/nodes/parsenodes.h +++ b/src/include/nodes/parsenodes.h @@ -337,26 +337,6 @@ typedef struct FuncCall int location; /* token location, or -1 if unknown */ } FuncCall; -/* - * TableSampleClause - a sampling method information - */ -typedef struct TableSampleClause -{ - NodeTag type; - Oid tsmid; - bool tsmseqscan; - bool tsmpagemode; - Oid tsminit; - Oid tsmnextblock; - Oid tsmnexttuple; - Oid tsmexaminetuple; - Oid tsmend; - Oid tsmreset; - Oid tsmcost; - Node *repeatable; - List *args; -} TableSampleClause; - /* * A_Star - '*' representing all columns of a table or compound field * @@ -558,19 +538,23 @@ typedef struct RangeFunction } RangeFunction; /* - * RangeTableSample - represents TABLESAMPLE () REPEATABLE () + * RangeTableSample - TABLESAMPLE appearing in a raw FROM clause * - * SQL Standard specifies only one parameter which is percentage. But we allow - * custom tablesample methods which may need different input arguments so we - * accept list of arguments. + * This node, appearing only in raw parse trees, represents + * TABLESAMPLE () REPEATABLE () + * Currently, the can only be a RangeVar, but we might in future + * allow RangeSubselect and other options. Note that the RangeTableSample + * is wrapped around the node representing the , rather than being + * a subfield of it. 
*/ typedef struct RangeTableSample { NodeTag type; - RangeVar *relation; - char *method; /* sampling method */ - Node *repeatable; - List *args; /* arguments for sampling method */ + Node *relation; /* relation to be sampled */ + List *method; /* sampling method name (possibly qualified) */ + List *args; /* argument(s) for sampling method */ + Node *repeatable; /* REPEATABLE expression, or NULL if none */ + int location; /* method name location, or -1 if unknown */ } RangeTableSample; /* @@ -810,7 +794,7 @@ typedef struct RangeTblEntry */ Oid relid; /* OID of the relation */ char relkind; /* relation kind (see pg_class.relkind) */ - TableSampleClause *tablesample; /* sampling method and parameters */ + struct TableSampleClause *tablesample; /* sampling info, or NULL */ /* * Fields valid for a subquery RTE (else NULL): @@ -912,6 +896,19 @@ typedef struct RangeTblFunction Bitmapset *funcparams; /* PARAM_EXEC Param IDs affecting this func */ } RangeTblFunction; +/* + * TableSampleClause - TABLESAMPLE appearing in a transformed FROM clause + * + * Unlike RangeTableSample, this is a subnode of the relevant RangeTblEntry. + */ +typedef struct TableSampleClause +{ + NodeTag type; + Oid tsmhandler; /* OID of the tablesample handler function */ + List *args; /* tablesample argument expression(s) */ + Expr *repeatable; /* REPEATABLE expression, or NULL if none */ +} TableSampleClause; + /* * WithCheckOption - * representation of WITH CHECK OPTION checks to be applied to new tuples @@ -2520,7 +2517,7 @@ typedef struct RenameStmt typedef struct AlterObjectSchemaStmt { NodeTag type; - ObjectType objectType; /* OBJECT_TABLE, OBJECT_TYPE, etc */ + ObjectType objectType; /* OBJECT_TABLE, OBJECT_TYPE, etc */ RangeVar *relation; /* in case it's a table */ List *object; /* in case it's some other object */ List *objarg; /* argument types, if applicable */ @@ -2535,7 +2532,7 @@ typedef struct AlterObjectSchemaStmt typedef struct AlterOwnerStmt { NodeTag type; - ObjectType objectType; /* OBJECT_TABLE, OBJECT_TYPE, etc */ + ObjectType objectType; /* OBJECT_TABLE, OBJECT_TYPE, etc */ RangeVar *relation; /* in case it's a table */ List *object; /* in case it's some other object */ List *objarg; /* argument types, if applicable */ diff --git a/src/include/nodes/plannodes.h b/src/include/nodes/plannodes.h index 5f538f3e8ccb5..0654d0266cd6e 100644 --- a/src/include/nodes/plannodes.h +++ b/src/include/nodes/plannodes.h @@ -287,7 +287,12 @@ typedef Scan SeqScan; * table sample scan node * ---------------- */ -typedef Scan SampleScan; +typedef struct SampleScan +{ + Scan scan; + /* use struct pointer to avoid including parsenodes.h here */ + struct TableSampleClause *tablesample; +} SampleScan; /* ---------------- * index scan node diff --git a/src/include/optimizer/cost.h b/src/include/optimizer/cost.h index 24003ae3591b9..dd43e45d0c0a5 100644 --- a/src/include/optimizer/cost.h +++ b/src/include/optimizer/cost.h @@ -68,7 +68,8 @@ extern double index_pages_fetched(double tuples_fetched, BlockNumber pages, double index_pages, PlannerInfo *root); extern void cost_seqscan(Path *path, PlannerInfo *root, RelOptInfo *baserel, ParamPathInfo *param_info); -extern void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel); +extern void cost_samplescan(Path *path, PlannerInfo *root, RelOptInfo *baserel, + ParamPathInfo *param_info); extern void cost_index(IndexPath *path, PlannerInfo *root, double loop_count); extern void cost_bitmap_heap_scan(Path *path, PlannerInfo *root, RelOptInfo *baserel, diff --git 
a/src/include/parser/parse_func.h b/src/include/parser/parse_func.h index 3194da463948a..32646918e20c4 100644 --- a/src/include/parser/parse_func.h +++ b/src/include/parser/parse_func.h @@ -33,11 +33,6 @@ typedef enum extern Node *ParseFuncOrColumn(ParseState *pstate, List *funcname, List *fargs, FuncCall *fn, int location); -extern TableSampleClause *ParseTableSample(ParseState *pstate, - char *samplemethod, - Node *repeatable, List *args, - int location); - extern FuncDetailCode func_get_detail(List *funcname, List *fargs, List *fargnames, int nargs, Oid *argtypes, diff --git a/src/include/port.h b/src/include/port.h index 71113c03944bd..3787cbfb7614c 100644 --- a/src/include/port.h +++ b/src/include/port.h @@ -357,10 +357,6 @@ extern off_t ftello(FILE *stream); #endif #endif -#define RAND48_SEED_0 (0x330e) -#define RAND48_SEED_1 (0xabcd) -#define RAND48_SEED_2 (0x1234) - extern double pg_erand48(unsigned short xseed[3]); extern long pg_lrand48(void); extern void pg_srand48(long seed); diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index 98556725c8e02..07caf22f962f4 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -566,6 +566,8 @@ extern Datum language_handler_in(PG_FUNCTION_ARGS); extern Datum language_handler_out(PG_FUNCTION_ARGS); extern Datum fdw_handler_in(PG_FUNCTION_ARGS); extern Datum fdw_handler_out(PG_FUNCTION_ARGS); +extern Datum tsm_handler_in(PG_FUNCTION_ARGS); +extern Datum tsm_handler_out(PG_FUNCTION_ARGS); extern Datum internal_in(PG_FUNCTION_ARGS); extern Datum internal_out(PG_FUNCTION_ARGS); extern Datum opaque_in(PG_FUNCTION_ARGS); @@ -1212,6 +1214,12 @@ extern Datum ginqueryarrayextract(PG_FUNCTION_ARGS); extern Datum ginarrayconsistent(PG_FUNCTION_ARGS); extern Datum ginarraytriconsistent(PG_FUNCTION_ARGS); +/* access/tablesample/bernoulli.c */ +extern Datum tsm_bernoulli_handler(PG_FUNCTION_ARGS); + +/* access/tablesample/system.c */ +extern Datum tsm_system_handler(PG_FUNCTION_ARGS); + /* access/transam/twophase.c */ extern Datum pg_prepared_xact(PG_FUNCTION_ARGS); diff --git a/src/include/utils/lsyscache.h b/src/include/utils/lsyscache.h index a40c9b12732da..971153843296d 100644 --- a/src/include/utils/lsyscache.h +++ b/src/include/utils/lsyscache.h @@ -156,7 +156,6 @@ extern void free_attstatsslot(Oid atttype, extern char *get_namespace_name(Oid nspid); extern char *get_namespace_name_or_temp(Oid nspid); extern Oid get_range_subtype(Oid rangeOid); -extern char *get_tablesample_method_name(Oid tsmid); #define type_is_array(typid) (get_element_type(typid) != InvalidOid) /* type_is_array_domain accepts both plain arrays and domains over arrays */ diff --git a/src/include/utils/syscache.h b/src/include/utils/syscache.h index f06f03a996f26..18404e266eb63 100644 --- a/src/include/utils/syscache.h +++ b/src/include/utils/syscache.h @@ -81,8 +81,6 @@ enum SysCacheIdentifier REPLORIGNAME, RULERELNAME, STATRELATTINH, - TABLESAMPLEMETHODNAME, - TABLESAMPLEMETHODOID, TABLESPACEOID, TRFOID, TRFTYPELANG, diff --git a/src/port/erand48.c b/src/port/erand48.c index 12efd8193c4ed..9d471197c3540 100644 --- a/src/port/erand48.c +++ b/src/port/erand48.c @@ -33,6 +33,9 @@ #include +#define RAND48_SEED_0 (0x330e) +#define RAND48_SEED_1 (0xabcd) +#define RAND48_SEED_2 (0x1234) #define RAND48_MULT_0 (0xe66d) #define RAND48_MULT_1 (0xdeec) #define RAND48_MULT_2 (0x0005) diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index 414299a694114..e7c242cd22d48 100644 --- 
a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -101,15 +101,17 @@ NOTICE: f_leak => great manga 44 | 8 | 1 | rls_regress_user2 | great manga | manga (4 rows) -SELECT * FROM document TABLESAMPLE BERNOULLI (50) REPEATABLE(1) WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel +-- try a sampled version +SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0) + WHERE f_leak(dtitle) ORDER BY did; NOTICE: f_leak => my first manga NOTICE: f_leak => great science fiction +NOTICE: f_leak => great manga did | cid | dlevel | dauthor | dtitle -----+-----+--------+-------------------+----------------------- - 1 | 11 | 1 | rls_regress_user1 | my first novel 4 | 44 | 1 | rls_regress_user1 | my first manga 6 | 22 | 1 | rls_regress_user2 | great science fiction + 8 | 44 | 1 | rls_regress_user2 | great manga (3 rows) -- viewpoint from rls_regress_user2 @@ -156,20 +158,20 @@ NOTICE: f_leak => great manga 44 | 8 | 1 | rls_regress_user2 | great manga | manga (8 rows) -SELECT * FROM document TABLESAMPLE BERNOULLI (50) REPEATABLE(1) WHERE f_leak(dtitle) ORDER BY did; -NOTICE: f_leak => my first novel -NOTICE: f_leak => my second novel +-- try a sampled version +SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0) + WHERE f_leak(dtitle) ORDER BY did; NOTICE: f_leak => my first manga +NOTICE: f_leak => my second manga NOTICE: f_leak => great science fiction -NOTICE: f_leak => great technology book +NOTICE: f_leak => great manga did | cid | dlevel | dauthor | dtitle -----+-----+--------+-------------------+----------------------- - 1 | 11 | 1 | rls_regress_user1 | my first novel - 2 | 11 | 2 | rls_regress_user1 | my second novel 4 | 44 | 1 | rls_regress_user1 | my first manga + 5 | 44 | 2 | rls_regress_user1 | my second manga 6 | 22 | 1 | rls_regress_user2 | great science fiction - 7 | 33 | 2 | rls_regress_user2 | great technology book -(5 rows) + 8 | 44 | 1 | rls_regress_user2 | great manga +(4 rows) EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); QUERY PLAN diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index cd5337531d4b4..1e5b0b9a2c43a 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -2202,6 +2202,10 @@ street| SELECT r.name, FROM ONLY road r, real_city c WHERE (c.outline ## r.thepath); +test_tablesample_v1| SELECT test_tablesample.id + FROM test_tablesample TABLESAMPLE system ((10 * 2)) REPEATABLE (2); +test_tablesample_v2| SELECT test_tablesample.id + FROM test_tablesample TABLESAMPLE system (99); toyemp| SELECT emp.name, emp.age, emp.location, diff --git a/src/test/regress/expected/sanity_check.out b/src/test/regress/expected/sanity_check.out index 14acd16da3b3d..eb0bc88ef1fb2 100644 --- a/src/test/regress/expected/sanity_check.out +++ b/src/test/regress/expected/sanity_check.out @@ -128,7 +128,6 @@ pg_shdepend|t pg_shdescription|t pg_shseclabel|t pg_statistic|t -pg_tablesample_method|t pg_tablespace|t pg_transform|t pg_trigger|t diff --git a/src/test/regress/expected/tablesample.out b/src/test/regress/expected/tablesample.out index 04e5eb8b807e2..727a835439734 100644 --- a/src/test/regress/expected/tablesample.out +++ b/src/test/regress/expected/tablesample.out @@ -1,107 +1,123 @@ -CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages -INSERT INTO test_tablesample SELECT i, repeat(i::text, 200) FROM generate_series(0, 9) s(i) 
ORDER BY i; -SELECT t.id FROM test_tablesample AS t TABLESAMPLE SYSTEM (50) REPEATABLE (10); +CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); +-- use fillfactor so we don't have to load too much data to get multiple pages +INSERT INTO test_tablesample + SELECT i, repeat(i::text, 200) FROM generate_series(0, 9) s(i); +SELECT t.id FROM test_tablesample AS t TABLESAMPLE SYSTEM (50) REPEATABLE (0); id ---- - 0 - 1 - 2 3 4 5 - 9 -(7 rows) - -SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (100.0/11) REPEATABLE (9999); - id ----- 6 7 8 -(3 rows) +(6 rows) -SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100); - count -------- - 10 -(1 row) +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (100.0/11) REPEATABLE (0); + id +---- +(0 rows) -SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (100); +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0); id ---- - 0 - 1 - 2 + 3 + 4 + 5 6 7 8 - 9 -(7 rows) +(6 rows) -SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (50) REPEATABLE (100); +SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (50) REPEATABLE (0); id ---- - 0 - 1 - 3 4 5 + 6 + 7 + 8 (5 rows) -SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (5.5) REPEATABLE (1); +SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (5.5) REPEATABLE (0); id ---- - 0 - 5 -(2 rows) + 7 +(1 row) -CREATE VIEW test_tablesample_v1 AS SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (10*2) REPEATABLE (2); -CREATE VIEW test_tablesample_v2 AS SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (99); -SELECT pg_get_viewdef('test_tablesample_v1'::regclass); - pg_get_viewdef --------------------------------------------------------------------------------- - SELECT test_tablesample.id + - FROM test_tablesample TABLESAMPLE system (((10 * 2))::real) REPEATABLE (2); +-- 100% should give repeatable count results (ie, all rows) in any case +SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100); + count +------- + 10 (1 row) -SELECT pg_get_viewdef('test_tablesample_v2'::regclass); - pg_get_viewdef ------------------------------------------------------------ - SELECT test_tablesample.id + - FROM test_tablesample TABLESAMPLE system ((99)::real); +SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100) REPEATABLE (1+2); + count +------- + 10 +(1 row) + +SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100) REPEATABLE (0.4); + count +------- + 10 (1 row) +CREATE VIEW test_tablesample_v1 AS + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (10*2) REPEATABLE (2); +CREATE VIEW test_tablesample_v2 AS + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (99); +\d+ test_tablesample_v1 + View "public.test_tablesample_v1" + Column | Type | Modifiers | Storage | Description +--------+---------+-----------+---------+------------- + id | integer | | plain | +View definition: + SELECT test_tablesample.id + FROM test_tablesample TABLESAMPLE system ((10 * 2)) REPEATABLE (2); + +\d+ test_tablesample_v2 + View "public.test_tablesample_v2" + Column | Type | Modifiers | Storage | Description +--------+---------+-----------+---------+------------- + id | integer | | plain | +View definition: + SELECT test_tablesample.id + FROM test_tablesample TABLESAMPLE system (99); + +-- check a sampled query doesn't affect cursor in progress BEGIN; -DECLARE tablesample_cur CURSOR FOR SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (100); +DECLARE tablesample_cur CURSOR FOR + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) 
REPEATABLE (0); FETCH FIRST FROM tablesample_cur; id ---- - 0 + 3 (1 row) FETCH NEXT FROM tablesample_cur; id ---- - 1 + 4 (1 row) FETCH NEXT FROM tablesample_cur; id ---- - 2 + 5 (1 row) -SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (10); +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0); id ---- - 0 - 1 - 2 3 4 5 - 9 -(7 rows) + 6 + 7 + 8 +(6 rows) FETCH NEXT FROM tablesample_cur; id @@ -124,19 +140,19 @@ FETCH NEXT FROM tablesample_cur; FETCH FIRST FROM tablesample_cur; id ---- - 0 + 3 (1 row) FETCH NEXT FROM tablesample_cur; id ---- - 1 + 4 (1 row) FETCH NEXT FROM tablesample_cur; id ---- - 2 + 5 (1 row) FETCH NEXT FROM tablesample_cur; @@ -159,41 +175,129 @@ FETCH NEXT FROM tablesample_cur; CLOSE tablesample_cur; END; -EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (10); - QUERY PLAN -------------------------------------------------------------------------------- - Sample Scan (system) on test_tablesample (cost=0.00..26.35 rows=635 width=4) +EXPLAIN (COSTS OFF) + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (2); + QUERY PLAN +-------------------------------------------------------------------- + Sample Scan on test_tablesample + Sampling: system ('50'::real) REPEATABLE ('2'::double precision) +(2 rows) + +EXPLAIN (COSTS OFF) + SELECT * FROM test_tablesample_v1; + QUERY PLAN +-------------------------------------------------------------------- + Sample Scan on test_tablesample + Sampling: system ('20'::real) REPEATABLE ('2'::double precision) +(2 rows) + +-- check inheritance behavior +explain (costs off) + select count(*) from person tablesample bernoulli (100); + QUERY PLAN +------------------------------------------------- + Aggregate + -> Append + -> Sample Scan on person + Sampling: bernoulli ('100'::real) + -> Sample Scan on emp + Sampling: bernoulli ('100'::real) + -> Sample Scan on student + Sampling: bernoulli ('100'::real) + -> Sample Scan on stud_emp + Sampling: bernoulli ('100'::real) +(10 rows) + +select count(*) from person tablesample bernoulli (100); + count +------- + 58 (1 row) -EXPLAIN SELECT * FROM test_tablesample_v1; - QUERY PLAN -------------------------------------------------------------------------------- - Sample Scan (system) on test_tablesample (cost=0.00..10.54 rows=254 width=4) +select count(*) from person; + count +------- + 58 +(1 row) + +-- check that collations get assigned within the tablesample arguments +SELECT count(*) FROM test_tablesample TABLESAMPLE bernoulli (('1'::text < '0'::text)::int); + count +------- + 0 +(1 row) + +-- check behavior during rescans, as well as correct handling of min/max pct +select * from + (values (0),(100)) v(pct), + lateral (select count(*) from tenk1 tablesample bernoulli (pct)) ss; + pct | count +-----+------- + 0 | 0 + 100 | 10000 +(2 rows) + +select * from + (values (0),(100)) v(pct), + lateral (select count(*) from tenk1 tablesample system (pct)) ss; + pct | count +-----+------- + 0 | 0 + 100 | 10000 +(2 rows) + +explain (costs off) +select pct, count(unique1) from + (values (0),(100)) v(pct), + lateral (select * from tenk1 tablesample bernoulli (pct)) ss + group by pct; + QUERY PLAN +-------------------------------------------------------- + HashAggregate + Group Key: "*VALUES*".column1 + -> Nested Loop + -> Values Scan on "*VALUES*" + -> Sample Scan on tenk1 + Sampling: bernoulli ("*VALUES*".column1) +(6 rows) + +select pct, count(unique1) from + (values (0),(100)) v(pct), + lateral (select * from tenk1 tablesample 
bernoulli (pct)) ss + group by pct; + pct | count +-----+------- + 100 | 10000 +(1 row) + +select pct, count(unique1) from + (values (0),(100)) v(pct), + lateral (select * from tenk1 tablesample system (pct)) ss + group by pct; + pct | count +-----+------- + 100 | 10000 (1 row) -- errors SELECT id FROM test_tablesample TABLESAMPLE FOOBAR (1); -ERROR: tablesample method "foobar" does not exist +ERROR: tablesample method foobar does not exist LINE 1: SELECT id FROM test_tablesample TABLESAMPLE FOOBAR (1); - ^ + ^ +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (NULL); +ERROR: TABLESAMPLE parameter cannot be null SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (NULL); -ERROR: REPEATABLE clause must be NOT NULL numeric value -LINE 1: ... test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (NULL); - ^ +ERROR: TABLESAMPLE REPEATABLE parameter cannot be null SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (-1); -ERROR: invalid sample size -HINT: Sample size must be numeric value between 0 and 100 (inclusive). +ERROR: sample percentage must be between 0 and 100 SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (200); -ERROR: invalid sample size -HINT: Sample size must be numeric value between 0 and 100 (inclusive). +ERROR: sample percentage must be between 0 and 100 SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (-1); -ERROR: invalid sample size -HINT: Sample size must be numeric value between 0 and 100 (inclusive). +ERROR: sample percentage must be between 0 and 100 SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (200); -ERROR: invalid sample size -HINT: Sample size must be numeric value between 0 and 100 (inclusive). +ERROR: sample percentage must be between 0 and 100 SELECT id FROM test_tablesample_v1 TABLESAMPLE BERNOULLI (1); -ERROR: TABLESAMPLE clause can only be used on tables and materialized views +ERROR: TABLESAMPLE clause can only be applied to tables and materialized views LINE 1: SELECT id FROM test_tablesample_v1 TABLESAMPLE BERNOULLI (1)... ^ INSERT INTO test_tablesample_v1 VALUES(1); @@ -202,30 +306,10 @@ DETAIL: Views containing TABLESAMPLE are not automatically updatable. HINT: To enable inserting into the view, provide an INSTEAD OF INSERT trigger or an unconditional ON INSERT DO INSTEAD rule. WITH query_select AS (SELECT * FROM test_tablesample) SELECT * FROM query_select TABLESAMPLE BERNOULLI (5.5) REPEATABLE (1); -ERROR: TABLESAMPLE clause can only be used on tables and materialized views +ERROR: TABLESAMPLE clause can only be applied to tables and materialized views LINE 2: SELECT * FROM query_select TABLESAMPLE BERNOULLI (5.5) REPEA... ^ SELECT q.* FROM (SELECT * FROM test_tablesample) as q TABLESAMPLE BERNOULLI (5); ERROR: syntax error at or near "TABLESAMPLE" LINE 1: ...CT q.* FROM (SELECT * FROM test_tablesample) as q TABLESAMPL... 
^ --- catalog sanity -SELECT * -FROM pg_tablesample_method -WHERE tsminit IS NULL - OR tsmseqscan IS NULL - OR tsmpagemode IS NULL - OR tsmnextblock IS NULL - OR tsmnexttuple IS NULL - OR tsmend IS NULL - OR tsmreset IS NULL - OR tsmcost IS NULL; - tsmname | tsmseqscan | tsmpagemode | tsminit | tsmnextblock | tsmnexttuple | tsmexaminetuple | tsmend | tsmreset | tsmcost ----------+------------+-------------+---------+--------------+--------------+-----------------+--------+----------+--------- -(0 rows) - --- done -DROP TABLE test_tablesample CASCADE; -NOTICE: drop cascades to 2 other objects -DETAIL: drop cascades to view test_tablesample_v1 -drop cascades to view test_tablesample_v2 diff --git a/src/test/regress/output/misc.source b/src/test/regress/output/misc.source index 70c9cc356a642..9eedb363d06be 100644 --- a/src/test/regress/output/misc.source +++ b/src/test/regress/output/misc.source @@ -686,6 +686,9 @@ SELECT user_relns() AS user_relns test_range_excl test_range_gist test_range_spgist + test_tablesample + test_tablesample_v1 + test_tablesample_v2 test_tsvector testjsonb text_tbl @@ -705,7 +708,7 @@ SELECT user_relns() AS user_relns tvvmv varchar_tbl xacttest -(127 rows) +(130 rows) SELECT name(equipment(hobby_construct(text 'skywalking', text 'mer'))); name diff --git a/src/test/regress/serial_schedule b/src/test/regress/serial_schedule index a2e0cebbdb500..187cb127014f7 100644 --- a/src/test/regress/serial_schedule +++ b/src/test/regress/serial_schedule @@ -110,6 +110,7 @@ test: lock test: replica_identity test: rowsecurity test: object_address +test: tablesample test: alter_generic test: misc test: psql @@ -155,4 +156,3 @@ test: with test: xml test: event_trigger test: stats -test: tablesample diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index 039070b85b733..e86f8143142cb 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -94,14 +94,18 @@ SET row_security TO ON; SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did; -SELECT * FROM document TABLESAMPLE BERNOULLI (50) REPEATABLE(1) WHERE f_leak(dtitle) ORDER BY did; +-- try a sampled version +SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0) + WHERE f_leak(dtitle) ORDER BY did; -- viewpoint from rls_regress_user2 SET SESSION AUTHORIZATION rls_regress_user2; SELECT * FROM document WHERE f_leak(dtitle) ORDER BY did; SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle) ORDER BY did; -SELECT * FROM document TABLESAMPLE BERNOULLI (50) REPEATABLE(1) WHERE f_leak(dtitle) ORDER BY did; +-- try a sampled version +SELECT * FROM document TABLESAMPLE BERNOULLI(50) REPEATABLE(0) + WHERE f_leak(dtitle) ORDER BY did; EXPLAIN (COSTS OFF) SELECT * FROM document WHERE f_leak(dtitle); EXPLAIN (COSTS OFF) SELECT * FROM document NATURAL JOIN category WHERE f_leak(dtitle); diff --git a/src/test/regress/sql/tablesample.sql b/src/test/regress/sql/tablesample.sql index 7b3eb9bedf7bb..eec9793496696 100644 --- a/src/test/regress/sql/tablesample.sql +++ b/src/test/regress/sql/tablesample.sql @@ -1,26 +1,37 @@ -CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); -- force smaller pages so we don't have to load too much data to get multiple pages +CREATE TABLE test_tablesample (id int, name text) WITH (fillfactor=10); +-- use fillfactor so we don't have to load too much data to get multiple pages -INSERT INTO test_tablesample SELECT i, 
repeat(i::text, 200) FROM generate_series(0, 9) s(i) ORDER BY i; +INSERT INTO test_tablesample + SELECT i, repeat(i::text, 200) FROM generate_series(0, 9) s(i); -SELECT t.id FROM test_tablesample AS t TABLESAMPLE SYSTEM (50) REPEATABLE (10); -SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (100.0/11) REPEATABLE (9999); +SELECT t.id FROM test_tablesample AS t TABLESAMPLE SYSTEM (50) REPEATABLE (0); +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (100.0/11) REPEATABLE (0); +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0); +SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (50) REPEATABLE (0); +SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (5.5) REPEATABLE (0); + +-- 100% should give repeatable count results (ie, all rows) in any case SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100); -SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (100); -SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (50) REPEATABLE (100); -SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (5.5) REPEATABLE (1); +SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100) REPEATABLE (1+2); +SELECT count(*) FROM test_tablesample TABLESAMPLE SYSTEM (100) REPEATABLE (0.4); -CREATE VIEW test_tablesample_v1 AS SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (10*2) REPEATABLE (2); -CREATE VIEW test_tablesample_v2 AS SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (99); -SELECT pg_get_viewdef('test_tablesample_v1'::regclass); -SELECT pg_get_viewdef('test_tablesample_v2'::regclass); +CREATE VIEW test_tablesample_v1 AS + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (10*2) REPEATABLE (2); +CREATE VIEW test_tablesample_v2 AS + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (99); +\d+ test_tablesample_v1 +\d+ test_tablesample_v2 +-- check a sampled query doesn't affect cursor in progress BEGIN; -DECLARE tablesample_cur CURSOR FOR SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (100); +DECLARE tablesample_cur CURSOR FOR + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0); + FETCH FIRST FROM tablesample_cur; FETCH NEXT FROM tablesample_cur; FETCH NEXT FROM tablesample_cur; -SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (10); +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (0); FETCH NEXT FROM tablesample_cur; FETCH NEXT FROM tablesample_cur; @@ -36,12 +47,45 @@ FETCH NEXT FROM tablesample_cur; CLOSE tablesample_cur; END; -EXPLAIN SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (10); -EXPLAIN SELECT * FROM test_tablesample_v1; +EXPLAIN (COSTS OFF) + SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (2); +EXPLAIN (COSTS OFF) + SELECT * FROM test_tablesample_v1; + +-- check inheritance behavior +explain (costs off) + select count(*) from person tablesample bernoulli (100); +select count(*) from person tablesample bernoulli (100); +select count(*) from person; + +-- check that collations get assigned within the tablesample arguments +SELECT count(*) FROM test_tablesample TABLESAMPLE bernoulli (('1'::text < '0'::text)::int); + +-- check behavior during rescans, as well as correct handling of min/max pct +select * from + (values (0),(100)) v(pct), + lateral (select count(*) from tenk1 tablesample bernoulli (pct)) ss; +select * from + (values (0),(100)) v(pct), + lateral (select count(*) from tenk1 tablesample system (pct)) ss; +explain (costs off) +select pct, count(unique1) from + (values (0),(100)) v(pct), + lateral (select 
* from tenk1 tablesample bernoulli (pct)) ss + group by pct; +select pct, count(unique1) from + (values (0),(100)) v(pct), + lateral (select * from tenk1 tablesample bernoulli (pct)) ss + group by pct; +select pct, count(unique1) from + (values (0),(100)) v(pct), + lateral (select * from tenk1 tablesample system (pct)) ss + group by pct; -- errors SELECT id FROM test_tablesample TABLESAMPLE FOOBAR (1); +SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (NULL); SELECT id FROM test_tablesample TABLESAMPLE SYSTEM (50) REPEATABLE (NULL); SELECT id FROM test_tablesample TABLESAMPLE BERNOULLI (-1); @@ -56,19 +100,3 @@ WITH query_select AS (SELECT * FROM test_tablesample) SELECT * FROM query_select TABLESAMPLE BERNOULLI (5.5) REPEATABLE (1); SELECT q.* FROM (SELECT * FROM test_tablesample) as q TABLESAMPLE BERNOULLI (5); - --- catalog sanity - -SELECT * -FROM pg_tablesample_method -WHERE tsminit IS NULL - OR tsmseqscan IS NULL - OR tsmpagemode IS NULL - OR tsmnextblock IS NULL - OR tsmnexttuple IS NULL - OR tsmend IS NULL - OR tsmreset IS NULL - OR tsmcost IS NULL; - --- done -DROP TABLE test_tablesample CASCADE; From 68c3549fb56834f47d57ef010ffbd3bdbd80b941 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 25 Jul 2015 15:46:26 -0400 Subject: [PATCH 086/442] Update oidjoins regression test for 9.5. New FK relationships for pg_transform. Also findoidjoins now detects a few relationships it didn't before for pre-existing catalogs, as a result of new regression tests leaving entries in those catalogs that weren't there before. --- src/test/regress/expected/oidjoins.out | 72 ++++++++++++++++++++++++++ src/test/regress/sql/oidjoins.sql | 36 +++++++++++++ 2 files changed, 108 insertions(+) diff --git a/src/test/regress/expected/oidjoins.out b/src/test/regress/expected/oidjoins.out index bee76ce8f275c..d85bc83e11e4e 100644 --- a/src/test/regress/expected/oidjoins.out +++ b/src/test/regress/expected/oidjoins.out @@ -545,6 +545,30 @@ WHERE extnamespace != 0 AND ------+-------------- (0 rows) +SELECT ctid, fdwowner +FROM pg_catalog.pg_foreign_data_wrapper fk +WHERE fdwowner != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.fdwowner); + ctid | fdwowner +------+---------- +(0 rows) + +SELECT ctid, srvowner +FROM pg_catalog.pg_foreign_server fk +WHERE srvowner != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.srvowner); + ctid | srvowner +------+---------- +(0 rows) + +SELECT ctid, srvfdw +FROM pg_catalog.pg_foreign_server fk +WHERE srvfdw != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_foreign_data_wrapper pk WHERE pk.oid = fk.srvfdw); + ctid | srvfdw +------+-------- +(0 rows) + SELECT ctid, indexrelid FROM pg_catalog.pg_index fk WHERE indexrelid != 0 AND @@ -609,6 +633,22 @@ WHERE lanvalidator != 0 AND ------+-------------- (0 rows) +SELECT ctid, loid +FROM pg_catalog.pg_largeobject fk +WHERE loid != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_largeobject_metadata pk WHERE pk.oid = fk.loid); + ctid | loid +------+------ +(0 rows) + +SELECT ctid, lomowner +FROM pg_catalog.pg_largeobject_metadata fk +WHERE lomowner != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.lomowner); + ctid | lomowner +------+---------- +(0 rows) + SELECT ctid, nspowner FROM pg_catalog.pg_namespace fk WHERE nspowner != 0 AND @@ -945,6 +985,38 @@ WHERE spcowner != 0 AND ------+---------- (0 rows) +SELECT ctid, trftype +FROM pg_catalog.pg_transform fk +WHERE trftype != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = 
fk.trftype); + ctid | trftype +------+--------- +(0 rows) + +SELECT ctid, trflang +FROM pg_catalog.pg_transform fk +WHERE trflang != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_language pk WHERE pk.oid = fk.trflang); + ctid | trflang +------+--------- +(0 rows) + +SELECT ctid, trffromsql +FROM pg_catalog.pg_transform fk +WHERE trffromsql != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.trffromsql); + ctid | trffromsql +------+------------ +(0 rows) + +SELECT ctid, trftosql +FROM pg_catalog.pg_transform fk +WHERE trftosql != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.trftosql); + ctid | trftosql +------+---------- +(0 rows) + SELECT ctid, tgrelid FROM pg_catalog.pg_trigger fk WHERE tgrelid != 0 AND diff --git a/src/test/regress/sql/oidjoins.sql b/src/test/regress/sql/oidjoins.sql index 651068b964cdc..2fa628d0f7384 100644 --- a/src/test/regress/sql/oidjoins.sql +++ b/src/test/regress/sql/oidjoins.sql @@ -273,6 +273,18 @@ SELECT ctid, extnamespace FROM pg_catalog.pg_extension fk WHERE extnamespace != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_namespace pk WHERE pk.oid = fk.extnamespace); +SELECT ctid, fdwowner +FROM pg_catalog.pg_foreign_data_wrapper fk +WHERE fdwowner != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.fdwowner); +SELECT ctid, srvowner +FROM pg_catalog.pg_foreign_server fk +WHERE srvowner != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.srvowner); +SELECT ctid, srvfdw +FROM pg_catalog.pg_foreign_server fk +WHERE srvfdw != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_foreign_data_wrapper pk WHERE pk.oid = fk.srvfdw); SELECT ctid, indexrelid FROM pg_catalog.pg_index fk WHERE indexrelid != 0 AND @@ -305,6 +317,14 @@ SELECT ctid, lanvalidator FROM pg_catalog.pg_language fk WHERE lanvalidator != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.lanvalidator); +SELECT ctid, loid +FROM pg_catalog.pg_largeobject fk +WHERE loid != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_largeobject_metadata pk WHERE pk.oid = fk.loid); +SELECT ctid, lomowner +FROM pg_catalog.pg_largeobject_metadata fk +WHERE lomowner != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.lomowner); SELECT ctid, nspowner FROM pg_catalog.pg_namespace fk WHERE nspowner != 0 AND @@ -473,6 +493,22 @@ SELECT ctid, spcowner FROM pg_catalog.pg_tablespace fk WHERE spcowner != 0 AND NOT EXISTS(SELECT 1 FROM pg_catalog.pg_authid pk WHERE pk.oid = fk.spcowner); +SELECT ctid, trftype +FROM pg_catalog.pg_transform fk +WHERE trftype != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_type pk WHERE pk.oid = fk.trftype); +SELECT ctid, trflang +FROM pg_catalog.pg_transform fk +WHERE trflang != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_language pk WHERE pk.oid = fk.trflang); +SELECT ctid, trffromsql +FROM pg_catalog.pg_transform fk +WHERE trffromsql != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.trffromsql); +SELECT ctid, trftosql +FROM pg_catalog.pg_transform fk +WHERE trftosql != 0 AND + NOT EXISTS(SELECT 1 FROM pg_catalog.pg_proc pk WHERE pk.oid = fk.trftosql); SELECT ctid, tgrelid FROM pg_catalog.pg_trigger fk WHERE tgrelid != 0 AND From 87221867e8b8fedb347e32d3d919e62ae85edc81 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 25 Jul 2015 15:58:14 -0400 Subject: [PATCH 087/442] In pg_ctl, report unexpected failure to stat() the postmaster.pid file. 
Any error other than ENOENT is a bit suspicious here, and perhaps should not be grounds for assuming the postmaster has failed. For the moment though, just report it, and don't change the behavior otherwise. The intent is mainly to try to determine why we are seeing intermittent failures in this area on some buildfarm members. Back-patch to 9.5 where some of these failures have happened. --- src/bin/pg_ctl/pg_ctl.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/bin/pg_ctl/pg_ctl.c b/src/bin/pg_ctl/pg_ctl.c index 74764fabdaf1f..6a36d29f43b1b 100644 --- a/src/bin/pg_ctl/pg_ctl.c +++ b/src/bin/pg_ctl/pg_ctl.c @@ -648,7 +648,12 @@ test_postmaster_connection(bool do_checkpoint) struct stat statbuf; if (stat(pid_file, &statbuf) != 0) + { + if (errno != ENOENT) + write_stderr(_("\n%s: could not stat file \"%s\": %s\n"), + progname, pid_file, strerror(errno)); return PQPING_NO_RESPONSE; + } if (found_stale_pidfile) { From 62005e9465a409d317a7a41d821afc5ed235670b Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 25 Jul 2015 16:37:12 -0400 Subject: [PATCH 088/442] Some platforms now need contrib/tsm_system_time to be linked with libm. Buildfarm member hornet, at least, seems to want -lm in the link command. Probably this is due to the just-added use of isnan(). --- contrib/tsm_system_time/Makefile | 2 ++ 1 file changed, 2 insertions(+) diff --git a/contrib/tsm_system_time/Makefile b/contrib/tsm_system_time/Makefile index 168becf54e2ff..4c08a9935ab68 100644 --- a/contrib/tsm_system_time/Makefile +++ b/contrib/tsm_system_time/Makefile @@ -9,6 +9,8 @@ DATA = tsm_system_time--1.0.sql REGRESS = tsm_system_time +SHLIB_LINK += $(filter -lm, $(LIBS)) + ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) From 08012455cd31a4148c5072a6aac1ad41a89e6d4b Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Sat, 25 Jul 2015 17:14:36 -0400 Subject: [PATCH 089/442] Restore use of zlib default compression in pg_dump directory mode. This was broken by commit 0e7e355f27302b62af3e1add93853ccd45678443 and friends, which ignored the fact that gzopen() will treat "-1" in the mode argument as an invalid character, which it ignores, and a flag for compression level 1. Now, when this value is encountered no compression level flag is passed to gzopen, leaving it to use the zlib default. Also, enforce the documented allowed range for pg_dump's -Z option, namely 0 .. 9, and remove some consequently dead code from pg_backup_tar.c. Problem reported by Marc Mamin. Backpatch to 9.1, like the patch that introduced the bug. 
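As an aside, the gzopen() mode-string behavior described above is easy to demonstrate outside pg_dump. The following standalone sketch is not pg_dump code; it assumes only the stock zlib API ("cc gzmode.c -lz"). It appends an explicit 0..9 level to the mode string, but passes a bare "wb" for Z_DEFAULT_COMPRESSION, since stringifying -1 would be read as an ignored '-' plus a request for level 1:

    /*
     * Minimal sketch (not pg_dump code; assumes only the stock zlib API):
     * append an explicit 0..9 level to the gzopen() mode string, but pass
     * a bare "wb" for Z_DEFAULT_COMPRESSION, since "-1" would be read as
     * an ignored '-' plus a level-1 flag.
     */
    #include <stdio.h>
    #include <zlib.h>

    static gzFile
    open_compressed(const char *path, int level)
    {
        char    mode[32];

        if (level != Z_DEFAULT_COMPRESSION)
            snprintf(mode, sizeof(mode), "wb%d", level);    /* e.g. "wb5" */
        else
            snprintf(mode, sizeof(mode), "wb");             /* zlib default */

        return gzopen(path, mode);
    }

    int
    main(void)
    {
        gzFile  fp = open_compressed("demo.gz", Z_DEFAULT_COMPRESSION);

        if (fp == NULL)
            return 1;
        gzwrite(fp, "hello\n", 6);
        return gzclose(fp) == Z_OK ? 0 : 1;
    }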
--- src/bin/pg_dump/compress_io.c | 18 ++++++++++++++---- src/bin/pg_dump/pg_backup_tar.c | 7 ------- src/bin/pg_dump/pg_dump.c | 5 +++++ 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/bin/pg_dump/compress_io.c b/src/bin/pg_dump/compress_io.c index 912fc2f695a15..6e1469bb75fce 100644 --- a/src/bin/pg_dump/compress_io.c +++ b/src/bin/pg_dump/compress_io.c @@ -547,11 +547,21 @@ cfopen(const char *path, const char *mode, int compression) if (compression != 0) { #ifdef HAVE_LIBZ - char mode_compression[32]; + if (compression != Z_DEFAULT_COMPRESSION) + { + /* user has specified a compression level, so tell zlib to use it */ + char mode_compression[32]; + + snprintf(mode_compression, sizeof(mode_compression), "%s%d", + mode, compression); + fp->compressedfp = gzopen(path, mode_compression); + } + else + { + /* don't specify a level, just use the zlib default */ + fp->compressedfp = gzopen(path, mode); + } - snprintf(mode_compression, sizeof(mode_compression), "%s%d", - mode, compression); - fp->compressedfp = gzopen(path, mode_compression); fp->uncompressedfp = NULL; if (fp->compressedfp == NULL) { diff --git a/src/bin/pg_dump/pg_backup_tar.c b/src/bin/pg_dump/pg_backup_tar.c index 309b4b53dbc05..8730c5ea914c2 100644 --- a/src/bin/pg_dump/pg_backup_tar.c +++ b/src/bin/pg_dump/pg_backup_tar.c @@ -208,13 +208,6 @@ InitArchiveFmt_Tar(ArchiveHandle *AH) ctx->hasSeek = checkSeek(ctx->tarFH); - if (AH->compression < 0 || AH->compression > 9) - AH->compression = Z_DEFAULT_COMPRESSION; - - /* Don't compress into tar files unless asked to do so */ - if (AH->compression == Z_DEFAULT_COMPRESSION) - AH->compression = 0; - /* * We don't support compression because reading the files back is not * possible since gzdopen uses buffered IO which totally screws file diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 6664cee3b4cd8..0b5262a8a1a7d 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -485,6 +485,11 @@ main(int argc, char **argv) case 'Z': /* Compression Level */ compressLevel = atoi(optarg); + if (compressLevel < 0 || compressLevel > 9) + { + write_msg(NULL, "compression level must be in range 0..9\n"); + exit_nicely(1); + } break; case 0: From d5b132bb626d126b6d0696f2f4068815053da115 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 25 Jul 2015 19:42:32 -0400 Subject: [PATCH 090/442] Dodge portability issue (apparent compiler bug) in new tablesample code. Some of the older OS X critters in the buildfarm are failing regression, with symptoms showing that a request for 100% sampling in BERNOULLI or SYSTEM methods actually gets only around 50% of the table. gdb revealed that the computation of the "cutoff" number was producing 0x7FFFFFFF rather than the expected 0x100000000. Inspecting the assembly code, it looks like gcc is trying to use lrint() instead of rint() and then fumbling the conversion from long double to uint64. This seems like a clear compiler bug, but assigning the intermediate result into a plain double variable works around it, so let's just do that. (Another idea would be to give up one bit of hash width so that we don't need to use a uint64 cutoff, but let's see if this is enough.) 
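For reference, the workaround is easy to reproduce in a standalone program. This sketch is plain C, not PostgreSQL code (UINT32_MAX stands in for PG_UINT32_MAX); it stores the rint() result in an ordinary double before converting to uint64, which is exactly the shape of the fix below:

    /*
     * Standalone sketch of the cutoff computation (not PostgreSQL code;
     * UINT32_MAX stands in for PG_UINT32_MAX).  Keeping the rounded value
     * in an ordinary double before the uint64 conversion avoids the
     * miscompiled long-double path described above.
     */
    #include <inttypes.h>
    #include <math.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        double      percent = 100.0;
        double      dcutoff;
        uint64_t    cutoff;

        dcutoff = rint(((double) UINT32_MAX + 1) * percent / 100);
        cutoff = (uint64_t) dcutoff;

        /* expect 4294967296 (0x100000000) for 100%, 0 for 0% */
        printf("cutoff = %" PRIu64 "\n", cutoff);
        return 0;
    }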
--- src/backend/access/tablesample/bernoulli.c | 4 +++- src/backend/access/tablesample/system.c | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/backend/access/tablesample/bernoulli.c b/src/backend/access/tablesample/bernoulli.c index cf88f95e757b1..ccef4f7f84388 100644 --- a/src/backend/access/tablesample/bernoulli.c +++ b/src/backend/access/tablesample/bernoulli.c @@ -144,6 +144,7 @@ bernoulli_beginsamplescan(SampleScanState *node, { BernoulliSamplerData *sampler = (BernoulliSamplerData *) node->tsm_state; double percent = DatumGetFloat4(params[0]); + double dcutoff; if (percent < 0 || percent > 100 || isnan(percent)) ereport(ERROR, @@ -155,7 +156,8 @@ bernoulli_beginsamplescan(SampleScanState *node, * store that as a uint64, of course. Note that this gives strictly * correct behavior at the limits of zero or one probability. */ - sampler->cutoff = rint(((double) PG_UINT32_MAX + 1) * percent / 100); + dcutoff = rint(((double) PG_UINT32_MAX + 1) * percent / 100); + sampler->cutoff = (uint64) dcutoff; sampler->seed = seed; sampler->lt = InvalidOffsetNumber; diff --git a/src/backend/access/tablesample/system.c b/src/backend/access/tablesample/system.c index 43c5dab71619a..080a3121141e0 100644 --- a/src/backend/access/tablesample/system.c +++ b/src/backend/access/tablesample/system.c @@ -148,6 +148,7 @@ system_beginsamplescan(SampleScanState *node, { SystemSamplerData *sampler = (SystemSamplerData *) node->tsm_state; double percent = DatumGetFloat4(params[0]); + double dcutoff; if (percent < 0 || percent > 100 || isnan(percent)) ereport(ERROR, @@ -159,7 +160,8 @@ system_beginsamplescan(SampleScanState *node, * store that as a uint64, of course. Note that this gives strictly * correct behavior at the limits of zero or one probability. */ - sampler->cutoff = rint(((double) PG_UINT32_MAX + 1) * percent / 100); + dcutoff = rint(((double) PG_UINT32_MAX + 1) * percent / 100); + sampler->cutoff = (uint64) dcutoff; sampler->seed = seed; sampler->nextblock = 0; sampler->lt = InvalidOffsetNumber; From 60624f45fc12a249fd48ac7557238c974d8e5011 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Sat, 25 Jul 2015 17:46:33 -0700 Subject: [PATCH 091/442] Improve markup for row_security. Wrap the literals on, off, force, and BYPASSRLS with appropriate markup. Per Kevin Grittner. --- doc/src/sgml/config.sgml | 22 ++++++++++++---------- 1 file changed, 12 insertions(+), 10 deletions(-) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index b91d6c75d276e..bbe1eb0d19a68 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -5568,20 +5568,22 @@ COPY postgres_log FROM '/full/path/to/logfile.csv' WITH csv; This variable controls if row security policies are to be applied to queries which are run against tables that have row security enabled. - The default is 'on'. When set to 'on', all users, except superusers - and the owner of the table, will have the row policies for the table - applied to their queries. The table owner and superuser can request - that row policies be applied to their queries by setting this to - 'force'. Lastly, this can also be set to 'off' which will bypass row - policies for the table, if possible, and error if not. + The default is on. When set to on, all users, + except superusers and the owner of the table, will have the row + policies for the table applied to their queries. The table owner and + superuser can request that row policies be applied to their queries by + setting this to force. 
Lastly, this can also be set to + off which will bypass row policies for the table, if + possible, and error if not. For a user who is not a superuser and not the table owner to bypass - row policies for the table, they must have the BYPASSRLS role attribute. - If this is set to 'off' and the user queries a table which has row - policies enabled and the user does not have the right to bypass - row policies then a permission denied error will be returned. + row policies for the table, they must have the BYPASSRLS + role attribute. If this is set to off and the user queries + a table which has row policies enabled and the user does not have the + right to bypass row policies then a permission denied error will be + returned. From 65b86c1767a7dac0cc79dcfba7ba4cbd326dc03f Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sun, 26 Jul 2015 15:17:44 +0200 Subject: [PATCH 092/442] Build column mapping for grouping sets in all required cases. The previous coding frequently failed to fail because for one it's unusual to have rollup clauses with one column, and for another sometimes the wrong mapping didn't cause obvious problems. Author: Jeevan Chalke Reviewed-By: Andrew Gierth Discussion: CAM2+6=W=9=hQOipH0HAPbkun3Z3TFWij_EiHue0_6UX=oR=1kw@mail.gmail.com Backpatch: 9.5, where grouping sets were introduced --- src/backend/optimizer/plan/planner.c | 9 ++------- src/test/regress/expected/groupingsets.out | 23 ++++++++++++++++++++++ src/test/regress/sql/groupingsets.sql | 4 ++++ 3 files changed, 29 insertions(+), 7 deletions(-) diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index b95cc95e5d9a2..11678388fab7c 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -2401,13 +2401,8 @@ build_grouping_chain(PlannerInfo *root, * Prepare the grpColIdx for the real Agg node first, because we may need * it for sorting */ - if (list_length(rollup_groupclauses) > 1) - { - Assert(rollup_lists && llast(rollup_lists)); - - top_grpColIdx = - remap_groupColIdx(root, llast(rollup_groupclauses)); - } + if (parse->groupingSets) + top_grpColIdx = remap_groupColIdx(root, llast(rollup_groupclauses)); /* * If we need a Sort operation on the input, generate that. 
diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out index 842c2aec7e210..2e12a53d69fd6 100644 --- a/src/test/regress/expected/groupingsets.out +++ b/src/test/regress/expected/groupingsets.out @@ -587,4 +587,27 @@ select array(select row(v.a,s1.*) from (select two,four, count(*) from onek grou {"(2,0,0,250)","(2,0,2,250)","(2,0,,500)","(2,1,1,250)","(2,1,3,250)","(2,1,,500)","(2,,0,250)","(2,,1,250)","(2,,2,250)","(2,,3,250)","(2,,,1000)"} (2 rows) +-- Grouping on text columns +select sum(ten) from onek group by two, rollup(four::text) order by 1; + sum +------ + 1000 + 1000 + 1250 + 1250 + 2000 + 2500 +(6 rows) + +select sum(ten) from onek group by rollup(four::text), two order by 1; + sum +------ + 1000 + 1000 + 1250 + 1250 + 2000 + 2500 +(6 rows) + -- end diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql index 0bffb8531c2c6..eeea995f337a2 100644 --- a/src/test/regress/sql/groupingsets.sql +++ b/src/test/regress/sql/groupingsets.sql @@ -162,4 +162,8 @@ group by rollup(ten); select * from (values (1),(2)) v(a) left join lateral (select v.a, four, ten, count(*) from onek group by cube(four,ten)) s on true order by v.a,four,ten; select array(select row(v.a,s1.*) from (select two,four, count(*) from onek group by cube(two,four) order by two,four) s1) from (values (1),(2)) v(a); +-- Grouping on text columns +select sum(ten) from onek group by two, rollup(four::text) order by 1; +select sum(ten) from onek group by rollup(four::text), two order by 1; + -- end From 3500d1cc78f61927e05c0e73158b87ff24f81c09 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sun, 26 Jul 2015 15:34:29 +0200 Subject: [PATCH 093/442] Recognize GROUPING() as a aggregate expression. Previously GROUPING() was not recognized as a aggregate expression, erroneously allowing the planner to move it from HAVING to WHERE. Author: Jeevan Chalke Reviewed-By: Andrew Gierth Discussion: CAM2+6=WG9omG5rFOMAYBweJxmpTaapvVp5pCeMrE6BfpCwr4Og@mail.gmail.com Backpatch: 9.5, where grouping sets were introduced --- src/backend/optimizer/util/clauses.c | 7 ++- src/test/regress/expected/groupingsets.out | 62 ++++++++++++++++++++++ src/test/regress/sql/groupingsets.sql | 17 ++++++ 3 files changed, 85 insertions(+), 1 deletion(-) diff --git a/src/backend/optimizer/util/clauses.c b/src/backend/optimizer/util/clauses.c index 0137e0ecfced2..c72dbef1c8344 100644 --- a/src/backend/optimizer/util/clauses.c +++ b/src/backend/optimizer/util/clauses.c @@ -390,7 +390,7 @@ make_ands_implicit(Expr *clause) /* * contain_agg_clause - * Recursively search for Aggref nodes within a clause. + * Recursively search for Aggref/GroupingFunc nodes within a clause. * * Returns true if any aggregate found. 
* @@ -417,6 +417,11 @@ contain_agg_clause_walker(Node *node, void *context) Assert(((Aggref *) node)->agglevelsup == 0); return true; /* abort the tree traversal and return true */ } + if (IsA(node, GroupingFunc)) + { + Assert(((GroupingFunc *) node)->agglevelsup == 0); + return true; /* abort the tree traversal and return true */ + } Assert(!IsA(node, SubLink)); return expression_tree_walker(node, contain_agg_clause_walker, context); } diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out index 2e12a53d69fd6..bdd77f8979c72 100644 --- a/src/test/regress/expected/groupingsets.out +++ b/src/test/regress/expected/groupingsets.out @@ -486,6 +486,68 @@ having exists (select 1 from onek b where sum(distinct a.four) = b.four); 9 | 3 (25 rows) +-- HAVING with GROUPING queries +select ten, grouping(ten) from onek +group by grouping sets(ten) having grouping(ten) >= 0 +order by 2,1; + ten | grouping +-----+---------- + 0 | 0 + 1 | 0 + 2 | 0 + 3 | 0 + 4 | 0 + 5 | 0 + 6 | 0 + 7 | 0 + 8 | 0 + 9 | 0 +(10 rows) + +select ten, grouping(ten) from onek +group by grouping sets(ten, four) having grouping(ten) > 0 +order by 2,1; + ten | grouping +-----+---------- + | 1 + | 1 + | 1 + | 1 +(4 rows) + +select ten, grouping(ten) from onek +group by rollup(ten) having grouping(ten) > 0 +order by 2,1; + ten | grouping +-----+---------- + | 1 +(1 row) + +select ten, grouping(ten) from onek +group by cube(ten) having grouping(ten) > 0 +order by 2,1; + ten | grouping +-----+---------- + | 1 +(1 row) + +select ten, grouping(ten) from onek +group by (ten) having grouping(ten) >= 0 +order by 2,1; + ten | grouping +-----+---------- + 0 | 0 + 1 | 0 + 2 | 0 + 3 | 0 + 4 | 0 + 5 | 0 + 6 | 0 + 7 | 0 + 8 | 0 + 9 | 0 +(10 rows) + -- FILTER queries select ten, sum(distinct four) filter (where four::text ~ '123') from onek a group by rollup(ten); diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql index eeea995f337a2..8eb580812a9ef 100644 --- a/src/test/regress/sql/groupingsets.sql +++ b/src/test/regress/sql/groupingsets.sql @@ -154,6 +154,23 @@ select ten, sum(distinct four) from onek a group by grouping sets((ten,four),(ten)) having exists (select 1 from onek b where sum(distinct a.four) = b.four); +-- HAVING with GROUPING queries +select ten, grouping(ten) from onek +group by grouping sets(ten) having grouping(ten) >= 0 +order by 2,1; +select ten, grouping(ten) from onek +group by grouping sets(ten, four) having grouping(ten) > 0 +order by 2,1; +select ten, grouping(ten) from onek +group by rollup(ten) having grouping(ten) > 0 +order by 2,1; +select ten, grouping(ten) from onek +group by cube(ten) having grouping(ten) > 0 +order by 2,1; +select ten, grouping(ten) from onek +group by (ten) having grouping(ten) >= 0 +order by 2,1; + -- FILTER queries select ten, sum(distinct four) filter (where four::text ~ '123') from onek a group by rollup(ten); From 29e4455d7139d0b1bf8d3b62e566e7bb20cf0ec6 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sun, 26 Jul 2015 15:56:26 +0200 Subject: [PATCH 094/442] Allow to push down clauses from HAVING to WHERE when grouping sets are used. Previously we disallowed pushing down quals to WHERE in the presence of grouping sets. That's overly restrictive. We now instead copy quals to WHERE if applicable, leaving the one in HAVING in place. That's because, at that stage of the planning process, it's nontrivial to determine if it's safe to remove the one in HAVING. 
Author: Andrew Gierth Discussion: 874mkt3l59.fsf@news-spur.riddles.org.uk Backpatch: 9.5, where grouping sets were introduced. This isn't exactly a bugfix, but it seems better to keep the branches in sync at this point. --- src/backend/optimizer/plan/planner.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 11678388fab7c..6ee411eec870a 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -570,13 +570,12 @@ subquery_planner(PlannerGlobal *glob, Query *parse, if (contain_agg_clause(havingclause) || contain_volatile_functions(havingclause) || - contain_subplans(havingclause) || - parse->groupingSets) + contain_subplans(havingclause)) { /* keep it in HAVING */ newHaving = lappend(newHaving, havingclause); } - else if (parse->groupClause) + else if (parse->groupClause && !parse->groupingSets) { /* move it to WHERE */ parse->jointree->quals = (Node *) From b17ae36ba9521014c5ae30cb3a3f77c439b41bb3 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sun, 26 Jul 2015 16:37:49 +0200 Subject: [PATCH 095/442] Fix flattening of nested grouping sets. Previously nested grouping set specifications accidentally weren't flattened, but instead contained the nested specification as a element in the outer list. Fix this by, as actually documented in comments, concatenating the nested set specification into the outer one. Also add tests to prevent this from breaking again. Author: Andrew Gierth, with tests from Jeevan Chalke Reported-By: Jeevan Chalke Discussion: CAM2+6=V5YvuxB+EyN4iH=GbD-XTA435TCNvnDFSD--YvXs+pww@mail.gmail.com Backpatch: 9.5, where grouping sets were introduced --- src/backend/parser/parse_clause.c | 13 ++- src/test/regress/expected/groupingsets.out | 121 +++++++++++++++++++++ src/test/regress/sql/groupingsets.sql | 29 +++++ 3 files changed, 160 insertions(+), 3 deletions(-) diff --git a/src/backend/parser/parse_clause.c b/src/backend/parser/parse_clause.c index 4e490b23b4e27..59808568a52e2 100644 --- a/src/backend/parser/parse_clause.c +++ b/src/backend/parser/parse_clause.c @@ -1812,7 +1812,7 @@ findTargetlistEntrySQL99(ParseState *pstate, Node *node, List **tlist, * Inside a grouping set (ROLLUP, CUBE, or GROUPING SETS), we expect the * content to be nested no more than 2 deep: i.e. ROLLUP((a,b),(c,d)) is * ok, but ROLLUP((a,(b,c)),d) is flattened to ((a,b,c),d), which we then - * normalize to ((a,b,c),(d)). + * (later) normalize to ((a,b,c),(d)). * * CUBE or ROLLUP can be nested inside GROUPING SETS (but not the reverse), * and we leave that alone if we find it. 
But if we see GROUPING SETS inside @@ -1881,9 +1881,16 @@ flatten_grouping_sets(Node *expr, bool toplevel, bool *hasGroupingSets) foreach(l2, gset->content) { - Node *n2 = flatten_grouping_sets(lfirst(l2), false, NULL); + Node *n1 = lfirst(l2); + Node *n2 = flatten_grouping_sets(n1, false, NULL); - result_set = lappend(result_set, n2); + if (IsA(n1, GroupingSet) && + ((GroupingSet *)n1)->kind == GROUPING_SET_SETS) + { + result_set = list_concat(result_set, (List *) n2); + } + else + result_set = lappend(result_set, n2); } /* diff --git a/src/test/regress/expected/groupingsets.out b/src/test/regress/expected/groupingsets.out index bdd77f8979c72..b0b8c4b7f26d6 100644 --- a/src/test/regress/expected/groupingsets.out +++ b/src/test/regress/expected/groupingsets.out @@ -145,6 +145,127 @@ select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum | | 12 | 36 (6 rows) +-- nesting with grouping sets +select sum(c) from gstest2 + group by grouping sets((), grouping sets((), grouping sets(()))) + order by 1 desc; + sum +----- + 12 + 12 + 12 +(3 rows) + +select sum(c) from gstest2 + group by grouping sets((), grouping sets((), grouping sets(((a, b))))) + order by 1 desc; + sum +----- + 12 + 12 + 8 + 2 + 2 +(5 rows) + +select sum(c) from gstest2 + group by grouping sets(grouping sets(rollup(c), grouping sets(cube(c)))) + order by 1 desc; + sum +----- + 12 + 12 + 6 + 6 + 6 + 6 +(6 rows) + +select sum(c) from gstest2 + group by grouping sets(a, grouping sets(a, cube(b))) + order by 1 desc; + sum +----- + 12 + 10 + 10 + 8 + 4 + 2 + 2 +(7 rows) + +select sum(c) from gstest2 + group by grouping sets(grouping sets((a, (b)))) + order by 1 desc; + sum +----- + 8 + 2 + 2 +(3 rows) + +select sum(c) from gstest2 + group by grouping sets(grouping sets((a, b))) + order by 1 desc; + sum +----- + 8 + 2 + 2 +(3 rows) + +select sum(c) from gstest2 + group by grouping sets(grouping sets(a, grouping sets(a), a)) + order by 1 desc; + sum +----- + 10 + 10 + 10 + 2 + 2 + 2 +(6 rows) + +select sum(c) from gstest2 + group by grouping sets(grouping sets(a, grouping sets(a, grouping sets(a), ((a)), a, grouping sets(a), (a)), a)) + order by 1 desc; + sum +----- + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 10 + 2 + 2 + 2 + 2 + 2 + 2 + 2 + 2 +(16 rows) + +select sum(c) from gstest2 + group by grouping sets((a,(a,b)), grouping sets((a,(a,b)),a)) + order by 1 desc; + sum +----- + 10 + 8 + 8 + 2 + 2 + 2 + 2 + 2 +(8 rows) + -- empty input: first is 0 rows, second 1, third 3 etc. 
select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); a | b | sum | count diff --git a/src/test/regress/sql/groupingsets.sql b/src/test/regress/sql/groupingsets.sql index 8eb580812a9ef..bff85d0db558c 100644 --- a/src/test/regress/sql/groupingsets.sql +++ b/src/test/regress/sql/groupingsets.sql @@ -73,6 +73,35 @@ select grouping(a), a, array_agg(b), select a, b, sum(c), sum(sum(c)) over (order by a,b) as rsum from gstest2 group by rollup (a,b) order by rsum, a, b; +-- nesting with grouping sets +select sum(c) from gstest2 + group by grouping sets((), grouping sets((), grouping sets(()))) + order by 1 desc; +select sum(c) from gstest2 + group by grouping sets((), grouping sets((), grouping sets(((a, b))))) + order by 1 desc; +select sum(c) from gstest2 + group by grouping sets(grouping sets(rollup(c), grouping sets(cube(c)))) + order by 1 desc; +select sum(c) from gstest2 + group by grouping sets(a, grouping sets(a, cube(b))) + order by 1 desc; +select sum(c) from gstest2 + group by grouping sets(grouping sets((a, (b)))) + order by 1 desc; +select sum(c) from gstest2 + group by grouping sets(grouping sets((a, b))) + order by 1 desc; +select sum(c) from gstest2 + group by grouping sets(grouping sets(a, grouping sets(a), a)) + order by 1 desc; +select sum(c) from gstest2 + group by grouping sets(grouping sets(a, grouping sets(a, grouping sets(a), ((a)), a, grouping sets(a), (a)), a)) + order by 1 desc; +select sum(c) from gstest2 + group by grouping sets((a,(a,b)), grouping sets((a,(a,b)),a)) + order by 1 desc; + -- empty input: first is 0 rows, second 1, third 3 etc. select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),a); select a, b, sum(v), count(*) from gstest_empty group by grouping sets ((a,b),()); From 13d0053f98390ad17e373cefb95e27273c0c345c Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Sun, 26 Jul 2015 18:20:41 +0200 Subject: [PATCH 096/442] Check the relevant index element in ON CONFLICT unique index inference. ON CONFLICT unique index inference had a thinko that could affect cases where the user-supplied inference clause required that an attribute match a particular (user specified) collation and/or opclass. infer_collation_opclass_match() has to check for opclass and/or collation matches and that the attribute is in the list of attributes or expressions known to be in the definition of the index under consideration. The bug was that these two conditions weren't necessarily evaluated for the same index attribute. 
Author: Peter Geoghegan Discussion: CAM3SWZR4uug=WvmGk7UgsqHn2MkEzy9YU-+8jKGO4JPhesyeWg@mail.gmail.com Backpatch: 9.5, where ON CONFLICT was introduced --- src/backend/optimizer/util/plancat.c | 46 ++++++++++++------- src/test/regress/expected/insert_conflict.out | 18 ++++++++ src/test/regress/sql/insert_conflict.sql | 12 +++++ 3 files changed, 60 insertions(+), 16 deletions(-) diff --git a/src/backend/optimizer/util/plancat.c b/src/backend/optimizer/util/plancat.c index 662b97755ab28..9442e5fa32b31 100644 --- a/src/backend/optimizer/util/plancat.c +++ b/src/backend/optimizer/util/plancat.c @@ -52,7 +52,7 @@ get_relation_info_hook_type get_relation_info_hook = NULL; static bool infer_collation_opclass_match(InferenceElem *elem, Relation idxRel, - Bitmapset *inferAttrs, List *idxExprs); + List *idxExprs); static int32 get_rel_data_width(Relation rel, int32 *attr_widths); static List *get_relation_constraints(PlannerInfo *root, Oid relationObjectId, RelOptInfo *rel, @@ -616,8 +616,7 @@ infer_arbiter_indexes(PlannerInfo *root) * this for both expressions and ordinary (non-expression) * attributes appearing as inference elements. */ - if (!infer_collation_opclass_match(elem, idxRel, inferAttrs, - idxExprs)) + if (!infer_collation_opclass_match(elem, idxRel, idxExprs)) goto next; /* @@ -682,11 +681,10 @@ infer_arbiter_indexes(PlannerInfo *root) * infer_collation_opclass_match - ensure infer element opclass/collation match * * Given unique index inference element from inference specification, if - * collation was specified, or if opclass (represented here as opfamily + - * opcintype) was specified, verify that there is at least one matching - * indexed attribute (occasionally, there may be more). Skip this in the - * common case where inference specification does not include collation or - * opclass (instead matching everything, regardless of cataloged + * collation was specified, or if opclass was specified, verify that there is + * at least one matching indexed attribute (occasionally, there may be more). + * Skip this in the common case where inference specification does not include + * collation or opclass (instead matching everything, regardless of cataloged * collation/opclass of indexed attribute). 
* * At least historically, Postgres has not offered collations or opclasses @@ -708,11 +706,12 @@ infer_arbiter_indexes(PlannerInfo *root) */ static bool infer_collation_opclass_match(InferenceElem *elem, Relation idxRel, - Bitmapset *inferAttrs, List *idxExprs) + List *idxExprs) { AttrNumber natt; - Oid inferopfamily = InvalidOid; /* OID of att opfamily */ - Oid inferopcinputtype = InvalidOid; /* OID of att opfamily */ + Oid inferopfamily = InvalidOid; /* OID of opclass opfamily */ + Oid inferopcinputtype = InvalidOid; /* OID of opclass input type */ + int nplain = 0; /* # plain attrs observed */ /* * If inference specification element lacks collation/opclass, then no @@ -735,6 +734,10 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel, Oid opfamily = idxRel->rd_opfamily[natt - 1]; Oid opcinputtype = idxRel->rd_opcintype[natt - 1]; Oid collation = idxRel->rd_indcollation[natt - 1]; + int attno = idxRel->rd_index->indkey.values[natt - 1]; + + if (attno != 0) + nplain++; if (elem->inferopclass != InvalidOid && (inferopfamily != opfamily || inferopcinputtype != opcinputtype)) @@ -750,12 +753,23 @@ infer_collation_opclass_match(InferenceElem *elem, Relation idxRel, continue; } - if ((IsA(elem->expr, Var) && - bms_is_member(((Var *) elem->expr)->varattno, inferAttrs)) || - list_member(idxExprs, elem->expr)) + /* If one matching index att found, good enough -- return true */ + if (IsA(elem->expr, Var)) { - /* Found one match - good enough */ - return true; + if (((Var *) elem->expr)->varattno == attno) + return true; + } + else if (attno == 0) + { + Node *nattExpr = list_nth(idxExprs, (natt - 1) - nplain); + + /* + * Note that unlike routines like match_index_to_operand() we + * don't need to care about RelabelType. Neither the index + * definition nor the inference clause should contain them. 
+ */ + if (equal(elem->expr, nattExpr)) + return true; } } diff --git a/src/test/regress/expected/insert_conflict.out b/src/test/regress/expected/insert_conflict.out index 325e88b572f0e..09b67db43c186 100644 --- a/src/test/regress/expected/insert_conflict.out +++ b/src/test/regress/expected/insert_conflict.out @@ -141,6 +141,24 @@ drop index collation_index_key; drop index both_index_key; drop index both_index_expr_key; -- +-- Make sure that cross matching of attribute opclass/collation does not occur +-- +create unique index cross_match on insertconflicttest(lower(fruit) collate "C", upper(fruit) text_pattern_ops); +-- fails: +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) text_pattern_ops, upper(fruit) collate "C") do nothing; +ERROR: there is no unique or exclusion constraint matching the ON CONFLICT specification +-- works: +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) collate "C", upper(fruit) text_pattern_ops) do nothing; + QUERY PLAN +----------------------------------------- + Insert on insertconflicttest + Conflict Resolution: NOTHING + Conflict Arbiter Indexes: cross_match + -> Result +(4 rows) + +drop index cross_match; +-- -- Single key tests -- create unique index key_index on insertconflicttest(key); diff --git a/src/test/regress/sql/insert_conflict.sql b/src/test/regress/sql/insert_conflict.sql index 7dd5032212839..e981e67fd27b9 100644 --- a/src/test/regress/sql/insert_conflict.sql +++ b/src/test/regress/sql/insert_conflict.sql @@ -57,6 +57,18 @@ drop index collation_index_key; drop index both_index_key; drop index both_index_expr_key; +-- +-- Make sure that cross matching of attribute opclass/collation does not occur +-- +create unique index cross_match on insertconflicttest(lower(fruit) collate "C", upper(fruit) text_pattern_ops); + +-- fails: +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) text_pattern_ops, upper(fruit) collate "C") do nothing; +-- works: +explain (costs off) insert into insertconflicttest values(0, 'Crowberry') on conflict (lower(fruit) collate "C", upper(fruit) text_pattern_ops) do nothing; + +drop index cross_match; + -- -- Single key tests -- From 7481c6c2aa379f8d3427819fcaa0eac5c93b1dcf Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 26 Jul 2015 16:19:08 -0400 Subject: [PATCH 097/442] Make entirely-dummy appendrels get marked as such in set_append_rel_size. The planner generally expects that the estimated rowcount of any relation is at least one row, *unless* it has been proven empty by constraint exclusion or similar mechanisms, which is marked by installing a dummy path as the rel's cheapest path (cf. IS_DUMMY_REL). When I split up allpaths.c's processing of base rels into separate set_base_rel_sizes and set_base_rel_pathlists steps, the intention was that dummy rels would get marked as such during the "set size" step; this is what justifies an Assert in indxpath.c's get_loop_count that other relations should either be dummy or have positive rowcount. Unfortunately I didn't get that quite right for append relations: if all the child rels have been proven empty then set_append_rel_size would come up with a rowcount of zero, which is correct, but it didn't then do set_dummy_rel_pathlist. (We would have ended up with the right state after set_append_rel_pathlist, but that's too late, if we generate indexpaths for some other rel first.) 
In addition to fixing the actual bug, I installed an Assert enforcing this convention in set_rel_size; that then allows simplification of a couple of now-redundant tests for zero rowcount in set_append_rel_size. Also, to cover the possibility that third-party FDWs have been careless about not returning a zero rowcount estimate, apply clamp_row_est to whatever an FDW comes up with as the rows estimate. Per report from Andreas Seltenreich. Back-patch to 9.2. Earlier branches did not have the separation between set_base_rel_sizes and set_base_rel_pathlists steps, so there was no intermediate state where an appendrel would have had inconsistent rowcount and pathlist. It's possible that adding the Assert to set_rel_size would be a good idea in older branches too; but since they're not under development any more, it's likely not worth the trouble. --- src/backend/optimizer/path/allpaths.c | 104 +++++++++++++++----------- src/test/regress/expected/join.out | 20 +++++ src/test/regress/sql/join.sql | 12 +++ 3 files changed, 94 insertions(+), 42 deletions(-) diff --git a/src/backend/optimizer/path/allpaths.c b/src/backend/optimizer/path/allpaths.c index 1590be1167508..8fc1cfd15f533 100644 --- a/src/backend/optimizer/path/allpaths.c +++ b/src/backend/optimizer/path/allpaths.c @@ -360,6 +360,11 @@ set_rel_size(PlannerInfo *root, RelOptInfo *rel, break; } } + + /* + * We insist that all non-dummy rels have a nonzero rowcount estimate. + */ + Assert(rel->rows > 0 || IS_DUMMY_REL(rel)); } /* @@ -579,6 +584,9 @@ set_foreign_size(PlannerInfo *root, RelOptInfo *rel, RangeTblEntry *rte) /* Let FDW adjust the size estimates, if it can */ rel->fdwroutine->GetForeignRelSize(root, rel, rte->relid); + + /* ... but do not let it set the rows estimate to zero */ + rel->rows = clamp_row_est(rel->rows); } /* @@ -608,6 +616,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, Index rti, RangeTblEntry *rte) { int parentRTindex = rti; + bool has_live_children; double parent_rows; double parent_size; double *parent_attrsizes; @@ -628,6 +637,7 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, * Note: if you consider changing this logic, beware that child rels could * have zero rows and/or width, if they were excluded by constraints. */ + has_live_children = false; parent_rows = 0; parent_size = 0; nattrs = rel->max_attr - rel->min_attr + 1; @@ -755,70 +765,80 @@ set_append_rel_size(PlannerInfo *root, RelOptInfo *rel, if (IS_DUMMY_REL(childrel)) continue; + /* We have at least one live child. */ + has_live_children = true; + /* * Accumulate size information from each live child. */ - if (childrel->rows > 0) + Assert(childrel->rows > 0); + + parent_rows += childrel->rows; + parent_size += childrel->width * childrel->rows; + + /* + * Accumulate per-column estimates too. We need not do anything for + * PlaceHolderVars in the parent list. If child expression isn't a + * Var, or we didn't record a width estimate for it, we have to fall + * back on a datatype-based estimate. + * + * By construction, child's reltargetlist is 1-to-1 with parent's. + */ + forboth(parentvars, rel->reltargetlist, + childvars, childrel->reltargetlist) { - parent_rows += childrel->rows; - parent_size += childrel->width * childrel->rows; + Var *parentvar = (Var *) lfirst(parentvars); + Node *childvar = (Node *) lfirst(childvars); - /* - * Accumulate per-column estimates too. We need not do anything - * for PlaceHolderVars in the parent list. 
If child expression - * isn't a Var, or we didn't record a width estimate for it, we - * have to fall back on a datatype-based estimate. - * - * By construction, child's reltargetlist is 1-to-1 with parent's. - */ - forboth(parentvars, rel->reltargetlist, - childvars, childrel->reltargetlist) + if (IsA(parentvar, Var)) { - Var *parentvar = (Var *) lfirst(parentvars); - Node *childvar = (Node *) lfirst(childvars); + int pndx = parentvar->varattno - rel->min_attr; + int32 child_width = 0; - if (IsA(parentvar, Var)) + if (IsA(childvar, Var) && + ((Var *) childvar)->varno == childrel->relid) { - int pndx = parentvar->varattno - rel->min_attr; - int32 child_width = 0; + int cndx = ((Var *) childvar)->varattno - childrel->min_attr; - if (IsA(childvar, Var) && - ((Var *) childvar)->varno == childrel->relid) - { - int cndx = ((Var *) childvar)->varattno - childrel->min_attr; - - child_width = childrel->attr_widths[cndx]; - } - if (child_width <= 0) - child_width = get_typavgwidth(exprType(childvar), - exprTypmod(childvar)); - Assert(child_width > 0); - parent_attrsizes[pndx] += child_width * childrel->rows; + child_width = childrel->attr_widths[cndx]; } + if (child_width <= 0) + child_width = get_typavgwidth(exprType(childvar), + exprTypmod(childvar)); + Assert(child_width > 0); + parent_attrsizes[pndx] += child_width * childrel->rows; } } } - /* - * Save the finished size estimates. - */ - rel->rows = parent_rows; - if (parent_rows > 0) + if (has_live_children) { + /* + * Save the finished size estimates. + */ int i; + Assert(parent_rows > 0); + rel->rows = parent_rows; rel->width = rint(parent_size / parent_rows); for (i = 0; i < nattrs; i++) rel->attr_widths[i] = rint(parent_attrsizes[i] / parent_rows); + + /* + * Set "raw tuples" count equal to "rows" for the appendrel; needed + * because some places assume rel->tuples is valid for any baserel. + */ + rel->tuples = parent_rows; } else - rel->width = 0; /* attr_widths should be zero already */ - - /* - * Set "raw tuples" count equal to "rows" for the appendrel; needed - * because some places assume rel->tuples is valid for any baserel. - */ - rel->tuples = parent_rows; + { + /* + * All children were excluded by constraints, so mark the whole + * appendrel dummy. We must do this in this phase so that the rel's + * dummy-ness is visible when we generate paths for other rels. 
+ */ + set_dummy_rel_pathlist(rel); + } pfree(parent_attrsizes); } diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index 139f7e049874b..96ec997ed1661 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -2164,6 +2164,26 @@ select count(*) from tenk1 x where (1 row) rollback; +-- +-- regression test: be sure we cope with proven-dummy append rels +-- +explain (costs off) +select aa, bb, unique1, unique1 + from tenk1 right join b on aa = unique1 + where bb < bb and bb is null; + QUERY PLAN +-------------------------- + Result + One-Time Filter: false +(2 rows) + +select aa, bb, unique1, unique1 + from tenk1 right join b on aa = unique1 + where bb < bb and bb is null; + aa | bb | unique1 | unique1 +----+----+---------+--------- +(0 rows) + -- -- Clean up -- diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index 5b65ea8c9224a..ada78db264470 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -353,6 +353,17 @@ select count(*) from tenk1 x where x.unique1 in (select aa.f1 from int4_tbl aa,float8_tbl bb where aa.f1=bb.f1); rollback; +-- +-- regression test: be sure we cope with proven-dummy append rels +-- +explain (costs off) +select aa, bb, unique1, unique1 + from tenk1 right join b on aa = unique1 + where bb < bb and bb is null; + +select aa, bb, unique1, unique1 + from tenk1 right join b on aa = unique1 + where bb < bb and bb is null; -- -- Clean up @@ -1120,6 +1131,7 @@ select atts.relid::regclass, s.* from pg_stats s join a.attrelid::regclass::text join (select unnest(indkey) attnum, indexrelid from pg_index i) atts on atts.attnum = a.attnum where schemaname != 'pg_catalog'; + -- -- Test LATERAL -- From 8fb61e0b5430b8dada0eca18b99e3956f4eaf6cd Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 26 Jul 2015 17:44:27 -0400 Subject: [PATCH 098/442] Fix oversight in flattening of subqueries with empty FROM. I missed a restriction that commit f4abd0241de20d5d6a79b84992b9e88603d44134 should have enforced: we can't pull up an empty-FROM subquery if it's under an outer join, because then we'd need to wrap its output columns in PlaceHolderVars. As the code currently stands, the PHVs end up with empty relid sets, which doesn't work (and is correctly caught by an Assert). It's possible that this could be fixed by assigning the PHVs the relid sets of the parent FromExpr/JoinExpr, but getting that to work is more complication than I care to add right now; indeed it's likely that we'll never bother, since pulling up empty-FROM subqueries is a rather marginal optimization anyway. Per report from Andreas Seltenreich. Back-patch to 9.5 where the faulty code was added. --- src/backend/optimizer/prep/prepjointree.c | 52 +++++++++++++++-------- src/test/regress/expected/join.out | 34 +++++++++++++++ src/test/regress/sql/join.sql | 13 ++++++ 3 files changed, 81 insertions(+), 18 deletions(-) diff --git a/src/backend/optimizer/prep/prepjointree.c b/src/backend/optimizer/prep/prepjointree.c index 34144ccaf0fa6..9bf1c662b5371 100644 --- a/src/backend/optimizer/prep/prepjointree.c +++ b/src/backend/optimizer/prep/prepjointree.c @@ -1435,25 +1435,40 @@ is_simple_subquery(Query *subquery, RangeTblEntry *rte, /* * Don't pull up a subquery with an empty jointree, unless it has no quals - * and deletion_ok is TRUE. 
query_planner() will correctly generate a - * Result plan for a jointree that's totally empty, but we can't cope with - * an empty FromExpr appearing lower down in a jointree: we identify join - * rels via baserelid sets, so we couldn't distinguish a join containing - * such a FromExpr from one without it. This would for example break the - * PlaceHolderVar mechanism, since we'd have no way to identify where to - * evaluate a PHV coming out of the subquery. We can only handle such - * cases if the place where the subquery is linked is a FromExpr or inner - * JOIN that would still be nonempty after removal of the subquery, so - * that it's still identifiable via its contained baserelids. Safe - * contexts are signaled by deletion_ok. But even in a safe context, we - * must keep the subquery if it has any quals, because it's unclear where - * to put them in the upper query. (Note that deletion of a subquery is - * also dependent on the check below that its targetlist contains no - * set-returning functions. Deletion from a FROM list or inner JOIN is - * okay only if the subquery must return exactly one row.) + * and deletion_ok is TRUE and we're not underneath an outer join. + * + * query_planner() will correctly generate a Result plan for a jointree + * that's totally empty, but we can't cope with an empty FromExpr + * appearing lower down in a jointree: we identify join rels via baserelid + * sets, so we couldn't distinguish a join containing such a FromExpr from + * one without it. We can only handle such cases if the place where the + * subquery is linked is a FromExpr or inner JOIN that would still be + * nonempty after removal of the subquery, so that it's still identifiable + * via its contained baserelids. Safe contexts are signaled by + * deletion_ok. + * + * But even in a safe context, we must keep the subquery if it has any + * quals, because it's unclear where to put them in the upper query. + * + * Also, we must forbid pullup if such a subquery is underneath an outer + * join, because then we might need to wrap its output columns with + * PlaceHolderVars, and the PHVs would then have empty relid sets meaning + * we couldn't tell where to evaluate them. (This test is separate from + * the deletion_ok flag for possible future expansion: deletion_ok tells + * whether the immediate parent site in the jointree could cope, not + * whether we'd have PHV issues. It's possible this restriction could be + * fixed by letting the PHVs use the relids of the parent jointree item, + * but that complication is for another day.) + * + * Note that deletion of a subquery is also dependent on the check below + * that its targetlist contains no set-returning functions. Deletion from + * a FROM list or inner JOIN is okay only if the subquery must return + * exactly one row. */ if (subquery->jointree->fromlist == NIL && - (subquery->jointree->quals || !deletion_ok)) + (subquery->jointree->quals != NULL || + !deletion_ok || + lowest_outer_join != NULL)) return false; /* @@ -1667,7 +1682,8 @@ is_simple_values(PlannerInfo *root, RangeTblEntry *rte, bool deletion_ok) /* * Because VALUES can't appear under an outer join (or at least, we won't - * try to pull it up if it does), we need not worry about LATERAL. + * try to pull it up if it does), we need not worry about LATERAL, nor + * about validity of PHVs for the VALUES' outputs. 
*/ /* diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index 96ec997ed1661..4ce01cbcd5b6b 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -2184,6 +2184,40 @@ select aa, bb, unique1, unique1 ----+----+---------+--------- (0 rows) +-- +-- regression test: check handling of empty-FROM subquery underneath outer join +-- +explain (costs off) +select * from int8_tbl i1 left join (int8_tbl i2 join + (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2 +order by 1, 2; + QUERY PLAN +------------------------------------------------- + Sort + Sort Key: i1.q1, i1.q2 + -> Hash Left Join + Hash Cond: (i1.q2 = i2.q2) + -> Seq Scan on int8_tbl i1 + -> Hash + -> Hash Join + Hash Cond: (i2.q1 = (123)) + -> Seq Scan on int8_tbl i2 + -> Hash + -> Result +(11 rows) + +select * from int8_tbl i1 left join (int8_tbl i2 join + (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2 +order by 1, 2; + q1 | q2 | q1 | q2 | x +------------------+-------------------+-----+------------------+----- + 123 | 456 | 123 | 456 | 123 + 123 | 4567890123456789 | 123 | 4567890123456789 | 123 + 4567890123456789 | -4567890123456789 | | | + 4567890123456789 | 123 | | | + 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 123 +(5 rows) + -- -- Clean up -- diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index ada78db264470..3a71dbf4dffe3 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -365,6 +365,19 @@ select aa, bb, unique1, unique1 from tenk1 right join b on aa = unique1 where bb < bb and bb is null; +-- +-- regression test: check handling of empty-FROM subquery underneath outer join +-- +explain (costs off) +select * from int8_tbl i1 left join (int8_tbl i2 join + (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2 +order by 1, 2; + +select * from int8_tbl i1 left join (int8_tbl i2 join + (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2 +order by 1, 2; + + -- -- Clean up -- From dd20a97219b569b92bdcbd0c195c214340298b4a Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 27 Jul 2015 11:46:11 +0300 Subject: [PATCH 099/442] Remove false comment about speculative insertion. There is no full discussion of speculative insertions in the executor README. There is a high-level explanation in execIndexing.c, but it doesn't seem necessary to refer it from here. Peter Geoghegan --- src/backend/executor/nodeModifyTable.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 874ca6a69bcd2..1ef76d0928593 100644 --- a/src/backend/executor/nodeModifyTable.c +++ b/src/backend/executor/nodeModifyTable.c @@ -351,8 +351,7 @@ ExecInsert(ModifyTableState *mtstate, * * We loop back here if we find a conflict below, either during * the pre-check, or when we re-check after inserting the tuple - * speculatively. See the executor README for a full discussion - * of speculative insertion. + * speculatively. */ vlock: specConflict = false; From 6a0a388c202098db207fff8e571f599296aa57d8 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 27 Jul 2015 12:24:27 +0300 Subject: [PATCH 100/442] Avoid calling PageGetSpecialPointer() on an all-zeros page. That was otherwise harmless, but tripped the new assertion in PageGetSpecialPointer(). Reported by Amit Langote. Backpatch to 9.5, where the assertion was added. 
--- src/backend/access/nbtree/nbtree.c | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/backend/access/nbtree/nbtree.c b/src/backend/access/nbtree/nbtree.c index 9431ab5d04281..cf4a6dc7c4780 100644 --- a/src/backend/access/nbtree/nbtree.c +++ b/src/backend/access/nbtree/nbtree.c @@ -881,7 +881,7 @@ btvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno) BlockNumber recurse_to; Buffer buf; Page page; - BTPageOpaque opaque; + BTPageOpaque opaque = NULL; restart: delete_now = false; @@ -900,9 +900,11 @@ btvacuumpage(BTVacState *vstate, BlockNumber blkno, BlockNumber orig_blkno) info->strategy); LockBuffer(buf, BT_READ); page = BufferGetPage(buf); - opaque = (BTPageOpaque) PageGetSpecialPointer(page); if (!PageIsNew(page)) + { _bt_checkpage(rel, buf); + opaque = (BTPageOpaque) PageGetSpecialPointer(page); + } /* * If we are recursing, the only case we want to do anything with is a From 2fa8ba34804211714a6e0a7fcf5512423c77f8dd Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 27 Jul 2015 12:28:21 +0300 Subject: [PATCH 101/442] Fix handling of all-zero pages in SP-GiST vacuum. SP-GiST initialized an all-zeros page at vacuum, but that was not WAL-logged, which is not safe. You might get a torn page write, when it gets flushed to disk, and end-up with a half-initialized index page. To fix, leave it in the all-zeros state, and add it to the FSM. It will be initialized when reused. Also don't set the page-deleted flag when recycling an empty page. That was also not WAL-logged, and a torn write of that would cause the page to have an invalid checksum. Backpatch to 9.2, where SP-GiST indexes were added. --- src/backend/access/spgist/spgvacuum.c | 27 ++++++++------------------- src/include/access/spgist_private.h | 4 ++-- 2 files changed, 10 insertions(+), 21 deletions(-) diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index dc69d1ed20fcc..d40da0eeccad2 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -621,14 +621,10 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno) { /* * We found an all-zero page, which could happen if the database - * crashed just after extending the file. Initialize and recycle it. + * crashed just after extending the file. Recycle it. */ - SpGistInitBuffer(buffer, 0); - SpGistPageSetDeleted(page); - /* We don't bother to WAL-log this action; easy to redo */ - MarkBufferDirty(buffer); } - else if (SpGistPageIsDeleted(page)) + else if (PageIsEmpty(page)) { /* nothing to do */ } @@ -654,30 +650,23 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno) /* * The root pages must never be deleted, nor marked as available in FSM, * because we don't want them ever returned by a search for a place to put - * a new tuple. Otherwise, check for empty/deletable page, and make sure - * FSM knows about it. + * a new tuple. Otherwise, check for empty page, and make sure the FSM + * knows about it. 
*/ if (!SpGistBlockIsRoot(blkno)) { - /* If page is now empty, mark it deleted */ - if (PageIsEmpty(page) && !SpGistPageIsDeleted(page)) - { - SpGistPageSetDeleted(page); - /* We don't bother to WAL-log this action; easy to redo */ - MarkBufferDirty(buffer); - } - - if (SpGistPageIsDeleted(page)) + if (PageIsEmpty(page)) { RecordFreeIndexPage(index, blkno); bds->stats->pages_deleted++; } else + { + SpGistSetLastUsedPage(index, buffer); bds->lastFilledBlock = blkno; + } } - SpGistSetLastUsedPage(index, buffer); - UnlockReleaseBuffer(buffer); } diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h index 413f71e7298d3..48dadd5b2c6c7 100644 --- a/src/include/access/spgist_private.h +++ b/src/include/access/spgist_private.h @@ -48,14 +48,14 @@ typedef SpGistPageOpaqueData *SpGistPageOpaque; /* Flag bits in page special space */ #define SPGIST_META (1<<0) -#define SPGIST_DELETED (1<<1) +#define SPGIST_DELETED (1<<1) /* never set, but keep for backwards + * compatibility */ #define SPGIST_LEAF (1<<2) #define SPGIST_NULLS (1<<3) #define SpGistPageGetOpaque(page) ((SpGistPageOpaque) PageGetSpecialPointer(page)) #define SpGistPageIsMeta(page) (SpGistPageGetOpaque(page)->flags & SPGIST_META) #define SpGistPageIsDeleted(page) (SpGistPageGetOpaque(page)->flags & SPGIST_DELETED) -#define SpGistPageSetDeleted(page) (SpGistPageGetOpaque(page)->flags |= SPGIST_DELETED) #define SpGistPageIsLeaf(page) (SpGistPageGetOpaque(page)->flags & SPGIST_LEAF) #define SpGistPageStoresNulls(page) (SpGistPageGetOpaque(page)->flags & SPGIST_NULLS) From 202aea62a84135256c6aa394af2c4dbfa1700c85 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 27 Jul 2015 12:30:26 +0300 Subject: [PATCH 102/442] Reuse all-zero pages in GIN. In GIN, an all-zeros page would be leaked forever, and never reused. Just add them to the FSM in vacuum, and they will be reinitialized when grabbed from the FSM. On master and 9.5, attempting to access the page's opaque struct also caused an assertion failure, although that was otherwise harmless. Reported by Jeff Janes. Backpatch to all supported versions. --- src/backend/access/gin/ginvacuum.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/access/gin/ginvacuum.c b/src/backend/access/gin/ginvacuum.c index eba572b0d8afa..1315762ecf970 100644 --- a/src/backend/access/gin/ginvacuum.c +++ b/src/backend/access/gin/ginvacuum.c @@ -710,7 +710,7 @@ ginvacuumcleanup(PG_FUNCTION_ARGS) LockBuffer(buffer, GIN_SHARE); page = (Page) BufferGetPage(buffer); - if (GinPageIsDeleted(page)) + if (PageIsNew(page) || GinPageIsDeleted(page)) { Assert(blkno != GIN_ROOT_BLKNO); RecordFreeIndexPage(index, blkno); From 03a0a3532b47b2a634cd2700d49edc086af748a0 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 27 Jul 2015 18:27:27 +0300 Subject: [PATCH 103/442] Fix memory leak in xlogreader facility. XLogReaderFree failed to free the per-block data buffers, when they happened to not be used by the latest read WAL record. Michael Paquier. Backpatch to 9.5, where the per-block buffers were added. 
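A standalone sketch of the leak; the field names only mirror the idea, not the exact xlogreader.h structures. Per-block data buffers are allocated lazily and kept for reuse across records, so at free time a buffer can be non-NULL even though its slot was not marked in-use by the most recently read record. Freeing only the in-use slots, as the old code did, leaks such buffers; freeing every non-NULL buffer does not.

    #include <stdlib.h>

    #define MAX_BLOCKS 33               /* stand-in for XLR_MAX_BLOCK_ID + 1 */

    typedef struct
    {
        int   in_use;                   /* touched by the last record read? */
        char *data;                     /* lazily allocated, reused across records */
    } DecodedBlock;

    static void
    reader_free(DecodedBlock *blocks)
    {
        for (int i = 0; i < MAX_BLOCKS; i++)
        {
            if (blocks[i].data)         /* not: if (blocks[i].in_use && ...) */
                free(blocks[i].data);
        }
    }

    int
    main(void)
    {
        DecodedBlock blocks[MAX_BLOCKS] = {{0, NULL}};

        blocks[2].data = malloc(8192);  /* allocated while reading an earlier record */
        blocks[2].in_use = 0;           /* ...but the last record did not use block 2 */

        reader_free(blocks);            /* the old loop would have leaked blocks[2].data */
        return 0;
    }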
--- src/backend/access/transam/xlogreader.c | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index a9e926c5a283d..f1b209b1ad1a8 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -126,11 +126,8 @@ XLogReaderFree(XLogReaderState *state) for (block_id = 0; block_id <= state->max_block_id; block_id++) { - if (state->blocks[block_id].in_use) - { - if (state->blocks[block_id].data) - pfree(state->blocks[block_id].data); - } + if (state->blocks[block_id].data) + pfree(state->blocks[block_id].data); } if (state->main_data) pfree(state->main_data); From 9c88e06b5a24341e0e82fbab7b02de271adf1f47 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 27 Jul 2015 18:54:09 +0300 Subject: [PATCH 104/442] Don't assume that PageIsEmpty() returns true on an all-zeros page. It does currently, and I don't see us changing that any time soon, but we don't make that assumption anywhere else. Per Tom Lane's suggestion. Backpatch to 9.2, like the previous patch that added this assumption. --- src/backend/access/spgist/spgvacuum.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/access/spgist/spgvacuum.c b/src/backend/access/spgist/spgvacuum.c index d40da0eeccad2..06c0b0af7eedc 100644 --- a/src/backend/access/spgist/spgvacuum.c +++ b/src/backend/access/spgist/spgvacuum.c @@ -655,7 +655,7 @@ spgvacuumpage(spgBulkDeleteState *bds, BlockNumber blkno) */ if (!SpGistBlockIsRoot(blkno)) { - if (PageIsEmpty(page)) + if (PageIsNew(page) || PageIsEmpty(page)) { RecordFreeIndexPage(index, blkno); bds->stats->pages_deleted++; From d09c873f637b783ef36770c88de551efb08c9e4a Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 27 Jul 2015 20:38:44 +0300 Subject: [PATCH 105/442] Fix memory leaks in pg_rewind. Several PQclear() calls were missing. Originally reported by Vladimir Borodin in the pg_rewind github project, patch by Michael Paquier. --- src/bin/pg_rewind/libpq_fetch.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/src/bin/pg_rewind/libpq_fetch.c b/src/bin/pg_rewind/libpq_fetch.c index 1979fbcb8a5f3..0e186f2c36f58 100644 --- a/src/bin/pg_rewind/libpq_fetch.c +++ b/src/bin/pg_rewind/libpq_fetch.c @@ -350,6 +350,8 @@ libpqGetFile(const char *filename, size_t *filesize) memcpy(result, PQgetvalue(res, 0, 0), len); result[len] = '\0'; + PQclear(res); + pg_log(PG_DEBUG, "fetched file \"%s\", length %d\n", filename, len); if (filesize) @@ -410,6 +412,7 @@ libpq_executeFileMap(filemap_t *map) if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("could not create temporary table: %s", PQresultErrorMessage(res)); + PQclear(res); sql = "COPY fetchchunks FROM STDIN"; res = PQexec(conn, sql); @@ -417,6 +420,7 @@ libpq_executeFileMap(filemap_t *map) if (PQresultStatus(res) != PGRES_COPY_IN) pg_fatal("could not send file list: %s", PQresultErrorMessage(res)); + PQclear(res); for (i = 0; i < map->narray; i++) { @@ -464,6 +468,7 @@ libpq_executeFileMap(filemap_t *map) if (PQresultStatus(res) != PGRES_COMMAND_OK) pg_fatal("unexpected result while sending file list: %s", PQresultErrorMessage(res)); + PQclear(res); } /* From 1b7f125bf7e74fb3b128b3bcbe593d9e7327ff50 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 27 Jul 2015 21:48:51 +0300 Subject: [PATCH 106/442] Don't assume that 'char' is signed. On some platforms, notably ARM and PowerPC, 'char' is unsigned by default. 
This fixes an assertion failure at WAL replay on such platforms. Reported by Noah Misch. Backpatch to 9.5, where this was broken. --- src/include/access/spgist_private.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/include/access/spgist_private.h b/src/include/access/spgist_private.h index 48dadd5b2c6c7..fae10501a24f5 100644 --- a/src/include/access/spgist_private.h +++ b/src/include/access/spgist_private.h @@ -465,7 +465,7 @@ typedef struct spgxlogAddNode * -1: parent not updated *---- */ - char parentBlk; + int8 parentBlk; OffsetNumber offnumParent; /* offset within the parent page */ uint16 nodeI; From f3cf8b6b6edc69f94fa1bcaa5b9b806e14281098 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 27 Jul 2015 15:58:46 -0400 Subject: [PATCH 107/442] Fix pointer-arithmetic thinko in pg_stat_ssl patch. Nasty memory-stomp bug in commit 9029f4b37406b21a. It's not apparent how this survived even cursory testing :-(. Per report from Peter Holzer. --- src/backend/postmaster/pgstat.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index e9fbc381cc97a..887095def0666 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -3024,7 +3024,7 @@ pgstat_read_current_status(void) localappname += NAMEDATALEN; localactivity += pgstat_track_activity_query_size; #ifdef USE_SSL - localsslstatus += sizeof(PgBackendSSLStatus); + localsslstatus++; #endif localNumBackends++; } From cb0bb53204d84cecf51022313fe47d625de8f01e Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 27 Jul 2015 16:29:14 -0400 Subject: [PATCH 108/442] Further code review for pg_stat_ssl patch. Fix additional bogosity in commit 9029f4b37406b21a. Include the BackendSslStatusBuffer in the BackendStatusShmemSize calculation, avoid ugly and error-prone casts to char* and back, put related code stanzas into a consistent order (and fix a couple of previous instances of that sin). All cosmetic except for the size oversight. 
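The pointer-arithmetic thinko fixed in the previous commit is easy to reproduce outside the backend; in the standalone sketch below, Slot is just a stand-in struct. On a typed pointer, p += sizeof(T) advances by sizeof(T) elements, i.e. sizeof(T) squared bytes, stomping far past the intended entry, whereas p++ advances by exactly one element. Keeping the pointers typed, instead of casting to char * and back as the hunks below clean up, makes the correct idiom the natural one.

    #include <stdio.h>

    typedef struct { char payload[64]; } Slot;  /* stand-in for PgBackendSSLStatus */

    int
    main(void)
    {
        Slot  buf[sizeof(Slot) + 1];    /* oversized so the demo stays in bounds */
        Slot *p;

        p = buf;
        p += sizeof(Slot);              /* the thinko: moves sizeof(Slot) elements */
        printf("wrong stride: %td elements past the start\n", p - buf);

        p = buf;
        p++;                            /* the fix: moves exactly one element */
        printf("right stride: %td element past the start\n", p - buf);
        return 0;
    }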
--- src/backend/postmaster/pgstat.c | 65 +++++++++++++++++++-------------- 1 file changed, 38 insertions(+), 27 deletions(-) diff --git a/src/backend/postmaster/pgstat.c b/src/backend/postmaster/pgstat.c index 887095def0666..ab018c474140a 100644 --- a/src/backend/postmaster/pgstat.c +++ b/src/backend/postmaster/pgstat.c @@ -2478,8 +2478,8 @@ pgstat_fetch_global(void) static PgBackendStatus *BackendStatusArray = NULL; static PgBackendStatus *MyBEEntry = NULL; -static char *BackendClientHostnameBuffer = NULL; static char *BackendAppnameBuffer = NULL; +static char *BackendClientHostnameBuffer = NULL; static char *BackendActivityBuffer = NULL; static Size BackendActivityBufferSize = 0; #ifdef USE_SSL @@ -2495,13 +2495,22 @@ BackendStatusShmemSize(void) { Size size; + /* BackendStatusArray: */ size = mul_size(sizeof(PgBackendStatus), MaxBackends); + /* BackendAppnameBuffer: */ + size = add_size(size, + mul_size(NAMEDATALEN, MaxBackends)); + /* BackendClientHostnameBuffer: */ size = add_size(size, mul_size(NAMEDATALEN, MaxBackends)); + /* BackendActivityBuffer: */ size = add_size(size, mul_size(pgstat_track_activity_query_size, MaxBackends)); +#ifdef USE_SSL + /* BackendSslStatusBuffer: */ size = add_size(size, - mul_size(NAMEDATALEN, MaxBackends)); + mul_size(sizeof(PgBackendSSLStatus), MaxBackends)); +#endif return size; } @@ -2566,26 +2575,6 @@ CreateSharedBackendStatus(void) } } -#ifdef USE_SSL - /* Create or attach to the shared SSL status buffer */ - size = mul_size(sizeof(PgBackendSSLStatus), MaxBackends); - BackendSslStatusBuffer = (PgBackendSSLStatus *) - ShmemInitStruct("Backend SSL Status Buffer", size, &found); - - if (!found) - { - MemSet(BackendSslStatusBuffer, 0, size); - - /* Initialize st_sslstatus pointers. */ - buffer = (char *) BackendSslStatusBuffer; - for (i = 0; i < MaxBackends; i++) - { - BackendStatusArray[i].st_sslstatus = (PgBackendSSLStatus *) buffer; - buffer += sizeof(PgBackendSSLStatus); - } - } -#endif - /* Create or attach to the shared activity buffer */ BackendActivityBufferSize = mul_size(pgstat_track_activity_query_size, MaxBackends); @@ -2606,6 +2595,28 @@ CreateSharedBackendStatus(void) buffer += pgstat_track_activity_query_size; } } + +#ifdef USE_SSL + /* Create or attach to the shared SSL status buffer */ + size = mul_size(sizeof(PgBackendSSLStatus), MaxBackends); + BackendSslStatusBuffer = (PgBackendSSLStatus *) + ShmemInitStruct("Backend SSL Status Buffer", size, &found); + + if (!found) + { + PgBackendSSLStatus *ptr; + + MemSet(BackendSslStatusBuffer, 0, size); + + /* Initialize st_sslstatus pointers. 
*/ + ptr = BackendSslStatusBuffer; + for (i = 0; i < MaxBackends; i++) + { + BackendStatusArray[i].st_sslstatus = ptr; + ptr++; + } + } +#endif } @@ -2932,11 +2943,11 @@ pgstat_read_current_status(void) volatile PgBackendStatus *beentry; LocalPgBackendStatus *localtable; LocalPgBackendStatus *localentry; + char *localappname, + *localactivity; #ifdef USE_SSL PgBackendSSLStatus *localsslstatus; #endif - char *localappname, - *localactivity; int i; Assert(!pgStatRunningInCollector); @@ -2951,15 +2962,15 @@ pgstat_read_current_status(void) localappname = (char *) MemoryContextAlloc(pgStatLocalContext, NAMEDATALEN * MaxBackends); + localactivity = (char *) + MemoryContextAlloc(pgStatLocalContext, + pgstat_track_activity_query_size * MaxBackends); #ifdef USE_SSL localsslstatus = (PgBackendSSLStatus *) MemoryContextAlloc(pgStatLocalContext, sizeof(PgBackendSSLStatus) * MaxBackends); #endif - localactivity = (char *) - MemoryContextAlloc(pgStatLocalContext, - pgstat_track_activity_query_size * MaxBackends); localNumBackends = 0; beentry = BackendStatusArray; From 5d179a28fb4c819f3812c40fa7e626b1d3081982 Mon Sep 17 00:00:00 2001 From: Stephen Frost Date: Mon, 27 Jul 2015 16:48:26 -0400 Subject: [PATCH 109/442] Improve RLS handling in copy.c To avoid a race condition where the relation being COPY'd could be changed into a view or otherwise modified, keep the original lock on the relation. Further, fully qualify the relation when building the query up. Also remove the poorly thought-out Assert() and check the entire relationOids list as, post-RLS, there can certainly be multiple relations involved and the planner does not guarantee their ordering. Per discussion with Noah and Andres. Back-patch to 9.5 where RLS was introduced. --- src/backend/commands/copy.c | 45 ++++++++++++-------- src/test/regress/expected/rowsecurity.out | 50 +++++++++++++++++++++- src/test/regress/sql/rowsecurity.sql | 51 ++++++++++++++++++++++- 3 files changed, 126 insertions(+), 20 deletions(-) diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 8904676609d51..47dd3accafe24 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -896,8 +896,12 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) target->val = (Node *) cr; target->location = 1; - /* Build FROM clause */ - from = stmt->relation; + /* + * Build RangeVar for from clause, fully qualified based on the + * relation which we have opened and locked. + */ + from = makeRangeVar(get_namespace_name(RelationGetNamespace(rel)), + RelationGetRelationName(rel), -1); /* Build query */ select = makeNode(SelectStmt); @@ -906,8 +910,13 @@ DoCopy(const CopyStmt *stmt, const char *queryString, uint64 *processed) query = (Node *) select; - /* Close the handle to the relation as it is no longer needed. */ - heap_close(rel, (is_from ? RowExclusiveLock : AccessShareLock)); + /* + * Close the relation for now, but keep the lock on it to prevent + * changes between now and when we start the query-based COPY. + * + * We'll reopen it later as part of the query-based COPY. + */ + heap_close(rel, NoLock); rel = NULL; } } @@ -1407,25 +1416,25 @@ BeginCopy(bool is_from, plan = planner(query, 0, NULL); /* - * If we were passed in a relid, make sure we got the same one back - * after planning out the query. It's possible that it changed - * between when we checked the policies on the table and decided to - * use a query and now. 
+ * With row level security and a user using "COPY relation TO", we + * have to convert the "COPY relation TO" to a query-based COPY (eg: + * "COPY (SELECT * FROM relation) TO"), to allow the rewriter to add + * in any RLS clauses. + * + * When this happens, we are passed in the relid of the originally + * found relation (which we have locked). As the planner will look + * up the relation again, we double-check here to make sure it found + * the same one that we have locked. */ if (queryRelId != InvalidOid) { - Oid relid = linitial_oid(plan->relationOids); - /* - * There should only be one relationOid in this case, since we - * will only get here when we have changed the command for the - * user from a "COPY relation TO" to "COPY (SELECT * FROM - * relation) TO", to allow row level security policies to be - * applied. + * Note that with RLS involved there may be multiple relations, + * and while the one we need is almost certainly first, we don't + * make any guarantees of that in the planner, so check the whole + * list and make sure we find the original relation. */ - Assert(list_length(plan->relationOids) == 1); - - if (relid != queryRelId) + if (!list_member_oid(plan->relationOids, queryRelId)) ereport(ERROR, (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("relation referenced by COPY statement has changed"))); diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index e7c242cd22d48..72361e82a5f23 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -2672,7 +2672,7 @@ COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok 6,1679091c5a880faf6fb5e6087eb1b2dc 8,c9f0f895fb98ab9159f51fd0297e236d 10,d3d9446802a44259755d38e6d163e820 --- Check COPY TO as user without permissions.SET row_security TO OFF; +-- Check COPY TO as user without permissions. SET row_security TO OFF; SET SESSION AUTHORIZATION rls_regress_user2; SET row_security TO OFF; COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - insufficient to bypass rls @@ -2683,6 +2683,53 @@ ERROR: permission denied for relation copy_t SET row_security TO FORCE; COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - permission denied ERROR: permission denied for relation copy_t +-- Check COPY relation TO; keep it just one row to avoid reordering issues +RESET SESSION AUTHORIZATION; +SET row_security TO ON; +CREATE TABLE copy_rel_to (a integer, b text); +CREATE POLICY p1 ON copy_rel_to USING (a % 2 = 0); +ALTER TABLE copy_rel_to ENABLE ROW LEVEL SECURITY; +GRANT ALL ON copy_rel_to TO rls_regress_user1, rls_regress_exempt_user; +INSERT INTO copy_rel_to VALUES (1, md5('1')); +-- Check COPY TO as Superuser/owner. +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; +1,c4ca4238a0b923820dcc509a6f75849b +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; +1,c4ca4238a0b923820dcc509a6f75849b +SET row_security TO FORCE; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; +-- Check COPY TO as user with permissions. +SET SESSION AUTHORIZATION rls_regress_user1; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - insufficient to bypass rls +ERROR: insufficient privilege to bypass row security. 
+SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +SET row_security TO FORCE; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +-- Check COPY TO as user with permissions and BYPASSRLS +SET SESSION AUTHORIZATION rls_regress_exempt_user; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +1,c4ca4238a0b923820dcc509a6f75849b +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +SET row_security TO FORCE; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +-- Check COPY TO as user without permissions. SET row_security TO OFF; +SET SESSION AUTHORIZATION rls_regress_user2; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for relation copy_rel_to +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for relation copy_rel_to +SET row_security TO FORCE; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied +ERROR: permission denied for relation copy_rel_to -- Check COPY FROM as Superuser/owner. RESET SESSION AUTHORIZATION; SET row_security TO OFF; @@ -2731,6 +2778,7 @@ COPY copy_t FROM STDIN; --fail - permission denied. ERROR: permission denied for relation copy_t RESET SESSION AUTHORIZATION; DROP TABLE copy_t; +DROP TABLE copy_rel_to CASCADE; -- Check WHERE CURRENT OF SET SESSION AUTHORIZATION rls_regress_user0; CREATE TABLE current_check (currentid int, payload text, rlsuser text); diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index e86f8143142cb..f588fa2337738 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -1028,7 +1028,7 @@ COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok SET row_security TO FORCE; COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --ok --- Check COPY TO as user without permissions.SET row_security TO OFF; +-- Check COPY TO as user without permissions. SET row_security TO OFF; SET SESSION AUTHORIZATION rls_regress_user2; SET row_security TO OFF; COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - insufficient to bypass rls @@ -1037,6 +1037,54 @@ COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail SET row_security TO FORCE; COPY (SELECT * FROM copy_t ORDER BY a ASC) TO STDOUT WITH DELIMITER ','; --fail - permission denied +-- Check COPY relation TO; keep it just one row to avoid reordering issues +RESET SESSION AUTHORIZATION; +SET row_security TO ON; +CREATE TABLE copy_rel_to (a integer, b text); +CREATE POLICY p1 ON copy_rel_to USING (a % 2 = 0); + +ALTER TABLE copy_rel_to ENABLE ROW LEVEL SECURITY; + +GRANT ALL ON copy_rel_to TO rls_regress_user1, rls_regress_exempt_user; + +INSERT INTO copy_rel_to VALUES (1, md5('1')); + +-- Check COPY TO as Superuser/owner. +RESET SESSION AUTHORIZATION; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; +SET row_security TO FORCE; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; + +-- Check COPY TO as user with permissions. 
+SET SESSION AUTHORIZATION rls_regress_user1; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - insufficient to bypass rls +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +SET row_security TO FORCE; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok + +-- Check COPY TO as user with permissions and BYPASSRLS +SET SESSION AUTHORIZATION rls_regress_exempt_user; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok +SET row_security TO FORCE; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --ok + +-- Check COPY TO as user without permissions. SET row_security TO OFF; +SET SESSION AUTHORIZATION rls_regress_user2; +SET row_security TO OFF; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied +SET row_security TO ON; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied +SET row_security TO FORCE; +COPY copy_rel_to TO STDOUT WITH DELIMITER ','; --fail - permission denied + -- Check COPY FROM as Superuser/owner. RESET SESSION AUTHORIZATION; SET row_security TO OFF; @@ -1090,6 +1138,7 @@ COPY copy_t FROM STDIN; --fail - permission denied. RESET SESSION AUTHORIZATION; DROP TABLE copy_t; +DROP TABLE copy_rel_to CASCADE; -- Check WHERE CURRENT OF SET SESSION AUTHORIZATION rls_regress_user0; From 510aad31eaf2129d28ae3dbfc58f98775192ee94 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Mon, 27 Jul 2015 20:24:27 -0700 Subject: [PATCH 110/442] Fix pg_dump output of policies. pg_dump neglected to wrap parenthesis around USING and WITH CHECK expressions -- fixed. Reported by Noah Misch. --- src/bin/pg_dump/pg_dump.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bin/pg_dump/pg_dump.c b/src/bin/pg_dump/pg_dump.c index 0b5262a8a1a7d..ba1497fed49d7 100644 --- a/src/bin/pg_dump/pg_dump.c +++ b/src/bin/pg_dump/pg_dump.c @@ -3012,10 +3012,10 @@ dumpPolicy(Archive *fout, DumpOptions *dopt, PolicyInfo *polinfo) appendPQExpBuffer(query, " TO %s", polinfo->polroles); if (polinfo->polqual != NULL) - appendPQExpBuffer(query, " USING %s", polinfo->polqual); + appendPQExpBuffer(query, " USING (%s)", polinfo->polqual); if (polinfo->polwithcheck != NULL) - appendPQExpBuffer(query, " WITH CHECK %s", polinfo->polwithcheck); + appendPQExpBuffer(query, " WITH CHECK (%s)", polinfo->polwithcheck); appendPQExpBuffer(query, ";\n"); From beebb259d2a994cd2021a1506b7af1716b16f476 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Tue, 28 Jul 2015 09:05:46 +0300 Subject: [PATCH 111/442] Another attempt at fixing memory leak in xlogreader. max_block_id is also reset between reading records. Michael Paquier --- src/backend/access/transam/xlogreader.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/access/transam/xlogreader.c b/src/backend/access/transam/xlogreader.c index f1b209b1ad1a8..3b5d32a5815d5 100644 --- a/src/backend/access/transam/xlogreader.c +++ b/src/backend/access/transam/xlogreader.c @@ -124,7 +124,7 @@ XLogReaderFree(XLogReaderState *state) { int block_id; - for (block_id = 0; block_id <= state->max_block_id; block_id++) + for (block_id = 0; block_id <= XLR_MAX_BLOCK_ID; block_id++) { if (state->blocks[block_id].data) pfree(state->blocks[block_id].data); From fa4a4df93c8c28d5684cacb1677fbd13f58bb9f2 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Tue, 28 Jul 2015 12:22:21 -0400 Subject: [PATCH 112/442] Improve logging of TAP tests. 
Create a log file for each test run. Stdout and stderr of the test script, as well as any subprocesses run as part of the test, are redirected to the log file. This makes it a lot easier to debug test failures. Also print the test output (ok 12 - ... messages) to the log file, and the command line of any external programs executed with the system_or_bail and run_log functions. This makes it a lot easier to debug failing tests. Modify some of the pg_ctl and other command invocations to not use 'silent' or 'quiet' options, and don't redirect output to /dev/null, so that you get all the information in the log instead. In the passing, construct some command lines in a way that works if $tempdir contains quote-characters. I haven't systematically gone through all of them or tested that, so I don't know if this is enough to make that work. pg_rewind tests had a custom mechanism for creating a similar log file. Use the new generic facility instead. Michael Paquier and Heikki Linnakangas. This is a backpatch of Heikki's commit 1ea06203b82b98b5098808667f6ba652181ef5b2. --- src/Makefile.global.in | 1 + src/bin/pg_basebackup/t/010_pg_basebackup.pl | 2 +- .../pg_controldata/t/001_pg_controldata.pl | 2 +- src/bin/pg_ctl/t/001_start_stop.pl | 2 +- src/bin/pg_ctl/t/002_status.pl | 4 +- src/bin/pg_rewind/.gitignore | 1 - src/bin/pg_rewind/Makefile | 2 +- src/bin/pg_rewind/RewindTest.pm | 85 +++++++---------- src/bin/pg_rewind/t/001_basic.pl | 1 - src/bin/pg_rewind/t/002_databases.pl | 1 - src/bin/pg_rewind/t/003_extrafiles.pl | 1 - src/test/perl/SimpleTee.pm | 27 ++++++ src/test/perl/TestLib.pm | 93 +++++++++++++++---- src/test/ssl/ServerSetup.pm | 6 +- 14 files changed, 143 insertions(+), 85 deletions(-) create mode 100644 src/test/perl/SimpleTee.pm diff --git a/src/Makefile.global.in b/src/Makefile.global.in index 46331194a3a43..e2f7211160df1 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -338,6 +338,7 @@ cd $(srcdir) && TESTDIR='$(CURDIR)' PATH="$(bindir):$$PATH" PGPORT='6$(DEF_PGPOR endef define prove_check +rm -rf $(srcdir)/tmp_check/log cd $(srcdir) && TESTDIR='$(CURDIR)' $(with_temp_install) PGPORT='6$(DEF_PGPORT)' top_builddir='$(CURDIR)/$(top_builddir)' $(PROVE) $(PG_PROVE_FLAGS) $(PROVE_FLAGS) t/*.pl endef diff --git a/src/bin/pg_basebackup/t/010_pg_basebackup.pl b/src/bin/pg_basebackup/t/010_pg_basebackup.pl index c8c9250b3845f..e47c3a0bb7cdd 100644 --- a/src/bin/pg_basebackup/t/010_pg_basebackup.pl +++ b/src/bin/pg_basebackup/t/010_pg_basebackup.pl @@ -30,7 +30,7 @@ print HBA "host replication all 127.0.0.1/32 trust\n"; print HBA "host replication all ::1/128 trust\n"; close HBA; -system_or_bail 'pg_ctl', '-s', '-D', "$tempdir/pgdata", 'reload'; +system_or_bail 'pg_ctl', '-D', "$tempdir/pgdata", 'reload'; command_fails( [ 'pg_basebackup', '-D', "$tempdir/backup" ], diff --git a/src/bin/pg_controldata/t/001_pg_controldata.pl b/src/bin/pg_controldata/t/001_pg_controldata.pl index a4180e7ed18a5..e36fa2d45d99c 100644 --- a/src/bin/pg_controldata/t/001_pg_controldata.pl +++ b/src/bin/pg_controldata/t/001_pg_controldata.pl @@ -11,6 +11,6 @@ command_fails(['pg_controldata'], 'pg_controldata without arguments fails'); command_fails([ 'pg_controldata', 'nonexistent' ], 'pg_controldata with nonexistent directory fails'); -system_or_bail "initdb -D '$tempdir'/data -A trust >/dev/null"; +system_or_bail 'initdb', '-D', "$tempdir/data", '-A', 'trust'; command_like([ 'pg_controldata', "$tempdir/data" ], qr/checkpoint/, 'pg_controldata produces output'); diff --git 
a/src/bin/pg_ctl/t/001_start_stop.pl b/src/bin/pg_ctl/t/001_start_stop.pl index 6c9ec5c717af6..bcceb57d7bcdf 100644 --- a/src/bin/pg_ctl/t/001_start_stop.pl +++ b/src/bin/pg_ctl/t/001_start_stop.pl @@ -36,4 +36,4 @@ command_ok([ 'pg_ctl', 'restart', '-D', "$tempdir/data", '-w', '-m', 'fast' ], 'pg_ctl restart with server running'); -system_or_bail 'pg_ctl', '-s', 'stop', '-D', "$tempdir/data", '-m', 'fast'; +system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/data", '-m', 'fast'; diff --git a/src/bin/pg_ctl/t/002_status.pl b/src/bin/pg_ctl/t/002_status.pl index 055885495ab14..ec0a2a786e09c 100644 --- a/src/bin/pg_ctl/t/002_status.pl +++ b/src/bin/pg_ctl/t/002_status.pl @@ -18,9 +18,9 @@ command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/data" ], 3, 'pg_ctl status with server not running'); -system_or_bail 'pg_ctl', '-s', '-l', "$tempdir/logfile", '-D', +system_or_bail 'pg_ctl', '-l', "$tempdir/logfile", '-D', "$tempdir/data", '-w', 'start'; command_exit_is([ 'pg_ctl', 'status', '-D', "$tempdir/data" ], 0, 'pg_ctl status with server running'); -system_or_bail 'pg_ctl', '-s', 'stop', '-D', "$tempdir/data", '-m', 'fast'; +system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/data", '-m', 'fast'; diff --git a/src/bin/pg_rewind/.gitignore b/src/bin/pg_rewind/.gitignore index 9ade7efcce4e0..79ddca3eec901 100644 --- a/src/bin/pg_rewind/.gitignore +++ b/src/bin/pg_rewind/.gitignore @@ -4,4 +4,3 @@ # Generated by test suite /tmp_check/ -/regress_log/ diff --git a/src/bin/pg_rewind/Makefile b/src/bin/pg_rewind/Makefile index 7d607157e3883..92b5d20afa707 100644 --- a/src/bin/pg_rewind/Makefile +++ b/src/bin/pg_rewind/Makefile @@ -45,7 +45,7 @@ uninstall: clean distclean maintainer-clean: rm -f pg_rewind$(X) $(OBJS) xlogreader.c - rm -rf tmp_check regress_log + rm -rf tmp_check check: $(prove_check) diff --git a/src/bin/pg_rewind/RewindTest.pm b/src/bin/pg_rewind/RewindTest.pm index 5219ec967ae94..e57b41734df23 100644 --- a/src/bin/pg_rewind/RewindTest.pm +++ b/src/bin/pg_rewind/RewindTest.pm @@ -79,7 +79,6 @@ mkdir "regress_log"; my $port_master = $ENV{PGPORT}; my $port_standby = $port_master + 1; -my $log_path; my $tempdir_short; my $connstr_master = "port=$port_master"; @@ -91,14 +90,16 @@ sub master_psql { my $cmd = shift; - system_or_bail("psql -q --no-psqlrc -d $connstr_master -c \"$cmd\""); + system_or_bail 'psql', '-q', '--no-psqlrc', '-d', $connstr_master, + '-c', "$cmd"; } sub standby_psql { my $cmd = shift; - system_or_bail("psql -q --no-psqlrc -d $connstr_standby -c \"$cmd\""); + system_or_bail 'psql', '-q', '--no-psqlrc', '-d', $connstr_standby, + '-c', "$cmd"; } # Run a query against the master, and check that the output matches what's @@ -171,16 +172,6 @@ sub append_to_file close $fh; } -sub init_rewind_test -{ - my $testname = shift; - my $test_mode = shift; - - $log_path = "regress_log/pg_rewind_log_${testname}_${test_mode}"; - - remove_tree $log_path; -} - sub setup_cluster { $tempdir_short = tempdir_short; @@ -209,9 +200,10 @@ max_connections = 10 local replication all trust )); - system_or_bail( -"pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1" - ); + system_or_bail('pg_ctl' , '-w', + '-D' , $test_master_datadir, + "-o", "-k $tempdir_short --listen-addresses='' -p $port_master", + 'start'); #### Now run the test-specific parts to initialize the master before setting # up standby @@ -225,8 +217,8 @@ sub create_standby remove_tree $test_standby_datadir; # Base backup is taken with xlog files included - 
system_or_bail( -"pg_basebackup -D $test_standby_datadir -p $port_master -x >>$log_path 2>&1"); + system_or_bail('pg_basebackup', '-D', $test_standby_datadir, + '-p', $port_master, '-x'); append_to_file( "$test_standby_datadir/recovery.conf", qq( primary_conninfo='$connstr_master application_name=rewind_standby' @@ -235,9 +227,9 @@ recovery_target_timeline='latest' )); # Start standby - system_or_bail( -"pg_ctl -w -D $test_standby_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_standby\" start >>$log_path 2>&1" - ); + system_or_bail('pg_ctl', '-w', '-D', $test_standby_datadir, + '-o', "-k $tempdir_short --listen-addresses='' -p $port_standby", + 'start'); # Wait until the standby has caught up with the primary, by polling # pg_stat_replication. @@ -255,8 +247,7 @@ sub promote_standby # Now promote slave and insert some new data on master, this will put # the master out-of-sync with the standby. Wait until the standby is # out of recovery mode, and is ready to accept read-write connections. - system_or_bail( - "pg_ctl -w -D $test_standby_datadir promote >>$log_path 2>&1"); + system_or_bail('pg_ctl', '-w', '-D', $test_standby_datadir, 'promote'); poll_query_until("SELECT NOT pg_is_in_recovery()", $connstr_standby) or die "Timed out while waiting for promotion of standby"; @@ -274,8 +265,7 @@ sub run_pg_rewind my $test_mode = shift; # Stop the master and be ready to perform the rewind - system_or_bail( - "pg_ctl -w -D $test_master_datadir stop -m fast >>$log_path 2>&1"); + system_or_bail('pg_ctl', '-D', $test_master_datadir, 'stop', '-m', 'fast'); # At this point, the rewind processing is ready to run. # We now have a very simple scenario with a few diverged WAL record. @@ -291,35 +281,24 @@ sub run_pg_rewind # Now run pg_rewind if ($test_mode eq "local") { - # Do rewind using a local pgdata as source # Stop the master and be ready to perform the rewind - system_or_bail( - "pg_ctl -w -D $test_standby_datadir stop -m fast >>$log_path 2>&1" - ); - my $result = run( - [ 'pg_rewind', - "--debug", - "--source-pgdata=$test_standby_datadir", - "--target-pgdata=$test_master_datadir" ], - '>>', - $log_path, - '2>&1'); - ok($result, 'pg_rewind local'); + system_or_bail('pg_ctl', '-D', $test_standby_datadir, 'stop', + '-m', 'fast'); + command_ok(['pg_rewind', + "--debug", + "--source-pgdata=$test_standby_datadir", + "--target-pgdata=$test_master_datadir"], + 'pg_rewind local'); } elsif ($test_mode eq "remote") { - # Do rewind using a remote connection as source - my $result = run( - [ 'pg_rewind', - "--source-server", - "port=$port_standby dbname=postgres", - "--target-pgdata=$test_master_datadir" ], - '>>', - $log_path, - '2>&1'); - ok($result, 'pg_rewind remote'); + command_ok(['pg_rewind', + "--source-server", + "port=$port_standby dbname=postgres", + "--target-pgdata=$test_master_datadir"], + 'pg_rewind remote'); } else { @@ -342,9 +321,9 @@ recovery_target_timeline='latest' )); # Restart the master to check that rewind went correctly - system_or_bail( -"pg_ctl -w -D $test_master_datadir -o \"-k $tempdir_short --listen-addresses='' -p $port_master\" start >>$log_path 2>&1" - ); + system_or_bail('pg_ctl', '-w', '-D', $test_master_datadir, + '-o', "-k $tempdir_short --listen-addresses='' -p $port_master", + 'start'); #### Now run the test-specific parts to check the result } @@ -355,12 +334,12 @@ sub clean_rewind_test if ($test_master_datadir) { system - "pg_ctl -D $test_master_datadir -s -m immediate stop 2> /dev/null"; + 'pg_ctl', '-D', $test_master_datadir, '-m', 'immediate', 'stop'; 
} if ($test_standby_datadir) { system - "pg_ctl -D $test_standby_datadir -s -m immediate stop 2> /dev/null"; + 'pg_ctl', '-D', $test_standby_datadir, '-m', 'immediate', 'stop'; } } diff --git a/src/bin/pg_rewind/t/001_basic.pl b/src/bin/pg_rewind/t/001_basic.pl index f60368bd307e7..be7d887bb7c97 100644 --- a/src/bin/pg_rewind/t/001_basic.pl +++ b/src/bin/pg_rewind/t/001_basic.pl @@ -9,7 +9,6 @@ sub run_test { my $test_mode = shift; - RewindTest::init_rewind_test('basic', $test_mode); RewindTest::setup_cluster(); # Create a test table and insert a row in master. diff --git a/src/bin/pg_rewind/t/002_databases.pl b/src/bin/pg_rewind/t/002_databases.pl index 7564fa98a5377..b0b007a763ad2 100644 --- a/src/bin/pg_rewind/t/002_databases.pl +++ b/src/bin/pg_rewind/t/002_databases.pl @@ -9,7 +9,6 @@ sub run_test { my $test_mode = shift; - RewindTest::init_rewind_test('databases', $test_mode); RewindTest::setup_cluster(); # Create a database in master. diff --git a/src/bin/pg_rewind/t/003_extrafiles.pl b/src/bin/pg_rewind/t/003_extrafiles.pl index 9a952685be91a..0cd0ac4d5677b 100644 --- a/src/bin/pg_rewind/t/003_extrafiles.pl +++ b/src/bin/pg_rewind/t/003_extrafiles.pl @@ -14,7 +14,6 @@ sub run_test { my $test_mode = shift; - RewindTest::init_rewind_test('extrafiles', $test_mode); RewindTest::setup_cluster(); my $test_master_datadir = $RewindTest::test_master_datadir; diff --git a/src/test/perl/SimpleTee.pm b/src/test/perl/SimpleTee.pm new file mode 100644 index 0000000000000..8d31a4013c878 --- /dev/null +++ b/src/test/perl/SimpleTee.pm @@ -0,0 +1,27 @@ +# A simple 'tee' implementation, using perl tie. +# +# Whenever you print to the handle, it gets forwarded to a list of +# handles. The list of output filehandles is passed to the constructor. +# +# This is similar to IO::Tee, but only used for output. Only the PRINT +# method is currently implemented; that's all we need. We don't want to +# depend on IO::Tee just for this. + +package SimpleTee; +use strict; + +sub TIEHANDLE { + my $self = shift; + bless \@_, $self; +} + +sub PRINT { + my $self = shift; + my $ok = 1; + for my $fh (@$self) { + print $fh @_ or $ok = 0; + } + return $ok; +} + +1; diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index ef42366888e5c..0193d575ff78b 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -12,6 +12,8 @@ our @EXPORT = qw( restart_test_server psql system_or_bail + system_log + run_log command_ok command_fails @@ -24,11 +26,47 @@ our @EXPORT = qw( ); use Cwd; +use File::Basename; use File::Spec; use File::Temp (); use IPC::Run qw(run start); + +use SimpleTee; + use Test::More; +# Open log file. For each test, the log file name uses the name of the +# file launching this module, without the .pl suffix. +my $log_path = 'tmp_check/log'; +mkdir 'tmp_check'; +mkdir $log_path; +my $test_logfile = basename($0); +$test_logfile =~ s/\.[^.]+$//; +$test_logfile = "$log_path/regress_log_$test_logfile"; +open TESTLOG, '>', $test_logfile or die "Cannot open STDOUT to logfile: $!"; + +# Hijack STDOUT and STDERR to the log file +open(ORIG_STDOUT, ">&STDOUT"); +open(ORIG_STDERR, ">&STDERR"); +open(STDOUT, ">&TESTLOG"); +open(STDERR, ">&TESTLOG"); + +# The test output (ok ...) needs to be printed to the original STDOUT so +# that the 'prove' program can parse it, and display it to the user in +# real time. But also copy it to the log file, to provide more context +# in the log. 
+my $builder = Test::More->builder; +my $fh = $builder->output; +tie *$fh, "SimpleTee", *ORIG_STDOUT, *TESTLOG; +$fh = $builder->failure_output; +tie *$fh, "SimpleTee", *ORIG_STDERR, *TESTLOG; + +# Enable auto-flushing for all the file handles. Stderr and stdout are +# redirected to the same file, and buffering causes the lines to appear +# in the log in confusing order. +autoflush STDOUT 1; +autoflush STDERR 1; +autoflush TESTLOG 1; # Set to untranslated messages, to be able to compare program output # with expected strings. @@ -77,7 +115,7 @@ sub tempdir_short sub standard_initdb { my $pgdata = shift; - system_or_bail("initdb -D '$pgdata' -A trust -N >/dev/null"); + system_or_bail('initdb', '-D', "$pgdata", '-A' , 'trust', '-N'); system_or_bail("$ENV{top_builddir}/src/test/regress/pg_regress", '--config-auth', $pgdata); } @@ -91,14 +129,15 @@ sub start_test_server my $tempdir_short = tempdir_short; + print("### Starting test server in $tempdir\n"); standard_initdb "$tempdir/pgdata"; - $ret = system 'pg_ctl', '-D', "$tempdir/pgdata", '-s', '-w', '-l', + $ret = system_log('pg_ctl', '-D', "$tempdir/pgdata", '-w', '-l', "$tempdir/logfile", '-o', -"--fsync=off -k $tempdir_short --listen-addresses='' --log-statement=all", - 'start'; - +"--fsync=off -k \"$tempdir_short\" --listen-addresses='' --log-statement=all", + 'start'); if ($ret != 0) { + print "# pg_ctl failed; logfile:\n"; system('cat', "$tempdir/logfile"); BAIL_OUT("pg_ctl failed"); } @@ -110,28 +149,45 @@ sub start_test_server sub restart_test_server { - system 'pg_ctl', '-s', '-D', $test_server_datadir, '-w', '-l', - $test_server_logfile, 'restart'; + print("### Restarting test server\n"); + system_log('pg_ctl', '-D', $test_server_datadir, '-w', '-l', + $test_server_logfile, 'restart'); } END { if ($test_server_datadir) { - system 'pg_ctl', '-D', $test_server_datadir, '-s', '-w', '-m', - 'immediate', 'stop'; + system_log('pg_ctl', '-D', $test_server_datadir, '-m', + 'immediate', 'stop'); } } sub psql { my ($dbname, $sql) = @_; + print("# Running SQL command: $sql\n"); run [ 'psql', '-X', '-q', '-d', $dbname, '-f', '-' ], '<', \$sql or die; } sub system_or_bail { - system(@_) == 0 or BAIL_OUT("system @_ failed: $?"); + if (system_log(@_) != 0) + { + BAIL_OUT("system $_[0] failed: $?"); + } +} + +sub system_log +{ + print("# Running: " . join(" ", @_) ."\n"); + return system(@_); +} + +sub run_log +{ + print("# Running: " . join(" ", @{$_[0]}) ."\n"); + return run (@_); } @@ -143,24 +199,22 @@ sub system_or_bail sub command_ok { my ($cmd, $test_name) = @_; - my $result = run $cmd, '>', File::Spec->devnull(), '2>', - File::Spec->devnull(); + my $result = run_log($cmd); ok($result, $test_name); } sub command_fails { my ($cmd, $test_name) = @_; - my $result = run $cmd, '>', File::Spec->devnull(), '2>', - File::Spec->devnull(); + my $result = run_log($cmd); ok(!$result, $test_name); } sub command_exit_is { my ($cmd, $expected, $test_name) = @_; - my $h = start $cmd, '>', File::Spec->devnull(), '2>', - File::Spec->devnull(); + print("# Running: " . 
join(" ", @{$cmd}) ."\n"); + my $h = start $cmd; $h->finish(); is($h->result(0), $expected, $test_name); } @@ -169,6 +223,7 @@ sub program_help_ok { my ($cmd) = @_; my ($stdout, $stderr); + print("# Running: $cmd --help\n"); my $result = run [ $cmd, '--help' ], '>', \$stdout, '2>', \$stderr; ok($result, "$cmd --help exit code 0"); isnt($stdout, '', "$cmd --help goes to stdout"); @@ -179,6 +234,7 @@ sub program_version_ok { my ($cmd) = @_; my ($stdout, $stderr); + print("# Running: $cmd --version\n"); my $result = run [ $cmd, '--version' ], '>', \$stdout, '2>', \$stderr; ok($result, "$cmd --version exit code 0"); isnt($stdout, '', "$cmd --version goes to stdout"); @@ -189,6 +245,7 @@ sub program_options_handling_ok { my ($cmd) = @_; my ($stdout, $stderr); + print("# Running: $cmd --not-a-valid-option\n"); my $result = run [ $cmd, '--not-a-valid-option' ], '>', \$stdout, '2>', \$stderr; ok(!$result, "$cmd with invalid option nonzero exit code"); @@ -199,6 +256,7 @@ sub command_like { my ($cmd, $expected_stdout, $test_name) = @_; my ($stdout, $stderr); + print("# Running: " . join(" ", @{$cmd}) . "\n"); my $result = run $cmd, '>', \$stdout, '2>', \$stderr; ok($result, "@$cmd exit code 0"); is($stderr, '', "@$cmd no stderr"); @@ -208,9 +266,8 @@ sub command_like sub issues_sql_like { my ($cmd, $expected_sql, $test_name) = @_; - my ($stdout, $stderr); truncate $test_server_logfile, 0; - my $result = run $cmd, '>', \$stdout, '2>', \$stderr; + my $result = run_log($cmd); ok($result, "@$cmd exit code 0"); my $log = `cat '$test_server_logfile'`; like($log, $expected_sql, "$test_name: SQL found in server log"); diff --git a/src/test/ssl/ServerSetup.pm b/src/test/ssl/ServerSetup.pm index bbff99a3bdb8c..8c1b517575500 100644 --- a/src/test/ssl/ServerSetup.pm +++ b/src/test/ssl/ServerSetup.pm @@ -125,8 +125,6 @@ sub switch_server_cert # restart_test_server() because that overrides listen_addresses to only all # Unix domain socket connections. - system_or_bail 'pg_ctl', 'stop', '-s', '-D', "$tempdir/pgdata", '-w'; - system_or_bail 'pg_ctl', 'start', '-s', '-D', "$tempdir/pgdata", '-w', - '-l', - "$tempdir/logfile"; + system_or_bail 'pg_ctl', 'stop', '-D', "$tempdir/pgdata"; + system_or_bail 'pg_ctl', 'start', '-D', "$tempdir/pgdata", '-w'; } From f7cdc518e613b08831ccd798257df3ba3556ea21 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 28 Jul 2015 13:20:39 -0400 Subject: [PATCH 113/442] Remove an unsafe Assert, and explain join_clause_is_movable_into() better. join_clause_is_movable_into() is approximate, in the sense that it might sometimes return "false" when actually it would be valid to push the given join clause down to the specified level. This is okay ... but there was an Assert in get_joinrel_parampathinfo() that's only safe if the answers are always exact. Comment out the Assert, and add a bunch of commentary to clarify what's going on. Per fuzz testing by Andreas Seltenreich. The added regression test is a pretty silly query, but it's based on his crasher example. Back-patch to 9.2 where the faulty logic was introduced. 
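The hazard generalizes: a predicate that is approximate in the safe direction may answer "no" for inputs that are actually acceptable, so callers may use it to skip work but must never Assert that it answers "yes" whenever the exact condition holds. A toy standalone illustration, with no planner data structures involved:

    #include <stdio.h>

    /* Exact condition: n is a multiple of 3.  Conservative test: recognizes only
     * multiples of 6, so it never wrongly says "yes" but sometimes says "no". */
    static int exact_ok(int n)        { return n % 3 == 0; }
    static int conservative_ok(int n) { return n % 6 == 0; }

    int
    main(void)
    {
        for (int n = 3; n <= 12; n += 3)
        {
            if (conservative_ok(n))
                printf("%2d: clause pushed down\n", n);
            else
                printf("%2d: not pushed down (safe, merely suboptimal)\n", n);

            /* assert(conservative_ok(n)) would fail for n = 3 and n = 9 even
             * though exact_ok(n) holds for every n in this loop. */
            (void) exact_ok(n);
        }
        return 0;
    }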
--- src/backend/optimizer/util/relnode.c | 9 +++++ src/backend/optimizer/util/restrictinfo.c | 39 ++++++++++++++---- src/test/regress/expected/join.out | 48 +++++++++++++++++++++++ src/test/regress/sql/join.sql | 26 ++++++++++++ 4 files changed, 115 insertions(+), 7 deletions(-) diff --git a/src/backend/optimizer/util/relnode.c b/src/backend/optimizer/util/relnode.c index be2ef3becfe0a..68a93a1a5bdf9 100644 --- a/src/backend/optimizer/util/relnode.c +++ b/src/backend/optimizer/util/relnode.c @@ -982,9 +982,18 @@ get_joinrel_parampathinfo(PlannerInfo *root, RelOptInfo *joinrel, { RestrictInfo *rinfo = (RestrictInfo *) lfirst(lc); + /* + * In principle, join_clause_is_movable_into() should accept anything + * returned by generate_join_implied_equalities(); but because its + * analysis is only approximate, sometimes it doesn't. So we + * currently cannot use this Assert; instead just assume it's okay to + * apply the joinclause at this level. + */ +#ifdef NOT_USED Assert(join_clause_is_movable_into(rinfo, joinrel->relids, join_and_req)); +#endif if (!join_clause_is_movable_into(rinfo, outer_path->parent->relids, outer_and_req) && diff --git a/src/backend/optimizer/util/restrictinfo.c b/src/backend/optimizer/util/restrictinfo.c index e5f7836517584..65499902f6c1d 100644 --- a/src/backend/optimizer/util/restrictinfo.c +++ b/src/backend/optimizer/util/restrictinfo.c @@ -464,10 +464,9 @@ extract_actual_join_clauses(List *restrictinfo_list, * outer join, as that would change the results (rows would be suppressed * rather than being null-extended). * - * Also the target relation must not be in the clause's nullable_relids, i.e., - * there must not be an outer join below the clause that would null the Vars - * coming from the target relation. Otherwise the clause might give results - * different from what it would give at its normal semantic level. + * Also there must not be an outer join below the clause that would null the + * Vars coming from the target relation. Otherwise the clause might give + * results different from what it would give at its normal semantic level. * * Also, the join clause must not use any relations that have LATERAL * references to the target relation, since we could not put such rels on @@ -516,10 +515,31 @@ join_clause_is_movable_to(RestrictInfo *rinfo, RelOptInfo *baserel) * not pushing the clause into its outer-join outer side, nor down into * a lower outer join's inner side. * + * The check about pushing a clause down into a lower outer join's inner side + * is only approximate; it sometimes returns "false" when actually it would + * be safe to use the clause here because we're still above the outer join + * in question. This is okay as long as the answers at different join levels + * are consistent: it just means we might sometimes fail to push a clause as + * far down as it could safely be pushed. It's unclear whether it would be + * worthwhile to do this more precisely. (But if it's ever fixed to be + * exactly accurate, there's an Assert in get_joinrel_parampathinfo() that + * should be re-enabled.) + * * There's no check here equivalent to join_clause_is_movable_to's test on * lateral_referencers. We assume the caller wouldn't be inquiring unless * it'd verified that the proposed outer rels don't have lateral references - * to the current rel(s). + * to the current rel(s). 
(If we are considering join paths with the outer + * rels on the outside and the current rels on the inside, then this should + * have been checked at the outset of such consideration; see join_is_legal + * and the path parameterization checks in joinpath.c.) On the other hand, + * in join_clause_is_movable_to we are asking whether the clause could be + * moved for some valid set of outer rels, so we don't have the benefit of + * relying on prior checks for lateral-reference validity. + * + * Note: if this returns true, it means that the clause could be moved to + * this join relation, but that doesn't mean that this is the lowest join + * it could be moved to. Caller may need to make additional calls to verify + * that this doesn't succeed on either of the inputs of a proposed join. * * Note: get_joinrel_parampathinfo depends on the fact that if * current_and_outer is NULL, this function will always return false @@ -534,7 +554,7 @@ join_clause_is_movable_into(RestrictInfo *rinfo, if (!bms_is_subset(rinfo->clause_relids, current_and_outer)) return false; - /* Clause must physically reference target rel(s) */ + /* Clause must physically reference at least one target rel */ if (!bms_overlap(currentrelids, rinfo->clause_relids)) return false; @@ -542,7 +562,12 @@ join_clause_is_movable_into(RestrictInfo *rinfo, if (bms_overlap(currentrelids, rinfo->outer_relids)) return false; - /* Target rel(s) must not be nullable below the clause */ + /* + * Target rel(s) must not be nullable below the clause. This is + * approximate, in the safe direction, because the current join might be + * above the join where the nulling would happen, in which case the clause + * would work correctly here. But we don't have enough info to be sure. + */ if (bms_overlap(currentrelids, rinfo->nullable_relids)) return false; diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index 4ce01cbcd5b6b..1afd0c328b5b8 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -2218,6 +2218,54 @@ order by 1, 2; 4567890123456789 | 4567890123456789 | 123 | 4567890123456789 | 123 (5 rows) +-- +-- regression test: check a case where join_clause_is_movable_into() gives +-- an imprecise result +-- +analyze pg_enum; +explain (costs off) +select anname, outname, enumtypid +from + (select pa.proname as anname, coalesce(po.proname, typname) as outname + from pg_type t + left join pg_proc po on po.oid = t.typoutput + join pg_proc pa on pa.oid = t.typanalyze) ss, + pg_enum, + pg_type t2 +where anname = enumlabel and outname = t2.typname and enumtypid = t2.oid; + QUERY PLAN +----------------------------------------------------------------------- + Nested Loop + Join Filter: (pg_enum.enumtypid = t2.oid) + -> Nested Loop Left Join + -> Hash Join + Hash Cond: ((t.typanalyze)::oid = pa.oid) + -> Seq Scan on pg_type t + -> Hash + -> Hash Join + Hash Cond: (pa.proname = pg_enum.enumlabel) + -> Seq Scan on pg_proc pa + -> Hash + -> Seq Scan on pg_enum + -> Index Scan using pg_proc_oid_index on pg_proc po + Index Cond: (oid = (t.typoutput)::oid) + -> Index Scan using pg_type_typname_nsp_index on pg_type t2 + Index Cond: (typname = COALESCE(po.proname, t.typname)) +(16 rows) + +select anname, outname, enumtypid +from + (select pa.proname as anname, coalesce(po.proname, typname) as outname + from pg_type t + left join pg_proc po on po.oid = t.typoutput + join pg_proc pa on pa.oid = t.typanalyze) ss, + pg_enum, + pg_type t2 +where anname = enumlabel and outname = t2.typname and enumtypid 
= t2.oid; + anname | outname | enumtypid +--------+---------+----------- +(0 rows) + -- -- Clean up -- diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index 3a71dbf4dffe3..d34cefac5a18f 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -377,6 +377,32 @@ select * from int8_tbl i1 left join (int8_tbl i2 join (select 123 as x) ss on i2.q1 = x) on i1.q2 = i2.q2 order by 1, 2; +-- +-- regression test: check a case where join_clause_is_movable_into() gives +-- an imprecise result +-- +analyze pg_enum; +explain (costs off) +select anname, outname, enumtypid +from + (select pa.proname as anname, coalesce(po.proname, typname) as outname + from pg_type t + left join pg_proc po on po.oid = t.typoutput + join pg_proc pa on pa.oid = t.typanalyze) ss, + pg_enum, + pg_type t2 +where anname = enumlabel and outname = t2.typname and enumtypid = t2.oid; + +select anname, outname, enumtypid +from + (select pa.proname as anname, coalesce(po.proname, typname) as outname + from pg_type t + left join pg_proc po on po.oid = t.typoutput + join pg_proc pa on pa.oid = t.typanalyze) ss, + pg_enum, + pg_type t2 +where anname = enumlabel and outname = t2.typname and enumtypid = t2.oid; + -- -- Clean up From da7db24cc22e3f0d96cfda134f3ed194279bb513 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Tue, 28 Jul 2015 16:04:05 -0400 Subject: [PATCH 114/442] Make tap tests store postmaster logs and handle vpaths correctly Given this it is possible that the buildfarm animals running these tests will be able to capture adequate logging to allow diagnosis of failures. --- src/Makefile.global.in | 2 +- src/bin/pg_rewind/RewindTest.pm | 6 +++--- src/test/perl/TestLib.pm | 15 ++++++++++----- 3 files changed, 14 insertions(+), 9 deletions(-) diff --git a/src/Makefile.global.in b/src/Makefile.global.in index e2f7211160df1..e87885bb2b039 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -338,7 +338,7 @@ cd $(srcdir) && TESTDIR='$(CURDIR)' PATH="$(bindir):$$PATH" PGPORT='6$(DEF_PGPOR endef define prove_check -rm -rf $(srcdir)/tmp_check/log +rm -rf $(CURDIR)/tmp_check/log cd $(srcdir) && TESTDIR='$(CURDIR)' $(with_temp_install) PGPORT='6$(DEF_PGPORT)' top_builddir='$(CURDIR)/$(top_builddir)' $(PROVE) $(PG_PROVE_FLAGS) $(PROVE_FLAGS) t/*.pl endef diff --git a/src/bin/pg_rewind/RewindTest.pm b/src/bin/pg_rewind/RewindTest.pm index e57b41734df23..1045caa09adb5 100644 --- a/src/bin/pg_rewind/RewindTest.pm +++ b/src/bin/pg_rewind/RewindTest.pm @@ -72,9 +72,6 @@ our $test_standby_datadir = "$testroot/data_standby"; mkdir $testroot; -# Log files are created here -mkdir "regress_log"; - # Define non-conflicting ports for both nodes. 
my $port_master = $ENV{PGPORT}; my $port_standby = $port_master + 1; @@ -202,6 +199,7 @@ local replication all trust system_or_bail('pg_ctl' , '-w', '-D' , $test_master_datadir, + '-l', "$log_path/master.log", "-o", "-k $tempdir_short --listen-addresses='' -p $port_master", 'start'); @@ -228,6 +226,7 @@ recovery_target_timeline='latest' # Start standby system_or_bail('pg_ctl', '-w', '-D', $test_standby_datadir, + '-l', "$log_path/standby.log", '-o', "-k $tempdir_short --listen-addresses='' -p $port_standby", 'start'); @@ -322,6 +321,7 @@ recovery_target_timeline='latest' # Restart the master to check that rewind went correctly system_or_bail('pg_ctl', '-w', '-D', $test_master_datadir, + '-l', "$log_path/master.log", '-o', "-k $tempdir_short --listen-addresses='' -p $port_master", 'start'); diff --git a/src/test/perl/TestLib.pm b/src/test/perl/TestLib.pm index 0193d575ff78b..5ef95f0246ebe 100644 --- a/src/test/perl/TestLib.pm +++ b/src/test/perl/TestLib.pm @@ -23,6 +23,9 @@ our @EXPORT = qw( program_options_handling_ok command_like issues_sql_like + + $tmp_check + $log_path ); use Cwd; @@ -37,8 +40,10 @@ use Test::More; # Open log file. For each test, the log file name uses the name of the # file launching this module, without the .pl suffix. -my $log_path = 'tmp_check/log'; -mkdir 'tmp_check'; +our ($tmp_check, $log_path); +$tmp_check = $ENV{TESTDIR} ? "$ENV{TESTDIR}/tmp_check" : "tmp_check"; +$log_path = "$tmp_check/log"; +mkdir $tmp_check; mkdir $log_path; my $test_logfile = basename($0); $test_logfile =~ s/\.[^.]+$//; @@ -132,19 +137,19 @@ sub start_test_server print("### Starting test server in $tempdir\n"); standard_initdb "$tempdir/pgdata"; $ret = system_log('pg_ctl', '-D', "$tempdir/pgdata", '-w', '-l', - "$tempdir/logfile", '-o', + "$log_path/postmaster.log", '-o', "--fsync=off -k \"$tempdir_short\" --listen-addresses='' --log-statement=all", 'start'); if ($ret != 0) { print "# pg_ctl failed; logfile:\n"; - system('cat', "$tempdir/logfile"); + system('cat', "$log_path/postmaster.log"); BAIL_OUT("pg_ctl failed"); } $ENV{PGHOST} = $tempdir_short; $test_server_datadir = "$tempdir/pgdata"; - $test_server_logfile = "$tempdir/logfile"; + $test_server_logfile = "$log_path/postmaster.log"; } sub restart_test_server From 6087d952b31fce56642e1c63cfed243aeb4d09bd Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Tue, 28 Jul 2015 21:39:32 +0200 Subject: [PATCH 115/442] Remove ssl renegotiation support. While postgres' use of SSL renegotiation is a good idea in theory, it turned out to not work well in practice. The specification and openssl's implementation of it have lead to several security issues. Postgres' use of renegotiation also had its share of bugs. Additionally OpenSSL has a bunch of bugs around renegotiation, reported and open for years, that regularly lead to connections breaking with obscure error messages. We tried increasingly complex workarounds to get around these bugs, but we didn't find anything complete. Since these connection breakages often lead to hard to debug problems, e.g. spuriously failing base backups and significant latency spikes when synchronous replication is used, we have decided to change the default setting for ssl renegotiation to 0 (disabled) in the released backbranches and remove it entirely in 9.5 and master. 
Author: Andres Freund Discussion: 20150624144148.GQ4797@alap3.anarazel.de Backpatch: 9.5 and master, 9.0-9.4 get a different patch --- doc/src/sgml/config.sgml | 29 -------- src/backend/libpq/be-secure-openssl.c | 70 +------------------ src/backend/libpq/be-secure.c | 7 -- src/backend/utils/misc/guc.c | 11 --- src/backend/utils/misc/postgresql.conf.sample | 1 - src/include/libpq/libpq-be.h | 5 -- 6 files changed, 2 insertions(+), 121 deletions(-) diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml index bbe1eb0d19a68..e900dccb11cfa 100644 --- a/doc/src/sgml/config.sgml +++ b/doc/src/sgml/config.sgml @@ -1034,35 +1034,6 @@ include_dir 'conf.d' - - ssl_renegotiation_limit (integer) - - ssl_renegotiation_limit configuration parameter - - - - - Specifies how much data can flow over an SSL-encrypted - connection before renegotiation of the session keys will take - place. Renegotiation decreases an attacker's chances of doing - cryptanalysis when large amounts of traffic can be examined, but it - also carries a large performance penalty. The sum of sent and received - traffic is used to check the limit. If this parameter is set to 0, - renegotiation is disabled. The default is 512MB. - - - - SSL libraries from before November 2009 are insecure when using SSL - renegotiation, due to a vulnerability in the SSL protocol. As a - stop-gap fix for this vulnerability, some vendors shipped SSL - libraries incapable of doing renegotiation. If any such libraries - are in use on the client or server, SSL renegotiation should be - disabled. - - - - - ssl_ciphers (string) diff --git a/src/backend/libpq/be-secure-openssl.c b/src/backend/libpq/be-secure-openssl.c index f0774fe8c9529..e9bc282d2e720 100644 --- a/src/backend/libpq/be-secure-openssl.c +++ b/src/backend/libpq/be-secure-openssl.c @@ -16,12 +16,8 @@ * backend can restart automatically, it is important that * we select an algorithm that continues to provide confidentiality * even if the attacker has the server's private key. Ephemeral - * DH (EDH) keys provide this, and in fact provide Perfect Forward - * Secrecy (PFS) except for situations where the session can - * be hijacked during a periodic handshake/renegotiation. - * Even that backdoor can be closed if client certificates - * are used (since the imposter will be unable to successfully - * complete renegotiation). + * DH (EDH) keys provide this and more (Perfect Forward Secrecy + * aka PFS). * * N.B., the static private key should still be protected to * the largest extent possible, to minimize the risk of @@ -37,12 +33,6 @@ * session. In this case you'll need to temporarily disable * EDH by commenting out the callback. * - * ... - * - * Because the risk of cryptanalysis increases as large - * amounts of data are sent with the same session key, the - * session keys are periodically renegotiated. - * *------------------------------------------------------------------------- */ @@ -92,9 +82,6 @@ static const char *SSLerrmessage(void); static char *X509_NAME_to_cstring(X509_NAME *name); -/* are we in the middle of a renegotiation? */ -static bool in_ssl_renegotiation = false; - static SSL_CTX *SSL_context = NULL; /* ------------------------------------------------------------ */ @@ -570,37 +557,6 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor) ssize_t n; int err; - /* - * If SSL renegotiations are enabled and we're getting close to the limit, - * start one now; but avoid it if there's one already in progress. 
- * Request the renegotiation 1kB before the limit has actually expired. - */ - if (ssl_renegotiation_limit && !in_ssl_renegotiation && - port->count > (ssl_renegotiation_limit - 1) * 1024L) - { - in_ssl_renegotiation = true; - - /* - * The way we determine that a renegotiation has completed is by - * observing OpenSSL's internal renegotiation counter. Make sure we - * start out at zero, and assume that the renegotiation is complete - * when the counter advances. - * - * OpenSSL provides SSL_renegotiation_pending(), but this doesn't seem - * to work in testing. - */ - SSL_clear_num_renegotiations(port->ssl); - - /* without this, renegotiation fails when a client cert is used */ - SSL_set_session_id_context(port->ssl, (void *) &SSL_context, - sizeof(SSL_context)); - - if (SSL_renegotiate(port->ssl) <= 0) - ereport(COMMERROR, - (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("SSL failure during renegotiation start"))); - } - errno = 0; n = SSL_write(port->ssl, ptr, len); err = SSL_get_error(port->ssl, n); @@ -646,28 +602,6 @@ be_tls_write(Port *port, void *ptr, size_t len, int *waitfor) break; } - if (n >= 0) - { - /* is renegotiation complete? */ - if (in_ssl_renegotiation && - SSL_num_renegotiations(port->ssl) >= 1) - { - in_ssl_renegotiation = false; - port->count = 0; - } - - /* - * if renegotiation is still ongoing, and we've gone beyond the limit, - * kill the connection now -- continuing to use it can be considered a - * security problem. - */ - if (in_ssl_renegotiation && - port->count > ssl_renegotiation_limit * 1024L) - ereport(FATAL, - (errcode(ERRCODE_PROTOCOL_VIOLATION), - errmsg("SSL failed to renegotiate connection before limit expired"))); - } - return n; } diff --git a/src/backend/libpq/be-secure.c b/src/backend/libpq/be-secure.c index 4a650cc001254..26d8faaf773a8 100644 --- a/src/backend/libpq/be-secure.c +++ b/src/backend/libpq/be-secure.c @@ -43,13 +43,6 @@ char *ssl_key_file; char *ssl_ca_file; char *ssl_crl_file; -/* - * How much data can be sent across a secure connection - * (total in both directions) before we require renegotiation. - * Set to 0 to disable renegotiation completely. 
- */ -int ssl_renegotiation_limit; - #ifdef USE_SSL bool ssl_loaded_verify_locations = false; #endif diff --git a/src/backend/utils/misc/guc.c b/src/backend/utils/misc/guc.c index 34c23f9560d32..c907ef1e41ca4 100644 --- a/src/backend/utils/misc/guc.c +++ b/src/backend/utils/misc/guc.c @@ -2577,17 +2577,6 @@ static struct config_int ConfigureNamesInt[] = NULL, assign_tcp_keepalives_interval, show_tcp_keepalives_interval }, - { - {"ssl_renegotiation_limit", PGC_USERSET, CONN_AUTH_SECURITY, - gettext_noop("Set the amount of traffic to send and receive before renegotiating the encryption keys."), - NULL, - GUC_UNIT_KB, - }, - &ssl_renegotiation_limit, - 512 * 1024, 0, MAX_KILOBYTES, - NULL, NULL, NULL - }, - { {"tcp_keepalives_count", PGC_USERSET, CLIENT_CONN_OTHER, gettext_noop("Maximum number of TCP keepalive retransmits."), diff --git a/src/backend/utils/misc/postgresql.conf.sample b/src/backend/utils/misc/postgresql.conf.sample index 06dfc067b03b3..e5d275df2155a 100644 --- a/src/backend/utils/misc/postgresql.conf.sample +++ b/src/backend/utils/misc/postgresql.conf.sample @@ -83,7 +83,6 @@ # (change requires restart) #ssl_prefer_server_ciphers = on # (change requires restart) #ssl_ecdh_curve = 'prime256v1' # (change requires restart) -#ssl_renegotiation_limit = 512MB # amount of data between renegotiations #ssl_cert_file = 'server.crt' # (change requires restart) #ssl_key_file = 'server.key' # (change requires restart) #ssl_ca_file = '' # (change requires restart) diff --git a/src/include/libpq/libpq-be.h b/src/include/libpq/libpq-be.h index 6171ef3a1ffca..caaa8b5f890f9 100644 --- a/src/include/libpq/libpq-be.h +++ b/src/include/libpq/libpq-be.h @@ -92,11 +92,6 @@ typedef struct } pg_gssinfo; #endif -/* - * SSL renegotiations - */ -extern int ssl_renegotiation_limit; - /* * This is used by the postmaster in its communication with frontends. It * contains all state information needed during this communication before the From cfa928ff6f944ac101802718f64db942060187b1 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Tue, 28 Jul 2015 13:21:37 -0700 Subject: [PATCH 116/442] Plug RLS related information leak in pg_stats view. The pg_stats view is supposed to be restricted to only show rows about tables the user can read. However, it sometimes can leak information which could not otherwise be seen when row level security is enabled. Fix that by not showing pg_stats rows to users that would be subject to RLS on the table the row is related to. This is done by creating/using the newly introduced SQL visible function, row_security_active(). Along the way, clean up three call sites of check_enable_rls(). The second argument of that function should only be specified as other than InvalidOid when we are checking as a different user than the current one, as in when querying through a view. These sites were passing GetUserId() instead of InvalidOid, which can cause the function to return incorrect results if the current user has the BYPASSRLS privilege and row_security has been set to OFF. Additionally fix a bug causing RI Trigger error messages to unintentionally leak information when RLS is enabled, and other minor cleanup and improvements. Also add WITH (security_barrier) to the definition of pg_stats. Bumped CATVERSION due to new SQL functions and pg_stats view definition. Back-patch to 9.5 where RLS was introduced. Reported by Yaroslav. Patch by Joe Conway and Dean Rasheed with review and input by Michael Paquier and Stephen Frost. 
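As a usage sketch, mirroring the regression tests added below but with a hypothetical table name "t": after this change, a role that is subject to RLS on t sees row_security_active() report true and the corresponding pg_stats rows disappear.

    -- run as a role to which a policy on "t" applies:
    SELECT row_security_active('t');            -- true when RLS filters this user's queries
    SELECT attname, most_common_vals
      FROM pg_stats WHERE tablename = 't';      -- returns no rows for that user
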
--- doc/src/sgml/func.sgml | 16 +++++++ src/backend/access/index/genam.c | 2 +- src/backend/catalog/system_views.sql | 6 ++- src/backend/executor/execMain.c | 2 +- src/backend/rewrite/rowsecurity.c | 16 ++----- src/backend/utils/adt/ri_triggers.c | 4 +- src/backend/utils/cache/plancache.c | 7 +-- src/backend/utils/init/miscinit.c | 14 +++++- src/backend/utils/misc/rls.c | 53 ++++++++++++++++++++++- src/include/catalog/catversion.h | 2 +- src/include/catalog/pg_proc.h | 6 +++ src/include/miscadmin.h | 1 + src/include/utils/builtins.h | 4 ++ src/test/regress/expected/rowsecurity.out | 38 +++++++++++++++- src/test/regress/expected/rules.out | 2 +- src/test/regress/sql/rowsecurity.sql | 17 +++++++- 16 files changed, 159 insertions(+), 31 deletions(-) diff --git a/doc/src/sgml/func.sgml b/doc/src/sgml/func.sgml index ef50fa581135b..17aa1d77c9f07 100644 --- a/doc/src/sgml/func.sgml +++ b/doc/src/sgml/func.sgml @@ -15244,6 +15244,12 @@ SET search_path TO schema , schema, .. boolean does current user have privilege for role + + row_security_active(table) + + boolean + does current user have row level security active for table +
@@ -15284,6 +15290,9 @@ SET search_path TO schema , schema, .. pg_has_role + + row_security_active + has_table_privilege checks whether a user @@ -15447,6 +15456,13 @@ SELECT has_function_privilege('joeuser', 'myfunc(int, text)', 'execute'); are immediately available without doing SET ROLE. + + row_security_active checks whether row level + security is active for the specified table in the context of the + current_user and environment. The table can + be specified by name or by OID. + + shows functions that determine whether a certain object is visible in the diff --git a/src/backend/access/index/genam.c b/src/backend/access/index/genam.c index 1043362f914e2..aa5b28c61a07c 100644 --- a/src/backend/access/index/genam.c +++ b/src/backend/access/index/genam.c @@ -204,7 +204,7 @@ BuildIndexValueDescription(Relation indexRelation, Assert(indexrelid == idxrec->indexrelid); /* RLS check- if RLS is enabled then we don't return anything. */ - if (check_enable_rls(indrelid, GetUserId(), true) == RLS_ENABLED) + if (check_enable_rls(indrelid, InvalidOid, true) == RLS_ENABLED) { ReleaseSysCache(ht_idx); return NULL; diff --git a/src/backend/catalog/system_views.sql b/src/backend/catalog/system_views.sql index e82a53aee9364..c0bd6fa96b750 100644 --- a/src/backend/catalog/system_views.sql +++ b/src/backend/catalog/system_views.sql @@ -150,7 +150,7 @@ CREATE VIEW pg_indexes AS LEFT JOIN pg_tablespace T ON (T.oid = I.reltablespace) WHERE C.relkind IN ('r', 'm') AND I.relkind = 'i'; -CREATE VIEW pg_stats AS +CREATE VIEW pg_stats WITH (security_barrier) AS SELECT nspname AS schemaname, relname AS tablename, @@ -211,7 +211,9 @@ CREATE VIEW pg_stats AS FROM pg_statistic s JOIN pg_class c ON (c.oid = s.starelid) JOIN pg_attribute a ON (c.oid = attrelid AND attnum = s.staattnum) LEFT JOIN pg_namespace n ON (n.oid = c.relnamespace) - WHERE NOT attisdropped AND has_column_privilege(c.oid, a.attnum, 'select'); + WHERE NOT attisdropped + AND has_column_privilege(c.oid, a.attnum, 'select') + AND (c.relrowsecurity = false OR NOT row_security_active(c.oid)); REVOKE ALL on pg_statistic FROM public; diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index a1561ce0cc0ff..2c65a901d945d 100644 --- a/src/backend/executor/execMain.c +++ b/src/backend/executor/execMain.c @@ -1874,7 +1874,7 @@ ExecBuildSlotValueDescription(Oid reloid, * then don't return anything. Otherwise, go through normal permission * checks. */ - if (check_enable_rls(reloid, GetUserId(), true) == RLS_ENABLED) + if (check_enable_rls(reloid, InvalidOid, true) == RLS_ENABLED) return NULL; initStringInfo(&buf); diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c index aaf0061164b29..2386cf016fbc5 100644 --- a/src/backend/rewrite/rowsecurity.c +++ b/src/backend/rewrite/rowsecurity.c @@ -107,7 +107,6 @@ get_row_security_policies(Query *root, CmdType commandType, RangeTblEntry *rte, Relation rel; Oid user_id; - int sec_context; int rls_status; bool defaultDeny = false; @@ -117,22 +116,13 @@ get_row_security_policies(Query *root, CmdType commandType, RangeTblEntry *rte, *hasRowSecurity = false; *hasSubLinks = false; - /* This is just to get the security context */ - GetUserIdAndSecContext(&user_id, &sec_context); + /* If this is not a normal relation, just return immediately */ + if (rte->relkind != RELKIND_RELATION) + return; /* Switch to checkAsUser if it's set */ user_id = rte->checkAsUser ? 
rte->checkAsUser : GetUserId(); - /* - * If this is not a normal relation, or we have been told to explicitly - * skip RLS (perhaps because this is an FK check) then just return - * immediately. - */ - if (rte->relid < FirstNormalObjectId - || rte->relkind != RELKIND_RELATION - || (sec_context & SECURITY_ROW_LEVEL_DISABLED)) - return; - /* Determine the state of RLS for this, pass checkAsUser explicitly */ rls_status = check_enable_rls(rte->relid, rte->checkAsUser, false); diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index 88dd3faf2d9a0..61edde9c5d35a 100644 --- a/src/backend/utils/adt/ri_triggers.c +++ b/src/backend/utils/adt/ri_triggers.c @@ -3243,7 +3243,7 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo, * privileges. */ - if (check_enable_rls(rel_oid, GetUserId(), true) != RLS_ENABLED) + if (check_enable_rls(rel_oid, InvalidOid, true) != RLS_ENABLED) { aclresult = pg_class_aclcheck(rel_oid, GetUserId(), ACL_SELECT); if (aclresult != ACLCHECK_OK) @@ -3264,6 +3264,8 @@ ri_ReportViolation(const RI_ConstraintInfo *riinfo, } } } + else + has_perm = false; if (has_perm) { diff --git a/src/backend/utils/cache/plancache.c b/src/backend/utils/cache/plancache.c index e6808e7576359..525794fb64450 100644 --- a/src/backend/utils/cache/plancache.c +++ b/src/backend/utils/cache/plancache.c @@ -153,8 +153,6 @@ CreateCachedPlan(Node *raw_parse_tree, CachedPlanSource *plansource; MemoryContext source_context; MemoryContext oldcxt; - Oid user_id; - int security_context; Assert(query_string != NULL); /* required as of 8.4 */ @@ -177,8 +175,6 @@ CreateCachedPlan(Node *raw_parse_tree, */ oldcxt = MemoryContextSwitchTo(source_context); - GetUserIdAndSecContext(&user_id, &security_context); - plansource = (CachedPlanSource *) palloc0(sizeof(CachedPlanSource)); plansource->magic = CACHEDPLANSOURCE_MAGIC; plansource->raw_parse_tree = copyObject(raw_parse_tree); @@ -208,8 +204,7 @@ CreateCachedPlan(Node *raw_parse_tree, plansource->total_custom_cost = 0; plansource->num_custom_plans = 0; plansource->hasRowSecurity = false; - plansource->rowSecurityDisabled - = (security_context & SECURITY_ROW_LEVEL_DISABLED) != 0; + plansource->rowSecurityDisabled = InRowLevelSecurityDisabled(); plansource->row_security_env = row_security; plansource->planUserId = InvalidOid; diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index acc4752015b32..ac3e764e8b8c2 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -341,7 +341,7 @@ GetAuthenticatedUserId(void) * GetUserIdAndSecContext/SetUserIdAndSecContext - get/set the current user ID * and the SecurityRestrictionContext flags. * - * Currently there are two valid bits in SecurityRestrictionContext: + * Currently there are three valid bits in SecurityRestrictionContext: * * SECURITY_LOCAL_USERID_CHANGE indicates that we are inside an operation * that is temporarily changing CurrentUserId via these functions. This is @@ -359,6 +359,9 @@ GetAuthenticatedUserId(void) * where the called functions are really supposed to be side-effect-free * anyway, such as VACUUM/ANALYZE/REINDEX. * + * SECURITY_ROW_LEVEL_DISABLED indicates that we are inside an operation that + * needs to bypass row level security checks, for example FK checks. + * * Unlike GetUserId, GetUserIdAndSecContext does *not* Assert that the current * value of CurrentUserId is valid; nor does SetUserIdAndSecContext require * the new value to be valid. 
In fact, these routines had better not @@ -401,6 +404,15 @@ InSecurityRestrictedOperation(void) return (SecurityRestrictionContext & SECURITY_RESTRICTED_OPERATION) != 0; } +/* + * InRowLevelSecurityDisabled - are we inside a RLS-disabled operation? + */ +bool +InRowLevelSecurityDisabled(void) +{ + return (SecurityRestrictionContext & SECURITY_ROW_LEVEL_DISABLED) != 0; +} + /* * These are obsolete versions of Get/SetUserIdAndSecContext that are diff --git a/src/backend/utils/misc/rls.c b/src/backend/utils/misc/rls.c index 44cb3743034a1..7b8d51d956f29 100644 --- a/src/backend/utils/misc/rls.c +++ b/src/backend/utils/misc/rls.c @@ -16,9 +16,12 @@ #include "access/htup.h" #include "access/htup_details.h" +#include "access/transam.h" #include "catalog/pg_class.h" +#include "catalog/namespace.h" #include "miscadmin.h" #include "utils/acl.h" +#include "utils/builtins.h" #include "utils/elog.h" #include "utils/rls.h" #include "utils/syscache.h" @@ -37,7 +40,10 @@ extern int check_enable_rls(Oid relid, Oid checkAsUser, bool noError); * for the table and the plan cache needs to be invalidated if the environment * changes. * - * Handle checking as another role via checkAsUser (for views, etc). + * Handle checking as another role via checkAsUser (for views, etc). Note that + * if *not* checking as another role, the caller should pass InvalidOid rather + * than GetUserId(). Otherwise the check for row_security = OFF is skipped, and + * so we may falsely report that RLS is active when the user has bypassed it. * * If noError is set to 'true' then we just return RLS_ENABLED instead of doing * an ereport() if the user has attempted to bypass RLS and they are not @@ -53,6 +59,17 @@ check_enable_rls(Oid relid, Oid checkAsUser, bool noError) bool relrowsecurity; Oid user_id = checkAsUser ? checkAsUser : GetUserId(); + /* Nothing to do for built-in relations */ + if (relid < FirstNormalObjectId) + return RLS_NONE; + + /* + * Check if we have been told to explicitly skip RLS (perhaps because this + * is a foreign key check) + */ + if (InRowLevelSecurityDisabled()) + return RLS_NONE; + tuple = SearchSysCache1(RELOID, ObjectIdGetDatum(relid)); if (!HeapTupleIsValid(tuple)) return RLS_NONE; @@ -111,3 +128,37 @@ check_enable_rls(Oid relid, Oid checkAsUser, bool noError) /* RLS should be fully enabled for this relation. */ return RLS_ENABLED; } + +/* + * row_security_active + * + * check_enable_rls wrapped as a SQL callable function except + * RLS_NONE_ENV and RLS_NONE are the same for this purpose. + */ +Datum +row_security_active(PG_FUNCTION_ARGS) +{ + /* By OID */ + Oid tableoid = PG_GETARG_OID(0); + int rls_status; + + rls_status = check_enable_rls(tableoid, InvalidOid, true); + PG_RETURN_BOOL(rls_status == RLS_ENABLED); +} + +Datum +row_security_active_name(PG_FUNCTION_ARGS) +{ + /* By qualified name */ + text *tablename = PG_GETARG_TEXT_P(0); + RangeVar *tablerel; + Oid tableoid; + int rls_status; + + /* Look up table name. Can't lock it - we might not have privileges. 
*/ + tablerel = makeRangeVarFromNameList(textToQualifiedNameList(tablename)); + tableoid = RangeVarGetRelid(tablerel, NoLock, false); + + rls_status = check_enable_rls(tableoid, InvalidOid, true); + PG_RETURN_BOOL(rls_status == RLS_ENABLED); +} diff --git a/src/include/catalog/catversion.h b/src/include/catalog/catversion.h index 349dd2531028a..0bc1ee222a3db 100644 --- a/src/include/catalog/catversion.h +++ b/src/include/catalog/catversion.h @@ -53,6 +53,6 @@ */ /* yyyymmddN */ -#define CATALOG_VERSION_NO 201507251 +#define CATALOG_VERSION_NO 201507281 #endif diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index be55666dd076b..9a27399c5128e 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -5337,6 +5337,12 @@ DESCR("get progress for all replication origins"); #define PROVOLATILE_STABLE 's' /* does not change within a scan */ #define PROVOLATILE_VOLATILE 'v' /* can change even within a scan */ +/* rls */ +DATA(insert OID = 3298 ( row_security_active PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 16 "26" _null_ _null_ _null_ _null_ _null_ row_security_active _null_ _null_ _null_ )); +DESCR("row security for current context active on table by table oid"); +DATA(insert OID = 3299 ( row_security_active PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 16 "25" _null_ _null_ _null_ _null_ _null_ row_security_active_name _null_ _null_ _null_ )); +DESCR("row security for current context active on table by table name"); + /* * Symbolic values for proargmodes column. Note that these must agree with * the FunctionParameterMode enum in parsenodes.h; we declare them here to diff --git a/src/include/miscadmin.h b/src/include/miscadmin.h index b5391673609b7..e0cc69f27ef1e 100644 --- a/src/include/miscadmin.h +++ b/src/include/miscadmin.h @@ -305,6 +305,7 @@ extern void GetUserIdAndSecContext(Oid *userid, int *sec_context); extern void SetUserIdAndSecContext(Oid userid, int sec_context); extern bool InLocalUserIdChange(void); extern bool InSecurityRestrictedOperation(void); +extern bool InRowLevelSecurityDisabled(void); extern void GetUserIdAndContext(Oid *userid, bool *sec_def_context); extern void SetUserIdAndContext(Oid userid, bool sec_def_context); extern void InitializeSessionUserId(const char *rolename, Oid useroid); diff --git a/src/include/utils/builtins.h b/src/include/utils/builtins.h index 07caf22f962f4..95f2a848d39d3 100644 --- a/src/include/utils/builtins.h +++ b/src/include/utils/builtins.h @@ -1120,6 +1120,10 @@ extern Datum set_config_by_name(PG_FUNCTION_ARGS); extern Datum show_all_settings(PG_FUNCTION_ARGS); extern Datum show_all_file_settings(PG_FUNCTION_ARGS); +/* rls.c */ +extern Datum row_security_active(PG_FUNCTION_ARGS); +extern Datum row_security_active_name(PG_FUNCTION_ARGS); + /* lockfuncs.c */ extern Datum pg_lock_status(PG_FUNCTION_ARGS); extern Datum pg_advisory_lock_int8(PG_FUNCTION_ARGS); diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index 72361e82a5f23..fd8e180f8a8e8 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -307,7 +307,7 @@ SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid; DELETE FROM category WHERE cid = 33; -- fails with FK violation ERROR: update or delete on table "category" violates foreign key constraint "document_cid_fkey" on table "document" -DETAIL: Key (cid)=(33) is still referenced from table "document". +DETAIL: Key is still referenced from table "document". 
-- can insert FK referencing invisible PK SET SESSION AUTHORIZATION rls_regress_user2; SELECT * FROM document d FULL OUTER JOIN category c on d.cid = c.cid; @@ -2886,11 +2886,45 @@ SELECT * FROM current_check; (1 row) COMMIT; +-- +-- check pg_stats view filtering +-- +SET row_security TO ON; +SET SESSION AUTHORIZATION rls_regress_user0; +ANALYZE current_check; +-- Stats visible +SELECT row_security_active('current_check'); + row_security_active +--------------------- + f +(1 row) + +SELECT most_common_vals FROM pg_stats where tablename = 'current_check'; + most_common_vals +--------------------- + + + {rls_regress_user1} +(3 rows) + +SET SESSION AUTHORIZATION rls_regress_user1; +-- Stats not visible +SELECT row_security_active('current_check'); + row_security_active +--------------------- + t +(1 row) + +SELECT most_common_vals FROM pg_stats where tablename = 'current_check'; + most_common_vals +------------------ +(0 rows) + -- -- Collation support -- BEGIN; -SET row_security = force; +SET row_security TO FORCE; CREATE TABLE coll_t (c) AS VALUES ('bar'::text); CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C")); ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY; diff --git a/src/test/regress/expected/rules.out b/src/test/regress/expected/rules.out index 1e5b0b9a2c43a..6206c819cd872 100644 --- a/src/test/regress/expected/rules.out +++ b/src/test/regress/expected/rules.out @@ -2061,7 +2061,7 @@ pg_stats| SELECT n.nspname AS schemaname, JOIN pg_class c ON ((c.oid = s.starelid))) JOIN pg_attribute a ON (((c.oid = a.attrelid) AND (a.attnum = s.staattnum)))) LEFT JOIN pg_namespace n ON ((n.oid = c.relnamespace))) - WHERE ((NOT a.attisdropped) AND has_column_privilege(c.oid, a.attnum, 'select'::text)); + WHERE ((NOT a.attisdropped) AND has_column_privilege(c.oid, a.attnum, 'select'::text) AND ((c.relrowsecurity = false) OR (NOT row_security_active(c.oid)))); pg_tables| SELECT n.nspname AS schemaname, c.relname AS tablename, pg_get_userbyid(c.relowner) AS tableowner, diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index f588fa2337738..32f10d8649f1e 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -1189,11 +1189,26 @@ SELECT * FROM current_check; COMMIT; +-- +-- check pg_stats view filtering +-- +SET row_security TO ON; +SET SESSION AUTHORIZATION rls_regress_user0; +ANALYZE current_check; +-- Stats visible +SELECT row_security_active('current_check'); +SELECT most_common_vals FROM pg_stats where tablename = 'current_check'; + +SET SESSION AUTHORIZATION rls_regress_user1; +-- Stats not visible +SELECT row_security_active('current_check'); +SELECT most_common_vals FROM pg_stats where tablename = 'current_check'; + -- -- Collation support -- BEGIN; -SET row_security = force; +SET row_security TO FORCE; CREATE TABLE coll_t (c) AS VALUES ('bar'::text); CREATE POLICY coll_p ON coll_t USING (c < ('foo'::text COLLATE "C")); ALTER TABLE coll_t ENABLE ROW LEVEL SECURITY; From 116be6c17102a4b300037d3c565694cac0bcba90 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 28 Jul 2015 17:34:00 -0400 Subject: [PATCH 117/442] Reduce chatter from signaling of autovacuum workers. Don't print a WARNING if we get ESRCH from a kill() that's attempting to cancel an autovacuum worker. It's possible (and has been seen in the buildfarm) that the worker is already gone by the time we are able to execute the kill, in which case the failure is harmless. 
About the only plausible reason for reporting such cases would be to help debug corrupted lock table contents, but this is hardly likely to be the most important symptom if that happens. Moreover issuing a WARNING might scare users more than is warranted. Also, since sending a signal to an autovacuum worker is now entirely a routine thing, and the worker will log the query cancel on its end anyway, reduce the message saying we're doing that from LOG to DEBUG1 level. Very minor cosmetic cleanup as well. Since the main practical reason for doing this is to avoid unnecessary buildfarm failures, back-patch to all active branches. --- src/backend/storage/lmgr/proc.c | 28 +++++++++++++++++++--------- 1 file changed, 19 insertions(+), 9 deletions(-) diff --git a/src/backend/storage/lmgr/proc.c b/src/backend/storage/lmgr/proc.c index 455ad2663402f..a82d473e202cc 100644 --- a/src/backend/storage/lmgr/proc.c +++ b/src/backend/storage/lmgr/proc.c @@ -1170,22 +1170,32 @@ ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable) /* release lock as quickly as possible */ LWLockRelease(ProcArrayLock); - ereport(LOG, + /* send the autovacuum worker Back to Old Kent Road */ + ereport(DEBUG1, (errmsg("sending cancel to blocking autovacuum PID %d", pid), errdetail_log("%s", logbuf.data))); - pfree(logbuf.data); - pfree(locktagbuf.data); - - /* send the autovacuum worker Back to Old Kent Road */ if (kill(pid, SIGINT) < 0) { - /* Just a warning to allow multiple callers */ - ereport(WARNING, - (errmsg("could not send signal to process %d: %m", - pid))); + /* + * There's a race condition here: once we release the + * ProcArrayLock, it's possible for the autovac worker to + * close up shop and exit before we can do the kill(). + * Therefore, we do not whinge about no-such-process. + * Other errors such as EPERM could conceivably happen if + * the kernel recycles the PID fast enough, but such cases + * seem improbable enough that it's probably best to issue + * a warning if we see some other errno. + */ + if (errno != ESRCH) + ereport(WARNING, + (errmsg("could not send signal to process %d: %m", + pid))); } + + pfree(logbuf.data); + pfree(locktagbuf.data); } else LWLockRelease(ProcArrayLock); From 40a50a17b905dae233ddb8bb36b7deff9e3abb16 Mon Sep 17 00:00:00 2001 From: Andrew Dunstan Date: Tue, 28 Jul 2015 17:54:13 -0400 Subject: [PATCH 118/442] Only adjust negative indexes in json_get up to the length of the path. The previous code resulted in memory access beyond the path bounds. The cure is to move it into a code branch that checks the value of lex_level is within the correct bounds. Bug reported and diagnosed by Piotr Stefaniak. 
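Purely as an illustration of the code path involved, and assuming negative path subscripts are accepted here as the surrounding code suggests, a query of roughly this shape exercises the adjusted branch; it is not taken from the patch.

    SELECT '[10, 20, 30]'::json #> '{-1}';   -- negative subscript counted from the end of the array
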
--- src/backend/utils/adt/jsonfuncs.c | 26 +++++++++++++------------- 1 file changed, 13 insertions(+), 13 deletions(-) diff --git a/src/backend/utils/adt/jsonfuncs.c b/src/backend/utils/adt/jsonfuncs.c index 17e787b60a2ed..3b8d42e4d51b7 100644 --- a/src/backend/utils/adt/jsonfuncs.c +++ b/src/backend/utils/adt/jsonfuncs.c @@ -977,27 +977,27 @@ get_array_start(void *state) { /* Initialize counting of elements in this array */ _state->array_cur_index[lex_level] = -1; + + /* INT_MIN value is reserved to represent invalid subscript */ + if (_state->path_indexes[lex_level] < 0 && + _state->path_indexes[lex_level] != INT_MIN) + { + /* Negative subscript -- convert to positive-wise subscript */ + int nelements = json_count_array_elements(_state->lex); + + if (-_state->path_indexes[lex_level] <= nelements) + _state->path_indexes[lex_level] += nelements; + } } else if (lex_level == 0 && _state->npath == 0) { /* * Special case: we should match the entire array. We only need this - * at outermost level because at nested levels the match will have - * been started by the outer field or array element callback. + * at the outermost level because at nested levels the match will + * have been started by the outer field or array element callback. */ _state->result_start = _state->lex->token_start; } - - /* INT_MIN value is reserved to represent invalid subscript */ - if (_state->path_indexes[lex_level] < 0 && - _state->path_indexes[lex_level] != INT_MIN) - { - /* Negative subscript -- convert to positive-wise subscript */ - int nelements = json_count_array_elements(_state->lex); - - if (-_state->path_indexes[lex_level] <= nelements) - _state->path_indexes[lex_level] += nelements; - } } static void From 28b11bd1069ed35f45125b4057780cc55b9d716a Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 28 Jul 2015 18:42:59 -0400 Subject: [PATCH 119/442] Update our documentation concerning where to create data directories. Although initdb has long discouraged use of a filesystem mount-point directory as a PG data directory, this point was covered nowhere in the user-facing documentation. Also, with the popularity of pg_upgrade, we really need to recommend that the PG user own not only the data directory but its parent directory too. (Without a writable parent directory, operations such as "mv data data.old" fail immediately. pg_upgrade itself doesn't do that, but wrapper scripts for it often do.) Hence, adjust the "Creating a Database Cluster" section to address these points. I also took the liberty of wordsmithing the discussion of NFS a bit. These considerations aren't by any means new, so back-patch to all supported branches. --- doc/src/sgml/runtime.sgml | 79 ++++++++++++++++++++++++++++----------- 1 file changed, 57 insertions(+), 22 deletions(-) diff --git a/doc/src/sgml/runtime.sgml b/doc/src/sgml/runtime.sgml index 547567e9ca458..6d5b1082d2dbb 100644 --- a/doc/src/sgml/runtime.sgml +++ b/doc/src/sgml/runtime.sgml @@ -49,7 +49,7 @@ Before you can do anything, you must initialize a database storage area on disk. We call this a database cluster. - (SQL uses the term catalog cluster.) A + (The SQL standard uses the term catalog cluster.) A database cluster is a collection of databases that is managed by a single instance of a running database server. 
After initialization, a database cluster will contain a database named postgres, @@ -65,7 +65,7 @@ - In file system terms, a database cluster will be a single directory + In file system terms, a database cluster is a single directory under which all data will be stored. We call this the data directory or data area. It is completely up to you where you choose to store your data. There is no @@ -109,15 +109,18 @@ initdb will attempt to create the directory you - specify if it does not already exist. It is likely that it will not - have the permission to do so (if you followed our advice and created - an unprivileged account). In that case you should create the - directory yourself (as root) and change the owner to be the - PostgreSQL user. Here is how this might - be done: + specify if it does not already exist. Of course, this will fail if + initdb does not have permissions to write in the + parent directory. It's generally recommendable that the + PostgreSQL user own not just the data + directory but its parent directory as well, so that this should not + be a problem. If the desired parent directory doesn't exist either, + you will need to create it first, using root privileges if the + grandparent directory isn't writable. So the process might look + like this: -root# mkdir /usr/local/pgsql/data -root# chown postgres /usr/local/pgsql/data +root# mkdir /usr/local/pgsql +root# chown postgres /usr/local/pgsql root# su postgres postgres$ initdb -D /usr/local/pgsql/data @@ -125,7 +128,9 @@ postgres$ initdb -D /usr/local/pgsql/data initdb will refuse to run if the data directory - looks like it has already been initialized. + exists and already contains files; this is to prevent accidentally + overwriting an existing installation. + Because the data directory contains all the data stored in the @@ -178,8 +183,30 @@ postgres$ initdb -D /usr/local/pgsql/data locale setting. For details see . + + Use of Secondary File Systems + + + file system mount points + + + + Many installations create their database clusters on file systems + (volumes) other than the machine's root volume. If you + choose to do this, it is not advisable to try to use the secondary + volume's topmost directory (mount point) as the data directory. + Best practice is to create a directory within the mount-point + directory that is owned by the PostgreSQL + user, and then create the data directory within that. This avoids + permissions problems, particularly for operations such + as pg_upgrade, and it also ensures clean failures if + the secondary volume is taken offline. + + + + - Network File Systems + Use of Network File Systems Network File Systems @@ -188,22 +215,30 @@ postgres$ initdb -D /usr/local/pgsql/data Network Attached Storage (NAS)Network File Systems - Many installations create database clusters on network file systems. - Sometimes this is done directly via NFS, or by using a + Many installations create their database clusters on network file + systems. Sometimes this is done via NFS, or by using a Network Attached Storage (NAS) device that uses NFS internally. PostgreSQL does nothing special for NFS file systems, meaning it assumes - NFS behaves exactly like locally-connected drives - (DAS, Direct Attached Storage). If client and server - NFS implementations have non-standard semantics, this can + NFS behaves exactly like locally-connected drives. + If the client or server NFS implementation does not + provide standard file system semantics, this can cause reliability problems (see ). 
Specifically, delayed (asynchronous) writes to the NFS - server can cause reliability problems; if possible, mount - NFS file systems synchronously (without caching) to avoid - this. Also, soft-mounting NFS is not recommended. - (Storage Area Networks (SAN) use a low-level - communication protocol rather than NFS.) + server can cause data corruption problems. If possible, mount the + NFS file system synchronously (without caching) to avoid + this hazard. Also, soft-mounting the NFS file system is + not recommended. + + + + Storage Area Networks (SAN) typically use communication + protocols other than NFS, and may or may not be subject + to hazards of this sort. It's advisable to consult the vendor's + documentation concerning data consistency guarantees. + PostgreSQL cannot be more reliable than + the file system it's using. From 992c9d345f6607c5b2cab2787f7cf72fba96673d Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Tue, 28 Jul 2015 16:01:56 -0700 Subject: [PATCH 120/442] Create a pg_shdepend entry for each role in TO clause of policies. CreatePolicy() and AlterPolicy() omit to create a pg_shdepend entry for each role in the TO clause. Fix this by creating a new shared dependency type called SHARED_DEPENDENCY_POLICY and assigning it to each role. Reported by Noah Misch. Patch by me, reviewed by Alvaro Herrera. Back-patch to 9.5 where RLS was introduced. --- doc/src/sgml/catalogs.sgml | 10 +++ src/backend/catalog/pg_shdepend.c | 2 + src/backend/commands/policy.c | 87 ++++++++++++++++------- src/include/catalog/dependency.h | 5 ++ src/test/regress/expected/rowsecurity.out | 55 ++++++++++++++ src/test/regress/sql/rowsecurity.sql | 44 ++++++++++++ 6 files changed, 177 insertions(+), 26 deletions(-) diff --git a/doc/src/sgml/catalogs.sgml b/doc/src/sgml/catalogs.sgml index 9096ee5d517de..7781c56f0eb28 100644 --- a/doc/src/sgml/catalogs.sgml +++ b/doc/src/sgml/catalogs.sgml @@ -5792,6 +5792,16 @@
+ + SHARED_DEPENDENCY_POLICY (r) + + + The referenced object (which must be a role) is mentioned as the + target of a dependent policy object. + + + + SHARED_DEPENDENCY_PIN (p) diff --git a/src/backend/catalog/pg_shdepend.c b/src/backend/catalog/pg_shdepend.c index 34fe4e2474585..43076c9c287d2 100644 --- a/src/backend/catalog/pg_shdepend.c +++ b/src/backend/catalog/pg_shdepend.c @@ -1083,6 +1083,8 @@ storeObjectDescription(StringInfo descs, appendStringInfo(descs, _("owner of %s"), objdesc); else if (deptype == SHARED_DEPENDENCY_ACL) appendStringInfo(descs, _("privileges for %s"), objdesc); + else if (deptype == SHARED_DEPENDENCY_POLICY) + appendStringInfo(descs, _("target of %s"), objdesc); else elog(ERROR, "unrecognized dependency type: %d", (int) deptype); diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 17b48d49596b1..9544f75032b4b 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -22,6 +22,7 @@ #include "catalog/indexing.h" #include "catalog/namespace.h" #include "catalog/objectaccess.h" +#include "catalog/pg_authid.h" #include "catalog/pg_policy.h" #include "catalog/pg_type.h" #include "commands/policy.h" @@ -48,7 +49,7 @@ static void RangeVarCallbackForPolicy(const RangeVar *rv, Oid relid, Oid oldrelid, void *arg); static char parse_policy_command(const char *cmd_name); -static ArrayType *policy_role_list_to_array(List *roles); +static Datum *policy_role_list_to_array(List *roles, int *num_roles); /* * Callback to RangeVarGetRelidExtended(). @@ -130,30 +131,28 @@ parse_policy_command(const char *cmd_name) /* * policy_role_list_to_array - * helper function to convert a list of RoleSpecs to an array of role ids. + * helper function to convert a list of RoleSpecs to an array of + * role id Datums. 
*/ -static ArrayType * -policy_role_list_to_array(List *roles) +static Datum * +policy_role_list_to_array(List *roles, int *num_roles) { - ArrayType *role_ids; - Datum *temp_array; + Datum *role_oids; ListCell *cell; - int num_roles; int i = 0; /* Handle no roles being passed in as being for public */ if (roles == NIL) { - temp_array = (Datum *) palloc(sizeof(Datum)); - temp_array[0] = ObjectIdGetDatum(ACL_ID_PUBLIC); + *num_roles = 1; + role_oids = (Datum *) palloc(*num_roles * sizeof(Datum)); + role_oids[0] = ObjectIdGetDatum(ACL_ID_PUBLIC); - role_ids = construct_array(temp_array, 1, OIDOID, sizeof(Oid), true, - 'i'); - return role_ids; + return role_oids; } - num_roles = list_length(roles); - temp_array = (Datum *) palloc(num_roles * sizeof(Datum)); + *num_roles = list_length(roles); + role_oids = (Datum *) palloc(*num_roles * sizeof(Datum)); foreach(cell, roles) { @@ -164,24 +163,24 @@ policy_role_list_to_array(List *roles) */ if (spec->roletype == ROLESPEC_PUBLIC) { - if (num_roles != 1) + if (*num_roles != 1) + { ereport(WARNING, (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("ignoring roles specified other than public"), errhint("All roles are members of the public role."))); - temp_array[0] = ObjectIdGetDatum(ACL_ID_PUBLIC); - num_roles = 1; - break; + *num_roles = 1; + } + role_oids[0] = ObjectIdGetDatum(ACL_ID_PUBLIC); + + return role_oids; } else - temp_array[i++] = + role_oids[i++] = ObjectIdGetDatum(get_rolespec_oid((Node *) spec, false)); } - role_ids = construct_array(temp_array, num_roles, OIDOID, sizeof(Oid), true, - 'i'); - - return role_ids; + return role_oids; } /* @@ -463,6 +462,8 @@ CreatePolicy(CreatePolicyStmt *stmt) Relation target_table; Oid table_id; char polcmd; + Datum *role_oids; + int nitems = 0; ArrayType *role_ids; ParseState *qual_pstate; ParseState *with_check_pstate; @@ -476,6 +477,7 @@ CreatePolicy(CreatePolicyStmt *stmt) bool isnull[Natts_pg_policy]; ObjectAddress target; ObjectAddress myself; + int i; /* Parse command */ polcmd = parse_policy_command(stmt->cmd); @@ -498,9 +500,10 @@ CreatePolicy(CreatePolicyStmt *stmt) (errcode(ERRCODE_SYNTAX_ERROR), errmsg("only WITH CHECK expression allowed for INSERT"))); - /* Collect role ids */ - role_ids = policy_role_list_to_array(stmt->roles); + role_oids = policy_role_list_to_array(stmt->roles, &nitems); + role_ids = construct_array(role_oids, nitems, OIDOID, + sizeof(Oid), true, 'i'); /* Parse the supplied clause */ qual_pstate = make_parsestate(NULL); @@ -614,6 +617,18 @@ CreatePolicy(CreatePolicyStmt *stmt) recordDependencyOnExpr(&myself, with_check_qual, with_check_pstate->p_rtable, DEPENDENCY_NORMAL); + /* Register role dependencies */ + target.classId = AuthIdRelationId; + target.objectSubId = 0; + for (i = 0; i < nitems; i++) + { + target.objectId = DatumGetObjectId(role_oids[i]); + /* no dependency if public */ + if (target.objectId != ACL_ID_PUBLIC) + recordSharedDependencyOn(&myself, &target, + SHARED_DEPENDENCY_POLICY); + } + /* Invalidate Relation Cache */ CacheInvalidateRelcache(target_table); @@ -641,6 +656,8 @@ AlterPolicy(AlterPolicyStmt *stmt) Oid policy_id; Relation target_table; Oid table_id; + Datum *role_oids; + int nitems = 0; ArrayType *role_ids = NULL; List *qual_parse_rtable = NIL; List *with_check_parse_rtable = NIL; @@ -658,10 +675,15 @@ AlterPolicy(AlterPolicyStmt *stmt) Datum cmd_datum; char polcmd; bool polcmd_isnull; + int i; /* Parse role_ids */ if (stmt->roles != NULL) - role_ids = policy_role_list_to_array(stmt->roles); + { + role_oids = 
policy_role_list_to_array(stmt->roles, &nitems); + role_ids = construct_array(role_oids, nitems, OIDOID, + sizeof(Oid), true, 'i'); + } /* Get id of table. Also handles permissions checks. */ table_id = RangeVarGetRelidExtended(stmt->table, AccessExclusiveLock, @@ -825,6 +847,19 @@ AlterPolicy(AlterPolicyStmt *stmt) recordDependencyOnExpr(&myself, with_check_qual, with_check_parse_rtable, DEPENDENCY_NORMAL); + /* Register role dependencies */ + deleteSharedDependencyRecordsFor(PolicyRelationId, policy_id, 0); + target.classId = AuthIdRelationId; + target.objectSubId = 0; + for (i = 0; i < nitems; i++) + { + target.objectId = DatumGetObjectId(role_oids[i]); + /* no dependency if public */ + if (target.objectId != ACL_ID_PUBLIC) + recordSharedDependencyOn(&myself, &target, + SHARED_DEPENDENCY_POLICY); + } + heap_freetuple(new_tuple); /* Invalidate Relation Cache */ diff --git a/src/include/catalog/dependency.h b/src/include/catalog/dependency.h index aa3f3d90a18e6..fbcf9044325d4 100644 --- a/src/include/catalog/dependency.h +++ b/src/include/catalog/dependency.h @@ -96,6 +96,10 @@ typedef enum DependencyType * created for the owner of an object; hence two objects may be linked by * one or the other, but not both, of these dependency types.) * + * (d) a SHARED_DEPENDENCY_POLICY entry means that the referenced object is + * a role mentioned in a policy object. The referenced object must be a + * pg_authid entry. + * * SHARED_DEPENDENCY_INVALID is a value used as a parameter in internal * routines, and is not valid in the catalog itself. */ @@ -104,6 +108,7 @@ typedef enum SharedDependencyType SHARED_DEPENDENCY_PIN = 'p', SHARED_DEPENDENCY_OWNER = 'o', SHARED_DEPENDENCY_ACL = 'a', + SHARED_DEPENDENCY_POLICY = 'r', SHARED_DEPENDENCY_INVALID = 0 } SharedDependencyType; diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index fd8e180f8a8e8..4749efc5679d9 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -2942,6 +2942,61 @@ SELECT * FROM coll_t; ROLLBACK; -- +-- Shared Object Dependencies +-- +RESET SESSION AUTHORIZATION; +BEGIN; +CREATE ROLE alice; +CREATE ROLE bob; +CREATE TABLE tbl1 (c) AS VALUES ('bar'::text); +GRANT SELECT ON TABLE tbl1 TO alice; +CREATE POLICY P ON tbl1 TO alice, bob USING (true); +SELECT refclassid::regclass, deptype + FROM pg_depend + WHERE classid = 'pg_policy'::regclass + AND refobjid = 'tbl1'::regclass; + refclassid | deptype +------------+--------- + pg_class | a +(1 row) + +SELECT refclassid::regclass, deptype + FROM pg_shdepend + WHERE classid = 'pg_policy'::regclass + AND refobjid IN ('alice'::regrole, 'bob'::regrole); + refclassid | deptype +------------+--------- + pg_authid | r + pg_authid | r +(2 rows) + +SAVEPOINT q; +DROP ROLE alice; --fails due to dependency on POLICY p +ERROR: role "alice" cannot be dropped because some objects depend on it +DETAIL: target of policy p on table tbl1 +privileges for table tbl1 +ROLLBACK TO q; +ALTER POLICY p ON tbl1 TO bob USING (true); +SAVEPOINT q; +DROP ROLE alice; --fails due to dependency on GRANT SELECT +ERROR: role "alice" cannot be dropped because some objects depend on it +DETAIL: privileges for table tbl1 +ROLLBACK TO q; +REVOKE ALL ON TABLE tbl1 FROM alice; +SAVEPOINT q; +DROP ROLE alice; --succeeds +ROLLBACK TO q; +SAVEPOINT q; +DROP ROLE bob; --fails due to dependency on POLICY p +ERROR: role "bob" cannot be dropped because some objects depend on it +DETAIL: target of policy p on table tbl1 +ROLLBACK TO q; +DROP 
POLICY p ON tbl1; +SAVEPOINT q; +DROP ROLE bob; -- succeeds +ROLLBACK TO q; +ROLLBACK; -- cleanup +-- -- Clean up objects -- RESET SESSION AUTHORIZATION; diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index 32f10d8649f1e..529edd01c7f0b 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -1216,6 +1216,50 @@ SELECT (string_to_array(polqual, ':'))[7] AS inputcollid FROM pg_policy WHERE po SELECT * FROM coll_t; ROLLBACK; +-- +-- Shared Object Dependencies +-- +RESET SESSION AUTHORIZATION; +BEGIN; +CREATE ROLE alice; +CREATE ROLE bob; +CREATE TABLE tbl1 (c) AS VALUES ('bar'::text); +GRANT SELECT ON TABLE tbl1 TO alice; +CREATE POLICY P ON tbl1 TO alice, bob USING (true); +SELECT refclassid::regclass, deptype + FROM pg_depend + WHERE classid = 'pg_policy'::regclass + AND refobjid = 'tbl1'::regclass; +SELECT refclassid::regclass, deptype + FROM pg_shdepend + WHERE classid = 'pg_policy'::regclass + AND refobjid IN ('alice'::regrole, 'bob'::regrole); + +SAVEPOINT q; +DROP ROLE alice; --fails due to dependency on POLICY p +ROLLBACK TO q; + +ALTER POLICY p ON tbl1 TO bob USING (true); +SAVEPOINT q; +DROP ROLE alice; --fails due to dependency on GRANT SELECT +ROLLBACK TO q; + +REVOKE ALL ON TABLE tbl1 FROM alice; +SAVEPOINT q; +DROP ROLE alice; --succeeds +ROLLBACK TO q; + +SAVEPOINT q; +DROP ROLE bob; --fails due to dependency on POLICY p +ROLLBACK TO q; + +DROP POLICY p ON tbl1; +SAVEPOINT q; +DROP ROLE bob; -- succeeds +ROLLBACK TO q; + +ROLLBACK; -- cleanup + -- -- Clean up objects -- From 344703bcc453ac3ce0060785d4958ddec7d2dbe9 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Tue, 28 Jul 2015 16:24:09 -0700 Subject: [PATCH 121/442] Disallow converting a table to a view if row security is present. When DefineQueryRewrite() is about to convert a table to a view, it checks the table for features unavailable to views. For example, it rejects tables having triggers. It omits to reject tables having relrowsecurity or a pg_policy record. Fix that. To faciliate the repair, invent relation_has_policies() which indicates the presence of policies on a relation even when row security is disabled for that relation. Reported by Noah Misch. Patch by me, review by Stephen Frost. Back-patch to 9.5 where RLS was introduced. 
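As a side note, not from the patch: the two new error conditions correspond to catalog state that can be inspected directly, e.g. for a hypothetical table "t":

    SELECT relrowsecurity FROM pg_class WHERE oid = 't'::regclass;   -- row security enabled?
    SELECT polname FROM pg_policy WHERE polrelid = 't'::regclass;    -- any policies present?
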
--- src/backend/commands/policy.c | 29 +++++++++++++++++++++++ src/backend/rewrite/rewriteDefine.c | 24 +++++++++++++++---- src/include/commands/policy.h | 1 + src/test/regress/expected/rowsecurity.out | 23 ++++++++++++++++++ src/test/regress/sql/rowsecurity.sql | 25 +++++++++++++++++++ 5 files changed, 97 insertions(+), 5 deletions(-) diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 9544f75032b4b..0d4e557d5abfe 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -1037,3 +1037,32 @@ get_relation_policy_oid(Oid relid, const char *policy_name, bool missing_ok) return policy_oid; } + +/* + * relation_has_policies - Determine if relation has any policies + */ +bool +relation_has_policies(Relation rel) +{ + Relation catalog; + ScanKeyData skey; + SysScanDesc sscan; + HeapTuple policy_tuple; + bool ret = false; + + catalog = heap_open(PolicyRelationId, AccessShareLock); + ScanKeyInit(&skey, + Anum_pg_policy_polrelid, + BTEqualStrategyNumber, F_OIDEQ, + ObjectIdGetDatum(RelationGetRelid(rel))); + sscan = systable_beginscan(catalog, PolicyPolrelidPolnameIndexId, true, + NULL, 1, &skey); + policy_tuple = systable_getnext(sscan); + if (HeapTupleIsValid(policy_tuple)) + ret = true; + + systable_endscan(sscan); + heap_close(catalog, AccessShareLock); + + return ret; +} diff --git a/src/backend/rewrite/rewriteDefine.c b/src/backend/rewrite/rewriteDefine.c index a88d73e15f2a2..39c83a605ca10 100644 --- a/src/backend/rewrite/rewriteDefine.c +++ b/src/backend/rewrite/rewriteDefine.c @@ -27,6 +27,7 @@ #include "catalog/objectaccess.h" #include "catalog/pg_rewrite.h" #include "catalog/storage.h" +#include "commands/policy.h" #include "miscadmin.h" #include "nodes/nodeFuncs.h" #include "parser/parse_utilcmd.h" @@ -410,11 +411,12 @@ DefineQueryRewrite(char *rulename, * * If so, check that the relation is empty because the storage for the * relation is going to be deleted. Also insist that the rel not have - * any triggers, indexes, or child tables. (Note: these tests are too - * strict, because they will reject relations that once had such but - * don't anymore. But we don't really care, because this whole - * business of converting relations to views is just a kluge to allow - * dump/reload of views that participate in circular dependencies.) + * any triggers, indexes, child tables, policies, or RLS enabled. + * (Note: these tests are too strict, because they will reject + * relations that once had such but don't anymore. But we don't + * really care, because this whole business of converting relations + * to views is just a kluge to allow dump/reload of views that + * participate in circular dependencies.) 
*/ if (event_relation->rd_rel->relkind != RELKIND_VIEW && event_relation->rd_rel->relkind != RELKIND_MATVIEW) @@ -451,6 +453,18 @@ DefineQueryRewrite(char *rulename, errmsg("could not convert table \"%s\" to a view because it has child tables", RelationGetRelationName(event_relation)))); + if (event_relation->rd_rel->relrowsecurity) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not convert table \"%s\" to a view because it has row security enabled", + RelationGetRelationName(event_relation)))); + + if (relation_has_policies(event_relation)) + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("could not convert table \"%s\" to a view because it has row security policies", + RelationGetRelationName(event_relation)))); + RelisBecomingView = true; } } diff --git a/src/include/commands/policy.h b/src/include/commands/policy.h index ac322e0db9263..be000432312ff 100644 --- a/src/include/commands/policy.h +++ b/src/include/commands/policy.h @@ -31,5 +31,6 @@ extern Oid get_relation_policy_oid(Oid relid, const char *policy_name, extern ObjectAddress rename_policy(RenameStmt *stmt); +extern bool relation_has_policies(Relation rel); #endif /* POLICY_H */ diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index 4749efc5679d9..b0f2565b60ae4 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -2997,6 +2997,29 @@ DROP ROLE bob; -- succeeds ROLLBACK TO q; ROLLBACK; -- cleanup -- +-- Converting table to view +-- +BEGIN; +SET ROW_SECURITY = FORCE; +CREATE TABLE t (c int); +CREATE POLICY p ON t USING (c % 2 = 1); +ALTER TABLE t ENABLE ROW LEVEL SECURITY; +SAVEPOINT q; +CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD + SELECT * FROM generate_series(1,5) t0(c); -- fails due to row level security enabled +ERROR: could not convert table "t" to a view because it has row security enabled +ROLLBACK TO q; +ALTER TABLE t DISABLE ROW LEVEL SECURITY; +SAVEPOINT q; +CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD + SELECT * FROM generate_series(1,5) t0(c); -- fails due to policy p on t +ERROR: could not convert table "t" to a view because it has row security policies +ROLLBACK TO q; +DROP POLICY p ON t; +CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD + SELECT * FROM generate_series(1,5) t0(c); -- succeeds +ROLLBACK; +-- -- Clean up objects -- RESET SESSION AUTHORIZATION; diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index 529edd01c7f0b..03f82987c4753 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -1260,6 +1260,31 @@ ROLLBACK TO q; ROLLBACK; -- cleanup +-- +-- Converting table to view +-- +BEGIN; +SET ROW_SECURITY = FORCE; +CREATE TABLE t (c int); +CREATE POLICY p ON t USING (c % 2 = 1); +ALTER TABLE t ENABLE ROW LEVEL SECURITY; + +SAVEPOINT q; +CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD + SELECT * FROM generate_series(1,5) t0(c); -- fails due to row level security enabled +ROLLBACK TO q; + +ALTER TABLE t DISABLE ROW LEVEL SECURITY; +SAVEPOINT q; +CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD + SELECT * FROM generate_series(1,5) t0(c); -- fails due to policy p on t +ROLLBACK TO q; + +DROP POLICY p ON t; +CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD + SELECT * FROM generate_series(1,5) t0(c); -- succeeds +ROLLBACK; + -- -- Clean up objects -- From cab23771eb0250fe8e2ad179cf10ef965658f3e7 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: 
Tue, 28 Jul 2015 19:55:59 -0400 Subject: [PATCH 122/442] Suppress "variable may be used uninitialized" warning. Also re-pgindent, just because I'm a neatnik. --- src/backend/commands/policy.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 0d4e557d5abfe..4642d7c6403df 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -144,14 +144,14 @@ policy_role_list_to_array(List *roles, int *num_roles) /* Handle no roles being passed in as being for public */ if (roles == NIL) { - *num_roles = 1; + *num_roles = 1; role_oids = (Datum *) palloc(*num_roles * sizeof(Datum)); role_oids[0] = ObjectIdGetDatum(ACL_ID_PUBLIC); return role_oids; } - *num_roles = list_length(roles); + *num_roles = list_length(roles); role_oids = (Datum *) palloc(*num_roles * sizeof(Datum)); foreach(cell, roles) @@ -169,7 +169,7 @@ policy_role_list_to_array(List *roles, int *num_roles) (errcode(ERRCODE_INVALID_PARAMETER_VALUE), errmsg("ignoring roles specified other than public"), errhint("All roles are members of the public role."))); - *num_roles = 1; + *num_roles = 1; } role_oids[0] = ObjectIdGetDatum(ACL_ID_PUBLIC); @@ -656,7 +656,7 @@ AlterPolicy(AlterPolicyStmt *stmt) Oid policy_id; Relation target_table; Oid table_id; - Datum *role_oids; + Datum *role_oids = NULL; int nitems = 0; ArrayType *role_ids = NULL; List *qual_parse_rtable = NIL; From d7f0bb8cc7f4b43830499e89384befc3690b1560 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Tue, 28 Jul 2015 20:00:13 -0400 Subject: [PATCH 123/442] Prevent platform-dependent output row ordering in a new test query. Buildfarm indicates this is necessary. --- src/test/regress/expected/rowsecurity.out | 22 +++++++++++++--------- src/test/regress/sql/rowsecurity.sql | 8 ++++++-- 2 files changed, 19 insertions(+), 11 deletions(-) diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index b0f2565b60ae4..b146da373c37a 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -2899,12 +2899,14 @@ SELECT row_security_active('current_check'); f (1 row) -SELECT most_common_vals FROM pg_stats where tablename = 'current_check'; - most_common_vals ---------------------- - - - {rls_regress_user1} +SELECT attname, most_common_vals FROM pg_stats + WHERE tablename = 'current_check' + ORDER BY 1; + attname | most_common_vals +-----------+--------------------- + currentid | + payload | + rlsuser | {rls_regress_user1} (3 rows) SET SESSION AUTHORIZATION rls_regress_user1; @@ -2915,9 +2917,11 @@ SELECT row_security_active('current_check'); t (1 row) -SELECT most_common_vals FROM pg_stats where tablename = 'current_check'; - most_common_vals ------------------- +SELECT attname, most_common_vals FROM pg_stats + WHERE tablename = 'current_check' + ORDER BY 1; + attname | most_common_vals +---------+------------------ (0 rows) -- diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index 03f82987c4753..54f2c89eda362 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -1197,12 +1197,16 @@ SET SESSION AUTHORIZATION rls_regress_user0; ANALYZE current_check; -- Stats visible SELECT row_security_active('current_check'); -SELECT most_common_vals FROM pg_stats where tablename = 'current_check'; +SELECT attname, most_common_vals FROM pg_stats + WHERE tablename = 'current_check' + ORDER BY 1; SET SESSION AUTHORIZATION 
rls_regress_user1; -- Stats not visible SELECT row_security_active('current_check'); -SELECT most_common_vals FROM pg_stats where tablename = 'current_check'; +SELECT attname, most_common_vals FROM pg_stats + WHERE tablename = 'current_check' + ORDER BY 1; -- -- Collation support From 6f1789a475fe2726f8ade5ecd3aa14223b130fb1 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Wed, 29 Jul 2015 10:55:43 +0300 Subject: [PATCH 124/442] Fix typo in comment. Amit Langote --- src/backend/commands/tablecmds.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c index 1c7eded9a79cd..b459b1eaeca80 100644 --- a/src/backend/commands/tablecmds.c +++ b/src/backend/commands/tablecmds.c @@ -11169,7 +11169,7 @@ ATPrepChangePersistence(Relation rel, bool toLogged) /* * Check existing foreign key constraints to preserve the invariant that - * no permanent tables cannot reference unlogged ones. Self-referencing + * permanent tables cannot reference unlogged ones. Self-referencing * foreign keys can safely be ignored. */ pg_constraint = heap_open(ConstraintRelationId, AccessShareLock); From 81191f65820d3cf29ea94fe7f65c065e8c6a296c Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Wed, 29 Jul 2015 10:14:32 +0200 Subject: [PATCH 125/442] Remove outdated comment in LWLockDequeueSelf's header. Noticed-By: Robert Haas Backpatch: 9.5, where the function was added --- src/backend/storage/lmgr/lwlock.c | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index 46cab4911e7a5..e5566d1b60969 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -796,8 +796,7 @@ LWLockQueueSelf(LWLock *lock, LWLockMode mode) * * This is used if we queued ourselves because we thought we needed to sleep * but, after further checking, we discovered that we don't actually need to - * do so. Returns false if somebody else already has woken us up, otherwise - * returns true. + * do so. */ static void LWLockDequeueSelf(LWLock *lock) From 0bfbf14f93c30ec8f505baba79625f5a3b010405 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Wed, 29 Jul 2015 09:39:28 -0700 Subject: [PATCH 126/442] Add missing post create and alter hooks to policy objects. AlterPolicy() and CreatePolicy() lacked their respective hook invocations. Noted by Noah Misch, review by Dean Rasheed. Back-patch to 9.5 where RLS was introduced. --- src/backend/commands/policy.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index 4642d7c6403df..d8b43908ec406 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -629,6 +629,8 @@ CreatePolicy(CreatePolicyStmt *stmt) SHARED_DEPENDENCY_POLICY); } + InvokeObjectPostCreateHook(PolicyRelationId, policy_id, 0); + /* Invalidate Relation Cache */ CacheInvalidateRelcache(target_table); @@ -860,6 +862,8 @@ AlterPolicy(AlterPolicyStmt *stmt) SHARED_DEPENDENCY_POLICY); } + InvokeObjectPostAlterHook(PolicyRelationId, policy_id, 0); + heap_freetuple(new_tuple); /* Invalidate Relation Cache */ From 3ef1a682d5e4a919dcaddc8256ea65de91654d1c Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Wed, 29 Jul 2015 13:27:15 -0400 Subject: [PATCH 127/442] Add some test coverage of EvalPlanQual with non-locked tables. A Salesforce colleague of mine griped that the regression tests don't exercise EvalPlanQualFetchRowMarks() and allied routines. Which is a fair complaint. 
Add test cases that go through the REFERENCE and COPY code paths. Unfortunately we don't have sufficient infrastructure right now to exercise the FDW code path in the isolation tests, but this is surely better than before. --- .../isolation/expected/eval-plan-qual.out | 40 +++++++++++++++++++ src/test/isolation/specs/eval-plan-qual.spec | 16 ++++++++ 2 files changed, 56 insertions(+) diff --git a/src/test/isolation/expected/eval-plan-qual.out b/src/test/isolation/expected/eval-plan-qual.out index 433533e6117f9..457461e355caa 100644 --- a/src/test/isolation/expected/eval-plan-qual.out +++ b/src/test/isolation/expected/eval-plan-qual.out @@ -104,3 +104,43 @@ a b c 2 2 2 2 3 0 step c2: COMMIT; + +starting permutation: wx2 partiallock c2 c1 read +step wx2: UPDATE accounts SET balance = balance + 450 WHERE accountid = 'checking'; +step partiallock: + SELECT * FROM accounts a1, accounts a2 + WHERE a1.accountid = a2.accountid + FOR UPDATE OF a1; + +step c2: COMMIT; +step partiallock: <... completed> +accountid balance accountid balance + +checking 1050 checking 600 +savings 600 savings 600 +step c1: COMMIT; +step read: SELECT * FROM accounts ORDER BY accountid; +accountid balance + +checking 1050 +savings 600 + +starting permutation: wx2 lockwithvalues c2 c1 read +step wx2: UPDATE accounts SET balance = balance + 450 WHERE accountid = 'checking'; +step lockwithvalues: + SELECT * FROM accounts a1, (values('checking'),('savings')) v(id) + WHERE a1.accountid = v.id + FOR UPDATE OF a1; + +step c2: COMMIT; +step lockwithvalues: <... completed> +accountid balance id + +checking 1050 checking +savings 600 savings +step c1: COMMIT; +step read: SELECT * FROM accounts ORDER BY accountid; +accountid balance + +checking 1050 +savings 600 diff --git a/src/test/isolation/specs/eval-plan-qual.spec b/src/test/isolation/specs/eval-plan-qual.spec index 6fb24322863dc..a391466722f47 100644 --- a/src/test/isolation/specs/eval-plan-qual.spec +++ b/src/test/isolation/specs/eval-plan-qual.spec @@ -50,6 +50,20 @@ step "writep1" { UPDATE p SET b = -1 WHERE a = 1 AND b = 1 AND c = 0; } step "writep2" { UPDATE p SET b = -b WHERE a = 1 AND c = 0; } step "c1" { COMMIT; } +# these tests are meant to exercise EvalPlanQualFetchRowMarks, +# ie, handling non-locked tables in an EvalPlanQual recheck + +step "partiallock" { + SELECT * FROM accounts a1, accounts a2 + WHERE a1.accountid = a2.accountid + FOR UPDATE OF a1; +} +step "lockwithvalues" { + SELECT * FROM accounts a1, (values('checking'),('savings')) v(id) + WHERE a1.accountid = v.id + FOR UPDATE OF a1; +} + session "s2" setup { BEGIN ISOLATION LEVEL READ COMMITTED; } step "wx2" { UPDATE accounts SET balance = balance + 450 WHERE accountid = 'checking'; } @@ -79,3 +93,5 @@ permutation "wy1" "wy2" "c1" "c2" "read" permutation "upsert1" "upsert2" "c1" "c2" "read" permutation "readp1" "writep1" "readp2" "c1" "c2" permutation "writep2" "returningp1" "c1" "c2" +permutation "wx2" "partiallock" "c2" "c1" "read" +permutation "wx2" "lockwithvalues" "c2" "c1" "read" From 43797ed42a7c0365c9143ad6efdc566ac9d93fd8 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Wed, 29 Jul 2015 15:41:00 -0700 Subject: [PATCH 128/442] Create new ParseExprKind for use by policy expressions. Policy USING and WITH CHECK expressions were using EXPR_KIND_WHERE for parse analysis, which results in inappropriate ERROR messages when the expression contains unsupported constructs such as aggregates. Create a new ParseExprKind called EXPR_KIND_POLICY and tailor the related messages to fit. Reported by Noah Misch. 
Reviewed by Dean Rasheed, Alvaro Herrera, and Robert Haas. Back-patch to 9.5 where RLS was introduced. --- src/backend/commands/policy.c | 8 ++++---- src/backend/parser/parse_agg.c | 10 ++++++++++ src/backend/parser/parse_expr.c | 3 +++ src/include/parser/parse_node.h | 3 ++- src/test/modules/test_rls_hooks/test_rls_hooks.c | 4 ++-- src/test/regress/expected/rowsecurity.out | 9 +++++++++ src/test/regress/sql/rowsecurity.sql | 9 +++++++++ 7 files changed, 39 insertions(+), 7 deletions(-) diff --git a/src/backend/commands/policy.c b/src/backend/commands/policy.c index d8b43908ec406..bcf4a8f35d1f0 100644 --- a/src/backend/commands/policy.c +++ b/src/backend/commands/policy.c @@ -534,12 +534,12 @@ CreatePolicy(CreatePolicyStmt *stmt) qual = transformWhereClause(qual_pstate, copyObject(stmt->qual), - EXPR_KIND_WHERE, + EXPR_KIND_POLICY, "POLICY"); with_check_qual = transformWhereClause(with_check_pstate, copyObject(stmt->with_check), - EXPR_KIND_WHERE, + EXPR_KIND_POLICY, "POLICY"); /* Fix up collation information */ @@ -707,7 +707,7 @@ AlterPolicy(AlterPolicyStmt *stmt) addRTEtoQuery(qual_pstate, rte, false, true, true); qual = transformWhereClause(qual_pstate, copyObject(stmt->qual), - EXPR_KIND_WHERE, + EXPR_KIND_POLICY, "POLICY"); /* Fix up collation information */ @@ -730,7 +730,7 @@ AlterPolicy(AlterPolicyStmt *stmt) with_check_qual = transformWhereClause(with_check_pstate, copyObject(stmt->with_check), - EXPR_KIND_WHERE, + EXPR_KIND_POLICY, "POLICY"); /* Fix up collation information */ diff --git a/src/backend/parser/parse_agg.c b/src/backend/parser/parse_agg.c index 478d8ca70bdd6..3846b569d6fa4 100644 --- a/src/backend/parser/parse_agg.c +++ b/src/backend/parser/parse_agg.c @@ -372,6 +372,13 @@ check_agglevels_and_constraints(ParseState *pstate, Node *expr) break; case EXPR_KIND_WHERE: errkind = true; + break; + case EXPR_KIND_POLICY: + if (isAgg) + err = _("aggregate functions are not allowed in policy expressions"); + else + err = _("grouping operations are not allowed in policy expressions"); + break; case EXPR_KIND_HAVING: /* okay */ @@ -770,6 +777,9 @@ transformWindowFuncCall(ParseState *pstate, WindowFunc *wfunc, case EXPR_KIND_WHERE: errkind = true; break; + case EXPR_KIND_POLICY: + err = _("window functions are not allowed in policy expressions"); + break; case EXPR_KIND_HAVING: errkind = true; break; diff --git a/src/backend/parser/parse_expr.c b/src/backend/parser/parse_expr.c index 0ff46dd457c79..fa77ef1f8bb6a 100644 --- a/src/backend/parser/parse_expr.c +++ b/src/backend/parser/parse_expr.c @@ -1672,6 +1672,7 @@ transformSubLink(ParseState *pstate, SubLink *sublink) case EXPR_KIND_FROM_SUBSELECT: case EXPR_KIND_FROM_FUNCTION: case EXPR_KIND_WHERE: + case EXPR_KIND_POLICY: case EXPR_KIND_HAVING: case EXPR_KIND_FILTER: case EXPR_KIND_WINDOW_PARTITION: @@ -3173,6 +3174,8 @@ ParseExprKindName(ParseExprKind exprKind) return "function in FROM"; case EXPR_KIND_WHERE: return "WHERE"; + case EXPR_KIND_POLICY: + return "POLICY"; case EXPR_KIND_HAVING: return "HAVING"; case EXPR_KIND_FILTER: diff --git a/src/include/parser/parse_node.h b/src/include/parser/parse_node.h index 7ecaffc0dc37d..52499453690d9 100644 --- a/src/include/parser/parse_node.h +++ b/src/include/parser/parse_node.h @@ -63,7 +63,8 @@ typedef enum ParseExprKind EXPR_KIND_INDEX_PREDICATE, /* index predicate */ EXPR_KIND_ALTER_COL_TRANSFORM, /* transform expr in ALTER COLUMN TYPE */ EXPR_KIND_EXECUTE_PARAMETER, /* parameter value in EXECUTE */ - EXPR_KIND_TRIGGER_WHEN /* WHEN condition in CREATE TRIGGER */ + 
EXPR_KIND_TRIGGER_WHEN, /* WHEN condition in CREATE TRIGGER */ + EXPR_KIND_POLICY /* USING or WITH CHECK expr in policy */ } ParseExprKind; diff --git a/src/test/modules/test_rls_hooks/test_rls_hooks.c b/src/test/modules/test_rls_hooks/test_rls_hooks.c index 61b62d55b4cf9..d76b17ae46a3d 100644 --- a/src/test/modules/test_rls_hooks/test_rls_hooks.c +++ b/src/test/modules/test_rls_hooks/test_rls_hooks.c @@ -106,7 +106,7 @@ test_rls_hooks_permissive(CmdType cmdtype, Relation relation) e = (Node *) makeSimpleA_Expr(AEXPR_OP, "=", (Node *) n, (Node *) c, 0); policy->qual = (Expr *) transformWhereClause(qual_pstate, copyObject(e), - EXPR_KIND_WHERE, + EXPR_KIND_POLICY, "POLICY"); policy->with_check_qual = copyObject(policy->qual); @@ -160,7 +160,7 @@ test_rls_hooks_restrictive(CmdType cmdtype, Relation relation) e = (Node *) makeSimpleA_Expr(AEXPR_OP, "=", (Node *) n, (Node *) c, 0); policy->qual = (Expr *) transformWhereClause(qual_pstate, copyObject(e), - EXPR_KIND_WHERE, + EXPR_KIND_POLICY, "POLICY"); policy->with_check_qual = copyObject(policy->qual); diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index b146da373c37a..b0556c2ff1f47 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -3024,6 +3024,15 @@ CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD SELECT * FROM generate_series(1,5) t0(c); -- succeeds ROLLBACK; -- +-- Policy expression handling +-- +BEGIN; +SET row_security = FORCE; +CREATE TABLE t (c) AS VALUES ('bar'::text); +CREATE POLICY p ON t USING (max(c)); -- fails: aggregate functions are not allowed in policy expressions +ERROR: aggregate functions are not allowed in policy expressions +ROLLBACK; +-- -- Clean up objects -- RESET SESSION AUTHORIZATION; diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index 54f2c89eda362..300f34ad4bf21 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -1289,6 +1289,15 @@ CREATE RULE "_RETURN" AS ON SELECT TO t DO INSTEAD SELECT * FROM generate_series(1,5) t0(c); -- succeeds ROLLBACK; +-- +-- Policy expression handling +-- +BEGIN; +SET row_security = FORCE; +CREATE TABLE t (c) AS VALUES ('bar'::text); +CREATE POLICY p ON t USING (max(c)); -- fails: aggregate functions are not allowed in policy expressions +ROLLBACK; + -- -- Clean up objects -- From f7dca86fc3a2c423824a2056994319c348992913 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 29 Jul 2015 22:48:25 -0400 Subject: [PATCH 129/442] MSVC: Future-proof installation file skip logic. This code relied on knowing exactly where in the source tree temporary installations might appear. A reasonable hacker may not think to update this code when adding use of a temporary installation, making it fragile. Observe that commit 9fa8b0ee90c44c0f97d16bf65e94322988c94864 broke it unnoticed, and commit dcae5faccab64776376d354decda0017c648bb53 fixed it unnoticed. Back-patch to 9.5 only; use of temporary installations is unlikely to change in released versions. --- src/tools/msvc/Install.pm | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/tools/msvc/Install.pm b/src/tools/msvc/Install.pm index b592f997f6cf9..f9557254a4b2b 100644 --- a/src/tools/msvc/Install.pm +++ b/src/tools/msvc/Install.pm @@ -98,6 +98,9 @@ sub Install { wanted => sub { /^.*\.sample\z/s && push(@$sample_files, $File::Find::name); + + # Don't find files of in-tree temporary installations. 
+ $_ eq 'share' and $File::Find::prune = 1; } }, @top_dir); @@ -152,6 +155,9 @@ sub Install { wanted => sub { /^(.*--.*\.sql|.*\.control)\z/s && push(@$pl_extension_files, $File::Find::name); + + # Don't find files of in-tree temporary installations. + $_ eq 'share' and $File::Find::prune = 1; } }, @pldirs); @@ -199,8 +205,6 @@ sub CopySetOfFiles print "Copying $what" if $what; foreach (@$flist) { - next if /regress/; # Skip temporary install in regression subdir - next if /ecpg.test/; # Skip temporary install in regression subdir my $tgt = $target . basename($_); print "."; lcopy($_, $tgt) || croak "Could not copy $_: $!\n"; From 95eb4b265502c26c9f72f0f554df41e273551858 Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 29 Jul 2015 22:48:43 -0400 Subject: [PATCH 130/442] MSVC: Remove duplicate PATH entry in test harness. Back-patch to 9.5, where commit 4cb7d671fddc8855c8def2de51fb23df1c8ac0af introduced it. --- src/tools/msvc/vcregress.pl | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl index 619638361575f..1a0ae67d0a33f 100644 --- a/src/tools/msvc/vcregress.pl +++ b/src/tools/msvc/vcregress.pl @@ -50,7 +50,7 @@ copy("$Config/regress/regress.dll", "src/test/regress"); copy("$Config/dummy_seclabel/dummy_seclabel.dll", "src/test/regress"); -$ENV{PATH} = "$topdir/$Config/libpq;$topdir/$Config/libpq;$ENV{PATH}"; +$ENV{PATH} = "$topdir/$Config/libpq;$ENV{PATH}"; my $schedule = shift; unless ($schedule) From fdb8ea9366785d7e2a31469c1389ca8a6f11889f Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 29 Jul 2015 22:48:56 -0400 Subject: [PATCH 131/442] MSVC: Revert most 9.5 changes to pre-9.5 vcregress.pl tests. The reverted changes did not narrow the semantic gap between the MSVC build system and the GNU make build system. For targets old and new that run multiple suites (contribcheck, modulescheck, tapcheck), restore vcregress.pl to mimicking "make -k" rather than the "make -S" default. Lack of "-k" would be more burdensome than lack of "-S". Keep changes reflecting contemporary changes to the GNU make build system, and keep updates to Makefile parsing. Keep the loss of --psqldir in "check" and "ecpgcheck" targets; it had been a no-op when used alongside --temp-install. No log message mentioned any of the reverted changes. Based on a germ by Michael Paquier. Back-patch to 9.5. 
--- src/tools/msvc/vcregress.pl | 69 ++++++++++++++++--------------------- 1 file changed, 30 insertions(+), 39 deletions(-) diff --git a/src/tools/msvc/vcregress.pl b/src/tools/msvc/vcregress.pl index 1a0ae67d0a33f..0d2f5ec856782 100644 --- a/src/tools/msvc/vcregress.pl +++ b/src/tools/msvc/vcregress.pl @@ -108,15 +108,12 @@ sub installcheck sub check { - chdir $startdir; - InstallTemp(); chdir "${topdir}/src/test/regress"; - my @args = ( - "${tmp_installdir}/bin/pg_regress", + "../../../$Config/pg_regress/pg_regress", "--dlpath=.", - "--bindir=${tmp_installdir}/bin", + "--bindir=", "--schedule=${schedule}_schedule", "--encoding=SQL_ASCII", "--no-locale", @@ -136,11 +133,9 @@ sub ecpgcheck exit $status if $status; InstallTemp(); chdir "$topdir/src/interfaces/ecpg/test"; - - $ENV{PATH} = "${tmp_installdir}/bin;${tmp_installdir}/lib;$ENV{PATH}"; $schedule = "ecpg"; my @args = ( - "${tmp_installdir}/bin/pg_regress_ecpg", + "../../../../$Config/pg_regress_ecpg/pg_regress_ecpg", "--bindir=", "--dbname=regress1,connectdb", "--create-role=connectuser,connectdb", @@ -156,14 +151,12 @@ sub ecpgcheck sub isolationcheck { - chdir $startdir; - - InstallTemp(); - chdir "${topdir}/src/test/isolation"; - + chdir "../isolation"; + copy("../../../$Config/isolationtester/isolationtester.exe", + "../../../$Config/pg_isolation_regress"); my @args = ( - "${tmp_installdir}/bin/pg_isolation_regress", - "--bindir=${tmp_installdir}/bin", + "../../../$Config/pg_isolation_regress/pg_isolation_regress", + "--bindir=../../../$Config/psql", "--inputdir=.", "--schedule=./isolation_schedule"); push(@args, $maxconn) if $maxconn; @@ -174,10 +167,7 @@ sub isolationcheck sub plcheck { - chdir $startdir; - - InstallTemp(); - chdir "${topdir}/src/pl"; + chdir "../../pl"; foreach my $pl (glob("*")) { @@ -214,8 +204,8 @@ sub plcheck "============================================================\n"; print "Checking $lang\n"; my @args = ( - "${tmp_installdir}/bin/pg_regress", - "--bindir=${tmp_installdir}/bin", + "../../../$Config/pg_regress/pg_regress", + "--bindir=../../../$Config/psql", "--dbname=pl_regression", @lang_args, @tests); system(@args); my $status = $? >> 8; @@ -230,7 +220,6 @@ sub subdircheck { my $subdir = shift; my $module = shift; - my $mstat = 0; if ( !-d "$module/sql" || !-d "$module/expected" @@ -277,24 +266,19 @@ sub subdircheck print "============================================================\n"; print "Checking $module\n"; my @args = ( - "${tmp_installdir}/bin/pg_regress", - "--bindir=${tmp_installdir}/bin", + "$topdir/$Config/pg_regress/pg_regress", + "--bindir=${topdir}/${Config}/psql", "--dbname=contrib_regression", @opts, @tests); system(@args); - my $status = $? >> 8; - $mstat ||= $status; chdir ".."; - - exit $mstat if $mstat; } sub contribcheck { - InstallTemp(); - chdir "$topdir/contrib"; + chdir "../../../contrib"; + my $mstat = 0; foreach my $module (glob("*")) { - # these configuration-based exclusions must match Install.pm next if ($module eq "uuid-ossp" && !defined($config->{uuid})); next if ($module eq "sslinfo" && !defined($config->{openssl})); @@ -305,26 +289,31 @@ sub contribcheck next if ($module eq "sepgsql"); subdircheck("$topdir/contrib", $module); + my $status = $? >> 8; + $mstat ||= $status; } + exit $mstat if $mstat; } sub modulescheck { - InstallTemp(); - chdir "$topdir/src/test/modules"; + chdir "../../../src/test/modules"; + my $mstat = 0; foreach my $module (glob("*")) { subdircheck("$topdir/src/test/modules", $module); + my $status = $? 
>> 8; + $mstat ||= $status; } + exit $mstat if $mstat; } - # Run "initdb", then reconfigure authentication. sub standard_initdb { return ( - system("${tmp_installdir}/bin/initdb", '-N') == 0 and system( - "${tmp_installdir}/bin/pg_regress", '--config-auth', + system('initdb', '-N') == 0 and system( + "$topdir/$Config/pg_regress/pg_regress", '--config-auth', $ENV{PGDATA}) == 0); } @@ -343,13 +332,14 @@ sub upgradecheck $ENV{PGPORT} ||= 50432; my $tmp_root = "$topdir/src/bin/pg_upgrade/tmp_check"; (mkdir $tmp_root || die $!) unless -d $tmp_root; - - InstallTemp(); + my $upg_tmp_install = "$tmp_root/install"; # unshared temp install + print "Setting up temp install\n\n"; + Install($upg_tmp_install, "all", $config); # Install does a chdir, so change back after that chdir $cwd; my ($bindir, $libdir, $oldsrc, $newsrc) = - ("$tmp_installdir/bin", "$tmp_installdir/lib", $topdir, $topdir); + ("$upg_tmp_install/bin", "$upg_tmp_install/lib", $topdir, $topdir); $ENV{PATH} = "$bindir;$ENV{PATH}"; my $data = "$tmp_root/data"; $ENV{PGDATA} = "$data.old"; @@ -488,6 +478,7 @@ sub InstallTemp { print "Setting up temp install\n\n"; Install("$tmp_installdir", "all", $config); + $ENV{PATH} = "$tmp_installdir/bin;$ENV{PATH}"; } sub usage From 1471c0e27c2f71bed551463e8072da9c01c63dae Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 29 Jul 2015 22:49:36 -0400 Subject: [PATCH 132/442] Remove redundant "make install" from pg_upgrade test suite. A top-level "make install" includes pg_upgrade since commit 9fa8b0ee90c44c0f97d16bf65e94322988c94864. Back-patch to 9.5, where that commit first appeared. --- src/bin/pg_upgrade/test.sh | 1 - 1 file changed, 1 deletion(-) diff --git a/src/bin/pg_upgrade/test.sh b/src/bin/pg_upgrade/test.sh index f4e5d9ae6e920..ec3a7ed96a754 100644 --- a/src/bin/pg_upgrade/test.sh +++ b/src/bin/pg_upgrade/test.sh @@ -71,7 +71,6 @@ if [ "$1" = '--install' ]; then libdir=$temp_install/$libdir "$MAKE" -s -C ../.. install DESTDIR="$temp_install" - "$MAKE" -s -C . install DESTDIR="$temp_install" # platform-specific magic to find the shared libraries; see pg_regress.c LD_LIBRARY_PATH=$libdir:$LD_LIBRARY_PATH From a664d4790e5f93726f264c77c044a7ce4c1a675c Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Wed, 29 Jul 2015 22:49:48 -0400 Subject: [PATCH 133/442] Blacklist xlc 32-bit inlining. Per a suggestion from Tom Lane. Back-patch to 9.0 (all supported versions). While only 9.4 and up have code known to elicit this compiler bug, we were disabling inlining by accident until commit 43d89a23d59c487bc9258fad7a6187864cb8c0c0. --- config/test_quiet_include.h | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/config/test_quiet_include.h b/config/test_quiet_include.h index f4fa4d30dd774..732b23149e946 100644 --- a/config/test_quiet_include.h +++ b/config/test_quiet_include.h @@ -7,3 +7,12 @@ fun() { return 0; } + +/* + * "IBM XL C/C++ for AIX, V12.1" miscompiles, for 32-bit, some inline + * expansions of ginCompareItemPointers() "long long" arithmetic. To take + * advantage of inlining, build a 64-bit PostgreSQL. + */ +#if defined(__ILP32__) && defined(__IBMC__) +#error "known inlining bug" +#endif From 2e75be6660dbaaf2da09b98c54d47c9fe0ac8cfa Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Thu, 30 Jul 2015 14:50:51 +0300 Subject: [PATCH 134/442] Fix calculation of latency of pgbench backslash commands. 
When we loop back to the top of doCustom after processing a backslash command, we must reset the "now" timestamp, because that's used to calculate the time spent executing the previous command. Report and fix by Fabien Coelho. Backpatch to 9.5, where this was broken. --- src/bin/pgbench/pgbench.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/bin/pgbench/pgbench.c b/src/bin/pgbench/pgbench.c index ceaf14cde16e2..d48e5b78443ed 100644 --- a/src/bin/pgbench/pgbench.c +++ b/src/bin/pgbench/pgbench.c @@ -1154,11 +1154,12 @@ doCustom(TState *thread, CState *st, instr_time *conn_time, FILE *logfile, AggVa * first time it's needed, and reuse the same value throughout this * function after that. This also ensures that e.g. the calculated latency * reported in the log file and in the totals are the same. Zero means - * "not set yet". + * "not set yet". Reset "now" when we step to the next command with "goto + * top", though. */ +top: INSTR_TIME_SET_ZERO(now); -top: commands = sql_files[st->use_file]; /* From e91a1643ac723477d6ec2d47c8486cd0013660bb Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Thu, 30 Jul 2015 12:11:23 -0400 Subject: [PATCH 135/442] Avoid some zero-divide hazards in the planner. Although I think on all modern machines floating division by zero results in Infinity not SIGFPE, we still don't want infinities running around in the planner's costing estimates; too much risk of that leading to insane behavior. grouping_planner() failed to consider the possibility that final_rel might be known dummy and hence have zero rowcount. (I wonder if it would be better to set a rows estimate of 1 for dummy relations? But at least in the back branches, changing this convention seems like a bad idea, so I'll leave that for another day.) Make certain that get_variable_numdistinct() produces a nonzero result. The case that can be shown to be broken is with stadistinct < 0.0 and small ntuples; we did not prevent the result from rounding to zero. For good luck I applied clamp_row_est() to all the nonconstant return values. In ExecChooseHashTableSize(), Assert that we compute positive nbuckets and nbatch. I know of no reason to think this isn't the case, but it seems like a good safety check. Per reports from Piotr Stefaniak. Back-patch to all active branches. --- src/backend/executor/nodeHash.c | 3 +++ src/backend/optimizer/plan/planner.c | 6 ++++-- src/backend/utils/adt/selfuncs.c | 10 +++++----- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/src/backend/executor/nodeHash.c b/src/backend/executor/nodeHash.c index 906cb46b65892..ee9298a157f56 100644 --- a/src/backend/executor/nodeHash.c +++ b/src/backend/executor/nodeHash.c @@ -542,6 +542,9 @@ ExecChooseHashTableSize(double ntuples, int tupwidth, bool useskew, nbatch <<= 1; } + Assert(nbuckets > 0); + Assert(nbatch > 0); + *numbuckets = nbuckets; *numbatches = nbatch; } diff --git a/src/backend/optimizer/plan/planner.c b/src/backend/optimizer/plan/planner.c index 6ee411eec870a..09d4ea12e874c 100644 --- a/src/backend/optimizer/plan/planner.c +++ b/src/backend/optimizer/plan/planner.c @@ -1536,9 +1536,11 @@ grouping_planner(PlannerInfo *root, double tuple_fraction) standard_qp_callback, &qp_extra); /* - * Extract rowcount and width estimates for use below. + * Extract rowcount and width estimates for use below. If final_rel + * has been proven dummy, its rows estimate will be zero; clamp it to + * one to avoid zero-divide in subsequent calculations. 
*/ - path_rows = final_rel->rows; + path_rows = clamp_row_est(final_rel->rows); path_width = final_rel->width; /* diff --git a/src/backend/utils/adt/selfuncs.c b/src/backend/utils/adt/selfuncs.c index 64b6ae4838f24..14b8c2ff54591 100644 --- a/src/backend/utils/adt/selfuncs.c +++ b/src/backend/utils/adt/selfuncs.c @@ -4632,8 +4632,8 @@ examine_simple_variable(PlannerInfo *root, Var *var, * *isdefault: set to TRUE if the result is a default rather than based on * anything meaningful. * - * NB: be careful to produce an integral result, since callers may compare - * the result to exact integer counts. + * NB: be careful to produce a positive integral result, since callers may + * compare the result to exact integer counts, or might divide by it. */ double get_variable_numdistinct(VariableStatData *vardata, bool *isdefault) @@ -4709,7 +4709,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault) * If we had an absolute estimate, use that. */ if (stadistinct > 0.0) - return stadistinct; + return clamp_row_est(stadistinct); /* * Otherwise we need to get the relation size; punt if not available. @@ -4730,7 +4730,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault) * If we had a relative estimate, use that. */ if (stadistinct < 0.0) - return floor((-stadistinct * ntuples) + 0.5); + return clamp_row_est(-stadistinct * ntuples); /* * With no data, estimate ndistinct = ntuples if the table is small, else @@ -4738,7 +4738,7 @@ get_variable_numdistinct(VariableStatData *vardata, bool *isdefault) * that the behavior isn't discontinuous. */ if (ntuples < DEFAULT_NUM_DISTINCT) - return ntuples; + return clamp_row_est(ntuples); *isdefault = true; return DEFAULT_NUM_DISTINCT; From 23b5e726da6ef5ebbc1dbc821320ee35fa1d0737 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Thu, 30 Jul 2015 09:38:13 -0700 Subject: [PATCH 136/442] Use appropriate command type when retrieving relation's policies. When retrieving policies, if not working on the root target relation, we actually want the relation's SELECT policies, regardless of the top level query command type. For example in UPDATE t1...FROM t2 we need to apply t1's UPDATE policies and t2's SELECT policies. Previously top level query command type was applied to all relations, which was wrong. Add some regression coverage to ensure we don't violate this principle in the future. Report and patch by Dean Rasheed. Cherry picked from larger refactoring patch and tweaked by me. Back-patched to 9.5 where RLS was introduced. --- src/backend/rewrite/rowsecurity.c | 12 +++- src/test/regress/expected/rowsecurity.out | 83 +++++++++++++++++++++++ src/test/regress/sql/rowsecurity.sql | 40 +++++++++++ 3 files changed, 134 insertions(+), 1 deletion(-) diff --git a/src/backend/rewrite/rowsecurity.c b/src/backend/rewrite/rowsecurity.c index 2386cf016fbc5..562dbc90e9fdf 100644 --- a/src/backend/rewrite/rowsecurity.c +++ b/src/backend/rewrite/rowsecurity.c @@ -147,8 +147,18 @@ get_row_security_policies(Query *root, CmdType commandType, RangeTblEntry *rte, return; } - /* Grab the built-in policies which should be applied to this relation. */ + /* + * RLS is enabled for this relation. + * + * Get the security policies that should be applied, based on the command + * type. Note that if this isn't the target relation, we actually want + * the relation's SELECT policies, regardless of the query command type, + * for example in UPDATE t1 ... FROM t2 we need to apply t1's UPDATE + * policies and t2's SELECT policies. 
+ */ rel = heap_open(rte->relid, NoLock); + if (rt_index != root->resultRelation) + commandType = CMD_SELECT; rowsec_policies = pull_row_security_policies(commandType, rel, user_id); diff --git a/src/test/regress/expected/rowsecurity.out b/src/test/regress/expected/rowsecurity.out index b0556c2ff1f47..6fc80af30ee43 100644 --- a/src/test/regress/expected/rowsecurity.out +++ b/src/test/regress/expected/rowsecurity.out @@ -3033,6 +3033,89 @@ CREATE POLICY p ON t USING (max(c)); -- fails: aggregate functions are not allow ERROR: aggregate functions are not allowed in policy expressions ROLLBACK; -- +-- Non-target relations are only subject to SELECT policies +-- +SET SESSION AUTHORIZATION rls_regress_user0; +CREATE TABLE r1 (a int); +CREATE TABLE r2 (a int); +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); +GRANT ALL ON r1, r2 TO rls_regress_user1; +CREATE POLICY p1 ON r1 USING (true); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; +CREATE POLICY p1 ON r2 FOR SELECT USING (true); +CREATE POLICY p2 ON r2 FOR INSERT WITH CHECK (false); +CREATE POLICY p3 ON r2 FOR UPDATE USING (false); +CREATE POLICY p4 ON r2 FOR DELETE USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; +SET SESSION AUTHORIZATION rls_regress_user1; +SELECT * FROM r1; + a +---- + 10 + 20 +(2 rows) + +SELECT * FROM r2; + a +---- + 10 + 20 +(2 rows) + +-- r2 is read-only +INSERT INTO r2 VALUES (2); -- Not allowed +ERROR: new row violates row level security policy for "r2" +UPDATE r2 SET a = 2 RETURNING *; -- Updates nothing + a +--- +(0 rows) + +DELETE FROM r2 RETURNING *; -- Deletes nothing + a +--- +(0 rows) + +-- r2 can be used as a non-target relation in DML +INSERT INTO r1 SELECT a + 1 FROM r2 RETURNING *; -- OK + a +---- + 11 + 21 +(2 rows) + +UPDATE r1 SET a = r2.a + 2 FROM r2 WHERE r1.a = r2.a RETURNING *; -- OK + a | a +----+---- + 12 | 10 + 22 | 20 +(2 rows) + +DELETE FROM r1 USING r2 WHERE r1.a = r2.a + 2 RETURNING *; -- OK + a | a +----+---- + 12 | 10 + 22 | 20 +(2 rows) + +SELECT * FROM r1; + a +---- + 11 + 21 +(2 rows) + +SELECT * FROM r2; + a +---- + 10 + 20 +(2 rows) + +SET SESSION AUTHORIZATION rls_regress_user0; +DROP TABLE r1; +DROP TABLE r2; +-- -- Clean up objects -- RESET SESSION AUTHORIZATION; diff --git a/src/test/regress/sql/rowsecurity.sql b/src/test/regress/sql/rowsecurity.sql index 300f34ad4bf21..e8c09e9043135 100644 --- a/src/test/regress/sql/rowsecurity.sql +++ b/src/test/regress/sql/rowsecurity.sql @@ -1298,6 +1298,46 @@ CREATE TABLE t (c) AS VALUES ('bar'::text); CREATE POLICY p ON t USING (max(c)); -- fails: aggregate functions are not allowed in policy expressions ROLLBACK; +-- +-- Non-target relations are only subject to SELECT policies +-- +SET SESSION AUTHORIZATION rls_regress_user0; +CREATE TABLE r1 (a int); +CREATE TABLE r2 (a int); +INSERT INTO r1 VALUES (10), (20); +INSERT INTO r2 VALUES (10), (20); + +GRANT ALL ON r1, r2 TO rls_regress_user1; + +CREATE POLICY p1 ON r1 USING (true); +ALTER TABLE r1 ENABLE ROW LEVEL SECURITY; + +CREATE POLICY p1 ON r2 FOR SELECT USING (true); +CREATE POLICY p2 ON r2 FOR INSERT WITH CHECK (false); +CREATE POLICY p3 ON r2 FOR UPDATE USING (false); +CREATE POLICY p4 ON r2 FOR DELETE USING (false); +ALTER TABLE r2 ENABLE ROW LEVEL SECURITY; + +SET SESSION AUTHORIZATION rls_regress_user1; +SELECT * FROM r1; +SELECT * FROM r2; + +-- r2 is read-only +INSERT INTO r2 VALUES (2); -- Not allowed +UPDATE r2 SET a = 2 RETURNING *; -- Updates nothing +DELETE FROM r2 RETURNING *; -- Deletes nothing + +-- r2 can be used as a non-target relation in DML 
+INSERT INTO r1 SELECT a + 1 FROM r2 RETURNING *; -- OK +UPDATE r1 SET a = r2.a + 2 FROM r2 WHERE r1.a = r2.a RETURNING *; -- OK +DELETE FROM r1 USING r2 WHERE r1.a = r2.a + 2 RETURNING *; -- OK +SELECT * FROM r1; +SELECT * FROM r2; + +SET SESSION AUTHORIZATION rls_regress_user0; +DROP TABLE r1; +DROP TABLE r2; + -- -- Clean up objects -- From 7be60a2459135199f8edff7f553b6d551729d79f Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Thu, 30 Jul 2015 10:16:49 -0700 Subject: [PATCH 137/442] Improve CREATE FUNCTION doc WRT to LEAKPROOF RLS interaction. Patch by Dean Rasheed. Back-patched to 9.5 where RLS was introduced. --- doc/src/sgml/ref/create_function.sgml | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/doc/src/sgml/ref/create_function.sgml b/doc/src/sgml/ref/create_function.sgml index c5beb166cfaf2..cc2098c442068 100644 --- a/doc/src/sgml/ref/create_function.sgml +++ b/doc/src/sgml/ref/create_function.sgml @@ -350,9 +350,18 @@ CREATE [ OR REPLACE ] FUNCTION effects. It reveals no information about its arguments other than by its return value. For example, a function which throws an error message for some argument values but not others, or which includes the argument - values in any error message, is not leakproof. The query planner may - push leakproof functions (but not others) into views created with the - security_barrier option. See + values in any error message, is not leakproof. This affects how the + system executes queries against views created with the + security_barrier option or tables with row level + security enabled. The system will enforce conditions from security + policies and security barrier views before any user-supplied conditions + from the query itself that contain non-leakproof functions, in order to + prevent the inadvertent exposure of data. Functions and operators + marked as leakproof are assumed to be trustworthy, and may be executed + before conditions from security policies and security barrier views. + In addtion, functions which do not take arguments or which are not + passed any arguments from the security barrier view or table do not have + to be marked as leakproof to be executed before security conditions. See and . This option can only be set by the superuser. From 244c378e243e3649efc99fe96ec9f123bbe9ffbc Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Thu, 30 Jul 2015 15:07:19 -0300 Subject: [PATCH 138/442] Fix broken assertion in BRIN code The code was assuming that any NULL value in scan keys was due to IS NULL or IS NOT NULL, but it turns out to be possible to get them with other operators too, if they are used in contrived-enough ways. Easiest way out of the problem seems to check explicitely for the IS NOT NULL flag, instead of assuming it must be set if the IS NULL flag is not set, when a null scan key is found; if neither flag is set, follow the lead of other index AMs and assume that all indexable operators must be strict, and thus the query is never satisfiable. Also, add a comment to try and lure some future hacker into improving analysis of scan keys in brin. Per report from Andreas Seltenreich; diagnosis by Tom Lane. Backpatch to 9.5. 
Discussion: http://www.postgresql.org/message-id/20646.1437919632@sss.pgh.pa.us --- src/backend/access/brin/brin.c | 8 ++++++++ src/backend/access/brin/brin_inclusion.c | 10 ++++++++-- src/backend/access/brin/brin_minmax.c | 10 ++++++++-- 3 files changed, 24 insertions(+), 4 deletions(-) diff --git a/src/backend/access/brin/brin.c b/src/backend/access/brin/brin.c index 268a55e71f93a..360b26e6fc470 100644 --- a/src/backend/access/brin/brin.c +++ b/src/backend/access/brin/brin.c @@ -464,6 +464,14 @@ brinrescan(PG_FUNCTION_ARGS) /* other arguments ignored */ + /* + * Other index AMs preprocess the scan keys at this point, or sometime + * early during the scan; this lets them optimize by removing redundant + * keys, or doing early returns when they are impossible to satisfy; see + * _bt_preprocess_keys for an example. Something like that could be added + * here someday, too. + */ + if (scankey && scan->numberOfKeys > 0) memmove(scan->keyData, scankey, scan->numberOfKeys * sizeof(ScanKeyData)); diff --git a/src/backend/access/brin/brin_inclusion.c b/src/backend/access/brin/brin_inclusion.c index 803b07f10a913..926487ec0390b 100644 --- a/src/backend/access/brin/brin_inclusion.c +++ b/src/backend/access/brin/brin_inclusion.c @@ -276,8 +276,14 @@ brin_inclusion_consistent(PG_FUNCTION_ARGS) * For IS NOT NULL, we can only skip ranges that are known to have * only nulls. */ - Assert(key->sk_flags & SK_SEARCHNOTNULL); - PG_RETURN_BOOL(!column->bv_allnulls); + if (key->sk_flags & SK_SEARCHNOTNULL) + PG_RETURN_BOOL(!column->bv_allnulls); + + /* + * Neither IS NULL nor IS NOT NULL was used; assume all indexable + * operators are strict and return false. + */ + PG_RETURN_BOOL(false); } /* If it is all nulls, it cannot possibly be consistent. */ diff --git a/src/backend/access/brin/brin_minmax.c b/src/backend/access/brin/brin_minmax.c index 7cd98887c0ffe..2cc6e41e5f78c 100644 --- a/src/backend/access/brin/brin_minmax.c +++ b/src/backend/access/brin/brin_minmax.c @@ -174,8 +174,14 @@ brin_minmax_consistent(PG_FUNCTION_ARGS) * For IS NOT NULL, we can only skip ranges that are known to have * only nulls. */ - Assert(key->sk_flags & SK_SEARCHNOTNULL); - PG_RETURN_BOOL(!column->bv_allnulls); + if (key->sk_flags & SK_SEARCHNOTNULL) + PG_RETURN_BOOL(!column->bv_allnulls); + + /* + * Neither IS NULL nor IS NOT NULL was used; assume all indexable + * operators are strict and return false. + */ + PG_RETURN_BOOL(false); } /* if the range is all empty, it cannot possibly be consistent */ From 71b66e78e432d99325db6356f056cb3f03b3d7b7 Mon Sep 17 00:00:00 2001 From: Alvaro Herrera Date: Thu, 30 Jul 2015 15:19:49 -0300 Subject: [PATCH 139/442] Fix volatility marking of commit timestamp functions They are marked stable, but since they act on instantaneous state and it is possible to consult state of transactions as they commit, the results could change mid-query. They need to be marked volatile, and this commit does so. There would normally be a catversion bump here, but this is so much a niche feature and I don't believe there's real damage from the incorrect marking, that I refrained. Backpatch to 9.5, where commit timestamps where introduced. Per note from Fujii Masao. 
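For illustration, a hedged example (not part of the patch itself) of how the corrected marking can be observed; 'v' and 's' are the pg_proc.provolatile codes for volatile and stable, and the functions only return data when track_commit_timestamp is enabled.

    -- Needs track_commit_timestamp = on for these functions to return results.
    SELECT proname, provolatile
      FROM pg_proc
     WHERE proname IN ('pg_xact_commit_timestamp', 'pg_last_committed_xact');
    -- After this change both rows show provolatile = 'v' rather than 's',
    -- since the results reflect instantaneous commit state and can change
    -- mid-query as other transactions commit.
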
--- src/include/catalog/pg_proc.h | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/include/catalog/pg_proc.h b/src/include/catalog/pg_proc.h index 9a27399c5128e..c72fb933066d0 100644 --- a/src/include/catalog/pg_proc.h +++ b/src/include/catalog/pg_proc.h @@ -3080,10 +3080,10 @@ DESCR("view two-phase transactions"); DATA(insert OID = 3819 ( pg_get_multixact_members PGNSP PGUID 12 1 1000 0 0 f f f f t t v 1 0 2249 "28" "{28,28,25}" "{i,o,o}" "{multixid,xid,mode}" _null_ _null_ pg_get_multixact_members _null_ _null_ _null_ )); DESCR("view members of a multixactid"); -DATA(insert OID = 3581 ( pg_xact_commit_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f s 1 0 1184 "28" _null_ _null_ _null_ _null_ _null_ pg_xact_commit_timestamp _null_ _null_ _null_ )); +DATA(insert OID = 3581 ( pg_xact_commit_timestamp PGNSP PGUID 12 1 0 0 0 f f f f t f v 1 0 1184 "28" _null_ _null_ _null_ _null_ _null_ pg_xact_commit_timestamp _null_ _null_ _null_ )); DESCR("get commit timestamp of a transaction"); -DATA(insert OID = 3583 ( pg_last_committed_xact PGNSP PGUID 12 1 0 0 0 f f f f t f s 0 0 2249 "" "{28,1184}" "{o,o}" "{xid,timestamp}" _null_ _null_ pg_last_committed_xact _null_ _null_ _null_ )); +DATA(insert OID = 3583 ( pg_last_committed_xact PGNSP PGUID 12 1 0 0 0 f f f f t f v 0 0 2249 "" "{28,1184}" "{o,o}" "{xid,timestamp}" _null_ _null_ pg_last_committed_xact _null_ _null_ _null_ )); DESCR("get transaction Id and commit timestamp of latest transaction commit"); DATA(insert OID = 3537 ( pg_describe_object PGNSP PGUID 12 1 0 0 0 f f f f t f s 3 0 25 "26 26 23" _null_ _null_ _null_ _null_ _null_ pg_describe_object _null_ _null_ _null_ )); From c7446194fa8fbbb9e8d948668bb47563ab58f45f Mon Sep 17 00:00:00 2001 From: Noah Misch Date: Thu, 30 Jul 2015 20:48:41 -0400 Subject: [PATCH 140/442] Consolidate makefile code for setting top_srcdir, srcdir and VPATH. Responsibility was formerly split between Makefile.global and pgxs.mk. As a result of commit b58233c71b93a32fcab7219585cafc25a27eb769, in the PGXS case, these variables were unset while parsing Makefile.global and callees. Inclusion of Makefile.custom did not work from PGXS, and the subtle difference seemed like a recipe for future bugs. Back-patch to 9.4, where that commit first appeared. --- src/Makefile.global.in | 21 ++++++++++++++++++--- src/makefiles/pgxs.mk | 15 --------------- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/src/Makefile.global.in b/src/Makefile.global.in index e87885bb2b039..e2c8aeacd40bb 100644 --- a/src/Makefile.global.in +++ b/src/Makefile.global.in @@ -40,9 +40,24 @@ VERSION = @PACKAGE_VERSION@ MAJORVERSION = @PG_MAJORVERSION@ VERSION_NUM = @PG_VERSION_NUM@ -# Support for VPATH builds -# (PGXS VPATH support is handled separately in pgxs.mk) -ifndef PGXS +# Set top_srcdir, srcdir, and VPATH. +ifdef PGXS +top_srcdir = $(top_builddir) + +# If VPATH is set or Makefile is not in current directory we are building +# the extension with VPATH so we set the variable here. +ifdef VPATH +srcdir = $(VPATH) +else +ifeq ($(CURDIR),$(dir $(firstword $(MAKEFILE_LIST)))) +srcdir = . +VPATH = +else +srcdir = $(dir $(firstword $(MAKEFILE_LIST))) +VPATH = $(srcdir) +endif +endif +else # not PGXS vpath_build = @vpath_build@ abs_top_builddir = @abs_top_builddir@ abs_top_srcdir = @abs_top_srcdir@ diff --git a/src/makefiles/pgxs.mk b/src/makefiles/pgxs.mk index b5113726841ae..b6874ced91c75 100644 --- a/src/makefiles/pgxs.mk +++ b/src/makefiles/pgxs.mk @@ -62,21 +62,6 @@ ifdef PGXS top_builddir := $(dir $(PGXS))../.. 
include $(top_builddir)/src/Makefile.global -top_srcdir = $(top_builddir) -# If VPATH is set or Makefile is not in current directory we are building -# the extension with VPATH so we set the variable here. -ifdef VPATH -srcdir = $(VPATH) -else -ifeq ($(CURDIR),$(dir $(firstword $(MAKEFILE_LIST)))) -srcdir = . -VPATH = -else -srcdir = $(dir $(firstword $(MAKEFILE_LIST))) -VPATH = $(srcdir) -endif -endif - # These might be set in Makefile.global, but if they were not found # during the build of PostgreSQL, supply default values so that users # of pgxs can use the variables. From edf26ed033f18bddc9bfe5c239388330150766a1 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Fri, 31 Jul 2015 19:26:33 -0400 Subject: [PATCH 141/442] Fix an oversight in checking whether a join with LATERAL refs is legal. In many cases, we can implement a semijoin as a plain innerjoin by first passing the righthand-side relation through a unique-ification step. However, one of the cases where this does NOT work is where the RHS has a LATERAL reference to the LHS; that makes the RHS dependent on the LHS so that unique-ification is meaningless. joinpath.c understood this, and so would not generate any join paths of this kind ... but join_is_legal neglected to check for the case, so it would think that we could do it. The upshot would be a "could not devise a query plan for the given query" failure once we had failed to generate any join paths at all for the bogus join pair. Back-patch to 9.3 where LATERAL was added. --- src/backend/optimizer/path/joinrels.c | 8 ++++-- src/test/regress/expected/join.out | 35 +++++++++++++++++++++++++++ src/test/regress/sql/join.sql | 13 ++++++++++ 3 files changed, 54 insertions(+), 2 deletions(-) diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index fe9fd57317429..b6c9494fed67f 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -536,7 +536,9 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, if (!bms_is_subset(ljinfo->lateral_lhs, rel1->relids)) return false; /* rel1 can't compute the required parameter */ if (match_sjinfo && - (reversed || match_sjinfo->jointype == JOIN_FULL)) + (reversed || + unique_ified || + match_sjinfo->jointype == JOIN_FULL)) return false; /* not implementable as nestloop */ } if (bms_is_subset(ljinfo->lateral_rhs, rel1->relids) && @@ -549,7 +551,9 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, if (!bms_is_subset(ljinfo->lateral_lhs, rel2->relids)) return false; /* rel2 can't compute the required parameter */ if (match_sjinfo && - (!reversed || match_sjinfo->jointype == JOIN_FULL)) + (!reversed || + unique_ified || + match_sjinfo->jointype == JOIN_FULL)) return false; /* not implementable as nestloop */ } } diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index 1afd0c328b5b8..10336d48a3adb 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -4527,6 +4527,41 @@ select * from Output: 3 (11 rows) +-- check we don't try to do a unique-ified semijoin with LATERAL +explain (verbose, costs off) +select * from + (values (0,9998), (1,1000)) v(id,x), + lateral (select f1 from int4_tbl + where f1 = any (select unique1 from tenk1 + where unique2 = v.x offset 0)) ss; + QUERY PLAN +---------------------------------------------------------------------- + Nested Loop + Output: "*VALUES*".column1, "*VALUES*".column2, int4_tbl.f1 + -> Values Scan on "*VALUES*" + Output: 
"*VALUES*".column1, "*VALUES*".column2 + -> Nested Loop Semi Join + Output: int4_tbl.f1 + Join Filter: (int4_tbl.f1 = tenk1.unique1) + -> Seq Scan on public.int4_tbl + Output: int4_tbl.f1 + -> Materialize + Output: tenk1.unique1 + -> Index Scan using tenk1_unique2 on public.tenk1 + Output: tenk1.unique1 + Index Cond: (tenk1.unique2 = "*VALUES*".column2) +(14 rows) + +select * from + (values (0,9998), (1,1000)) v(id,x), + lateral (select f1 from int4_tbl + where f1 = any (select unique1 from tenk1 + where unique2 = v.x offset 0)) ss; + id | x | f1 +----+------+---- + 0 | 9998 | 0 +(1 row) + -- test some error cases where LATERAL should have been used but wasn't select f1,g from int4_tbl a, (select f1 as g) ss; ERROR: column "f1" does not exist diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index d34cefac5a18f..7553aefc6bd72 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -1365,6 +1365,19 @@ select * from select * from (select 3 as z offset 0) z where z.z = x.x ) zz on zz.z = y.y; +-- check we don't try to do a unique-ified semijoin with LATERAL +explain (verbose, costs off) +select * from + (values (0,9998), (1,1000)) v(id,x), + lateral (select f1 from int4_tbl + where f1 = any (select unique1 from tenk1 + where unique2 = v.x offset 0)) ss; +select * from + (values (0,9998), (1,1000)) v(id,x), + lateral (select f1 from int4_tbl + where f1 = any (select unique1 from tenk1 + where unique2 = v.x offset 0)) ss; + -- test some error cases where LATERAL should have been used but wasn't select f1,g from int4_tbl a, (select f1 as g) ss; select f1,g from int4_tbl a, (select a.f1 as g) ss; From 8dccf030e884ea8c723275a070acf8a8ed1eebe1 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 1 Aug 2015 14:31:46 -0400 Subject: [PATCH 142/442] Teach predtest.c that "foo" implies "foo IS NOT NULL". Per complaint from Peter Holzer. It's useful to cover this special case, since for a boolean variable "foo", earlier parts of the planner will have reduced variants like "foo = true" to just "foo", and thus we may fail to recognize the applicability of a partial index with predicate "foo IS NOT NULL". Back-patch to 9.5, but not further; given the lack of previous complaints this doesn't seem like behavior to change in stable branches. --- src/backend/optimizer/util/predtest.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/backend/optimizer/util/predtest.c b/src/backend/optimizer/util/predtest.c index d9e49d127e1a0..7e86ca974be27 100644 --- a/src/backend/optimizer/util/predtest.c +++ b/src/backend/optimizer/util/predtest.c @@ -1028,6 +1028,8 @@ arrayexpr_cleanup_fn(PredIterInfo info) * "foo" is NULL, which we can take as equivalent to FALSE because we know * we are within an AND/OR subtree of a WHERE clause. (Again, "foo" is * already known immutable, so the clause will certainly always fail.) + * Also, if the clause is just "foo" (meaning it's a boolean variable), + * the predicate is implied since the clause can't be true if "foo" is NULL. * * Finally, if both clauses are binary operator expressions, we may be able * to prove something using the system's knowledge about operators; those @@ -1061,6 +1063,8 @@ predicate_implied_by_simple_clause(Expr *predicate, Node *clause) list_member_strip(((FuncExpr *) clause)->args, nonnullarg) && func_strict(((FuncExpr *) clause)->funcid)) return true; + if (equal(clause, nonnullarg)) + return true; } return false; /* we can't succeed below... 
*/ } From 7968238eb17ed5f2f1123271549b7921fa1d3aba Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sat, 1 Aug 2015 20:57:41 -0400 Subject: [PATCH 143/442] Fix some planner issues with degenerate outer join clauses. An outer join clause that didn't actually reference the RHS (perhaps only after constant-folding) could confuse the join order enforcement logic, leading to wrong query results. Also, nested occurrences of such things could trigger an Assertion that on reflection seems incorrect. Per fuzz testing by Andreas Seltenreich. The practical use of such cases seems thin enough that it's not too surprising we've not heard field reports about it. This has been broken for a long time, so back-patch to all active branches. --- src/backend/optimizer/path/joinrels.c | 26 +++-- src/backend/optimizer/plan/initsplan.c | 18 +++- src/test/regress/expected/join.out | 129 +++++++++++++++++++++++++ src/test/regress/sql/join.sql | 50 ++++++++++ 4 files changed, 211 insertions(+), 12 deletions(-) diff --git a/src/backend/optimizer/path/joinrels.c b/src/backend/optimizer/path/joinrels.c index b6c9494fed67f..02c7c5ea864b6 100644 --- a/src/backend/optimizer/path/joinrels.c +++ b/src/backend/optimizer/path/joinrels.c @@ -467,20 +467,26 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, } else { + /* + * Otherwise, the proposed join overlaps the RHS but isn't a valid + * implementation of this SJ. It might still be a legal join, + * however, if it does not overlap the LHS. + */ + if (bms_overlap(joinrelids, sjinfo->min_lefthand)) + return false; + /*---------- - * Otherwise, the proposed join overlaps the RHS but isn't - * a valid implementation of this SJ. It might still be - * a legal join, however. If both inputs overlap the RHS, - * assume that it's OK. Since the inputs presumably got past - * this function's checks previously, they can't overlap the - * LHS and their violations of the RHS boundary must represent - * SJs that have been determined to commute with this one. + * If both inputs overlap the RHS, assume that it's OK. Since the + * inputs presumably got past this function's checks previously, + * their violations of the RHS boundary must represent SJs that + * have been determined to commute with this one. * We have to allow this to work correctly in cases like * (a LEFT JOIN (b JOIN (c LEFT JOIN d))) * when the c/d join has been determined to commute with the join * to a, and hence d is not part of min_righthand for the upper * join. It should be legal to join b to c/d but this will appear * as a violation of the upper join's RHS. + * * Furthermore, if one input overlaps the RHS and the other does * not, we should still allow the join if it is a valid * implementation of some other SJ. 
We have to allow this to @@ -496,11 +502,13 @@ join_is_legal(PlannerInfo *root, RelOptInfo *rel1, RelOptInfo *rel2, bms_overlap(rel1->relids, sjinfo->min_righthand) && bms_overlap(rel2->relids, sjinfo->min_righthand)) { - /* seems OK */ - Assert(!bms_overlap(joinrelids, sjinfo->min_lefthand)); + /* both overlap; assume OK */ } else + { + /* one overlaps, the other doesn't (or it's a semijoin) */ is_valid_inner = false; + } } } diff --git a/src/backend/optimizer/plan/initsplan.c b/src/backend/optimizer/plan/initsplan.c index 701b99254db0d..40a867c260782 100644 --- a/src/backend/optimizer/plan/initsplan.c +++ b/src/backend/optimizer/plan/initsplan.c @@ -1128,6 +1128,20 @@ make_outerjoininfo(PlannerInfo *root, min_righthand = bms_int_members(bms_union(clause_relids, inner_join_rels), right_rels); + /* + * If we have a degenerate join clause that doesn't mention any RHS rels, + * force the min RHS to be the syntactic RHS; otherwise we can end up + * making serious errors, like putting the LHS on the wrong side of an + * outer join. It seems to be safe to not do this when we have a + * contribution from inner_join_rels, though; that's enough to pin the SJ + * to occur at a reasonable place in the tree. + */ + if (bms_is_empty(min_righthand)) + min_righthand = bms_copy(right_rels); + + /* + * Now check previous outer joins for ordering restrictions. + */ foreach(l, root->join_info_list) { SpecialJoinInfo *otherinfo = (SpecialJoinInfo *) lfirst(l); @@ -1224,12 +1238,10 @@ make_outerjoininfo(PlannerInfo *root, * If we found nothing to put in min_lefthand, punt and make it the full * LHS, to avoid having an empty min_lefthand which will confuse later * processing. (We don't try to be smart about such cases, just correct.) - * Likewise for min_righthand. + * We already forced min_righthand nonempty, so nothing to do for that. 
*/ if (bms_is_empty(min_lefthand)) min_lefthand = bms_copy(left_rels); - if (bms_is_empty(min_righthand)) - min_righthand = bms_copy(right_rels); /* Now they'd better be nonempty */ Assert(!bms_is_empty(min_lefthand)); diff --git a/src/test/regress/expected/join.out b/src/test/regress/expected/join.out index 10336d48a3adb..4832bc3047d3f 100644 --- a/src/test/regress/expected/join.out +++ b/src/test/regress/expected/join.out @@ -3293,6 +3293,135 @@ using (join_key); 1 | | (2 rows) +-- +-- test successful handling of nested outer joins with degenerate join quals +-- +explain (verbose, costs off) +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + QUERY PLAN +---------------------------------------------------------------------- + Hash Left Join + Output: t1.f1 + Hash Cond: (i8.q2 = i4.f1) + -> Nested Loop Left Join + Output: t1.f1, i8.q2 + Join Filter: (t1.f1 = '***'::text) + -> Seq Scan on public.text_tbl t1 + Output: t1.f1 + -> Materialize + Output: i8.q2 + -> Hash Right Join + Output: i8.q2 + Hash Cond: ((NULL::integer) = i8b1.q2) + -> Hash Left Join + Output: i8.q2, (NULL::integer) + Hash Cond: (i8.q1 = i8b2.q1) + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + -> Hash + Output: i8b2.q1, (NULL::integer) + -> Seq Scan on public.int8_tbl i8b2 + Output: i8b2.q1, NULL::integer + -> Hash + Output: i8b1.q2 + -> Seq Scan on public.int8_tbl i8b1 + Output: i8b1.q2 + -> Hash + Output: i4.f1 + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 +(30 rows) + +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + f1 +------------------- + doh! 
+ hi de ho neighbor +(2 rows) + +explain (verbose, costs off) +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + QUERY PLAN +---------------------------------------------------------------------------- + Hash Left Join + Output: t1.f1 + Hash Cond: (i8.q2 = i4.f1) + -> Nested Loop Left Join + Output: i8.q2, t1.f1 + Join Filter: (t1.f1 = '***'::text) + -> Seq Scan on public.text_tbl t1 + Output: t1.f1 + -> Materialize + Output: i8.q2 + -> Hash Right Join + Output: i8.q2 + Hash Cond: ((NULL::integer) = i8b1.q2) + -> Hash Right Join + Output: i8.q2, (NULL::integer) + Hash Cond: (i8b2.q1 = i8.q1) + -> Nested Loop + Output: i8b2.q1, NULL::integer + -> Seq Scan on public.int8_tbl i8b2 + Output: i8b2.q1, i8b2.q2 + -> Materialize + -> Seq Scan on public.int4_tbl i4b2 + -> Hash + Output: i8.q1, i8.q2 + -> Seq Scan on public.int8_tbl i8 + Output: i8.q1, i8.q2 + -> Hash + Output: i8b1.q2 + -> Seq Scan on public.int8_tbl i8b1 + Output: i8b1.q2 + -> Hash + Output: i4.f1 + -> Seq Scan on public.int4_tbl i4 + Output: i4.f1 +(34 rows) + +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + f1 +------------------- + doh! + hi de ho neighbor +(2 rows) + -- -- test ability to push constants through outer join clauses -- diff --git a/src/test/regress/sql/join.sql b/src/test/regress/sql/join.sql index 7553aefc6bd72..9a1e22a197ee4 100644 --- a/src/test/regress/sql/join.sql +++ b/src/test/regress/sql/join.sql @@ -984,6 +984,56 @@ left join ) foo3 using (join_key); +-- +-- test successful handling of nested outer joins with degenerate join quals +-- + +explain (verbose, costs off) +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + +explain (verbose, costs off) +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + +select t1.* from + text_tbl t1 + left join (select *, '***'::text as d1 from int8_tbl i8b1) b1 + left join int8_tbl i8 + left join (select *, null::int as d2 from int8_tbl i8b2, int4_tbl i4b2) b2 + on (i8.q1 = b2.q1) + on (b2.d2 = b1.q2) + on (t1.f1 = b1.d1) + left join int4_tbl i4 + on (i8.q2 = i4.f1); + -- -- test ability to push constants through outer join clauses -- From 27b719173516b54df63a1bba4266798e9f77bbb9 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Fri, 31 Jul 2015 20:20:43 +0200 Subject: [PATCH 144/442] Fix 
issues around the "variable" support in the lwlock infrastructure. The lwlock scalability work introduced two race conditions into the lwlock variable support provided for xlog.c. First, and harmlessly on most platforms, it set/read the variable without the spinlock in some places. Secondly, due to the removal of the spinlock, it was possible that a backend missed changes to the variable's state if it changed in the wrong moment because checking the lock's state, the variable's state and the queuing are not protected by a single spinlock acquisition anymore. To fix first move resetting the variable's from LWLockAcquireWithVar to WALInsertLockRelease, via a new function LWLockReleaseClearVar. That prevents issues around waiting for a variable's value to change when a new locker has acquired the lock, but not yet set the value. Secondly re-check that the variable hasn't changed after enqueing, that prevents the issue that the lock has been released and already re-acquired by the time the woken up backend checks for the lock's state. Reported-By: Jeff Janes Analyzed-By: Heikki Linnakangas Reviewed-By: Heikki Linnakangas Discussion: 5592DB35.2060401@iki.fi Backpatch: 9.5, where the lwlock scalability went in --- src/backend/access/transam/xlog.c | 34 +++--- src/backend/storage/lmgr/lwlock.c | 193 +++++++++++++++++------------- src/include/storage/lwlock.h | 2 +- 3 files changed, 129 insertions(+), 100 deletions(-) diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 1dd31b37ffe06..939813e7b7177 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -1408,9 +1408,7 @@ WALInsertLockAcquire(void) * The insertingAt value is initially set to 0, as we don't know our * insert location yet. */ - immed = LWLockAcquireWithVar(&WALInsertLocks[MyLockNo].l.lock, - &WALInsertLocks[MyLockNo].l.insertingAt, - 0); + immed = LWLockAcquire(&WALInsertLocks[MyLockNo].l.lock, LW_EXCLUSIVE); if (!immed) { /* @@ -1435,26 +1433,28 @@ WALInsertLockAcquireExclusive(void) int i; /* - * When holding all the locks, we only update the last lock's insertingAt - * indicator. The others are set to 0xFFFFFFFFFFFFFFFF, which is higher - * than any real XLogRecPtr value, to make sure that no-one blocks waiting - * on those. + * When holding all the locks, all but the last lock's insertingAt + * indicator is set to 0xFFFFFFFFFFFFFFFF, which is higher than any real + * XLogRecPtr value, to make sure that no-one blocks waiting on those. */ for (i = 0; i < NUM_XLOGINSERT_LOCKS - 1; i++) { - LWLockAcquireWithVar(&WALInsertLocks[i].l.lock, - &WALInsertLocks[i].l.insertingAt, - PG_UINT64_MAX); + LWLockAcquire(&WALInsertLocks[i].l.lock, LW_EXCLUSIVE); + LWLockUpdateVar(&WALInsertLocks[i].l.lock, + &WALInsertLocks[i].l.insertingAt, + PG_UINT64_MAX); } - LWLockAcquireWithVar(&WALInsertLocks[i].l.lock, - &WALInsertLocks[i].l.insertingAt, - 0); + /* Variable value reset to 0 at release */ + LWLockAcquire(&WALInsertLocks[i].l.lock, LW_EXCLUSIVE); holdingAllLocks = true; } /* * Release our insertion lock (or locks, if we're holding them all). + * + * NB: Reset all variables to 0, so they cause LWLockWaitForVar to block the + * next time the lock is acquired. 
*/ static void WALInsertLockRelease(void) @@ -1464,13 +1464,17 @@ WALInsertLockRelease(void) int i; for (i = 0; i < NUM_XLOGINSERT_LOCKS; i++) - LWLockRelease(&WALInsertLocks[i].l.lock); + LWLockReleaseClearVar(&WALInsertLocks[i].l.lock, + &WALInsertLocks[i].l.insertingAt, + 0); holdingAllLocks = false; } else { - LWLockRelease(&WALInsertLocks[MyLockNo].l.lock); + LWLockReleaseClearVar(&WALInsertLocks[MyLockNo].l.lock, + &WALInsertLocks[MyLockNo].l.insertingAt, + 0); } } diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index e5566d1b60969..ae03eb14196c8 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -10,13 +10,15 @@ * locking should be done with the full lock manager --- which depends on * LWLocks to protect its shared state. * - * In addition to exclusive and shared modes, lightweight locks can be used - * to wait until a variable changes value. The variable is initially set - * when the lock is acquired with LWLockAcquireWithVar, and can be updated + * In addition to exclusive and shared modes, lightweight locks can be used to + * wait until a variable changes value. The variable is initially not set + * when the lock is acquired with LWLockAcquire, i.e. it remains set to the + * value it was set to when the lock was released last, and can be updated * without releasing the lock by calling LWLockUpdateVar. LWLockWaitForVar - * waits for the variable to be updated, or until the lock is free. The - * meaning of the variable is up to the caller, the lightweight lock code - * just assigns and compares it. + * waits for the variable to be updated, or until the lock is free. When + * releasing the lock with LWLockReleaseClearVar() the value can be set to an + * appropriate value for a free lock. The meaning of the variable is up to + * the caller, the lightweight lock code just assigns and compares it. * * Portions Copyright (c) 1996-2015, PostgreSQL Global Development Group * Portions Copyright (c) 1994, Regents of the University of California @@ -150,9 +152,6 @@ static LWLockHandle held_lwlocks[MAX_SIMUL_LWLOCKS]; static int lock_addin_request = 0; static bool lock_addin_request_allowed = true; -static inline bool LWLockAcquireCommon(LWLock *l, LWLockMode mode, - uint64 *valptr, uint64 val); - #ifdef LWLOCK_STATS typedef struct lwlock_stats_key { @@ -899,25 +898,7 @@ LWLockDequeueSelf(LWLock *lock) * Side effect: cancel/die interrupts are held off until lock release. */ bool -LWLockAcquire(LWLock *l, LWLockMode mode) -{ - return LWLockAcquireCommon(l, mode, NULL, 0); -} - -/* - * LWLockAcquireWithVar - like LWLockAcquire, but also sets *valptr = val - * - * The lock is always acquired in exclusive mode with this function. 
- */ -bool -LWLockAcquireWithVar(LWLock *l, uint64 *valptr, uint64 val) -{ - return LWLockAcquireCommon(l, LW_EXCLUSIVE, valptr, val); -} - -/* internal function to implement LWLockAcquire and LWLockAcquireWithVar */ -static inline bool -LWLockAcquireCommon(LWLock *lock, LWLockMode mode, uint64 *valptr, uint64 val) +LWLockAcquire(LWLock *lock, LWLockMode mode) { PGPROC *proc = MyProc; bool result = true; @@ -1064,10 +1045,6 @@ LWLockAcquireCommon(LWLock *lock, LWLockMode mode, uint64 *valptr, uint64 val) result = false; } - /* If there's a variable associated with this lock, initialize it */ - if (valptr) - *valptr = val; - TRACE_POSTGRESQL_LWLOCK_ACQUIRE(T_NAME(lock), T_ID(lock), mode); /* Add lock to list of locks held by this backend */ @@ -1258,6 +1235,71 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode) return !mustwait; } +/* + * Does the lwlock in its current state need to wait for the variable value to + * change? + * + * If we don't need to wait, and it's because the value of the variable has + * changed, store the current value in newval. + * + * *result is set to true if the lock was free, and false otherwise. + */ +static bool +LWLockConflictsWithVar(LWLock *lock, + uint64 *valptr, uint64 oldval, uint64 *newval, + bool *result) +{ + bool mustwait; + uint64 value; +#ifdef LWLOCK_STATS + lwlock_stats *lwstats; + + lwstats = get_lwlock_stats_entry(lock); +#endif + + /* + * Test first to see if it the slot is free right now. + * + * XXX: the caller uses a spinlock before this, so we don't need a memory + * barrier here as far as the current usage is concerned. But that might + * not be safe in general. + */ + mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0; + + if (!mustwait) + { + *result = true; + return false; + } + + *result = false; + + /* + * Read value using spinlock as we can't rely on atomic 64 bit + * reads/stores. TODO: On platforms with a way to do atomic 64 bit + * reads/writes the spinlock could be optimized away. + */ +#ifdef LWLOCK_STATS + lwstats->spin_delay_count += SpinLockAcquire(&lock->mutex); +#else + SpinLockAcquire(&lock->mutex); +#endif + value = *valptr; + SpinLockRelease(&lock->mutex); + + if (value != oldval) + { + mustwait = false; + *newval = value; + } + else + { + mustwait = true; + } + + return mustwait; +} + /* * LWLockWaitForVar - Wait until lock is free, or a variable is updated. * @@ -1268,11 +1310,6 @@ LWLockAcquireOrWait(LWLock *lock, LWLockMode mode) * matches oldval, returns false and sets *newval to the current value in * *valptr. * - * It's possible that the lock holder releases the lock, but another backend - * acquires it again before we get a chance to observe that the lock was - * momentarily released. We wouldn't need to wait for the new lock holder, - * but we cannot distinguish that case, so we will have to wait. - * * Note: this function ignores shared lock holders; if the lock is held * in shared mode, returns 'true'. */ @@ -1290,16 +1327,6 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval) PRINT_LWDEBUG("LWLockWaitForVar", lock, LW_WAIT_UNTIL_FREE); - /* - * Quick test first to see if it the slot is free right now. - * - * XXX: the caller uses a spinlock before this, so we don't need a memory - * barrier here as far as the current usage is concerned. But that might - * not be safe in general. - */ - if ((pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) == 0) - return true; - /* * Lock out cancel/die interrupts while we sleep on the lock. 
There is no * cleanup mechanism to remove us from the wait queue if we got @@ -1313,39 +1340,9 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval) for (;;) { bool mustwait; - uint64 value; - - mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0; - - if (mustwait) - { - /* - * Perform comparison using spinlock as we can't rely on atomic 64 - * bit reads/stores. - */ -#ifdef LWLOCK_STATS - lwstats->spin_delay_count += SpinLockAcquire(&lock->mutex); -#else - SpinLockAcquire(&lock->mutex); -#endif - /* - * XXX: We can significantly optimize this on platforms with 64bit - * atomics. - */ - value = *valptr; - if (value != oldval) - { - result = false; - mustwait = false; - *newval = value; - } - else - mustwait = true; - SpinLockRelease(&lock->mutex); - } - else - mustwait = false; + mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval, + &result); if (!mustwait) break; /* the lock was free or value didn't match */ @@ -1354,7 +1351,9 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval) * Add myself to wait queue. Note that this is racy, somebody else * could wakeup before we're finished queuing. NB: We're using nearly * the same twice-in-a-row lock acquisition protocol as - * LWLockAcquire(). Check its comments for details. + * LWLockAcquire(). Check its comments for details. The only + * difference is that we also have to check the variable's values when + * checking the state of the lock. */ LWLockQueueSelf(lock, LW_WAIT_UNTIL_FREE); @@ -1365,12 +1364,13 @@ LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval) pg_atomic_fetch_or_u32(&lock->state, LW_FLAG_RELEASE_OK); /* - * We're now guaranteed to be woken up if necessary. Recheck the - * lock's state. + * We're now guaranteed to be woken up if necessary. Recheck the lock + * and variables state. */ - mustwait = (pg_atomic_read_u32(&lock->state) & LW_VAL_EXCLUSIVE) != 0; + mustwait = LWLockConflictsWithVar(lock, valptr, oldval, newval, + &result); - /* Ok, lock is free after we queued ourselves. Undo queueing. */ + /* Ok, no conflict after we queued ourselves. Undo queueing. */ if (!mustwait) { LOG_LWDEBUG("LWLockWaitForVar", lock, "free, undoing queue"); @@ -1587,6 +1587,31 @@ LWLockRelease(LWLock *lock) RESUME_INTERRUPTS(); } +/* + * LWLockReleaseClearVar - release a previously acquired lock, reset variable + */ +void +LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val) +{ +#ifdef LWLOCK_STATS + lwlock_stats *lwstats; + + lwstats = get_lwlock_stats_entry(lock); + lwstats->spin_delay_count += SpinLockAcquire(&lock->mutex); +#else + SpinLockAcquire(&lock->mutex); +#endif + /* + * Set the variable's value before releasing the lock, that prevents race + * a race condition wherein a new locker acquires the lock, but hasn't yet + * set the variables value. 
+ */ + *valptr = val; + SpinLockRelease(&lock->mutex); + + LWLockRelease(lock); +} + /* * LWLockReleaseAll - release all currently-held locks diff --git a/src/include/storage/lwlock.h b/src/include/storage/lwlock.h index cff3b9992218e..cbd63184b9b4c 100644 --- a/src/include/storage/lwlock.h +++ b/src/include/storage/lwlock.h @@ -182,10 +182,10 @@ extern bool LWLockAcquire(LWLock *lock, LWLockMode mode); extern bool LWLockConditionalAcquire(LWLock *lock, LWLockMode mode); extern bool LWLockAcquireOrWait(LWLock *lock, LWLockMode mode); extern void LWLockRelease(LWLock *lock); +extern void LWLockReleaseClearVar(LWLock *lock, uint64 *valptr, uint64 val); extern void LWLockReleaseAll(void); extern bool LWLockHeldByMe(LWLock *lock); -extern bool LWLockAcquireWithVar(LWLock *lock, uint64 *valptr, uint64 val); extern bool LWLockWaitForVar(LWLock *lock, uint64 *valptr, uint64 oldval, uint64 *newval); extern void LWLockUpdateVar(LWLock *lock, uint64 *valptr, uint64 value); From 9074e41dbd41bc45ef79aeac1b6496bf087509a7 Mon Sep 17 00:00:00 2001 From: Andres Freund Date: Fri, 31 Jul 2015 20:50:35 +0200 Subject: [PATCH 145/442] Micro optimize LWLockAttemptLock() a bit. LWLockAttemptLock pointlessly read the lock's state in every loop iteration, even though pg_atomic_compare_exchange_u32() returns the old value. Instead do that only once before the loop iteration. Additionally there's no need to have the expected_state variable, old_state mostly had the same value anyway. Noticed-By: Heikki Linnakangas Backpatch: 9.5, no reason to let the branches diverge at this point --- src/backend/storage/lmgr/lwlock.c | 20 ++++++++++++-------- 1 file changed, 12 insertions(+), 8 deletions(-) diff --git a/src/backend/storage/lmgr/lwlock.c b/src/backend/storage/lmgr/lwlock.c index ae03eb14196c8..687ed6399cb19 100644 --- a/src/backend/storage/lmgr/lwlock.c +++ b/src/backend/storage/lmgr/lwlock.c @@ -582,29 +582,33 @@ LWLockInitialize(LWLock *lock, int tranche_id) static bool LWLockAttemptLock(LWLock *lock, LWLockMode mode) { + uint32 old_state; + AssertArg(mode == LW_EXCLUSIVE || mode == LW_SHARED); + /* + * Read once outside the loop, later iterations will get the newer value + * via compare & exchange. + */ + old_state = pg_atomic_read_u32(&lock->state); + /* loop until we've determined whether we could acquire the lock or not */ while (true) { - uint32 old_state; - uint32 expected_state; uint32 desired_state; bool lock_free; - old_state = pg_atomic_read_u32(&lock->state); - expected_state = old_state; - desired_state = expected_state; + desired_state = old_state; if (mode == LW_EXCLUSIVE) { - lock_free = (expected_state & LW_LOCK_MASK) == 0; + lock_free = (old_state & LW_LOCK_MASK) == 0; if (lock_free) desired_state += LW_VAL_EXCLUSIVE; } else { - lock_free = (expected_state & LW_VAL_EXCLUSIVE) == 0; + lock_free = (old_state & LW_VAL_EXCLUSIVE) == 0; if (lock_free) desired_state += LW_VAL_SHARED; } @@ -620,7 +624,7 @@ LWLockAttemptLock(LWLock *lock, LWLockMode mode) * Retry if the value changed since we last looked at it. */ if (pg_atomic_compare_exchange_u32(&lock->state, - &expected_state, desired_state)) + &old_state, desired_state)) { if (lock_free) { From 54f23a45f3742e9533dbfa7c1177f02f116b0457 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Sun, 2 Aug 2015 20:08:10 +0300 Subject: [PATCH 146/442] Fix race condition that lead to WALInsertLock deadlock with commit_delay. 
If a call to WaitForXLogInsertionsToFinish() returned a value in the middle of a page, and another backend then started to insert a record to the same page, and then you called WaitXLogInsertionsToFinish() again, the second call might return a smaller value than the first call. The problem was in GetXLogBuffer(), which always updated the insertingAt value to the beginning of the requested page, not the actual requested location. Because of that, the second call might return a xlog pointer to the beginning of the page, while the first one returned a later position on the same page. XLogFlush() performs two calls to WaitXLogInsertionsToFinish() in succession, and holds WALWriteLock on the second call, which can deadlock if the second call to WaitXLogInsertionsToFinish() blocks. Reported by Spiros Ioannou. Backpatch to 9.4, where the more scalable WALInsertLock mechanism, and this bug, was introduced. --- src/backend/access/transam/xlog.c | 27 ++++++++++++++++++++++++--- 1 file changed, 24 insertions(+), 3 deletions(-) diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index 939813e7b7177..f06b51dde3743 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -1664,11 +1664,32 @@ GetXLogBuffer(XLogRecPtr ptr) endptr = XLogCtl->xlblocks[idx]; if (expectedEndPtr != endptr) { + XLogRecPtr initializedUpto; + /* - * Let others know that we're finished inserting the record up to the - * page boundary. + * Before calling AdvanceXLInsertBuffer(), which can block, let others + * know how far we're finished with inserting the record. + * + * NB: If 'ptr' points to just after the page header, advertise a + * position at the beginning of the page rather than 'ptr' itself. If + * there are no other insertions running, someone might try to flush + * up to our advertised location. If we advertised a position after + * the page header, someone might try to flush the page header, even + * though page might actually not be initialized yet. As the first + * inserter on the page, we are effectively responsible for making + * sure that it's initialized, before we let insertingAt to move past + * the page header. */ - WALInsertLockUpdateInsertingAt(expectedEndPtr - XLOG_BLCKSZ); + if (ptr % XLOG_BLCKSZ == SizeOfXLogShortPHD && + ptr % XLOG_SEG_SIZE > XLOG_BLCKSZ) + initializedUpto = ptr - SizeOfXLogShortPHD; + else if (ptr % XLOG_BLCKSZ == SizeOfXLogLongPHD && + ptr % XLOG_SEG_SIZE < XLOG_BLCKSZ) + initializedUpto = ptr - SizeOfXLogLongPHD; + else + initializedUpto = ptr; + + WALInsertLockUpdateInsertingAt(initializedUpto); AdvanceXLInsertBuffer(ptr, false); endptr = XLogCtl->xlblocks[idx]; From 72697d2ba77074713cd4008995a97cf284de1712 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 2 Aug 2015 14:54:44 -0400 Subject: [PATCH 147/442] Fix incorrect order of lock file removal and failure to close() sockets. Commit c9b0cbe98bd783e24a8c4d8d8ac472a494b81292 accidentally broke the order of operations during postmaster shutdown: it resulted in removing the per-socket lockfiles after, not before, postmaster.pid. This creates a race-condition hazard for a new postmaster that's started immediately after observing that postmaster.pid has disappeared; if it sees the socket lockfile still present, it will quite properly refuse to start. This error appears to be the explanation for at least some of the intermittent buildfarm failures we've seen in the pg_upgrade test. 
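The fix described below teaches UnlinkLockFiles to remove the lock files in reverse order of creation, so the data directory lock file (created first) disappears only after the per-socket lock files. A minimal standalone sketch of that idea follows; the file names and helper names are invented for illustration and no PostgreSQL APIs are used:

/*
 * Standalone sketch (not PostgreSQL code): each interlock file is prepended
 * to a list as it is created, so a single forward walk at shutdown removes
 * the newest file first and the file created first last.
 */
#include <stdio.h>
#include <stdlib.h>

typedef struct LockFileEntry
{
	const char *path;
	struct LockFileEntry *next;
} LockFileEntry;

static LockFileEntry *lock_files = NULL;

static void
create_lock_file(const char *path)
{
	FILE	   *f = fopen(path, "w");
	LockFileEntry *entry;

	if (f == NULL)
	{
		perror(path);
		exit(1);
	}
	fclose(f);

	/* prepend, so the list is newest-first (cf. lcons in the patch below) */
	entry = malloc(sizeof(LockFileEntry));
	if (entry == NULL)
		exit(1);
	entry->path = path;
	entry->next = lock_files;
	lock_files = entry;
}

static void
unlink_lock_files(void)
{
	LockFileEntry *entry;

	/* a forward walk removes files in reverse order of creation */
	for (entry = lock_files; entry != NULL; entry = entry->next)
	{
		printf("removing %s\n", entry->path);
		(void) remove(entry->path);
	}
}

int
main(void)
{
	create_lock_file("demo.pid");			/* created first, removed last */
	create_lock_file("demo.socket.lock");	/* created second, removed first */
	unlink_lock_files();
	return 0;
}

Run as-is this prints the two removals newest-first; in the real patch the same effect comes from building the lock file list with lcons() instead of lappend().
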
Another problem, which has been there all along, is that the postmaster has never bothered to close() its listen sockets, but has just allowed them to close at process death. This creates a different race condition for an incoming postmaster: it might be unable to bind to the desired listen address because the old postmaster is still incumbent. This might explain some odd failures we've seen in the past, too. (Note: this is not related to the fact that individual backends don't close their client communication sockets. That behavior is intentional and is not changed by this patch.) Fix by adding an on_proc_exit function that closes the postmaster's ports explicitly, and (in 9.3 and up) reshuffling the responsibility for where to unlink the Unix socket files. Lock file unlinking can stay where it is, but teach it to unlink the lock files in reverse order of creation. --- src/backend/libpq/pqcomm.c | 54 +++++++++++++---------------- src/backend/postmaster/postmaster.c | 47 +++++++++++++++++++++++++ src/backend/utils/init/miscinit.c | 6 +++- src/include/libpq/libpq.h | 1 + 4 files changed, 78 insertions(+), 30 deletions(-) diff --git a/src/backend/libpq/pqcomm.c b/src/backend/libpq/pqcomm.c index a4b37ed5a263c..279df936cf9f7 100644 --- a/src/backend/libpq/pqcomm.c +++ b/src/backend/libpq/pqcomm.c @@ -174,12 +174,15 @@ PQcommMethods *PqCommMethods = &PqCommSocketMethods; void pq_init(void) { + /* initialize state variables */ PqSendBufferSize = PQ_SEND_BUFFER_SIZE; PqSendBuffer = MemoryContextAlloc(TopMemoryContext, PqSendBufferSize); PqSendPointer = PqSendStart = PqRecvPointer = PqRecvLength = 0; PqCommBusy = false; PqCommReadingMsg = false; DoingCopyOut = false; + + /* set up process-exit hook to close the socket */ on_proc_exit(socket_close, 0); /* @@ -285,28 +288,6 @@ socket_close(int code, Datum arg) */ -/* StreamDoUnlink() - * Shutdown routine for backend connection - * If any Unix sockets are used for communication, explicitly close them. - */ -#ifdef HAVE_UNIX_SOCKETS -static void -StreamDoUnlink(int code, Datum arg) -{ - ListCell *l; - - /* Loop through all created sockets... */ - foreach(l, sock_paths) - { - char *sock_path = (char *) lfirst(l); - - unlink(sock_path); - } - /* Since we're about to exit, no need to reclaim storage */ - sock_paths = NIL; -} -#endif /* HAVE_UNIX_SOCKETS */ - /* * StreamServerPort -- open a "listening" port to accept connections. * @@ -588,16 +569,11 @@ Lock_AF_UNIX(char *unixSocketDir, char *unixSocketPath) * Once we have the interlock, we can safely delete any pre-existing * socket file to avoid failure at bind() time. */ - unlink(unixSocketPath); + (void) unlink(unixSocketPath); /* - * Arrange to unlink the socket file(s) at proc_exit. If this is the - * first one, set up the on_proc_exit function to do it; then add this - * socket file to the list of files to unlink. + * Remember socket file pathnames for later maintenance. */ - if (sock_paths == NIL) - on_proc_exit(StreamDoUnlink, 0); - sock_paths = lappend(sock_paths, pstrdup(unixSocketPath)); return STATUS_OK; @@ -826,6 +802,26 @@ TouchSocketFiles(void) } } +/* + * RemoveSocketFiles -- unlink socket files at postmaster shutdown + */ +void +RemoveSocketFiles(void) +{ + ListCell *l; + + /* Loop through all created sockets... */ + foreach(l, sock_paths) + { + char *sock_path = (char *) lfirst(l); + + /* Ignore any error. 
*/ + (void) unlink(sock_path); + } + /* Since we're about to exit, no need to reclaim storage */ + sock_paths = NIL; +} + /* -------------------------------- * Low-level I/O routines begin here. diff --git a/src/backend/postmaster/postmaster.c b/src/backend/postmaster/postmaster.c index 1bb3138a03ab0..000524dcb9428 100644 --- a/src/backend/postmaster/postmaster.c +++ b/src/backend/postmaster/postmaster.c @@ -370,6 +370,7 @@ static DNSServiceRef bonjour_sdref = NULL; /* * postmaster.c - function prototypes */ +static void CloseServerPorts(int status, Datum arg); static void unlink_external_pid_file(int status, Datum arg); static void getInstallationPaths(const char *argv0); static void checkDataDir(void); @@ -900,6 +901,11 @@ PostmasterMain(int argc, char *argv[]) * interlock (thanks to whoever decided to put socket files in /tmp :-(). * For the same reason, it's best to grab the TCP socket(s) before the * Unix socket(s). + * + * Also note that this internally sets up the on_proc_exit function that + * is responsible for removing both data directory and socket lockfiles; + * so it must happen before opening sockets so that at exit, the socket + * lockfiles go away after CloseServerPorts runs. */ CreateDataDirLockFile(true); @@ -924,10 +930,15 @@ PostmasterMain(int argc, char *argv[]) /* * Establish input sockets. + * + * First, mark them all closed, and set up an on_proc_exit function that's + * charged with closing the sockets again at postmaster shutdown. */ for (i = 0; i < MAXLISTEN; i++) ListenSocket[i] = PGINVALID_SOCKET; + on_proc_exit(CloseServerPorts, 0); + if (ListenAddresses) { char *rawstring; @@ -1271,6 +1282,42 @@ PostmasterMain(int argc, char *argv[]) } +/* + * on_proc_exit callback to close server's listen sockets + */ +static void +CloseServerPorts(int status, Datum arg) +{ + int i; + + /* + * First, explicitly close all the socket FDs. We used to just let this + * happen implicitly at postmaster exit, but it's better to close them + * before we remove the postmaster.pid lockfile; otherwise there's a race + * condition if a new postmaster wants to re-use the TCP port number. + */ + for (i = 0; i < MAXLISTEN; i++) + { + if (ListenSocket[i] != PGINVALID_SOCKET) + { + StreamClose(ListenSocket[i]); + ListenSocket[i] = PGINVALID_SOCKET; + } + } + + /* + * Next, remove any filesystem entries for Unix sockets. To avoid race + * conditions against incoming postmasters, this must happen after closing + * the sockets and before removing lock files. + */ + RemoveSocketFiles(); + + /* + * We don't do anything about socket lock files here; those will be + * removed in a later on_proc_exit callback. + */ +} + /* * on_proc_exit callback to delete external_pid_file */ diff --git a/src/backend/utils/init/miscinit.c b/src/backend/utils/init/miscinit.c index ac3e764e8b8c2..5bf595c9e5fe4 100644 --- a/src/backend/utils/init/miscinit.c +++ b/src/backend/utils/init/miscinit.c @@ -1018,7 +1018,11 @@ CreateLockFile(const char *filename, bool amPostmaster, if (lock_files == NIL) on_proc_exit(UnlinkLockFiles, 0); - lock_files = lappend(lock_files, pstrdup(filename)); + /* + * Use lcons so that the lock files are unlinked in reverse order of + * creation; this is critical! 
+ */ + lock_files = lcons(pstrdup(filename), lock_files); } /* diff --git a/src/include/libpq/libpq.h b/src/include/libpq/libpq.h index c408e5b5517a0..efb2dacbba30c 100644 --- a/src/include/libpq/libpq.h +++ b/src/include/libpq/libpq.h @@ -59,6 +59,7 @@ extern int StreamServerPort(int family, char *hostName, extern int StreamConnection(pgsocket server_fd, Port *port); extern void StreamClose(pgsocket sock); extern void TouchSocketFiles(void); +extern void RemoveSocketFiles(void); extern void pq_init(void); extern int pq_getbytes(char *s, size_t len); extern int pq_getstring(StringInfo s); From ea8385df6ce95507951f6c12fa4defb5b3ba9cda Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Sun, 2 Aug 2015 22:12:33 +0300 Subject: [PATCH 148/442] Fix output of ISBN-13 numbers beginning with 979. An EAN beginning with 979 (but not 9790 - those are ISMN's) are accepted as ISBN numbers, but they cannot be represented in the old, 10-digit ISBN format. They must be output in the new 13-digit ISBN-13 format. We printed out an incorrect value for those. Also add a regression test, to test this and some other basic functionality of the module. Patch by Fabien Coelho. This fixes bug #13442, reported by B.Z. Backpatch to 9.1, where we started to recognize ISBN-13 numbers. --- contrib/isn/Makefile | 2 + contrib/isn/expected/isn.out | 222 +++++++++++++++++++++++++++++++++++ contrib/isn/isn.c | 27 +++-- contrib/isn/sql/isn.sql | 104 ++++++++++++++++ 4 files changed, 345 insertions(+), 10 deletions(-) create mode 100644 contrib/isn/expected/isn.out create mode 100644 contrib/isn/sql/isn.sql diff --git a/contrib/isn/Makefile b/contrib/isn/Makefile index 75c07a8296d8f..96aaf35884478 100644 --- a/contrib/isn/Makefile +++ b/contrib/isn/Makefile @@ -6,6 +6,8 @@ EXTENSION = isn DATA = isn--1.0.sql isn--unpackaged--1.0.sql PGFILEDESC = "isn - data types for international product numbering standards" +REGRESS = isn + ifdef USE_PGXS PG_CONFIG = pg_config PGXS := $(shell $(PG_CONFIG) --pgxs) diff --git a/contrib/isn/expected/isn.out b/contrib/isn/expected/isn.out new file mode 100644 index 0000000000000..140bc86656147 --- /dev/null +++ b/contrib/isn/expected/isn.out @@ -0,0 +1,222 @@ +-- +-- Test ISN extension +-- +CREATE EXTENSION isn; +-- +-- test valid conversions +-- +SELECT '9780123456786'::EAN13, -- old book + '9790123456785'::EAN13, -- music + '9791234567896'::EAN13, -- new book + '9771234567898'::EAN13, -- serial + '0123456789012'::EAN13, -- upc + '1234567890128'::EAN13; + ean13 | ean13 | ean13 | ean13 | ean13 | ean13 +-------------------+-------------------+-----------------+-------------------+-----------------+----------------- + 978-0-12-345678-6 | 979-0-1234-5678-5 | 979-123456789-6 | 977-1234-567-89-8 | 012-345678901-2 | 123-456789012-8 +(1 row) + +SELECT '9780123456786'::ISBN, + '123456789X'::ISBN, + '9780123456786'::ISBN13::ISBN, + '9780123456786'::EAN13::ISBN; + isbn | isbn | isbn | isbn +---------------+---------------+---------------+--------------- + 0-12-345678-9 | 1-234-56789-X | 0-12-345678-9 | 0-12-345678-9 +(1 row) + +SELECT -- new books, shown as ISBN13 even for ISBN... 
+ '9791234567896'::ISBN, + '9791234567896'::ISBN13::ISBN, + '9791234567896'::EAN13::ISBN; + isbn | isbn | isbn +-----------------+-----------------+----------------- + 979-123456789-6 | 979-123456789-6 | 979-123456789-6 +(1 row) + +SELECT '9780123456786'::ISBN13, + '123456789X'::ISBN13, + '9791234567896'::ISBN13, + '9791234567896'::EAN13::ISBN13; + isbn13 | isbn13 | isbn13 | isbn13 +-------------------+-------------------+-----------------+----------------- + 978-0-12-345678-6 | 978-1-234-56789-7 | 979-123456789-6 | 979-123456789-6 +(1 row) + +SELECT '9790123456785'::ISMN, + '9790123456785'::EAN13::ISMN, + 'M123456785'::ISMN, + 'M-1234-5678-5'::ISMN; + ismn | ismn | ismn | ismn +---------------+---------------+---------------+--------------- + M-1234-5678-5 | M-1234-5678-5 | M-1234-5678-5 | M-1234-5678-5 +(1 row) + +SELECT '9790123456785'::ISMN13, + 'M123456785'::ISMN13, + 'M-1234-5678-5'::ISMN13; + ismn13 | ismn13 | ismn13 +-------------------+-------------------+------------------- + 979-0-1234-5678-5 | 979-0-1234-5678-5 | 979-0-1234-5678-5 +(1 row) + +SELECT '9771234567003'::ISSN, + '12345679'::ISSN; + issn | issn +-----------+----------- + 1234-5679 | 1234-5679 +(1 row) + +SELECT '9771234567003'::ISSN13, + '12345679'::ISSN13, + '9771234567898'::ISSN13, + '9771234567898'::EAN13::ISSN13; + issn13 | issn13 | issn13 | issn13 +-------------------+-------------------+-------------------+------------------- + 977-1234-567-00-3 | 977-1234-567-00-3 | 977-1234-567-89-8 | 977-1234-567-89-8 +(1 row) + +SELECT '0123456789012'::UPC, + '0123456789012'::EAN13::UPC; + upc | upc +--------------+-------------- + 123456789012 | 123456789012 +(1 row) + +-- +-- test invalid checksums +-- +SELECT '1234567890'::ISBN; +ERROR: invalid check digit for ISBN number: "1234567890", should be X +LINE 1: SELECT '1234567890'::ISBN; + ^ +SELECT 'M123456780'::ISMN; +ERROR: invalid check digit for ISMN number: "M123456780", should be 5 +LINE 1: SELECT 'M123456780'::ISMN; + ^ +SELECT '12345670'::ISSN; +ERROR: invalid check digit for ISSN number: "12345670", should be 9 +LINE 1: SELECT '12345670'::ISSN; + ^ +SELECT '9780123456780'::ISBN; +ERROR: invalid check digit for ISBN number: "9780123456780", should be 6 +LINE 1: SELECT '9780123456780'::ISBN; + ^ +SELECT '9791234567890'::ISBN13; +ERROR: invalid check digit for ISBN number: "9791234567890", should be 6 +LINE 1: SELECT '9791234567890'::ISBN13; + ^ +SELECT '0123456789010'::UPC; +ERROR: invalid check digit for UPC number: "0123456789010", should be 2 +LINE 1: SELECT '0123456789010'::UPC; + ^ +SELECT '1234567890120'::EAN13; +ERROR: invalid check digit for EAN13 number: "1234567890120", should be 8 +LINE 1: SELECT '1234567890120'::EAN13; + ^ +-- +-- test invalid conversions +-- +SELECT '9790123456785'::ISBN; -- not a book +ERROR: cannot cast ISMN to ISBN for number: "9790123456785" +LINE 1: SELECT '9790123456785'::ISBN; + ^ +SELECT '9771234567898'::ISBN; -- not a book +ERROR: cannot cast ISSN to ISBN for number: "9771234567898" +LINE 1: SELECT '9771234567898'::ISBN; + ^ +SELECT '0123456789012'::ISBN; -- not a book +ERROR: cannot cast UPC to ISBN for number: "0123456789012" +LINE 1: SELECT '0123456789012'::ISBN; + ^ +SELECT '9790123456785'::ISBN13; -- not a book +ERROR: cannot cast ISMN to ISBN for number: "9790123456785" +LINE 1: SELECT '9790123456785'::ISBN13; + ^ +SELECT '9771234567898'::ISBN13; -- not a book +ERROR: cannot cast ISSN to ISBN for number: "9771234567898" +LINE 1: SELECT '9771234567898'::ISBN13; + ^ +SELECT '0123456789012'::ISBN13; -- not a book +ERROR: 
cannot cast UPC to ISBN for number: "0123456789012" +LINE 1: SELECT '0123456789012'::ISBN13; + ^ +SELECT '9780123456786'::ISMN; -- not music +ERROR: cannot cast ISBN to ISMN for number: "9780123456786" +LINE 1: SELECT '9780123456786'::ISMN; + ^ +SELECT '9771234567898'::ISMN; -- not music +ERROR: cannot cast ISSN to ISMN for number: "9771234567898" +LINE 1: SELECT '9771234567898'::ISMN; + ^ +SELECT '9791234567896'::ISMN; -- not music +ERROR: cannot cast ISBN to ISMN for number: "9791234567896" +LINE 1: SELECT '9791234567896'::ISMN; + ^ +SELECT '0123456789012'::ISMN; -- not music +ERROR: cannot cast UPC to ISMN for number: "0123456789012" +LINE 1: SELECT '0123456789012'::ISMN; + ^ +SELECT '9780123456786'::ISSN; -- not serial +ERROR: cannot cast ISBN to ISSN for number: "9780123456786" +LINE 1: SELECT '9780123456786'::ISSN; + ^ +SELECT '9790123456785'::ISSN; -- not serial +ERROR: cannot cast ISMN to ISSN for number: "9790123456785" +LINE 1: SELECT '9790123456785'::ISSN; + ^ +SELECT '9791234567896'::ISSN; -- not serial +ERROR: cannot cast ISBN to ISSN for number: "9791234567896" +LINE 1: SELECT '9791234567896'::ISSN; + ^ +SELECT '0123456789012'::ISSN; -- not serial +ERROR: cannot cast UPC to ISSN for number: "0123456789012" +LINE 1: SELECT '0123456789012'::ISSN; + ^ +SELECT '9780123456786'::UPC; -- not a product +ERROR: cannot cast ISBN to UPC for number: "9780123456786" +LINE 1: SELECT '9780123456786'::UPC; + ^ +SELECT '9771234567898'::UPC; -- not a product +ERROR: cannot cast ISSN to UPC for number: "9771234567898" +LINE 1: SELECT '9771234567898'::UPC; + ^ +SELECT '9790123456785'::UPC; -- not a product +ERROR: cannot cast ISMN to UPC for number: "9790123456785" +LINE 1: SELECT '9790123456785'::UPC; + ^ +SELECT '9791234567896'::UPC; -- not a product +ERROR: cannot cast ISBN to UPC for number: "9791234567896" +LINE 1: SELECT '9791234567896'::UPC; + ^ +SELECT 'postgresql...'::EAN13; +ERROR: invalid input syntax for EAN13 number: "postgresql..." +LINE 1: SELECT 'postgresql...'::EAN13; + ^ +SELECT 'postgresql...'::ISBN; +ERROR: invalid input syntax for ISBN number: "postgresql..." +LINE 1: SELECT 'postgresql...'::ISBN; + ^ +SELECT 9780123456786::EAN13; +ERROR: cannot cast type bigint to ean13 +LINE 1: SELECT 9780123456786::EAN13; + ^ +SELECT 9780123456786::ISBN; +ERROR: cannot cast type bigint to isbn +LINE 1: SELECT 9780123456786::ISBN; + ^ +-- +-- test some comparisons, must yield true +-- +SELECT '12345679'::ISSN = '9771234567003'::EAN13 AS "ok", + 'M-1234-5678-5'::ISMN = '9790123456785'::EAN13 AS "ok", + '9791234567896'::EAN13 != '123456789X'::ISBN AS "nope"; + ok | ok | nope +----+----+------ + t | t | t +(1 row) + +-- +-- cleanup +-- +DROP EXTENSION isn; diff --git a/contrib/isn/isn.c b/contrib/isn/isn.c index 40398245f6d0e..9f53e1a1fbf60 100644 --- a/contrib/isn/isn.c +++ b/contrib/isn/isn.c @@ -443,16 +443,23 @@ ean2ISBN(char *isn) char *aux; unsigned check; - /* the number should come in this format: 978-0-000-00000-0 */ - /* Strip the first part and calculate the new check digit */ - hyphenate(isn, isn + 4, NULL, NULL); - check = weight_checkdig(isn, 10); - aux = strchr(isn, '\0'); - while (!isdigit((unsigned char) *--aux)); - if (check == 10) - *aux = 'X'; - else - *aux = check + '0'; + /* + * The number should come in this format: 978-0-000-00000-0 + * or may be an ISBN-13 number, 979-..., which does not have a short + * representation. Do the short output version if possible. 
+ */ + if (strncmp("978-", isn, 4) == 0) + { + /* Strip the first part and calculate the new check digit */ + hyphenate(isn, isn + 4, NULL, NULL); + check = weight_checkdig(isn, 10); + aux = strchr(isn, '\0'); + while (!isdigit((unsigned char) *--aux)); + if (check == 10) + *aux = 'X'; + else + *aux = check + '0'; + } } static inline void diff --git a/contrib/isn/sql/isn.sql b/contrib/isn/sql/isn.sql new file mode 100644 index 0000000000000..5ef6d8aa3bee2 --- /dev/null +++ b/contrib/isn/sql/isn.sql @@ -0,0 +1,104 @@ +-- +-- Test ISN extension +-- + +CREATE EXTENSION isn; + +-- +-- test valid conversions +-- +SELECT '9780123456786'::EAN13, -- old book + '9790123456785'::EAN13, -- music + '9791234567896'::EAN13, -- new book + '9771234567898'::EAN13, -- serial + '0123456789012'::EAN13, -- upc + '1234567890128'::EAN13; + +SELECT '9780123456786'::ISBN, + '123456789X'::ISBN, + '9780123456786'::ISBN13::ISBN, + '9780123456786'::EAN13::ISBN; + +SELECT -- new books, shown as ISBN13 even for ISBN... + '9791234567896'::ISBN, + '9791234567896'::ISBN13::ISBN, + '9791234567896'::EAN13::ISBN; + +SELECT '9780123456786'::ISBN13, + '123456789X'::ISBN13, + '9791234567896'::ISBN13, + '9791234567896'::EAN13::ISBN13; + +SELECT '9790123456785'::ISMN, + '9790123456785'::EAN13::ISMN, + 'M123456785'::ISMN, + 'M-1234-5678-5'::ISMN; + +SELECT '9790123456785'::ISMN13, + 'M123456785'::ISMN13, + 'M-1234-5678-5'::ISMN13; + +SELECT '9771234567003'::ISSN, + '12345679'::ISSN; + +SELECT '9771234567003'::ISSN13, + '12345679'::ISSN13, + '9771234567898'::ISSN13, + '9771234567898'::EAN13::ISSN13; + +SELECT '0123456789012'::UPC, + '0123456789012'::EAN13::UPC; + +-- +-- test invalid checksums +-- +SELECT '1234567890'::ISBN; +SELECT 'M123456780'::ISMN; +SELECT '12345670'::ISSN; +SELECT '9780123456780'::ISBN; +SELECT '9791234567890'::ISBN13; +SELECT '0123456789010'::UPC; +SELECT '1234567890120'::EAN13; + +-- +-- test invalid conversions +-- +SELECT '9790123456785'::ISBN; -- not a book +SELECT '9771234567898'::ISBN; -- not a book +SELECT '0123456789012'::ISBN; -- not a book + +SELECT '9790123456785'::ISBN13; -- not a book +SELECT '9771234567898'::ISBN13; -- not a book +SELECT '0123456789012'::ISBN13; -- not a book + +SELECT '9780123456786'::ISMN; -- not music +SELECT '9771234567898'::ISMN; -- not music +SELECT '9791234567896'::ISMN; -- not music +SELECT '0123456789012'::ISMN; -- not music + +SELECT '9780123456786'::ISSN; -- not serial +SELECT '9790123456785'::ISSN; -- not serial +SELECT '9791234567896'::ISSN; -- not serial +SELECT '0123456789012'::ISSN; -- not serial + +SELECT '9780123456786'::UPC; -- not a product +SELECT '9771234567898'::UPC; -- not a product +SELECT '9790123456785'::UPC; -- not a product +SELECT '9791234567896'::UPC; -- not a product + +SELECT 'postgresql...'::EAN13; +SELECT 'postgresql...'::ISBN; +SELECT 9780123456786::EAN13; +SELECT 9780123456786::ISBN; + +-- +-- test some comparisons, must yield true +-- +SELECT '12345679'::ISSN = '9771234567003'::EAN13 AS "ok", + 'M-1234-5678-5'::ISMN = '9790123456785'::EAN13 AS "ok", + '9791234567896'::EAN13 != '123456789X'::ISBN AS "nope"; + +-- +-- cleanup +-- +DROP EXTENSION isn; From c75b1f75b3d159c0e71c1ec7f42c922bce448d89 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 2 Aug 2015 15:48:27 -0400 Subject: [PATCH 149/442] Avoid calling memcpy() with a NULL source pointer and count == 0. As in commit 0a52d378b03b7d5a, avoid doing something that has undefined results according to the C standard, even though in practice there does not seem to be any problem with it. 
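A minimal standalone illustration of the guard pattern applied below; copy_digits and Digit are invented names for this sketch. The point is simply that the memcpy() call is skipped when there is nothing to copy, because passing a null source pointer is undefined behavior even when the count is zero:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

typedef int16_t Digit;

static void
copy_digits(Digit *dest, const Digit *src, int ndigits)
{
	/* src may legitimately be NULL when ndigits == 0 */
	if (ndigits > 0)
		memcpy(dest, src, ndigits * sizeof(Digit));
}

int
main(void)
{
	Digit		buf[4] = {0};
	Digit		src[2] = {7, 9};

	copy_digits(buf, src, 2);	/* normal copy */
	copy_digits(buf, NULL, 0);	/* no-op, and no undefined behavior */

	printf("%d %d\n", buf[0], buf[1]);
	return 0;
}
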
This fixes two places in numeric.c that demonstrably could call memcpy() with such arguments. I looked through that file and didn't see any other places with similar hazards; this is not to claim that there are not such places in other files. Per report from Piotr Stefaniak. Back-patch to 9.5 which is where the previous commit was added. We're more or less setting a precedent that we will not worry about this type of issue in pre-9.5 branches unless someone demonstrates a problem in the field. --- src/backend/utils/adt/numeric.c | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c index 7ce41b788880c..1bfa29e1b280b 100644 --- a/src/backend/utils/adt/numeric.c +++ b/src/backend/utils/adt/numeric.c @@ -4769,7 +4769,9 @@ set_var_from_var(NumericVar *value, NumericVar *dest) newbuf = digitbuf_alloc(value->ndigits + 1); newbuf[0] = 0; /* spare digit for rounding */ - memcpy(newbuf + 1, value->digits, value->ndigits * sizeof(NumericDigit)); + if (value->ndigits > 0) /* else value->digits might be null */ + memcpy(newbuf + 1, value->digits, + value->ndigits * sizeof(NumericDigit)); digitbuf_free(dest->buf); @@ -5090,8 +5092,9 @@ make_result(NumericVar *var) result->choice.n_long.n_weight = weight; } - memcpy(NUMERIC_DIGITS(result), digits, n * sizeof(NumericDigit)); Assert(NUMERIC_NDIGITS(result) == n); + if (n > 0) + memcpy(NUMERIC_DIGITS(result), digits, n * sizeof(NumericDigit)); /* Check for overflow of int16 fields */ if (NUMERIC_WEIGHT(result) != weight || From 89e80b03297555277473fc3978b83c68ec9847b8 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 2 Aug 2015 23:49:19 -0400 Subject: [PATCH 150/442] Fix a number of places that produced XX000 errors in the regression tests. It's against project policy to use elog() for user-facing errors, or to omit an errcode() selection for errors that aren't supposed to be "can't happen" cases. Fix all the violations of this policy that result in ERRCODE_INTERNAL_ERROR log entries during the standard regression tests, as errors that can reliably be triggered from SQL surely should be considered user-facing. I also looked through all the files touched by this commit and fixed other nearby problems of the same ilk. I do not claim to have fixed all violations of the policy, just the ones in these files. In a few places I also changed existing ERRCODE choices that didn't seem particularly appropriate; mainly replacing ERRCODE_SYNTAX_ERROR by something more specific. Back-patch to 9.5, but no further; changing ERRCODE assignments in stable branches doesn't seem like a good idea. 
--- contrib/tablefunc/tablefunc.c | 40 +++++++++------ src/backend/access/common/reloptions.c | 19 ++++--- src/backend/access/heap/heapam.c | 8 ++- src/backend/commands/copy.c | 32 ++++++++---- src/backend/commands/vacuum.c | 5 +- src/backend/executor/execQual.c | 6 ++- src/backend/utils/adt/txid.c | 13 +++-- src/pl/plperl/plperl.c | 70 +++++++++++++++++--------- src/pl/plpython/plpy_elog.c | 2 +- src/pl/plpython/plpy_exec.c | 12 +++-- src/pl/tcl/pltcl.c | 51 +++++++++++++------ src/test/regress/expected/txid.out | 10 ++-- src/test/regress/regress.c | 6 ++- 13 files changed, 181 insertions(+), 93 deletions(-) diff --git a/contrib/tablefunc/tablefunc.c b/contrib/tablefunc/tablefunc.c index 8a95d4710b700..cb1d029bf5472 100644 --- a/contrib/tablefunc/tablefunc.c +++ b/contrib/tablefunc/tablefunc.c @@ -432,7 +432,9 @@ crosstab(PG_FUNCTION_ARGS) break; default: /* result type isn't composite */ - elog(ERROR, "return type must be a row type"); + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("return type must be a row type"))); break; } @@ -1350,7 +1352,9 @@ build_tuplestore_recursively(char *key_fld, appendStringInfo(&chk_current_key, "%s%s%s", branch_delim, current_key, branch_delim); if (strstr(chk_branchstr.data, chk_current_key.data)) - elog(ERROR, "infinite recursion detected"); + ereport(ERROR, + (errcode(ERRCODE_INVALID_RECURSION), + errmsg("infinite recursion detected"))); } /* OK, extend the branch */ @@ -1429,7 +1433,7 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial { if (tupdesc->natts != (CONNECTBY_NCOLS + serial_column)) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("Query-specified return tuple has " \ "wrong number of columns."))); @@ -1438,7 +1442,7 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial { if (tupdesc->natts != CONNECTBY_NCOLS_NOBRANCH + serial_column) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("Query-specified return tuple has " \ "wrong number of columns."))); @@ -1447,14 +1451,14 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial /* check that the types of the first two columns match */ if (tupdesc->attrs[0]->atttypid != tupdesc->attrs[1]->atttypid) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("First two columns must be the same type."))); /* check that the type of the third column is INT4 */ if (tupdesc->attrs[2]->atttypid != INT4OID) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("Third column must be type %s.", format_type_be(INT4OID)))); @@ -1462,20 +1466,26 @@ validateConnectbyTupleDesc(TupleDesc tupdesc, bool show_branch, bool show_serial /* check that the type of the fourth column is TEXT if applicable */ if (show_branch && tupdesc->attrs[3]->atttypid != TEXTOID) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("Fourth column must be type %s.", format_type_be(TEXTOID)))); /* check that the type of the fifth column is INT4 */ if (show_branch && show_serial && tupdesc->attrs[4]->atttypid != INT4OID) - elog(ERROR, "query-specified return tuple not valid for Connectby: " - "fifth column must be type %s", format_type_be(INT4OID)); + 
ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("query-specified return tuple not valid for Connectby: " + "fifth column must be type %s", + format_type_be(INT4OID)))); /* check that the type of the fifth column is INT4 */ if (!show_branch && show_serial && tupdesc->attrs[3]->atttypid != INT4OID) - elog(ERROR, "query-specified return tuple not valid for Connectby: " - "fourth column must be type %s", format_type_be(INT4OID)); + ereport(ERROR, + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("query-specified return tuple not valid for Connectby: " + "fourth column must be type %s", + format_type_be(INT4OID)))); /* OK, the tupdesc is valid for our purposes */ } @@ -1496,7 +1506,7 @@ compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) */ if (sql_tupdesc->natts < 2) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("Query must return at least two columns."))); @@ -1511,7 +1521,7 @@ compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) if (ret_atttypid != sql_atttypid || (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("SQL key field type %s does " \ "not match return key field type %s.", @@ -1525,7 +1535,7 @@ compatConnectbyTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) if (ret_atttypid != sql_atttypid || (ret_atttypmod >= 0 && ret_atttypmod != sql_atttypmod)) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("SQL parent key field type %s does " \ "not match return parent key field type %s.", @@ -1556,7 +1566,7 @@ compatCrosstabTupleDescs(TupleDesc ret_tupdesc, TupleDesc sql_tupdesc) sql_atttypid = sql_tupdesc->attrs[0]->atttypid; if (ret_atttypid != sql_atttypid) ereport(ERROR, - (errcode(ERRCODE_SYNTAX_ERROR), + (errcode(ERRCODE_DATATYPE_MISMATCH), errmsg("invalid return type"), errdetail("SQL rowid datatype does not match " \ "return rowid datatype."))); diff --git a/src/backend/access/common/reloptions.c b/src/backend/access/common/reloptions.c index 8176b6a6d414b..180f529060d1a 100644 --- a/src/backend/access/common/reloptions.c +++ b/src/backend/access/common/reloptions.c @@ -484,7 +484,7 @@ allocate_reloption(bits32 kinds, int type, char *name, char *desc) size = sizeof(relopt_string); break; default: - elog(ERROR, "unsupported option type"); + elog(ERROR, "unsupported reloption type %d", type); return NULL; /* keep compiler quiet */ } @@ -1016,7 +1016,8 @@ parse_one_reloption(relopt_value *option, char *text_str, int text_len, parsed = parse_bool(value, &option->values.bool_val); if (validate && !parsed) ereport(ERROR, - (errmsg("invalid value for boolean option \"%s\": %s", + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid value for boolean option \"%s\": %s", option->gen->name, value))); } break; @@ -1027,12 +1028,14 @@ parse_one_reloption(relopt_value *option, char *text_str, int text_len, parsed = parse_int(value, &option->values.int_val, 0, NULL); if (validate && !parsed) ereport(ERROR, - (errmsg("invalid value for integer option \"%s\": %s", + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid value for integer option \"%s\": %s", option->gen->name, value))); if (validate && (option->values.int_val < optint->min || option->values.int_val > optint->max)) ereport(ERROR, - (errmsg("value %s out of bounds for 
option \"%s\"", + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("value %s out of bounds for option \"%s\"", value, option->gen->name), errdetail("Valid values are between \"%d\" and \"%d\".", optint->min, optint->max))); @@ -1045,12 +1048,14 @@ parse_one_reloption(relopt_value *option, char *text_str, int text_len, parsed = parse_real(value, &option->values.real_val); if (validate && !parsed) ereport(ERROR, - (errmsg("invalid value for floating point option \"%s\": %s", + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("invalid value for floating point option \"%s\": %s", option->gen->name, value))); if (validate && (option->values.real_val < optreal->min || option->values.real_val > optreal->max)) ereport(ERROR, - (errmsg("value %s out of bounds for option \"%s\"", + (errcode(ERRCODE_INVALID_PARAMETER_VALUE), + errmsg("value %s out of bounds for option \"%s\"", value, option->gen->name), errdetail("Valid values are between \"%f\" and \"%f\".", optreal->min, optreal->max))); @@ -1168,7 +1173,7 @@ fillRelOptions(void *rdopts, Size basesize, } break; default: - elog(ERROR, "unrecognized reloption type %c", + elog(ERROR, "unsupported reloption type %d", options[i].gen->type); break; } diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 050efdc4806a7..3701d8e59d536 100644 --- a/src/backend/access/heap/heapam.c +++ b/src/backend/access/heap/heapam.c @@ -2801,7 +2801,9 @@ heap_delete(Relation relation, ItemPointer tid, if (result == HeapTupleInvisible) { UnlockReleaseBuffer(buffer); - elog(ERROR, "attempted to delete invisible tuple"); + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("attempted to delete invisible tuple"))); } else if (result == HeapTupleBeingUpdated && wait) { @@ -3343,7 +3345,9 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, if (result == HeapTupleInvisible) { UnlockReleaseBuffer(buffer); - elog(ERROR, "attempted to update invisible tuple"); + ereport(ERROR, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), + errmsg("attempted to update invisible tuple"))); } else if (result == HeapTupleBeingUpdated && wait) { diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 47dd3accafe24..8db1b35fe82d9 100644 --- a/src/backend/commands/copy.c +++ b/src/backend/commands/copy.c @@ -1422,9 +1422,9 @@ BeginCopy(bool is_from, * in any RLS clauses. * * When this happens, we are passed in the relid of the originally - * found relation (which we have locked). As the planner will look - * up the relation again, we double-check here to make sure it found - * the same one that we have locked. + * found relation (which we have locked). As the planner will look up + * the relation again, we double-check here to make sure it found the + * same one that we have locked. 
*/ if (queryRelId != InvalidOid) { @@ -1603,10 +1603,12 @@ ClosePipeToProgram(CopyState cstate) pclose_rc = ClosePipeStream(cstate->copy_file); if (pclose_rc == -1) ereport(ERROR, - (errmsg("could not close pipe to external command: %m"))); + (errcode_for_file_access(), + errmsg("could not close pipe to external command: %m"))); else if (pclose_rc != 0) ereport(ERROR, - (errmsg("program \"%s\" failed", + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("program \"%s\" failed", cstate->filename), errdetail_internal("%s", wait_result_to_str(pclose_rc)))); } @@ -1703,7 +1705,8 @@ BeginCopyTo(Relation rel, cstate->copy_file = OpenPipeStream(cstate->filename, PG_BINARY_W); if (cstate->copy_file == NULL) ereport(ERROR, - (errmsg("could not execute command \"%s\": %m", + (errcode_for_file_access(), + errmsg("could not execute command \"%s\": %m", cstate->filename))); } else @@ -1730,7 +1733,10 @@ BeginCopyTo(Relation rel, cstate->filename))); if (fstat(fileno(cstate->copy_file), &st)) - elog(ERROR, "could not stat file \"%s\": %m", cstate->filename); + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not stat file \"%s\": %m", + cstate->filename))); if (S_ISDIR(st.st_mode)) ereport(ERROR, @@ -2271,13 +2277,13 @@ CopyFrom(CopyState cstate) { if (!ThereAreNoPriorRegisteredSnapshots() || !ThereAreNoReadyPortals()) ereport(ERROR, - (ERRCODE_INVALID_TRANSACTION_STATE, + (errcode(ERRCODE_INVALID_TRANSACTION_STATE), errmsg("cannot perform FREEZE because of prior transaction activity"))); if (cstate->rel->rd_createSubid != GetCurrentSubTransactionId() && cstate->rel->rd_newRelfilenodeSubid != GetCurrentSubTransactionId()) ereport(ERROR, - (ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE, + (errcode(ERRCODE_OBJECT_NOT_IN_PREREQUISITE_STATE), errmsg("cannot perform FREEZE because the table was not created or truncated in the current subtransaction"))); hi_options |= HEAP_INSERT_FROZEN; @@ -2737,7 +2743,8 @@ BeginCopyFrom(Relation rel, cstate->copy_file = OpenPipeStream(cstate->filename, PG_BINARY_R); if (cstate->copy_file == NULL) ereport(ERROR, - (errmsg("could not execute command \"%s\": %m", + (errcode_for_file_access(), + errmsg("could not execute command \"%s\": %m", cstate->filename))); } else @@ -2752,7 +2759,10 @@ BeginCopyFrom(Relation rel, cstate->filename))); if (fstat(fileno(cstate->copy_file), &st)) - elog(ERROR, "could not stat file \"%s\": %m", cstate->filename); + ereport(ERROR, + (errcode_for_file_access(), + errmsg("could not stat file \"%s\": %m", + cstate->filename))); if (S_ISDIR(st.st_mode)) ereport(ERROR, diff --git a/src/backend/commands/vacuum.c b/src/backend/commands/vacuum.c index baf66f1e6c01e..85b0483247948 100644 --- a/src/backend/commands/vacuum.c +++ b/src/backend/commands/vacuum.c @@ -180,7 +180,10 @@ vacuum(int options, RangeVar *relation, Oid relid, VacuumParams *params, * calls a hostile index expression that itself calls ANALYZE. 
*/ if (in_vacuum) - elog(ERROR, "%s cannot be executed from VACUUM or ANALYZE", stmttype); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("%s cannot be executed from VACUUM or ANALYZE", + stmttype))); /* * Send info about dead objects to the statistics collector, unless we are diff --git a/src/backend/executor/execQual.c b/src/backend/executor/execQual.c index 0f911f210bf3b..16bc8fa5f6c35 100644 --- a/src/backend/executor/execQual.c +++ b/src/backend/executor/execQual.c @@ -631,7 +631,8 @@ ExecEvalScalarVar(ExprState *exprstate, ExprContext *econtext, { if (variable->vartype != attr->atttypid) ereport(ERROR, - (errmsg("attribute %d has wrong type", attnum), + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("attribute %d has wrong type", attnum), errdetail("Table has type %s, but query expects %s.", format_type_be(attr->atttypid), format_type_be(variable->vartype)))); @@ -4111,7 +4112,8 @@ ExecEvalFieldSelect(FieldSelectState *fstate, /* As in ExecEvalScalarVar, we should but can't check typmod */ if (fselect->resulttype != attr->atttypid) ereport(ERROR, - (errmsg("attribute %d has wrong type", fieldnum), + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("attribute %d has wrong type", fieldnum), errdetail("Table has type %s, but query expects %s.", format_type_be(attr->atttypid), format_type_be(fselect->resulttype)))); diff --git a/src/backend/utils/adt/txid.c b/src/backend/utils/adt/txid.c index ce1d9abddea93..ba4b48298fddd 100644 --- a/src/backend/utils/adt/txid.c +++ b/src/backend/utils/adt/txid.c @@ -334,8 +334,11 @@ parse_snapshot(const char *str) return buf_finalize(buf); bad_format: - elog(ERROR, "invalid input for txid_snapshot: \"%s\"", str_start); - return NULL; + ereport(ERROR, + (errcode(ERRCODE_INVALID_TEXT_REPRESENTATION), + errmsg("invalid input syntax for type txid_snapshot: \"%s\"", + str_start))); + return NULL; /* keep compiler quiet */ } /* @@ -526,8 +529,10 @@ txid_snapshot_recv(PG_FUNCTION_ARGS) PG_RETURN_POINTER(snap); bad_format: - elog(ERROR, "invalid snapshot data"); - return (Datum) NULL; + ereport(ERROR, + (errcode(ERRCODE_INVALID_BINARY_REPRESENTATION), + errmsg("invalid external txid_snapshot data"))); + PG_RETURN_POINTER(NULL); /* keep compiler quiet */ } /* diff --git a/src/pl/plperl/plperl.c b/src/pl/plperl/plperl.c index 78baaac05db27..ae0ba19814fd0 100644 --- a/src/pl/plperl/plperl.c +++ b/src/pl/plperl/plperl.c @@ -640,8 +640,9 @@ select_perl_context(bool trusted) else plperl_untrusted_init(); #else - elog(ERROR, - "cannot allocate multiple Perl interpreters on this platform"); + errmsg(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot allocate multiple Perl interpreters on this platform"))); #endif } @@ -660,7 +661,8 @@ select_perl_context(bool trusted) eval_pv("PostgreSQL::InServer::SPI::bootstrap()", FALSE); if (SvTRUE(ERRSV)) ereport(ERROR, - (errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), errcontext("while executing PostgreSQL::InServer::SPI::bootstrap"))); /* Fully initialized, so mark the hashtable entry valid */ @@ -834,12 +836,14 @@ plperl_init_interp(void) if (perl_parse(plperl, plperl_init_shared_libs, nargs, embedding, NULL) != 0) ereport(ERROR, - (errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), errcontext("while parsing Perl initialization"))); if (perl_run(plperl) != 0) ereport(ERROR, - (errmsg("%s", 
strip_trailing_ws(sv2cstr(ERRSV))), + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), errcontext("while running Perl initialization"))); #ifdef PLPERL_RESTORE_LOCALE @@ -952,7 +956,8 @@ plperl_trusted_init(void) eval_pv(PLC_TRUSTED, FALSE); if (SvTRUE(ERRSV)) ereport(ERROR, - (errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), errcontext("while executing PLC_TRUSTED"))); /* @@ -963,7 +968,8 @@ plperl_trusted_init(void) eval_pv("my $a=chr(0x100); return $a =~ /\\xa9/i", FALSE); if (SvTRUE(ERRSV)) ereport(ERROR, - (errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), errcontext("while executing utf8fix"))); /* @@ -1002,11 +1008,12 @@ plperl_trusted_init(void) if (plperl_on_plperl_init && *plperl_on_plperl_init) { eval_pv(plperl_on_plperl_init, FALSE); + /* XXX need to find a way to determine a better errcode here */ if (SvTRUE(ERRSV)) ereport(ERROR, - (errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), errcontext("while executing plperl.on_plperl_init"))); - } } @@ -1025,7 +1032,8 @@ plperl_untrusted_init(void) eval_pv(plperl_on_plperlu_init, FALSE); if (SvTRUE(ERRSV)) ereport(ERROR, - (errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))), errcontext("while executing plperl.on_plperlu_init"))); } } @@ -1382,7 +1390,9 @@ plperl_sv_to_literal(SV *sv, char *fqtypename) isnull; if (!OidIsValid(typid)) - elog(ERROR, "lookup failed for type %s", fqtypename); + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("lookup failed for type %s", fqtypename))); datum = plperl_sv_to_datum(sv, typid, -1, @@ -2059,7 +2069,8 @@ plperl_create_sub(plperl_proc_desc *prodesc, char *s, Oid fn_oid) if (!subref) ereport(ERROR, - (errmsg("didn't get a CODE reference from compiling function \"%s\"", + (errcode(ERRCODE_SYNTAX_ERROR), + errmsg("didn't get a CODE reference from compiling function \"%s\"", prodesc->proname))); prodesc->reference = subref; @@ -2147,7 +2158,9 @@ plperl_call_perl_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo) PUTBACK; FREETMPS; LEAVE; - elog(ERROR, "didn't get a return item from function"); + ereport(ERROR, + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("didn't get a return item from function"))); } if (SvTRUE(ERRSV)) @@ -2156,9 +2169,10 @@ plperl_call_perl_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo) PUTBACK; FREETMPS; LEAVE; - /* XXX need to find a way to assign an errcode here */ + /* XXX need to find a way to determine a better errcode here */ ereport(ERROR, - (errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))))); + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))))); } retval = newSVsv(POPs); @@ -2187,7 +2201,9 @@ plperl_call_perl_trigger_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo, TDsv = get_sv("main::_TD", 0); if (!TDsv) - elog(ERROR, "couldn't fetch $_TD"); + ereport(ERROR, + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("couldn't fetch $_TD"))); save_item(TDsv); /* local $_TD */ sv_setsv(TDsv, td); @@ -2209,7 +2225,9 @@ plperl_call_perl_trigger_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo, PUTBACK; FREETMPS; LEAVE; - elog(ERROR, "didn't get a return item 
from trigger function"); + ereport(ERROR, + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("didn't get a return item from trigger function"))); } if (SvTRUE(ERRSV)) @@ -2218,9 +2236,10 @@ plperl_call_perl_trigger_func(plperl_proc_desc *desc, FunctionCallInfo fcinfo, PUTBACK; FREETMPS; LEAVE; - /* XXX need to find a way to assign an errcode here */ + /* XXX need to find a way to determine a better errcode here */ ereport(ERROR, - (errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))))); + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))))); } retval = newSVsv(POPs); @@ -2248,7 +2267,9 @@ plperl_call_perl_event_trigger_func(plperl_proc_desc *desc, TDsv = get_sv("main::_TD", 0); if (!TDsv) - elog(ERROR, "couldn't fetch $_TD"); + ereport(ERROR, + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("couldn't fetch $_TD"))); save_item(TDsv); /* local $_TD */ sv_setsv(TDsv, td); @@ -2266,7 +2287,9 @@ plperl_call_perl_event_trigger_func(plperl_proc_desc *desc, PUTBACK; FREETMPS; LEAVE; - elog(ERROR, "didn't get a return item from trigger function"); + ereport(ERROR, + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("didn't get a return item from trigger function"))); } if (SvTRUE(ERRSV)) @@ -2275,9 +2298,10 @@ plperl_call_perl_event_trigger_func(plperl_proc_desc *desc, PUTBACK; FREETMPS; LEAVE; - /* XXX need to find a way to assign an errcode here */ + /* XXX need to find a way to determine a better errcode here */ ereport(ERROR, - (errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))))); + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", strip_trailing_ws(sv2cstr(ERRSV))))); } retval = newSVsv(POPs); diff --git a/src/pl/plpython/plpy_elog.c b/src/pl/plpython/plpy_elog.c index 461d68c5461f6..15406d60b9012 100644 --- a/src/pl/plpython/plpy_elog.c +++ b/src/pl/plpython/plpy_elog.c @@ -97,7 +97,7 @@ PLy_elog(int elevel, const char *fmt,...) PG_TRY(); { ereport(elevel, - (errcode(sqlerrcode ? sqlerrcode : ERRCODE_INTERNAL_ERROR), + (errcode(sqlerrcode ? sqlerrcode : ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), errmsg_internal("%s", primary ? primary : "no exception data"), (detail) ? errdetail_internal("%s", detail) : 0, (tb_depth > 0 && tbmsg) ? 
errcontext("%s", tbmsg) : 0, diff --git a/src/pl/plpython/plpy_exec.c b/src/pl/plpython/plpy_exec.c index 8c525c932a7e5..3ccebe403e428 100644 --- a/src/pl/plpython/plpy_exec.c +++ b/src/pl/plpython/plpy_exec.c @@ -662,11 +662,13 @@ PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd, TriggerData *tdata, { if ((plntup = PyDict_GetItemString(pltd, "new")) == NULL) ereport(ERROR, - (errmsg("TD[\"new\"] deleted, cannot modify row"))); + (errcode(ERRCODE_UNDEFINED_OBJECT), + errmsg("TD[\"new\"] deleted, cannot modify row"))); Py_INCREF(plntup); if (!PyDict_Check(plntup)) ereport(ERROR, - (errmsg("TD[\"new\"] is not a dictionary"))); + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("TD[\"new\"] is not a dictionary"))); plkeys = PyDict_Keys(plntup); natts = PyList_Size(plkeys); @@ -690,13 +692,15 @@ PLy_modify_tuple(PLyProcedure *proc, PyObject *pltd, TriggerData *tdata, else { ereport(ERROR, - (errmsg("TD[\"new\"] dictionary key at ordinal position %d is not a string", i))); + (errcode(ERRCODE_DATATYPE_MISMATCH), + errmsg("TD[\"new\"] dictionary key at ordinal position %d is not a string", i))); plattstr = NULL; /* keep compiler quiet */ } attn = SPI_fnumber(tupdesc, plattstr); if (attn == SPI_ERROR_NOATTRIBUTE) ereport(ERROR, - (errmsg("key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering row", + (errcode(ERRCODE_UNDEFINED_COLUMN), + errmsg("key \"%s\" found in TD[\"new\"] does not exist as a column in the triggering row", plattstr))); atti = attn - 1; diff --git a/src/pl/tcl/pltcl.c b/src/pl/tcl/pltcl.c index 48a3206da1ce8..edfda5915b24f 100644 --- a/src/pl/tcl/pltcl.c +++ b/src/pl/tcl/pltcl.c @@ -551,7 +551,8 @@ pltcl_init_load_unknown(Tcl_Interp *interp) if (SPI_processed == 0) { SPI_freetuptable(SPI_tuptable); - elog(WARNING, "module \"unknown\" not found in pltcl_modules"); + ereport(WARNING, + (errmsg("module \"unknown\" not found in pltcl_modules"))); relation_close(pmrel, AccessShareLock); return; } @@ -585,8 +586,10 @@ pltcl_init_load_unknown(Tcl_Interp *interp) if (tcl_rc != TCL_OK) { UTF_BEGIN; - elog(ERROR, "could not load module \"unknown\": %s", - UTF_U2E(Tcl_GetStringResult(interp))); + ereport(ERROR, + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("could not load module \"unknown\": %s", + UTF_U2E(Tcl_GetStringResult(interp))))); UTF_END; } @@ -1039,8 +1042,10 @@ pltcl_trigger_handler(PG_FUNCTION_ARGS, bool pltrusted) &ret_numvals, &ret_values) != TCL_OK) { UTF_BEGIN; - elog(ERROR, "could not split return value from trigger: %s", - UTF_U2E(Tcl_GetStringResult(interp))); + ereport(ERROR, + (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + errmsg("could not split return value from trigger: %s", + UTF_U2E(Tcl_GetStringResult(interp))))); UTF_END; } @@ -1048,7 +1053,9 @@ pltcl_trigger_handler(PG_FUNCTION_ARGS, bool pltrusted) PG_TRY(); { if (ret_numvals % 2 != 0) - elog(ERROR, "invalid return list from trigger - must have even # of elements"); + ereport(ERROR, + (errcode(ERRCODE_E_R_I_E_TRIGGER_PROTOCOL_VIOLATED), + errmsg("invalid return list from trigger - must have even # of elements"))); modattrs = (int *) palloc(tupdesc->natts * sizeof(int)); modvalues = (Datum *) palloc(tupdesc->natts * sizeof(Datum)); @@ -1082,9 +1089,15 @@ pltcl_trigger_handler(PG_FUNCTION_ARGS, bool pltrusted) ************************************************************/ attnum = SPI_fnumber(tupdesc, ret_name); if (attnum == SPI_ERROR_NOATTRIBUTE) - elog(ERROR, "invalid attribute \"%s\"", ret_name); + ereport(ERROR, + (errcode(ERRCODE_UNDEFINED_COLUMN), + 
errmsg("unrecognized attribute \"%s\"", + ret_name))); if (attnum <= 0) - elog(ERROR, "cannot set system attribute \"%s\"", ret_name); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("cannot set system attribute \"%s\"", + ret_name))); /************************************************************ * Ignore dropped columns @@ -1205,7 +1218,8 @@ throw_tcl_error(Tcl_Interp *interp, const char *proname) econtext = UTF_U2E((char *) Tcl_GetVar(interp, "errorInfo", TCL_GLOBAL_ONLY)); ereport(ERROR, - (errmsg("%s", emsg), + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", emsg), errcontext("%s\nin PL/Tcl function \"%s\"", econtext, proname))); UTF_END; @@ -1545,8 +1559,11 @@ compile_pltcl_function(Oid fn_oid, Oid tgreloid, free(prodesc->internal_proname); free(prodesc); UTF_BEGIN; - elog(ERROR, "could not create internal procedure \"%s\": %s", - internal_proname, UTF_U2E(Tcl_GetStringResult(interp))); + ereport(ERROR, + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("could not create internal procedure \"%s\": %s", + internal_proname, + UTF_U2E(Tcl_GetStringResult(interp))))); UTF_END; } @@ -1614,10 +1631,10 @@ pltcl_elog(ClientData cdata, Tcl_Interp *interp, } /* - * For non-error messages, just pass 'em to elog(). We do not expect that - * this will fail, but just on the off chance it does, report the error - * back to Tcl. Note we are assuming that elog() can't have any internal - * failures that are so bad as to require a transaction abort. + * For non-error messages, just pass 'em to ereport(). We do not expect + * that this will fail, but just on the off chance it does, report the + * error back to Tcl. Note we are assuming that ereport() can't have any + * internal failures that are so bad as to require a transaction abort. * * This path is also used for FATAL errors, which aren't going to come * back to us at all. 
@@ -1626,7 +1643,9 @@ pltcl_elog(ClientData cdata, Tcl_Interp *interp, PG_TRY(); { UTF_BEGIN; - elog(level, "%s", UTF_U2E(argv[2])); + ereport(level, + (errcode(ERRCODE_EXTERNAL_ROUTINE_EXCEPTION), + errmsg("%s", UTF_U2E(argv[2])))); UTF_END; } PG_CATCH(); diff --git a/src/test/regress/expected/txid.out b/src/test/regress/expected/txid.out index 7750b7b98f937..ddd217eb1024f 100644 --- a/src/test/regress/expected/txid.out +++ b/src/test/regress/expected/txid.out @@ -20,19 +20,19 @@ select '12:16:14,14'::txid_snapshot; -- errors select '31:12:'::txid_snapshot; -ERROR: invalid input for txid_snapshot: "31:12:" +ERROR: invalid input syntax for type txid_snapshot: "31:12:" LINE 1: select '31:12:'::txid_snapshot; ^ select '0:1:'::txid_snapshot; -ERROR: invalid input for txid_snapshot: "0:1:" +ERROR: invalid input syntax for type txid_snapshot: "0:1:" LINE 1: select '0:1:'::txid_snapshot; ^ select '12:13:0'::txid_snapshot; -ERROR: invalid input for txid_snapshot: "12:13:0" +ERROR: invalid input syntax for type txid_snapshot: "12:13:0" LINE 1: select '12:13:0'::txid_snapshot; ^ select '12:16:14,13'::txid_snapshot; -ERROR: invalid input for txid_snapshot: "12:16:14,13" +ERROR: invalid input syntax for type txid_snapshot: "12:16:14,13" LINE 1: select '12:16:14,13'::txid_snapshot; ^ create temp table snapshot_test ( @@ -235,6 +235,6 @@ SELECT txid_snapshot '1:9223372036854775807:3'; (1 row) SELECT txid_snapshot '1:9223372036854775808:3'; -ERROR: invalid input for txid_snapshot: "1:9223372036854775808:3" +ERROR: invalid input syntax for type txid_snapshot: "1:9223372036854775808:3" LINE 1: SELECT txid_snapshot '1:9223372036854775808:3'; ^ diff --git a/src/test/regress/regress.c b/src/test/regress/regress.c index bd31a3d382526..476975f391afc 100644 --- a/src/test/regress/regress.c +++ b/src/test/regress/regress.c @@ -549,8 +549,10 @@ ttdummy(PG_FUNCTION_ARGS) elog(ERROR, "ttdummy (%s): %s must be NOT NULL", relname, args[1]); if (oldon != newon || oldoff != newoff) - elog(ERROR, "ttdummy (%s): you cannot change %s and/or %s columns (use set_ttdummy)", - relname, args[0], args[1]); + ereport(ERROR, + (errcode(ERRCODE_FEATURE_NOT_SUPPORTED), + errmsg("ttdummy (%s): you cannot change %s and/or %s columns (use set_ttdummy)", + relname, args[0], args[1]))); if (newoff != TTDUMMY_INFINITY) { From 61015249259462020629703a4990234c4629cbee Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Sun, 2 Aug 2015 23:57:32 -0400 Subject: [PATCH 151/442] contrib/isn now needs a .gitignore file. Oversight in commit cb3384a0cb4cf900622b77865f60e31259923079. Back-patch to 9.1, like that commit. --- contrib/isn/.gitignore | 4 ++++ 1 file changed, 4 insertions(+) create mode 100644 contrib/isn/.gitignore diff --git a/contrib/isn/.gitignore b/contrib/isn/.gitignore new file mode 100644 index 0000000000000..5dcb3ff972350 --- /dev/null +++ b/contrib/isn/.gitignore @@ -0,0 +1,4 @@ +# Generated subdirectories +/log/ +/results/ +/tmp_check/ From 642ae4ee7dcb9b48a4abd1f02a46ff4d71aef931 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 3 Aug 2015 00:02:26 -0400 Subject: [PATCH 152/442] Make modules/test_ddl_deparse/.gitignore match its siblings. Not sure why /tmp_check/ was omitted from this one, but even if it isn't really needed right now, it's inconsistent not to include it. 
--- src/test/modules/test_ddl_deparse/.gitignore | 2 ++ 1 file changed, 2 insertions(+) diff --git a/src/test/modules/test_ddl_deparse/.gitignore b/src/test/modules/test_ddl_deparse/.gitignore index 3337b3d294433..5dcb3ff972350 100644 --- a/src/test/modules/test_ddl_deparse/.gitignore +++ b/src/test/modules/test_ddl_deparse/.gitignore @@ -1,2 +1,4 @@ +# Generated subdirectories /log/ /results/ +/tmp_check/ From 2b917a58aec17ca5cf64196ee1d5d77ef8635caf Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 3 Aug 2015 13:06:47 +0300 Subject: [PATCH 153/442] Clean up pg_rewind regression test script. Since commit 01f6bb4b2, TestLib.pm has exported path to tmp_check directory, so let's use that also for the pg_rewind test clusters etc. Also, in master, the $tempdir_short variable has not been used since commit 13d856e17, which moved the initdb-running code to TestLib.pm. Backpatch to 9.5. --- src/bin/pg_rewind/RewindTest.pm | 21 ++++++--------------- 1 file changed, 6 insertions(+), 15 deletions(-) diff --git a/src/bin/pg_rewind/RewindTest.pm b/src/bin/pg_rewind/RewindTest.pm index 1045caa09adb5..4dabffacbb413 100644 --- a/src/bin/pg_rewind/RewindTest.pm +++ b/src/bin/pg_rewind/RewindTest.pm @@ -62,15 +62,8 @@ our @EXPORT = qw( clean_rewind_test ); -# A temporary directory created with 'tempdir' is deleted automatically at -# the end of the tests. You can change it to a constant if you need to keep it -# for debugging purposes, -my $testroot = tempdir; - -our $test_master_datadir = "$testroot/data_master"; -our $test_standby_datadir = "$testroot/data_standby"; - -mkdir $testroot; +our $test_master_datadir = "$tmp_check/data_master"; +our $test_standby_datadir = "$tmp_check/data_standby"; # Define non-conflicting ports for both nodes. my $port_master = $ENV{PGPORT}; @@ -273,9 +266,8 @@ sub run_pg_rewind # Keep a temporary postgresql.conf for master node or it would be # overwritten during the rewind. - copy( - "$test_master_datadir/postgresql.conf", - "$testroot/master-postgresql.conf.tmp"); + copy("$test_master_datadir/postgresql.conf", + "$tmp_check/master-postgresql.conf.tmp"); # Now run pg_rewind if ($test_mode eq "local") @@ -307,9 +299,8 @@ sub run_pg_rewind } # Now move back postgresql.conf with old settings - move( - "$testroot/master-postgresql.conf.tmp", - "$test_master_datadir/postgresql.conf"); + move("$tmp_check/master-postgresql.conf.tmp", + "$test_master_datadir/postgresql.conf"); # Plug-in rewound node to the now-promoted standby node append_to_file( From 615b69595525385bbf050a170912b7671cacc5c8 Mon Sep 17 00:00:00 2001 From: Heikki Linnakangas Date: Mon, 3 Aug 2015 15:23:56 +0300 Subject: [PATCH 154/442] Fix pg_rewind when pg_xlog is a symlink. pg_xlog is often a symlink, typically to a different filesystem. Don't get confused and comlain about by that, and just always pretend that it's a normal directory, even if it's really a symlink. Also add a test case for this. Backpatch to 9.5. 
--- src/bin/pg_rewind/RewindTest.pm | 14 +++-- src/bin/pg_rewind/filemap.c | 16 ++++- src/bin/pg_rewind/t/001_basic.pl | 1 + src/bin/pg_rewind/t/002_databases.pl | 1 + src/bin/pg_rewind/t/003_extrafiles.pl | 1 + src/bin/pg_rewind/t/004_pg_xlog_symlink.pl | 70 ++++++++++++++++++++++ 6 files changed, 98 insertions(+), 5 deletions(-) create mode 100644 src/bin/pg_rewind/t/004_pg_xlog_symlink.pl diff --git a/src/bin/pg_rewind/RewindTest.pm b/src/bin/pg_rewind/RewindTest.pm index 4dabffacbb413..cbbab2bdb0ce6 100644 --- a/src/bin/pg_rewind/RewindTest.pm +++ b/src/bin/pg_rewind/RewindTest.pm @@ -13,16 +13,18 @@ package RewindTest; # # 2. setup_cluster - creates a PostgreSQL cluster that runs as the master # -# 3. create_standby - runs pg_basebackup to initialize a standby server, and +# 3. start_master - starts the master server +# +# 4. create_standby - runs pg_basebackup to initialize a standby server, and # sets it up to follow the master. # -# 4. promote_standby - runs "pg_ctl promote" to promote the standby server. +# 5. promote_standby - runs "pg_ctl promote" to promote the standby server. # The old master keeps running. # -# 5. run_pg_rewind - stops the old master (if it's still running) and runs +# 6. run_pg_rewind - stops the old master (if it's still running) and runs # pg_rewind to synchronize it with the now-promoted standby server. # -# 6. clean_rewind_test - stops both servers used in the test, if they're +# 7. clean_rewind_test - stops both servers used in the test, if they're # still running. # # The test script can use the helper functions master_psql and standby_psql @@ -56,6 +58,7 @@ our @EXPORT = qw( init_rewind_test setup_cluster + start_master create_standby promote_standby run_pg_rewind @@ -189,7 +192,10 @@ max_connections = 10 "$test_master_datadir/pg_hba.conf", qq( local replication all trust )); +} +sub start_master +{ system_or_bail('pg_ctl' , '-w', '-D' , $test_master_datadir, '-l', "$log_path/master.log", diff --git a/src/bin/pg_rewind/filemap.c b/src/bin/pg_rewind/filemap.c index 05eff68185edd..fb26d093160a9 100644 --- a/src/bin/pg_rewind/filemap.c +++ b/src/bin/pg_rewind/filemap.c @@ -78,6 +78,14 @@ process_source_file(const char *path, file_type_t type, size_t newsize, strcmp(path, "postmaster.opts") == 0) return; + /* + * Pretend that pg_xlog is a directory, even if it's really a symlink. + * We don't want to mess with the symlink itself, nor complain if it's a + * symlink in source but not in target or vice versa. + */ + if (strcmp(path, "pg_xlog") == 0 && type == FILE_TYPE_SYMLINK) + type = FILE_TYPE_DIRECTORY; + /* * Skip temporary files, .../pgsql_tmp/... and .../pgsql_tmp.* in source. * This has the effect that all temporary files in the destination will be @@ -112,7 +120,7 @@ process_source_file(const char *path, file_type_t type, size_t newsize, switch (type) { case FILE_TYPE_DIRECTORY: - if (exists && !S_ISDIR(statbuf.st_mode)) + if (exists && !S_ISDIR(statbuf.st_mode) && strcmp(path, "pg_xlog") != 0) { /* it's a directory in source, but not in target. Strange.. */ pg_fatal("\"%s\" is not a directory\n", localpath); @@ -285,6 +293,12 @@ process_target_file(const char *path, file_type_t type, size_t oldsize, strcmp(path, "postmaster.opts") == 0) return; + /* + * Like in process_source_file, pretend that xlog is always a directory. 
+ */ + if (strcmp(path, "pg_xlog") == 0 && type == FILE_TYPE_SYMLINK) + type = FILE_TYPE_DIRECTORY; + key.path = (char *) path; key_ptr = &key; exists = (bsearch(&key_ptr, map->array, map->narray, sizeof(file_entry_t *), diff --git a/src/bin/pg_rewind/t/001_basic.pl b/src/bin/pg_rewind/t/001_basic.pl index be7d887bb7c97..1764b17c907a1 100644 --- a/src/bin/pg_rewind/t/001_basic.pl +++ b/src/bin/pg_rewind/t/001_basic.pl @@ -10,6 +10,7 @@ sub run_test my $test_mode = shift; RewindTest::setup_cluster(); + RewindTest::start_master(); # Create a test table and insert a row in master. master_psql("CREATE TABLE tbl1 (d text)"); diff --git a/src/bin/pg_rewind/t/002_databases.pl b/src/bin/pg_rewind/t/002_databases.pl index b0b007a763ad2..f10899d44017f 100644 --- a/src/bin/pg_rewind/t/002_databases.pl +++ b/src/bin/pg_rewind/t/002_databases.pl @@ -10,6 +10,7 @@ sub run_test my $test_mode = shift; RewindTest::setup_cluster(); + RewindTest::start_master(); # Create a database in master. master_psql('CREATE DATABASE inmaster'); diff --git a/src/bin/pg_rewind/t/003_extrafiles.pl b/src/bin/pg_rewind/t/003_extrafiles.pl index 0cd0ac4d5677b..d317f53186bf6 100644 --- a/src/bin/pg_rewind/t/003_extrafiles.pl +++ b/src/bin/pg_rewind/t/003_extrafiles.pl @@ -15,6 +15,7 @@ sub run_test my $test_mode = shift; RewindTest::setup_cluster(); + RewindTest::start_master(); my $test_master_datadir = $RewindTest::test_master_datadir; diff --git a/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl b/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl new file mode 100644 index 0000000000000..634c623afab8d --- /dev/null +++ b/src/bin/pg_rewind/t/004_pg_xlog_symlink.pl @@ -0,0 +1,70 @@ +# +# Test pg_rewind when the target's pg_xlog directory is a symlink. +# +use strict; +use warnings; +use File::Copy; +use File::Path qw(remove_tree); +use TestLib; +use Test::More tests => 4; + +use RewindTest; + +sub run_test +{ + my $test_mode = shift; + + my $master_xlogdir = "$tmp_check/xlog_master"; + + remove_tree($master_xlogdir); + RewindTest::setup_cluster(); + + # turn pg_xlog into a symlink + print("moving $test_master_datadir/pg_xlog to $master_xlogdir\n"); + move("$test_master_datadir/pg_xlog", $master_xlogdir) or die; + symlink($master_xlogdir, "$test_master_datadir/pg_xlog") or die; + + RewindTest::start_master(); + + # Create a test table and insert a row in master. + master_psql("CREATE TABLE tbl1 (d text)"); + master_psql("INSERT INTO tbl1 VALUES ('in master')"); + + master_psql("CHECKPOINT"); + + RewindTest::create_standby(); + + # Insert additional data on master that will be replicated to standby + master_psql("INSERT INTO tbl1 values ('in master, before promotion')"); + + master_psql('CHECKPOINT'); + + RewindTest::promote_standby(); + + # Insert a row in the old master. This causes the master and standby + # to have "diverged", it's no longer possible to just apply the + # standy's logs over master directory - you need to rewind. + master_psql("INSERT INTO tbl1 VALUES ('in master, after promotion')"); + + # Also insert a new row in the standby, which won't be present in the + # old master. 
+ standby_psql("INSERT INTO tbl1 VALUES ('in standby, after promotion')"); + + RewindTest::run_pg_rewind($test_mode); + + check_query( + 'SELECT * FROM tbl1', + qq(in master +in master, before promotion +in standby, after promotion +), + 'table content'); + + RewindTest::clean_rewind_test(); +} + +# Run the test in both modes +run_test('local'); +run_test('remote'); + +exit(0); From 46e9019bbce96c309d27d4b164bf9a2d0d8292eb Mon Sep 17 00:00:00 2001 From: Fujii Masao Date: Mon, 3 Aug 2015 23:04:41 +0900 Subject: [PATCH 155/442] Make recovery rename tablespace_map to *.old if backup_label is not present. If tablespace_map file is present without backup_label file, there is no use of such file. There is no harm in retaining it, but it is better to get rid of the map file so that we don't have any redundant file in data directory and it will avoid any sort of confusion. It seems prudent though to just rename the file out of the way rather than delete it completely, also we ignore any error that occurs in rename operation as even if map file is present without backup_label file, it is harmless. Back-patch to 9.5 where tablespace_map file was introduced. Amit Kapila, reviewed by Robert Haas, Alvaro Herrera and me. --- src/backend/access/transam/xlog.c | 28 ++++++++++++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/src/backend/access/transam/xlog.c b/src/backend/access/transam/xlog.c index f06b51dde3743..68e33eb1a99cb 100644 --- a/src/backend/access/transam/xlog.c +++ b/src/backend/access/transam/xlog.c @@ -5912,6 +5912,7 @@ StartupXLOG(void) XLogReaderState *xlogreader; XLogPageReadPrivate private; bool fast_promoted = false; + struct stat st; /* * Read control file and check XLOG status looks valid. @@ -6138,6 +6139,33 @@ StartupXLOG(void) } else { + /* + * If tablespace_map file is present without backup_label file, there + * is no use of such file. There is no harm in retaining it, but it + * is better to get rid of the map file so that we don't have any + * redundant file in data directory and it will avoid any sort of + * confusion. It seems prudent though to just rename the file out + * of the way rather than delete it completely, also we ignore any + * error that occurs in rename operation as even if map file is + * present without backup_label file, it is harmless. + */ + if (stat(TABLESPACE_MAP, &st) == 0) + { + unlink(TABLESPACE_MAP_OLD); + if (rename(TABLESPACE_MAP, TABLESPACE_MAP_OLD) == 0) + ereport(LOG, + (errmsg("ignoring \"%s\" file because no \"%s\" file exists", + TABLESPACE_MAP, BACKUP_LABEL_FILE), + errdetail("\"%s\" was renamed to \"%s\".", + TABLESPACE_MAP, TABLESPACE_MAP_OLD))); + else + ereport(LOG, + (errmsg("ignoring \"%s\" file because no \"%s\" file exists", + TABLESPACE_MAP, BACKUP_LABEL_FILE), + errdetail("Could not rename file \"%s\" to \"%s\": %m.", + TABLESPACE_MAP, TABLESPACE_MAP_OLD))); + } + /* * It's possible that archive recovery was requested, but we don't * know how far we need to replay the WAL before we reach consistency. From 8f45a58d394bbe83c54306ba769ac02c9239c259 Mon Sep 17 00:00:00 2001 From: Joe Conway Date: Mon, 3 Aug 2015 09:08:01 -0700 Subject: [PATCH 156/442] Fix psql \d output of policies. psql neglected to wrap parenthesis around USING and WITH CHECK expressions -- fixed. Back-patched to 9.5 where RLS policies were introduced. 
--- src/bin/psql/describe.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c index f63c7e90d3c0a..898f8b39cdcbe 100644 --- a/src/bin/psql/describe.c +++ b/src/bin/psql/describe.c @@ -2083,11 +2083,11 @@ describeOneTableDetails(const char *schemaname, } if (!PQgetisnull(result, i, 2)) - appendPQExpBuffer(&buf, "\n USING %s", + appendPQExpBuffer(&buf, "\n USING (%s)", PQgetvalue(result, i, 2)); if (!PQgetisnull(result, i, 3)) - appendPQExpBuffer(&buf, "\n WITH CHECK %s", + appendPQExpBuffer(&buf, "\n WITH CHECK (%s)", PQgetvalue(result, i, 3)); printTableAddFooter(&cont, buf.data); From 11daccb445260de9ce03e4408ac7d908545b3319 Mon Sep 17 00:00:00 2001 From: Tom Lane Date: Mon, 3 Aug 2015 12:29:11 -0400 Subject: [PATCH 157/442] Update 9.5 release notes through today. --- doc/src/sgml/release-9.5.sgml | 743 ++++++++++++++++++++++++++++++++-- 1 file changed, 699 insertions(+), 44 deletions(-) diff --git a/doc/src/sgml/release-9.5.sgml b/doc/src/sgml/release-9.5.sgml index dff1ba9ae0bbf..722c8bd45227b 100644 --- a/doc/src/sgml/release-9.5.sgml +++ b/doc/src/sgml/release-9.5.sgml @@ -7,7 +7,7 @@ Release Date 2015-??-?? - Current as of 2015-06-01 + Current as of 2015-08-03 @@ -53,6 +53,9 @@ + Adjust operator precedence to match the SQL standard (Tom Lane) @@ -78,6 +81,9 @@ + Use assignment cast behavior for data type conversions in PL/pgSQL assignments, rather than converting to and @@ -97,6 +103,9 @@ + Allow characters in server command-line options to be escaped with a backslash (Andres Freund) @@ -110,6 +119,9 @@ + Change the default value of include_realm to 1, so that @@ -119,6 +131,9 @@ + Fix REASSIGN OWNED and ALTER OWNER TO @@ -128,6 +143,27 @@ + + + Remove server configuration + parameter ssl_renegotiation_limit, which was deprecated + in earlier releases (Andres Freund) + + + + While SSL renegotiation is a good idea in theory, it has caused enough + bugs to be considered a net negative in practice, and it is due to be + removed from future versions of the relevant standards. We have + therefore removed support for it from PostgreSQL. + + + + + Remove server configuration parameter autocommit, which was already deprecated and non-operational (Tom Lane) @@ -135,6 +171,9 @@ + Remove pg_authid's rolcatupdate field, as it had no value (Adam Brightwell) @@ -142,13 +181,14 @@ + - Allow json and jsonb extraction operators to - accept negative subscripts, which count from the end of JSON - arrays. Historically, these operators yielded NULL - in the event of a negative subscript, because negative - subscripts were considered invalid. (Peter Geoghegan, Andrew - Dunstan) + Allow json and jsonb array extraction operators to + accept negative subscripts, which count from the end of JSON arrays. + Previously, these operators returned NULL for negative + subscripts. (Peter Geoghegan, Andrew Dunstan) @@ -174,18 +214,26 @@ + Add Block Range Indexes (BRIN) (Álvaro Herrera, Heikki Linnakangas, Emre Hasegeli) - BRIN indexes are very compact and store the min/max - values for a range of heap blocks. + BRIN indexes are very compact and cheap to update by + storing min/max values for a range of heap blocks. 
+ Allow queries to perform accurate distance filtering of bounding-box-indexed objects (polygons, circles) using + Allow GiST indexes to perform index-only scans (Anastasia Lubennikova, Heikki Linnakangas, Andreas Karlsson) @@ -209,6 +263,9 @@ + Add configuration parameter to control the size of GIN pending lists (Fujii Masao) @@ -222,6 +279,9 @@ + Issue a warning during the creation of hash indexes because they are not @@ -239,6 +299,11 @@ + Improve the speed of sorting character and numeric fields (Peter Geoghegan, Andrew Gierth, Robert Haas) @@ -246,6 +311,9 @@ + Extend the infrastructure that allows sorting to be performed by inlined, non-SQL-callable comparison functions to @@ -255,12 +323,23 @@ + Improve in-memory hash performance (Tomas Vondra, Robert Haas) + Improve concurrency of shared buffer replacement (Robert Haas, Amit Kapila) @@ -268,13 +347,52 @@ + + + Reduce the number of page locks and pins during index scans (Kevin Grittner) + + - Improve concurrent locking and buffer scan performance (Andres - Freund, Kevin Grittner) + The primary benefit of this is to allow index vacuums to be blocked + less often. + + + Make backend local tracking of buffer pins memory efficient (Andres Freund) + + + + Previously each session allocated an array with space for every buffer + in shared_buffers. + + + + + + + Improve lock scalability (Andres Freund) + + + + This particularly addresses scalability problems when running on + systems with multiple CPU sockets. + + + + + Allow the optimizer to remove unnecessary references to left outer join subqueries (David Rowley) @@ -282,6 +400,9 @@ + Allow pushdown of query restrictions into window functions, where appropriate @@ -290,13 +411,38 @@ + Speed up CRC (cyclic redundancy check) computations (Abhijit Menon-Sen, Heikki Linnakangas) - + + + + Improve bitmap index scan performance (Teodor Sigaev, Tom Lane) + + + + + + + Speed up CREATE INDEX by avoiding unneccessary memory copies (Robert Haas) + + + + @@ -306,6 +452,9 @@ + Add per-table autovacuum logging control via log_min_autovacuum_duration (Michael Paquier) @@ -317,6 +466,9 @@ + Add new configuration parameter (Thomas Munro) @@ -332,6 +484,9 @@ + Prevent non-superusers from changing on connection startup (Fujii Masao) @@ -348,6 +503,9 @@ + Check Subject Alternative Names in SSL server certificates, if present @@ -361,6 +519,9 @@ + Add system view pg_stat_ssl to report @@ -369,6 +530,9 @@ + Add libpq function PQsslAttribute() @@ -386,6 +550,9 @@ + Make libpq honor any OpenSSL thread callbacks (Jan Urbanski) @@ -406,6 +573,12 @@ + Replace configuration parameter checkpoint_segments with @@ -413,12 +586,17 @@ - This allows the allocation of a large number of WAL - files without keeping them if they are not needed. + This allows the allocation of a large number of WAL files + without keeping them if they are not needed. Thus the default + for has been increased + to 1GB. 
+ Add configuration parameter to control compression of full page images stored in WAL @@ -427,6 +605,9 @@ + Allow recording of transaction commit timestamps when configuration parameter + Allow to be set by ALTER ROLE SET (Peter Eisentraut, Kyotaro Horiguchi) @@ -449,6 +633,9 @@ + Allow running autovacuum workers to respond to configuration parameter changes (Michael Paquier) @@ -456,6 +643,9 @@ + Make configuration parameter read-only (Andres Freund) @@ -470,6 +660,9 @@ + Allow setting on systems where it has no effect (Peter Eisentraut) @@ -477,6 +670,9 @@ + Add environment variables PG_OOM_ADJUST_FILE @@ -492,6 +688,10 @@ + Add system view pg_file_settings @@ -501,6 +701,9 @@ + Add pending_restart to the system view pg_settings to @@ -509,6 +712,9 @@ + Allow ALTER SYSTEM values to be reset with ALTER SYSTEM RESET (Vik @@ -532,6 +738,11 @@ + Add recovery.conf parameter + Add new value always to allow standbys to always archive received @@ -553,6 +767,9 @@ + Add configuration parameter to @@ -566,6 +783,9 @@ + Archive WAL files with suffix .partial during standby promotion (Heikki Linnakangas) @@ -573,6 +793,9 @@ + Add configuration parameter @@ -588,6 +811,11 @@ + Allow labeling of the origin of logical replication changes (Andres Freund) @@ -599,6 +827,10 @@ + Report the processes holding replication slots in pg_replication_slots @@ -611,6 +843,9 @@ + Allow recovery.conf's primary_conninfo to @@ -629,6 +864,10 @@ + Allow INSERTS that would generate constraint conflicts to be turned into @@ -644,6 +883,9 @@ + Add GROUP BY analysis functions GROUPING SETS, @@ -654,10 +896,13 @@ + - Allow multi-column UPDATEs with a single subselect - (Tom Lane) + Allow to set multiple columns in + an UPDATE to the result of a + single subselect (Tom Lane) @@ -667,6 +912,9 @@ + Add new SELECT option SKIP LOCKED to skip locked rows (Thomas Munro) @@ -679,6 +927,9 @@ + Add SELECT option TABLESAMPLE to return a subset of a table (Petr @@ -687,6 +938,9 @@ + Suggest possible matches for mistyped column names (Peter Geoghegan, Robert Haas) @@ -703,6 +957,9 @@ + Add more details about sort ordering in EXPLAIN output (Marius Timmer, @@ -716,6 +973,9 @@ + Have VACUUM log the number of pages skipped due to pins (Jim Nasby) @@ -723,6 +983,9 @@ + Have TRUNCATE properly update the pg_stat* tuple counters (Alexander Shulgin) @@ -737,6 +1000,9 @@ + Allow REINDEX to reindex an entire schema using the SCHEMA option (Sawada Masahiko) @@ -744,6 +1010,9 @@ + Add VERBOSE option to REINDEX (Sawada Masahiko) @@ -751,6 +1020,9 @@ + Prevent REINDEX DATABASE and SCHEMA from outputting object names, unless VERBOSE is used @@ -759,6 +1031,9 @@ + Remove obsolete FORCE option from REINDEX (Fujii Masao) @@ -776,6 +1051,9 @@ + Add row-level security control (Craig Ringer, KaiGai Kohei, Adam Brightwell, Dean Rasheed, Stephen Frost) @@ -792,14 +1070,23 @@ + - Allow control of table WAL logging after table creation - with ALTER TABLE .. SET - LOGGED / UNLOGGED (Fabrízio de Royes Mello) + Allow to convert a WAL logged table to an UNLOGGED + one, and the other way round with + ALTER TABLE .. SET LOGGED / + UNLOGGED (Fabrízio de Royes Mello) + Add IF NOT EXISTS clause to CREATE TABLE AS, @@ -811,6 +1098,9 @@ + Add support for IF EXISTS to ALTER TABLE ... 
RENAME @@ -819,6 +1109,9 @@ + Allow CURRENT/SESSION_USER to specify the current user in some commands (Kyotaro Horiguchi, Álvaro @@ -836,6 +1129,9 @@ + Allow comments on domain constraints (Álvaro Herrera) @@ -843,6 +1139,9 @@ + Reduce lock levels of some create and alter trigger and foreign key commands (Simon Riggs, Andreas Karlsson) @@ -850,6 +1149,9 @@ + Allow LOCK TABLE .. ROW EXCLUSIVE MODE for those with INSERT privileges (Stephen @@ -863,6 +1165,9 @@ + Apply table and domain CHECK constraints in name order (Tom Lane) @@ -874,6 +1179,10 @@ + Allow CREATE/ + Add support for IMPORT FOREIGN @@ -911,6 +1223,9 @@ + Allow foreign tables to participate in inheritance (Shigeru Hanada, Etsuro Fujita) @@ -923,6 +1238,9 @@ + Allow CHECK constraints to be placed on foreign tables (Shigeru Hanada, Etsuro Fujita) @@ -938,6 +1256,10 @@ + Allow foreign data wrappers and custom scans to implement join pushdown (KaiGai Kohei) @@ -954,6 +1276,10 @@ + Add a set-returning function pg_event_trigger_ddl_commands(), which returns DDL activity @@ -962,6 +1288,9 @@ + Allow event triggers on table rewrites caused by ALTER TABLE (Dimitri @@ -970,6 +1299,9 @@ + Add event trigger support for database-level COMMENT, + Add columns to the output of pg_event_trigger_dropped_objects @@ -1003,6 +1339,9 @@ + Allow the XML data type to accept empty or all-whitespace values (Peter Eisentraut) @@ -1015,6 +1354,9 @@ + Fix XML xpath() handling @@ -1029,6 +1371,9 @@ + Allow MACADDR input using the format xxxx-xxxx-xxxx (Herwin Weststrate) @@ -1036,6 +1381,10 @@ + Tighten syntax of INTERVAL precision @@ -1049,6 +1398,9 @@ + Add selectivity estimators for INET/ + Add JSONB functions jsonb_set() @@ -1075,6 +1431,9 @@ + Add several generator functions for JSONB that already existed for JSON (Andrew Dunstan) @@ -1097,6 +1456,9 @@ + Reduce casting requirements to/from JSON and + Allow TEXT, TEXT array, and INTEGER values to be subtracted @@ -1113,6 +1478,9 @@ + Add JSONB operator || @@ -1121,6 +1489,9 @@ + Add json_strip_nulls() @@ -1143,6 +1514,9 @@ + Add generate_series() for NUMERIC values (Plato Malugin) @@ -1150,6 +1524,9 @@ + Allow array_agg() and @@ -1158,6 +1535,10 @@ + Add functions array_position() @@ -1168,6 +1549,9 @@ + Add point-to-polygon @@ -1176,6 +1560,9 @@ + Allow multi-byte characters as escape in SIMILAR TO @@ -1189,6 +1576,9 @@ + Add a width_bucket() @@ -1198,6 +1588,20 @@ + + + Add an optional missing_ok argument to pg_read_file() + and related functions (Michael Paquier, Heikki Linnakangas) + + + + + Allow => to specify named parameters in function calls (Pavel Stehule) @@ -1212,6 +1616,9 @@ + Add POSIX-compliant rounding for platforms that use Postgres-supplied rounding functions (Pedro Gimeno Fortea) @@ -1226,6 +1633,9 @@ + Add function pg_get_object_address() @@ -1235,6 +1645,9 @@ + Add function pg_identify_object_as_address() @@ -1244,6 +1657,9 @@ + Loosen security checks for viewing queries in pg_stat_activity, @@ -1261,6 +1677,9 @@ + Add pg_stat_get_snapshot_timestamp() @@ -1274,6 +1693,9 @@ + Add mxid_age() @@ -1282,21 +1704,16 @@ + - Add data type regrole that returns + Add data types regrole regnamespace that returns the OID of a role (Kyotaro Horiguchi) - - - - Add data type regnamespace that returns - the OID of a schema (Kyotaro Horiguchi) - - - @@ -1307,6 +1724,9 @@ + Add MIN()/MAX() aggregates for INET/ + Use 128-bit integers, where supported, as aggregate accumulators (Andreas Karlsson) @@ -1334,6 +1758,9 @@ + Improve support for composite types in PL/Python (Ed Behn, Ronan @@ -1347,6 +1774,9 
@@ + Reduce lossiness of PL/Python floating value @@ -1355,6 +1785,9 @@ + Allow specification of conversion routines between SQL data types and data types of procedural languages (Peter Eisentraut) @@ -1380,6 +1813,12 @@ + Improve PL/pgSQL array performance (Tom Lane) @@ -1387,6 +1826,9 @@ + Add ASSERT statement in PL/pgSQL (Pavel Stehule) @@ -1394,6 +1836,9 @@ + Allow more PL/pgSQL keywords to be used as identifiers (Tom Lane) @@ -1412,6 +1857,12 @@ + Move pg_archivecleanup, @@ -1423,6 +1874,9 @@ + Add pg_rewind, which allows re-synchronizing a master server after failback @@ -1431,6 +1885,9 @@ + Allow pg_receivexlog @@ -1444,6 +1901,9 @@ + Allow the pg_receivexlog @@ -1457,6 +1917,9 @@ + Allow vacuumdb to vacuum in parallel using + Add + Make pg_basebackup use a tablespace mapping file when using tar format, @@ -1481,6 +1950,10 @@ + Add pg_xlogdump option