Skip to content

Commit e076336

Browse files
committed
code: replace 'master' with 'leader' where appropriate.

Leader is already the more widely used terminology, but a few places didn't get the message.

Author: Andres Freund
Reviewed-By: David Steele
Discussion: https://postgr.es/m/20200615182235.x7lch5n6kcjq4aue@alap3.anarazel.de
1 parent 5e7bbb5 commit e076336

File tree

18 files changed

+120
-120
lines changed

18 files changed

+120
-120
lines changed

contrib/pg_prewarm/autoprewarm.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
* pages from a relation that is in the process of being dropped.
1212
*
1313
* While prewarming, autoprewarm will use two workers. There's a
14-
* master worker that reads and sorts the list of blocks to be
14+
* leader worker that reads and sorts the list of blocks to be
1515
* prewarmed and then launches a per-database worker for each
1616
* relevant database in turn. The former keeps running after the
1717
* initial prewarm is complete to update the dump file periodically.
@@ -88,7 +88,7 @@ PG_FUNCTION_INFO_V1(autoprewarm_dump_now);
8888

8989
static void apw_load_buffers(void);
9090
static int apw_dump_now(bool is_bgworker, bool dump_unlogged);
91-
static void apw_start_master_worker(void);
91+
static void apw_start_leader_worker(void);
9292
static void apw_start_database_worker(void);
9393
static bool apw_init_shmem(void);
9494
static void apw_detach_shmem(int code, Datum arg);
@@ -146,11 +146,11 @@ _PG_init(void)
146146

147147
/* Register autoprewarm worker, if enabled. */
148148
if (autoprewarm)
149-
apw_start_master_worker();
149+
apw_start_leader_worker();
150150
}
151151

152152
/*
153-
* Main entry point for the master autoprewarm process. Per-database workers
153+
* Main entry point for the leader autoprewarm process. Per-database workers
154154
* have a separate entry point.
155155
*/
156156
void
@@ -716,7 +716,7 @@ autoprewarm_start_worker(PG_FUNCTION_ARGS)
716716
errmsg("autoprewarm worker is already running under PID %lu",
717717
(unsigned long) pid)));
718718

719-
apw_start_master_worker();
719+
apw_start_leader_worker();
720720

721721
PG_RETURN_VOID();
722722
}
@@ -786,10 +786,10 @@ apw_detach_shmem(int code, Datum arg)
786786
}
787787

788788
/*
789-
* Start autoprewarm master worker process.
789+
* Start autoprewarm leader worker process.
790790
*/
791791
static void
792-
apw_start_master_worker(void)
792+
apw_start_leader_worker(void)
793793
{
794794
BackgroundWorker worker;
795795
BackgroundWorkerHandle *handle;
@@ -801,8 +801,8 @@ apw_start_master_worker(void)
801801
worker.bgw_start_time = BgWorkerStart_ConsistentState;
802802
strcpy(worker.bgw_library_name, "pg_prewarm");
803803
strcpy(worker.bgw_function_name, "autoprewarm_main");
804-
strcpy(worker.bgw_name, "autoprewarm master");
805-
strcpy(worker.bgw_type, "autoprewarm master");
804+
strcpy(worker.bgw_name, "autoprewarm leader");
805+
strcpy(worker.bgw_type, "autoprewarm leader");
806806

807807
if (process_shared_preload_libraries_in_progress)
808808
{

doc/src/sgml/ref/pg_dump.sgml

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -332,12 +332,12 @@ PostgreSQL documentation
332332
</para>
333333
<para>
334334
Requesting exclusive locks on database objects while running a parallel dump could
335-
cause the dump to fail. The reason is that the <application>pg_dump</application> master process
335+
cause the dump to fail. The reason is that the <application>pg_dump</application> leader process
336336
requests shared locks on the objects that the worker processes are going to dump later
337337
in order to
338338
make sure that nobody deletes them and makes them go away while the dump is running.
339339
If another client then requests an exclusive lock on a table, that lock will not be
340-
granted but will be queued waiting for the shared lock of the master process to be
340+
granted but will be queued waiting for the shared lock of the leader process to be
341341
released. Consequently any other access to the table will not be granted either and
342342
will queue after the exclusive lock request. This includes the worker process trying
343343
to dump the table. Without any precautions this would be a classic deadlock situation.
@@ -354,14 +354,14 @@ PostgreSQL documentation
354354
for standbys. With this feature, database clients can ensure they see
355355
the same data set even though they use different connections.
356356
<command>pg_dump -j</command> uses multiple database connections; it
357-
connects to the database once with the master process and once again
357+
connects to the database once with the leader process and once again
358358
for each worker job. Without the synchronized snapshot feature, the
359359
different worker jobs wouldn't be guaranteed to see the same data in
360360
each connection, which could lead to an inconsistent backup.
361361
</para>
362362
<para>
363363
If you want to run a parallel dump of a pre-9.2 server, you need to make sure that the
364-
database content doesn't change from between the time the master connects to the
364+
database content doesn't change from between the time the leader connects to the
365365
database until the last worker job has connected to the database. The easiest way to
366366
do this is to halt any data modifying processes (DDL and DML) accessing the database
367367
before starting the backup. You also need to specify the

src/backend/access/transam/parallel.c

Lines changed: 18 additions & 18 deletions
Original file line numberDiff line numberDiff line change
@@ -89,9 +89,9 @@ typedef struct FixedParallelState
8989
Oid temp_toast_namespace_id;
9090
int sec_context;
9191
bool is_superuser;
92-
PGPROC *parallel_master_pgproc;
93-
pid_t parallel_master_pid;
94-
BackendId parallel_master_backend_id;
92+
PGPROC *parallel_leader_pgproc;
93+
pid_t parallel_leader_pid;
94+
BackendId parallel_leader_backend_id;
9595
TimestampTz xact_ts;
9696
TimestampTz stmt_ts;
9797
SerializableXactHandle serializable_xact_handle;
@@ -124,7 +124,7 @@ static FixedParallelState *MyFixedParallelState;
124124
static dlist_head pcxt_list = DLIST_STATIC_INIT(pcxt_list);
125125

126126
/* Backend-local copy of data from FixedParallelState. */
127-
static pid_t ParallelMasterPid;
127+
static pid_t ParallelLeaderPid;
128128

129129
/*
130130
* List of internal parallel worker entry points. We need this for
@@ -323,9 +323,9 @@ InitializeParallelDSM(ParallelContext *pcxt)
323323
GetUserIdAndSecContext(&fps->current_user_id, &fps->sec_context);
324324
GetTempNamespaceState(&fps->temp_namespace_id,
325325
&fps->temp_toast_namespace_id);
326-
fps->parallel_master_pgproc = MyProc;
327-
fps->parallel_master_pid = MyProcPid;
328-
fps->parallel_master_backend_id = MyBackendId;
326+
fps->parallel_leader_pgproc = MyProc;
327+
fps->parallel_leader_pid = MyProcPid;
328+
fps->parallel_leader_backend_id = MyBackendId;
329329
fps->xact_ts = GetCurrentTransactionStartTimestamp();
330330
fps->stmt_ts = GetCurrentStatementStartTimestamp();
331331
fps->serializable_xact_handle = ShareSerializableXact();
@@ -857,8 +857,8 @@ WaitForParallelWorkersToFinish(ParallelContext *pcxt)
857857
*
858858
* This function ensures that workers have been completely shutdown. The
859859
* difference between WaitForParallelWorkersToFinish and this function is
860-
* that former just ensures that last message sent by worker backend is
861-
* received by master backend whereas this ensures the complete shutdown.
860+
* that the former just ensures that last message sent by a worker backend is
861+
* received by the leader backend whereas this ensures the complete shutdown.
862862
*/
863863
static void
864864
WaitForParallelWorkersToExit(ParallelContext *pcxt)
@@ -1302,8 +1302,8 @@ ParallelWorkerMain(Datum main_arg)
13021302
MyFixedParallelState = fps;
13031303

13041304
/* Arrange to signal the leader if we exit. */
1305-
ParallelMasterPid = fps->parallel_master_pid;
1306-
ParallelMasterBackendId = fps->parallel_master_backend_id;
1305+
ParallelLeaderPid = fps->parallel_leader_pid;
1306+
ParallelLeaderBackendId = fps->parallel_leader_backend_id;
13071307
on_shmem_exit(ParallelWorkerShutdown, (Datum) 0);
13081308

13091309
/*
@@ -1318,8 +1318,8 @@ ParallelWorkerMain(Datum main_arg)
13181318
shm_mq_set_sender(mq, MyProc);
13191319
mqh = shm_mq_attach(mq, seg, NULL);
13201320
pq_redirect_to_shm_mq(seg, mqh);
1321-
pq_set_parallel_master(fps->parallel_master_pid,
1322-
fps->parallel_master_backend_id);
1321+
pq_set_parallel_leader(fps->parallel_leader_pid,
1322+
fps->parallel_leader_backend_id);
13231323

13241324
/*
13251325
* Send a BackendKeyData message to the process that initiated parallelism
@@ -1347,8 +1347,8 @@ ParallelWorkerMain(Datum main_arg)
13471347
* deadlock. (If we can't join the lock group, the leader has gone away,
13481348
* so just exit quietly.)
13491349
*/
1350-
if (!BecomeLockGroupMember(fps->parallel_master_pgproc,
1351-
fps->parallel_master_pid))
1350+
if (!BecomeLockGroupMember(fps->parallel_leader_pgproc,
1351+
fps->parallel_leader_pid))
13521352
return;
13531353

13541354
/*
@@ -1410,7 +1410,7 @@ ParallelWorkerMain(Datum main_arg)
14101410
/* Restore transaction snapshot. */
14111411
tsnapspace = shm_toc_lookup(toc, PARALLEL_KEY_TRANSACTION_SNAPSHOT, false);
14121412
RestoreTransactionSnapshot(RestoreSnapshot(tsnapspace),
1413-
fps->parallel_master_pgproc);
1413+
fps->parallel_leader_pgproc);
14141414

14151415
/* Restore active snapshot. */
14161416
asnapspace = shm_toc_lookup(toc, PARALLEL_KEY_ACTIVE_SNAPSHOT, false);
@@ -1510,9 +1510,9 @@ ParallelWorkerReportLastRecEnd(XLogRecPtr last_xlog_end)
15101510
static void
15111511
ParallelWorkerShutdown(int code, Datum arg)
15121512
{
1513-
SendProcSignal(ParallelMasterPid,
1513+
SendProcSignal(ParallelLeaderPid,
15141514
PROCSIG_PARALLEL_MESSAGE,
1515-
ParallelMasterBackendId);
1515+
ParallelLeaderBackendId);
15161516
}
15171517

15181518
/*

src/backend/access/transam/xact.c

Lines changed: 6 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -750,7 +750,7 @@ GetCurrentCommandId(bool used)
750750
{
751751
/*
752752
* Forbid setting currentCommandIdUsed in a parallel worker, because
753-
* we have no provision for communicating this back to the master. We
753+
* we have no provision for communicating this back to the leader. We
754754
* could relax this restriction when currentCommandIdUsed was already
755755
* true at the start of the parallel operation.
756756
*/
@@ -987,7 +987,7 @@ ExitParallelMode(void)
987987
/*
988988
* IsInParallelMode
989989
*
990-
* Are we in a parallel operation, as either the master or a worker? Check
990+
* Are we in a parallel operation, as either the leader or a worker? Check
991991
* this to prohibit operations that change backend-local state expected to
992992
* match across all workers. Mere caches usually don't require such a
993993
* restriction. State modified in a strict push/pop fashion, such as the
@@ -2164,13 +2164,13 @@ CommitTransaction(void)
21642164
else
21652165
{
21662166
/*
2167-
* We must not mark our XID committed; the parallel master is
2167+
* We must not mark our XID committed; the parallel leader is
21682168
* responsible for that.
21692169
*/
21702170
latestXid = InvalidTransactionId;
21712171

21722172
/*
2173-
* Make sure the master will know about any WAL we wrote before it
2173+
* Make sure the leader will know about any WAL we wrote before it
21742174
* commits.
21752175
*/
21762176
ParallelWorkerReportLastRecEnd(XactLastRecEnd);
@@ -2699,7 +2699,7 @@ AbortTransaction(void)
26992699
latestXid = InvalidTransactionId;
27002700

27012701
/*
2702-
* Since the parallel master won't get our value of XactLastRecEnd in
2702+
* Since the parallel leader won't get our value of XactLastRecEnd in
27032703
* this case, we nudge WAL-writer ourselves in this case. See related
27042704
* comments in RecordTransactionAbort for why this matters.
27052705
*/
@@ -4488,7 +4488,7 @@ RollbackAndReleaseCurrentSubTransaction(void)
44884488

44894489
/*
44904490
* Unlike ReleaseCurrentSubTransaction(), this is nominally permitted
4491-
* during parallel operations. That's because we may be in the master,
4491+
* during parallel operations. That's because we may be in the leader,
44924492
* recovering from an error thrown while we were in parallel mode. We
44934493
* won't reach here in a worker, because BeginInternalSubTransaction()
44944494
* will have failed.

src/backend/executor/execGrouping.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -190,7 +190,7 @@ BuildTupleHashTableExt(PlanState *parent,
190190
hashtable->cur_eq_func = NULL;
191191

192192
/*
193-
* If parallelism is in use, even if the master backend is performing the
193+
* If parallelism is in use, even if the leader backend is performing the
194194
* scan itself, we don't want to create the hashtable exactly the same way
195195
* in all workers. As hashtables are iterated over in keyspace-order,
196196
* doing so in all processes in the same way is likely to lead to

src/backend/libpq/pqmq.c

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -23,8 +23,8 @@
2323

2424
static shm_mq_handle *pq_mq_handle;
2525
static bool pq_mq_busy = false;
26-
static pid_t pq_mq_parallel_master_pid = 0;
27-
static pid_t pq_mq_parallel_master_backend_id = InvalidBackendId;
26+
static pid_t pq_mq_parallel_leader_pid = 0;
27+
static pid_t pq_mq_parallel_leader_backend_id = InvalidBackendId;
2828

2929
static void pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg);
3030
static void mq_comm_reset(void);
@@ -73,15 +73,15 @@ pq_cleanup_redirect_to_shm_mq(dsm_segment *seg, Datum arg)
7373
}
7474

7575
/*
76-
* Arrange to SendProcSignal() to the parallel master each time we transmit
76+
* Arrange to SendProcSignal() to the parallel leader each time we transmit
7777
* message data via the shm_mq.
7878
*/
7979
void
80-
pq_set_parallel_master(pid_t pid, BackendId backend_id)
80+
pq_set_parallel_leader(pid_t pid, BackendId backend_id)
8181
{
8282
Assert(PqCommMethods == &PqCommMqMethods);
83-
pq_mq_parallel_master_pid = pid;
84-
pq_mq_parallel_master_backend_id = backend_id;
83+
pq_mq_parallel_leader_pid = pid;
84+
pq_mq_parallel_leader_backend_id = backend_id;
8585
}
8686

8787
static void
@@ -160,10 +160,10 @@ mq_putmessage(char msgtype, const char *s, size_t len)
160160
{
161161
result = shm_mq_sendv(pq_mq_handle, iov, 2, true);
162162

163-
if (pq_mq_parallel_master_pid != 0)
164-
SendProcSignal(pq_mq_parallel_master_pid,
163+
if (pq_mq_parallel_leader_pid != 0)
164+
SendProcSignal(pq_mq_parallel_leader_pid,
165165
PROCSIG_PARALLEL_MESSAGE,
166-
pq_mq_parallel_master_backend_id);
166+
pq_mq_parallel_leader_backend_id);
167167

168168
if (result != SHM_MQ_WOULD_BLOCK)
169169
break;

src/backend/optimizer/path/costsize.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -11,7 +11,7 @@
1111
* cpu_tuple_cost Cost of typical CPU time to process a tuple
1212
* cpu_index_tuple_cost Cost of typical CPU time to process an index tuple
1313
* cpu_operator_cost Cost of CPU time to execute an operator or function
14-
* parallel_tuple_cost Cost of CPU time to pass a tuple from worker to master backend
14+
* parallel_tuple_cost Cost of CPU time to pass a tuple from worker to leader backend
1515
* parallel_setup_cost Cost of setting up shared memory for parallelism
1616
*
1717
* We expect that the kernel will typically do some amount of read-ahead

src/backend/optimizer/util/clauses.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1028,8 +1028,8 @@ max_parallel_hazard_walker(Node *node, max_parallel_hazard_context *context)
10281028
* We can't pass Params to workers at the moment either, so they are also
10291029
* parallel-restricted, unless they are PARAM_EXTERN Params or are
10301030
* PARAM_EXEC Params listed in safe_param_ids, meaning they could be
1031-
* either generated within the worker or can be computed in master and
1032-
* then their value can be passed to the worker.
1031+
* either generated within workers or can be computed by the leader and
1032+
* then their value can be passed to workers.
10331033
*/
10341034
else if (IsA(node, Param))
10351035
{

src/backend/utils/init/globals.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ char postgres_exec_path[MAXPGPATH]; /* full path to backend */
8080

8181
BackendId MyBackendId = InvalidBackendId;
8282

83-
BackendId ParallelMasterBackendId = InvalidBackendId;
83+
BackendId ParallelLeaderBackendId = InvalidBackendId;
8484

8585
Oid MyDatabaseId = InvalidOid;
8686

src/backend/utils/misc/guc.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -3448,7 +3448,7 @@ static struct config_real ConfigureNamesReal[] =
34483448
{
34493449
{"parallel_tuple_cost", PGC_USERSET, QUERY_TUNING_COST,
34503450
gettext_noop("Sets the planner's estimate of the cost of "
3451-
"passing each tuple (row) from worker to master backend."),
3451+
"passing each tuple (row) from worker to leader backend."),
34523452
NULL,
34533453
GUC_EXPLAIN
34543454
},

0 commit comments

Comments (0)