
Commit 4c70887

Revert "For inplace update, send nontransactional invalidations."
This reverts commit 95c5acb (v17) and counterparts in each other non-master branch. If released, that commit would have caused a worst-in-years minor release regression, via undetected LWLock self-deadlock. This commit and its self-deadlock fix warrant more bake time in the master branch.

Reported by Alexander Lakhin.

Discussion: https://postgr.es/m/10ec0bc3-5933-1189-6bb8-5dec4114558e@gmail.com
1 parent 9a1c736 commit 4c70887

File tree

11 files changed, +118 -290 lines changed

src/backend/access/heap/heapam.c

Lines changed: 7 additions & 36 deletions

@@ -6387,24 +6387,6 @@ heap_inplace_update_and_unlock(Relation relation,
 	if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
 		elog(ERROR, "wrong tuple length");
 
-	/*
-	 * Construct shared cache inval if necessary. Note that because we only
-	 * pass the new version of the tuple, this mustn't be used for any
-	 * operations that could change catcache lookup keys. But we aren't
-	 * bothering with index updates either, so that's true a fortiori.
-	 */
-	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
-	/*
-	 * Unlink relcache init files as needed. If unlinking, acquire
-	 * RelCacheInitLock until after associated invalidations. By doing this
-	 * in advance, if we checkpoint and then crash between inplace
-	 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
-	 * RelationCacheInitFileRemove(). That uses elevel==LOG, so replay would
-	 * neglect to PANIC on EIO.
-	 */
-	PreInplace_Inval();
-
 	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
 
@@ -6448,28 +6430,17 @@ heap_inplace_update_and_unlock(Relation relation,
 		PageSetLSN(BufferGetPage(buffer), recptr);
 	}
 
-	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-
-	/*
-	 * Send invalidations to shared queue. SearchSysCacheLocked1() assumes we
-	 * do this before UnlockTuple().
-	 *
-	 * If we're mutating a tuple visible only to this transaction, there's an
-	 * equivalent transactional inval from the action that created the tuple,
-	 * and this inval is superfluous.
-	 */
-	AtInplace_Inval();
-
 	END_CRIT_SECTION();
-	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
 
-	AcceptInvalidationMessages();	/* local processing of just-sent inval */
+	heap_inplace_unlock(relation, oldtup, buffer);
 
 	/*
-	 * Queue a transactional inval. The immediate invalidation we just sent
-	 * is the only one known to be necessary. To reduce risk from the
-	 * transition to immediate invalidation, continue sending a transactional
-	 * invalidation like we've long done. Third-party code might rely on it.
+	 * Send out shared cache inval if necessary. Note that because we only
+	 * pass the new version of the tuple, this mustn't be used for any
+	 * operations that could change catcache lookup keys. But we aren't
+	 * bothering with index updates either, so that's true a fortiori.
+	 *
+	 * XXX ROLLBACK discards the invalidation. See test inplace-inval.spec.
 	 */
 	if (!IsBootstrapProcessingMode())
 		CacheInvalidateHeapTuple(relation, tuple, NULL);
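For orientation, the two hunks above amount to the call orderings summarized by the following condensed, standalone trace model. This is not PostgreSQL source; it only echoes the sequence of calls visible in the diff.

/* Standalone C sketch: prints the two call orderings shown in the hunks above. */
#include <stdio.h>

static void
step(const char *s)
{
	printf("    %s\n", s);
}

int
main(void)
{
	puts("before this revert (nontransactional invals):");
	step("PreInplace_Inval();               /* before the critical section */");
	step("START_CRIT_SECTION();");
	step("/* overwrite tuple, XLogInsert(), PageSetLSN() */");
	step("LockBuffer(buffer, BUFFER_LOCK_UNLOCK);");
	step("AtInplace_Inval();                /* sent before UnlockTuple() */");
	step("END_CRIT_SECTION();");
	step("UnlockTuple(...);");
	step("AcceptInvalidationMessages();");
	step("CacheInvalidateHeapTuple(...);    /* extra transactional inval */");

	puts("after this revert (transactional inval only):");
	step("START_CRIT_SECTION();");
	step("/* overwrite tuple, XLogInsert(), PageSetLSN() */");
	step("END_CRIT_SECTION();");
	step("heap_inplace_unlock(relation, oldtup, buffer);");
	step("CacheInvalidateHeapTuple(...);    /* queued, sent at commit */");
	return 0;
}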

src/backend/access/transam/xact.c

Lines changed: 8 additions & 18 deletions

@@ -1276,24 +1276,14 @@ RecordTransactionCommit(void)
 
 		/*
 		 * Transactions without an assigned xid can contain invalidation
-		 * messages. While inplace updates do this, this is not known to be
-		 * necessary; see comment at inplace CacheInvalidateHeapTuple().
-		 * Extensions might still rely on this capability, and standbys may
-		 * need to process those invals. We can't emit a commit record
-		 * without an xid, and we don't want to force assigning an xid,
-		 * because that'd be problematic for e.g. vacuum. Hence we emit a
-		 * bespoke record for the invalidations. We don't want to use that in
-		 * case a commit record is emitted, so they happen synchronously with
-		 * commits (besides not wanting to emit more WAL records).
-		 *
-		 * XXX Every known use of this capability is a defect. Since an XID
-		 * isn't controlling visibility of the change that prompted invals,
-		 * other sessions need the inval even if this transaction aborts.
-		 *
-		 * ON COMMIT DELETE ROWS does a nontransactional index_build(), which
-		 * queues a relcache inval, including in transactions without an xid
-		 * that had read the (empty) table. Standbys don't need any ON COMMIT
-		 * DELETE ROWS invals, but we've not done the work to withhold them.
+		 * messages (e.g. explicit relcache invalidations or catcache
+		 * invalidations for inplace updates); standbys need to process those.
+		 * We can't emit a commit record without an xid, and we don't want to
+		 * force assigning an xid, because that'd be problematic for e.g.
+		 * vacuum. Hence we emit a bespoke record for the invalidations. We
+		 * don't want to use that in case a commit record is emitted, so they
+		 * happen synchronously with commits (besides not wanting to emit more
+		 * WAL records).
 		 */
 		if (nmsgs != 0)
 		{
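As a standalone sketch (not PostgreSQL source) of the commit-time choice the restored comment describes: a transaction with an assigned xid attaches its invalidation messages to the commit record, while an xid-less transaction that still queued messages emits a bespoke invalidation-only record. The function names below are invented for illustration.

#include <stdio.h>
#include <stdbool.h>

static void
emit_commit_record(int nmsgs)
{
	printf("commit record written, %d invalidation messages attached\n", nmsgs);
}

static void
emit_inval_only_record(int nmsgs)
{
	printf("bespoke invalidation record written, %d messages\n", nmsgs);
}

static void
record_transaction_commit(bool has_xid, int nmsgs)
{
	if (has_xid)
		emit_commit_record(nmsgs);		/* invals ride along with the commit */
	else if (nmsgs != 0)
		emit_inval_only_record(nmsgs);	/* can't force an xid: bad for e.g. vacuum */
	else
		puts("nothing to log");
}

int
main(void)
{
	record_transaction_commit(true, 3);		/* e.g. DDL in a normal transaction */
	record_transaction_commit(false, 2);	/* xid-less transaction that queued invals */
	record_transaction_commit(false, 0);	/* plain read-only transaction */
	return 0;
}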

src/backend/catalog/index.c

Lines changed: 2 additions & 9 deletions

@@ -2895,19 +2895,12 @@ index_update_stats(Relation rel,
 	if (dirty)
 	{
 		systable_inplace_update_finish(state, tuple);
-		/* the above sends transactional and immediate cache inval messages */
+		/* the above sends a cache inval message */
 	}
 	else
 	{
 		systable_inplace_update_cancel(state);
-
-		/*
-		 * While we didn't change relhasindex, CREATE INDEX needs a
-		 * transactional inval for when the new index's catalog rows become
-		 * visible. Other CREATE INDEX and REINDEX code happens to also queue
-		 * this inval, but keep this in case rare callers rely on this part of
-		 * our API contract.
-		 */
+		/* no need to change tuple, but force relcache inval anyway */
 		CacheInvalidateRelcacheByTuple(tuple);
 	}
 

src/backend/replication/logical/decode.c

Lines changed: 15 additions & 11 deletions

@@ -547,19 +547,23 @@ DecodeHeapOp(LogicalDecodingContext *ctx, XLogRecordBuffer *buf)
 
 			/*
 			 * Inplace updates are only ever performed on catalog tuples and
-			 * can, per definition, not change tuple visibility. Inplace
-			 * updates don't affect storage or interpretation of table rows,
-			 * so they don't affect logicalrep_write_tuple() outcomes. Hence,
-			 * we don't process invalidations from the original operation. If
-			 * inplace updates did affect those things, invalidations wouldn't
-			 * make it work, since there are no snapshot-specific versions of
-			 * inplace-updated values. Since we also don't decode catalog
-			 * tuples, we're not interested in the record's contents.
+			 * can, per definition, not change tuple visibility. Since we
+			 * don't decode catalog tuples, we're not interested in the
+			 * record's contents.
 			 *
-			 * WAL contains likely-unnecessary commit-time invals from the
-			 * CacheInvalidateHeapTuple() call in heap_inplace_update().
-			 * Excess invalidation is safe.
+			 * In-place updates can be used either by XID-bearing transactions
+			 * (e.g. in CREATE INDEX CONCURRENTLY) or by XID-less
+			 * transactions (e.g. VACUUM). In the former case, the commit
+			 * record will include cache invalidations, so we mark the
+			 * transaction as catalog modifying here. Currently that's
+			 * redundant because the commit will do that as well, but once we
+			 * support decoding in-progress relations, this will be important.
 			 */
+			if (!TransactionIdIsValid(xid))
+				break;
+
+			SnapBuildProcessChange(builder, xid, buf->origptr);
+			ReorderBufferXidSetCatalogChanges(ctx->reorder, xid, buf->origptr);
 			break;
 
 		case XLOG_HEAP_CONFIRM:
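A standalone sketch (not PostgreSQL source) of the restored XLOG_HEAP_INPLACE branch: XID-less in-place updates (e.g. from VACUUM) are skipped, while XID-bearing ones (e.g. from CREATE INDEX CONCURRENTLY) are marked as catalog-modifying so their commit-time invalidations get handled. The helper names below are invented for illustration.

#include <stdio.h>

typedef unsigned int TransactionId;
#define InvalidTransactionId ((TransactionId) 0)

static void
mark_catalog_changes(TransactionId xid)
{
	printf("xid %u marked as catalog-modifying\n", xid);
}

static void
decode_inplace(TransactionId xid)
{
	if (xid == InvalidTransactionId)
	{
		puts("XID-less inplace update: nothing to track");
		return;
	}
	mark_catalog_changes(xid);
}

int
main(void)
{
	decode_inplace(InvalidTransactionId);	/* e.g. VACUUM */
	decode_inplace(742);					/* e.g. CREATE INDEX CONCURRENTLY */
	return 0;
}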

src/backend/utils/cache/catcache.c

Lines changed: 3 additions & 4 deletions

@@ -2131,8 +2131,7 @@ void
 PrepareToInvalidateCacheTuple(Relation relation,
 							  HeapTuple tuple,
 							  HeapTuple newtuple,
-							  void (*function) (int, uint32, Oid, void *),
-							  void *context)
+							  void (*function) (int, uint32, Oid))
 {
 	slist_iter	iter;
 	Oid			reloid;
@@ -2173,7 +2172,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
 		hashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, tuple);
 		dbid = ccp->cc_relisshared ? (Oid) 0 : MyDatabaseId;
 
-		(*function) (ccp->id, hashvalue, dbid, context);
+		(*function) (ccp->id, hashvalue, dbid);
 
 		if (newtuple)
 		{
@@ -2182,7 +2181,7 @@ PrepareToInvalidateCacheTuple(Relation relation,
 			newhashvalue = CatalogCacheComputeTupleHashValue(ccp, ccp->cc_nkeys, newtuple);
 
 			if (newhashvalue != hashvalue)
-				(*function) (ccp->id, newhashvalue, dbid, context);
+				(*function) (ccp->id, newhashvalue, dbid);
 		}
 	}
 }
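A standalone sketch (not PostgreSQL source) of the callback-signature change reverted above: the reverted patch had threaded an extra "void *context" argument through PrepareToInvalidateCacheTuple()'s callback, and this revert returns to the three-argument form. register_catcache_inval(), prepare_to_invalidate(), and the hash values are invented for illustration.

#include <stdio.h>
#include <stdint.h>

typedef unsigned int Oid;

/* three-argument callback form restored by the revert */
typedef void (*inval_cb) (int cacheid, uint32_t hashvalue, Oid dbid);

static void
register_catcache_inval(int cacheid, uint32_t hashvalue, Oid dbid)
{
	/* a real callback would append to the transaction's pending-inval list */
	printf("inval: cache %d hash %u db %u\n", cacheid, (unsigned) hashvalue, dbid);
}

/* mirrors the restored call shape: old tuple, then new tuple if it hashes differently */
static void
prepare_to_invalidate(int cacheid, uint32_t oldhash, uint32_t newhash,
					  Oid dbid, inval_cb function)
{
	(*function) (cacheid, oldhash, dbid);
	if (newhash != oldhash)
		(*function) (cacheid, newhash, dbid);
}

int
main(void)
{
	prepare_to_invalidate(42, 0xdeadbeefU, 0xfeedfaceU, 5, register_catcache_inval);
	return 0;
}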
