@@ -6115,24 +6115,6 @@ heap_inplace_update_and_unlock(Relation relation,
 	if (oldlen != newlen || htup->t_hoff != tuple->t_data->t_hoff)
 		elog(ERROR, "wrong tuple length");
 
-	/*
-	 * Construct shared cache inval if necessary.  Note that because we only
-	 * pass the new version of the tuple, this mustn't be used for any
-	 * operations that could change catcache lookup keys.  But we aren't
-	 * bothering with index updates either, so that's true a fortiori.
-	 */
-	CacheInvalidateHeapTupleInplace(relation, tuple, NULL);
-
-	/*
-	 * Unlink relcache init files as needed.  If unlinking, acquire
-	 * RelCacheInitLock until after associated invalidations.  By doing this
-	 * in advance, if we checkpoint and then crash between inplace
-	 * XLogInsert() and inval, we don't rely on StartupXLOG() ->
-	 * RelationCacheInitFileRemove().  That uses elevel==LOG, so replay would
-	 * neglect to PANIC on EIO.
-	 */
-	PreInplace_Inval();
-
 	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
 
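Note (editorial, not part of the diff): the block removed above encodes an ordering argument that is easy to miss. Here is a condensed C sketch of the sequence the removed code enforced, compressed from this function's control flow; the function names are exactly those visible in the removed lines, and everything between them is elided:

	/* Condensed sketch of the removed ordering; not verbatim source. */
	CacheInvalidateHeapTupleInplace(relation, tuple, NULL); /* build inval message */
	PreInplace_Inval();	/* unlink relcache init files now; hold RelCacheInitLock */

	START_CRIT_SECTION();
	/* ... overwrite the tuple in place, MarkBufferDirty(), XLogInsert() ... */
	END_CRIT_SECTION();

Doing the unlink before XLogInsert() means a checkpoint followed by a crash in between leaves no stale init file for recovery to clean up. Otherwise the code would be relying on StartupXLOG() -> RelationCacheInitFileRemove(), which reports failures at elevel==LOG, so replay would not PANIC on EIO.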
@@ -6176,28 +6158,17 @@ heap_inplace_update_and_unlock(Relation relation,
 		PageSetLSN(BufferGetPage(buffer), recptr);
 	}
 
-	LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
-
-	/*
-	 * Send invalidations to shared queue.  SearchSysCacheLocked1() assumes we
-	 * do this before UnlockTuple().
-	 *
-	 * If we're mutating a tuple visible only to this transaction, there's an
-	 * equivalent transactional inval from the action that created the tuple,
-	 * and this inval is superfluous.
-	 */
-	AtInplace_Inval();
-
 	END_CRIT_SECTION();
-	UnlockTuple(relation, &tuple->t_self, InplaceUpdateTupleLock);
 
-	AcceptInvalidationMessages();	/* local processing of just-sent inval */
+	heap_inplace_unlock(relation, oldtup, buffer);
 
 	/*
-	 * Queue a transactional inval.  The immediate invalidation we just sent
-	 * is the only one known to be necessary.  To reduce risk from the
-	 * transition to immediate invalidation, continue sending a transactional
-	 * invalidation like we've long done.  Third-party code might rely on it.
+	 * Send out shared cache inval if necessary.  Note that because we only
+	 * pass the new version of the tuple, this mustn't be used for any
+	 * operations that could change catcache lookup keys.  But we aren't
+	 * bothering with index updates either, so that's true a fortiori.
+	 *
+	 * XXX ROLLBACK discards the invalidation.  See test inplace-inval.spec.
 	 */
 	if (!IsBootstrapProcessingMode())
 		CacheInvalidateHeapTuple(relation, tuple, NULL);
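Note (editorial, not part of the diff): the added heap_inplace_unlock() call consolidates the inline unlock steps this hunk deletes. Judging only from those deleted lines (the buffer-lock release and the UnlockTuple() call), the helper presumably amounts to something like the following; this is a sketch reconstructed from the diff, not a quotation of the helper's actual definition:

	/* Presumed body of heap_inplace_unlock(), inferred from the calls
	 * this hunk removes; not verbatim source. */
	void
	heap_inplace_unlock(Relation relation, HeapTuple oldtup, Buffer buffer)
	{
		LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
		UnlockTuple(relation, &oldtup->t_self, InplaceUpdateTupleLock);
	}

The ordering consequence is visible in the hunk itself: with AtInplace_Inval() and AcceptInvalidationMessages() gone, the only invalidation left is the transactional CacheInvalidateHeapTuple() queued after the locks are released, and, as the restored comment says, ROLLBACK discards it (see test inplace-inval.spec).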