@@ -2035,26 +2035,31 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	 */
 	heaptup = heap_prepare_insert(relation, tup, xid, cid, options);

+	/*
+	 * Find buffer to insert this tuple into.  If the page is all visible,
+	 * this will also pin the requisite visibility map page.
+	 */
+	buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
+									   InvalidBuffer, options, bistate,
+									   &vmbuffer, NULL);
+
 	/*
 	 * We're about to do the actual insert -- but check for conflict first, to
 	 * avoid possibly having to roll back work we've just done.
 	 *
+	 * This is safe without a recheck as long as there is no possibility of
+	 * another process scanning the page between this check and the insert
+	 * being visible to the scan (i.e., an exclusive buffer content lock is
+	 * continuously held from this point until the tuple insert is visible).
+	 *
 	 * For a heap insert, we only need to check for table-level SSI locks. Our
 	 * new tuple can't possibly conflict with existing tuple locks, and heap
 	 * page locks are only consolidated versions of tuple locks; they do not
-	 * lock "gaps" as index page locks do.  So we don't need to identify a
-	 * buffer before making the call.
+	 * lock "gaps" as index page locks do.  So we don't need to specify a
+	 * buffer when making the call, which makes for a faster check.
 	 */
 	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);

-	/*
-	 * Find buffer to insert this tuple into.  If the page is all visible,
-	 * this will also pin the requisite visibility map page.
-	 */
-	buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
-									   InvalidBuffer, options, bistate,
-									   &vmbuffer, NULL);
-
 	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();

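The hunk above moves RelationGetBufferForTuple ahead of the SSI check, so the exclusive buffer content lock is already held when CheckForSerializableConflictIn runs and is kept until the insert is visible. Below is a toy model of why that ordering closes the race, using plain pthreads instead of PostgreSQL's buffer manager and predicate-lock machinery; every name in it is illustrative, not from heapam.c.

/*
 * Toy model (not PostgreSQL code): the insert's SSI check is safe without
 * a recheck because the page's exclusive content lock is held continuously
 * from before the check until the new tuple is visible.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;
static bool predicate_lock_taken = false;	/* stand-in for a table-level SSI lock */
static int	ntuples = 0;					/* stand-in for the page contents */

static void *writer(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&page_lock);			/* lock taken first, as in the new ordering */
	bool conflict = predicate_lock_taken;	/* the conflict check */
	ntuples++;								/* tuple becomes visible while still locked */
	pthread_mutex_unlock(&page_lock);
	printf("writer: conflict %s\n", conflict ? "detected" : "absent");
	return NULL;
}

static void *scanner(void *arg)
{
	(void) arg;
	pthread_mutex_lock(&page_lock);			/* a scan holds the content lock too */
	predicate_lock_taken = true;			/* predicate lock acquired during the scan */
	printf("scanner: saw %d tuple(s)\n", ntuples);
	pthread_mutex_unlock(&page_lock);
	return NULL;
}

int main(void)
{
	pthread_t w, s;
	pthread_create(&w, NULL, writer, NULL);
	pthread_create(&s, NULL, scanner, NULL);
	pthread_join(w, NULL);
	pthread_join(s, NULL);
	/*
	 * Either interleaving is safe: the scan runs wholly before the check
	 * (the writer sees the predicate lock) or wholly after the insert (the
	 * scanner sees the tuple).  Checking *before* taking the lock would
	 * reopen the window in which both sides miss each other.
	 */
	return 0;
}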
@@ -2278,13 +2283,26 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 
 	/*
 	 * We're about to do the actual inserts -- but check for conflict first,
-	 * to avoid possibly having to roll back work we've just done.
+	 * to minimize the possibility of having to roll back work we've just
+	 * done.
 	 *
-	 * For a heap insert, we only need to check for table-level SSI locks. Our
-	 * new tuple can't possibly conflict with existing tuple locks, and heap
+	 * A check here does not definitively prevent a serialization anomaly;
+	 * that check MUST be done at least past the point of acquiring an
+	 * exclusive buffer content lock on every buffer that will be affected,
+	 * and MAY be done after all inserts are reflected in the buffers and
+	 * those locks are released; otherwise there is a race condition.  Since
+	 * multiple buffers can be locked and unlocked in the loop below, and it
+	 * would not be feasible to identify and lock all of those buffers before
+	 * the loop, we must do a final check at the end.
+	 *
+	 * The check here could be omitted with no loss of correctness; it is
+	 * present strictly as an optimization.
+	 *
+	 * For heap inserts, we only need to check for table-level SSI locks. Our
+	 * new tuples can't possibly conflict with existing tuple locks, and heap
 	 * page locks are only consolidated versions of tuple locks; they do not
-	 * lock "gaps" as index page locks do.  So we don't need to identify a
-	 * buffer before making the call.
+	 * lock "gaps" as index page locks do.  So we don't need to specify a
+	 * buffer when making the call, which makes for a faster check.
 	 */
 	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);

@@ -2444,6 +2462,22 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 		ndone += nthispage;
 	}

+	/*
+	 * We're done with the actual inserts.  Check for conflicts again, to
+	 * ensure that all rw-conflicts in to these inserts are detected.  Without
+	 * this final check, a sequential scan of the heap may have locked the
+	 * table after the "before" check, missing one opportunity to detect the
+	 * conflict, and then scanned the table before the new tuples were there,
+	 * missing the other chance to detect the conflict.
+	 *
+	 * For heap inserts, we only need to check for table-level SSI locks. Our
+	 * new tuples can't possibly conflict with existing tuple locks, and heap
+	 * page locks are only consolidated versions of tuple locks; they do not
+	 * lock "gaps" as index page locks do.  So we don't need to specify a
+	 * buffer when making the call.
+	 */
+	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
+
 	/*
 	 * If tuples are cachable, mark them for invalidation from the caches in
 	 * case we abort.  Note it is OK to do this after releasing the buffer,
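Taken with the previous hunk, the shape of heap_multi_insert is now: an optional early check that is a pure optimization, then the insert loop, then a mandatory final check once every affected buffer has been through its lock/unlock cycle. Below is a self-contained sketch of that placement; the types are stubs and put_a_pageful is a hypothetical stand-in, with only the position of the two checks reflecting the actual code.

/*
 * Structural sketch of the check placement in heap_multi_insert after this
 * commit.  Stub types; not the real heapam.c.
 */
#include <stdio.h>
#include <stddef.h>

typedef struct { const char *name; } Relation;
typedef struct { int id; } HeapTuple;
#define InvalidBuffer (-1)

/* Stub: the real function raises a serialization error on a rw-conflict. */
static void CheckForSerializableConflictIn(Relation *rel, HeapTuple *tup, int buffer)
{
	(void) tup; (void) buffer;
	printf("SSI check on %s\n", rel->name);
}

/* Hypothetical stand-in for one lock/fill/unlock cycle over a single page. */
static int put_a_pageful(Relation *rel, HeapTuple *tups, int remaining)
{
	(void) rel; (void) tups;
	int nthispage = remaining < 10 ? remaining : 10;
	printf("placed %d tuple(s) on one page\n", nthispage);
	return nthispage;
}

static void multi_insert_sketch(Relation *rel, HeapTuple *tups, int ntuples)
{
	/* Early check: pure optimization; omitting it would lose no correctness. */
	CheckForSerializableConflictIn(rel, NULL, InvalidBuffer);

	for (int ndone = 0; ndone < ntuples;)
		ndone += put_a_pageful(rel, tups + ndone, ntuples - ndone);

	/*
	 * Final check: mandatory.  Every affected buffer has been locked and
	 * unlocked by now, so a scan that took its predicate lock after the
	 * early check but scanned before the tuples appeared is caught here.
	 */
	CheckForSerializableConflictIn(rel, NULL, InvalidBuffer);
}

int main(void)
{
	Relation rel = { "demo_table" };
	HeapTuple tups[25] = { { 0 } };
	multi_insert_sketch(&rel, tups, 25);
	return 0;
}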
@@ -2730,6 +2764,11 @@ heap_delete(Relation relation, ItemPointer tid,
 	/*
 	 * We're about to do the actual delete -- check for conflict first, to
 	 * avoid possibly having to roll back work we've just done.
+	 *
+	 * This is safe without a recheck as long as there is no possibility of
+	 * another process scanning the page between this check and the delete
+	 * being visible to the scan (i.e., an exclusive buffer content lock is
+	 * continuously held from this point until the tuple delete is visible).
 	 */
 	CheckForSerializableConflictIn(relation, &tp, buffer);

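Note the call shape in this hunk compared with the insert paths: the delete passes &tp and buffer, while heap_insert and heap_multi_insert pass NULL and InvalidBuffer. A delete can conflict with predicate locks held on the existing tuple at tuple, page, or relation granularity, so the tuple and its buffer are supplied; an insert's brand-new tuple can only conflict at the relation level. Side by side, as the two forms appear in this diff:

	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);	/* inserts: table-level only */
	CheckForSerializableConflictIn(relation, &tp, buffer);			/* delete: tuple/page/table */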
@@ -3299,12 +3338,6 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 		goto l2;
 	}

-	/*
-	 * We're about to do the actual update -- check for conflict first, to
-	 * avoid possibly having to roll back work we've just done.
-	 */
-	CheckForSerializableConflictIn(relation, &oldtup, buffer);
-
 	/* Fill in transaction status data */

 	/*
@@ -3493,14 +3526,20 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 	}

 	/*
-	 * We're about to create the new tuple -- check for conflict first, to
+	 * We're about to do the actual update -- check for conflict first, to
 	 * avoid possibly having to roll back work we've just done.
 	 *
-	 * NOTE: For a tuple insert, we only need to check for table locks, since
-	 * predicate locking at the index level will cover ranges for anything
-	 * except a table scan.  Therefore, only provide the relation.
+	 * This is safe without a recheck as long as there is no possibility of
+	 * another process scanning the pages between this check and the update
+	 * being visible to the scan (i.e., exclusive buffer content lock(s) are
+	 * continuously held from this point until the tuple update is visible).
+	 *
+	 * For the new tuple the only check needed is at the relation level, but
+	 * since both tuples are in the same relation and the check for oldtup
+	 * will include checking the relation level, there is no benefit to a
+	 * separate check for the new tuple.
 	 */
-	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
+	CheckForSerializableConflictIn(relation, &oldtup, buffer);

 	/*
 	 * At this point newbuf and buffer are both pinned and locked, and newbuf
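The net effect of the last two hunks: heap_update's single SSI check now runs only after both buffer content locks are held, and checking oldtup subsumes the relation-level check the old code made separately. A self-contained sketch of that ordering follows, with pthread mutexes standing in for buffer content locks; all names are illustrative, not the real heap_update.

/*
 * Sketch of the ordering established above: both locks are held before the
 * single SSI check and stay held until the update is in place, so no
 * recheck is needed.
 */
#include <stdio.h>
#include <pthread.h>

static pthread_mutex_t buffer = PTHREAD_MUTEX_INITIALIZER;	/* old tuple's page */
static pthread_mutex_t newbuf = PTHREAD_MUTEX_INITIALIZER;	/* new tuple's page */

static void ssi_check_oldtup(void)
{
	/*
	 * Stand-in for CheckForSerializableConflictIn(relation, &oldtup, buffer).
	 * Checking oldtup also covers the relation level, which is why the old
	 * separate (relation, NULL, InvalidBuffer) call could be dropped.
	 */
	printf("SSI check: oldtup, its page, and the relation\n");
}

static void heap_update_sketch(void)
{
	pthread_mutex_lock(&buffer);	/* lock the old tuple's page */
	/* ... visibility checks; choose a target page for the new version ... */
	pthread_mutex_lock(&newbuf);	/* lock the page receiving the new version */

	/* The one check now happens here, after all affected buffers are locked. */
	ssi_check_oldtup();

	/* ... mark the old version updated, write the new one, still locked ... */
	pthread_mutex_unlock(&newbuf);
	pthread_mutex_unlock(&buffer);
}

int main(void)
{
	heap_update_sketch();
	return 0;
}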