@@ -2067,26 +2067,31 @@ heap_insert(Relation relation, HeapTuple tup, CommandId cid,
 	 */
 	heaptup = heap_prepare_insert(relation, tup, xid, cid, options);
 
+	/*
+	 * Find buffer to insert this tuple into.  If the page is all visible,
+	 * this will also pin the requisite visibility map page.
+	 */
+	buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
+									   InvalidBuffer, options, bistate,
+									   &vmbuffer, NULL);
+
 	/*
 	 * We're about to do the actual insert -- but check for conflict first, to
 	 * avoid possibly having to roll back work we've just done.
 	 *
+	 * This is safe without a recheck as long as there is no possibility of
+	 * another process scanning the page between this check and the insert
+	 * being visible to the scan (i.e., an exclusive buffer content lock is
+	 * continuously held from this point until the tuple insert is visible).
+	 *
 	 * For a heap insert, we only need to check for table-level SSI locks. Our
 	 * new tuple can't possibly conflict with existing tuple locks, and heap
 	 * page locks are only consolidated versions of tuple locks; they do not
-	 * lock "gaps" as index page locks do.  So we don't need to identify a
-	 * buffer before making the call.
+	 * lock "gaps" as index page locks do.  So we don't need to specify a
+	 * buffer when making the call, which makes for a faster check.
 	 */
 	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
 
-	/*
-	 * Find buffer to insert this tuple into.  If the page is all visible,
-	 * this will also pin the requisite visibility map page.
-	 */
-	buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
-									   InvalidBuffer, options, bistate,
-									   &vmbuffer, NULL);
-
 	/* NO EREPORT(ERROR) from here till changes are logged */
 	START_CRIT_SECTION();
 
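What the hunk above buys: RelationGetBufferForTuple() returns the target page pinned and exclusively locked, so moving it ahead of the SSI check means the check runs inside a window in which no concurrent scan can read the page. A condensed sketch of the resulting heap_insert() flow (visibility-map handling, WAL details, and error paths elided; not the verbatim function body):

	heaptup = heap_prepare_insert(relation, tup, xid, cid, options);

	/* Pin and exclusive-lock the target page before any conflict check. */
	buffer = RelationGetBufferForTuple(relation, heaptup->t_len,
									   InvalidBuffer, options, bistate,
									   &vmbuffer, NULL);

	/* Safe without a recheck: the content lock is held from here on. */
	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);

	START_CRIT_SECTION();
	/* ... place heaptup on the page, dirty the buffer, emit WAL ... */
	END_CRIT_SECTION();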
@@ -2340,13 +2345,26 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 
 	/*
 	 * We're about to do the actual inserts -- but check for conflict first,
-	 * to avoid possibly having to roll back work we've just done.
+	 * to minimize the possibility of having to roll back work we've just
+	 * done.
 	 *
-	 * For a heap insert, we only need to check for table-level SSI locks. Our
-	 * new tuple can't possibly conflict with existing tuple locks, and heap
+	 * A check here does not definitively prevent a serialization anomaly;
+	 * that check MUST be done at least past the point of acquiring an
+	 * exclusive buffer content lock on every buffer that will be affected,
+	 * and MAY be done after all inserts are reflected in the buffers and
+	 * those locks are released; otherwise there is a race condition.  Since
+	 * multiple buffers can be locked and unlocked in the loop below, and it
+	 * would not be feasible to identify and lock all of those buffers before
+	 * the loop, we must do a final check at the end.
+	 *
+	 * The check here could be omitted with no loss of correctness; it is
+	 * present strictly as an optimization.
+	 *
+	 * For heap inserts, we only need to check for table-level SSI locks. Our
+	 * new tuples can't possibly conflict with existing tuple locks, and heap
 	 * page locks are only consolidated versions of tuple locks; they do not
-	 * lock "gaps" as index page locks do.  So we don't need to identify a
-	 * buffer before making the call.
+	 * lock "gaps" as index page locks do.  So we don't need to specify a
+	 * buffer when making the call, which makes for a faster check.
 	 */
 	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
 
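Unlike the single-tuple path, heap_multi_insert() cannot hold one content lock across all its work: the loop takes and releases a lock per target page. Hence the split into an optional fast check up front and a mandatory one afterwards. A rough skeleton under that reading (variable names as in the surrounding function; per-page details elided):

	/* Optimization only: may catch a doomed transaction before any work. */
	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);

	while (ndone < ntuples)
	{
		/* Lock one target page, fill it, release it; the window reopens. */
		buffer = RelationGetBufferForTuple(relation, heaptuples[ndone]->t_len,
										   InvalidBuffer, options, bistate,
										   &vmbuffer, NULL);
		/* ... place nthispage tuples under one critical section ... */
		UnlockReleaseBuffer(buffer);
		ndone += nthispage;
	}

	/* Required for correctness: all inserts are visible to scans by now. */
	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);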
@@ -2538,6 +2556,22 @@ heap_multi_insert(Relation relation, HeapTuple *tuples, int ntuples,
 		ndone += nthispage;
 	}
 
+	/*
+	 * We're done with the actual inserts.  Check for conflicts again, to
+	 * ensure that all rw-conflicts in to these inserts are detected.  Without
+	 * this final check, a sequential scan of the heap may have locked the
+	 * table after the "before" check, missing one opportunity to detect the
+	 * conflict, and then scanned the table before the new tuples were there,
+	 * missing the other chance to detect the conflict.
+	 *
+	 * For heap inserts, we only need to check for table-level SSI locks. Our
+	 * new tuples can't possibly conflict with existing tuple locks, and heap
+	 * page locks are only consolidated versions of tuple locks; they do not
+	 * lock "gaps" as index page locks do.  So we don't need to specify a
+	 * buffer when making the call.
+	 */
+	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
+
 	/*
 	 * If tuples are cachable, mark them for invalidation from the caches in
 	 * case we abort.  Note it is OK to do this after releasing the buffer,
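To see why the final check is the one that matters, it helps to write out the interleaving that the comment in this hunk describes; with only the "before" check, both detection opportunities can be missed:

	/*
	 * inserter                             serializable scanner
	 * --------                             --------------------
	 * CheckForSerializableConflictIn()
	 *   -> no predicate lock yet, so no conflict seen
	 *                                      take relation predicate lock
	 *                                      scan heap: new tuples not yet
	 *                                      present, so no conflict seen
	 * lock pages, insert tuples, unlock
	 *
	 * The final check runs after the tuples are visible, sees the scanner's
	 * predicate lock, and records the rw-conflict.
	 */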
@@ -2828,6 +2862,11 @@ heap_delete(Relation relation, ItemPointer tid,
 	/*
 	 * We're about to do the actual delete -- check for conflict first, to
 	 * avoid possibly having to roll back work we've just done.
+	 *
+	 * This is safe without a recheck as long as there is no possibility of
+	 * another process scanning the page between this check and the delete
+	 * being visible to the scan (i.e., an exclusive buffer content lock is
+	 * continuously held from this point until the tuple delete is visible).
 	 */
 	CheckForSerializableConflictIn(relation, &tp, buffer);
 
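Note the different call form here: the delete path passes the target tuple and its (already exclusively locked) buffer, whereas the insert paths pass NULL and InvalidBuffer. As the insert-path comments note, the latter restricts the check to table-level SSI locks; supplying a tuple and buffer additionally checks predicate locks held at tuple and page granularity on the target, which a delete can conflict with but a brand-new insert cannot. Side by side:

	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);	/* table-level locks only */
	CheckForSerializableConflictIn(relation, &tp, buffer);			/* tuple and page locks too */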
@@ -3449,12 +3488,6 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 		goto l2;
 	}
 
-	/*
-	 * We're about to do the actual update -- check for conflict first, to
-	 * avoid possibly having to roll back work we've just done.
-	 */
-	CheckForSerializableConflictIn(relation, &oldtup, buffer);
-
 	/* Fill in transaction status data */
 
 	/*
@@ -3643,14 +3676,20 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 	}
 
 	/*
-	 * We're about to create the new tuple -- check for conflict first, to
+	 * We're about to do the actual update -- check for conflict first, to
 	 * avoid possibly having to roll back work we've just done.
 	 *
-	 * NOTE: For a tuple insert, we only need to check for table locks, since
-	 * predicate locking at the index level will cover ranges for anything
-	 * except a table scan.  Therefore, only provide the relation.
+	 * This is safe without a recheck as long as there is no possibility of
+	 * another process scanning the pages between this check and the update
+	 * being visible to the scan (i.e., exclusive buffer content lock(s) are
+	 * continuously held from this point until the tuple update is visible).
+	 *
+	 * For the new tuple the only check needed is at the relation level, but
+	 * since both tuples are in the same relation and the check for oldtup
+	 * will include checking the relation level, there is no benefit to a
+	 * separate check for the new tuple.
 	 */
-	CheckForSerializableConflictIn(relation, NULL, InvalidBuffer);
+	CheckForSerializableConflictIn(relation, &oldtup, buffer);
 
 	/*
 	 * At this point newbuf and buffer are both pinned and locked, and newbuf
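Net effect of the two heap_update() hunks: the early table-level check (removed in the earlier hunk) and this later check are folded into a single tuple-level check on oldtup, issued only once every affected buffer content lock is held. Sketched under that reading (page selection and the lock-ordering dance elided):

	/* ... at this point `buffer` (the old tuple's page) and, if the new
	 * version lives elsewhere, `newbuf` are pinned and exclusively locked ... */

	/*
	 * One check covers both tuples: checking oldtup includes the
	 * relation-level lookup, which is all a new tuple needs anyway.
	 */
	CheckForSerializableConflictIn(relation, &oldtup, buffer);

	START_CRIT_SECTION();
	/* ... mark old tuple updated, place new tuple on newbuf/buffer, WAL ... */
	END_CRIT_SECTION();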