@@ -8,7 +8,7 @@
  *
  *
  * IDENTIFICATION
- *	  $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.113 2001/03/25 23:23:58 tgl Exp $
+ *	  $Header: /cvsroot/pgsql/src/backend/access/heap/heapam.c,v 1.113.2.1 2001/05/17 00:48:45 tgl Exp $
  *
  *
  * INTERFACE ROUTINES
@@ -1578,6 +1578,8 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
                 newbuf;
     bool        need_toast,
                 already_marked;
+    Size        newtupsize,
+                pagefree;
     int         result;
 
     /* increment access statistics */
@@ -1685,8 +1687,10 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
                   HeapTupleHasExtended(newtup) ||
                   (MAXALIGN(newtup->t_len) > TOAST_TUPLE_THRESHOLD));
 
-    if (need_toast ||
-        (unsigned) MAXALIGN(newtup->t_len) > PageGetFreeSpace((Page) dp))
+    newtupsize = MAXALIGN(newtup->t_len);
+    pagefree = PageGetFreeSpace((Page) dp);
+
+    if (need_toast || newtupsize > pagefree)
     {
         _locked_tuple_.node = relation->rd_node;
         _locked_tuple_.tid = oldtup.t_self;
@@ -1704,17 +1708,59 @@ heap_update(Relation relation, ItemPointer otid, HeapTuple newtup,
 
         /* Let the toaster do its thing */
         if (need_toast)
+        {
             heap_tuple_toast_attrs(relation, newtup, &oldtup);
+            newtupsize = MAXALIGN(newtup->t_len);
+        }
 
-        /* Now, do we need a new page for the tuple, or not? */
-        if ((unsigned) MAXALIGN(newtup->t_len) <= PageGetFreeSpace((Page) dp))
-            newbuf = buffer;
-        else
+        /*
+         * Now, do we need a new page for the tuple, or not?  This is a bit
+         * tricky since someone else could have added tuples to the page
+         * while we weren't looking.  We have to recheck the available space
+         * after reacquiring the buffer lock.  But don't bother to do that
+         * if the former amount of free space is still not enough; it's
+         * unlikely there's more free now than before.
+         *
+         * What's more, if we need to get a new page, we will need to acquire
+         * buffer locks on both old and new pages.  To avoid deadlock against
+         * some other backend trying to get the same two locks in the other
+         * order, we must be consistent about the order we get the locks in.
+         * We use the rule "lock the higher-numbered page of the relation
+         * first".  To implement this, we must do RelationGetBufferForTuple
+         * while not holding the lock on the old page.  In 7.1, that routine
+         * always returns the last page of the relation, so the rule holds.
+         * (7.2 will use a different approach, but the same ordering rule.)
+         */
+        if (newtupsize > pagefree)
+        {
+            /* Assume there's no chance to put newtup on same page. */
             newbuf = RelationGetBufferForTuple(relation, newtup->t_len);
-
-        /* Re-acquire the lock on the old tuple's page. */
-        /* this seems to be deadlock free... */
-        LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+            /* Now reacquire lock on old tuple's page. */
+            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+        }
+        else
+        {
+            /* Re-acquire the lock on the old tuple's page. */
+            LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+            /* Re-check using the up-to-date free space */
+            pagefree = PageGetFreeSpace((Page) dp);
+            if (newtupsize > pagefree)
+            {
+                /*
+                 * Rats, it doesn't fit anymore.  We must now unlock and
+                 * relock to avoid deadlock.  Fortunately, this path should
+                 * seldom be taken.
+                 */
+                LockBuffer(buffer, BUFFER_LOCK_UNLOCK);
+                newbuf = RelationGetBufferForTuple(relation, newtup->t_len);
+                LockBuffer(buffer, BUFFER_LOCK_EXCLUSIVE);
+            }
+            else
+            {
+                /* OK, it fits here, so we're done. */
+                newbuf = buffer;
+            }
+        }
     }
     else
     {
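
The comment added in the last hunk describes two rules: lock pages of a relation in a fixed order ("higher-numbered page first") so two backends can never deadlock on the same pair of pages, and re-check the old page's free space after its buffer lock has been reacquired, since another backend may have consumed the space in the meantime. The following is a minimal standalone C sketch of that control flow, not PostgreSQL source: Page, Relation, lock_page(), unlock_page(), free_space() and extend_relation() are hypothetical stand-ins for the buffer manager, and extend_relation() is assumed to return the relation's last (highest-numbered) page already locked, which is what makes the ordering rule hold.

/*
 * Standalone illustration -- NOT PostgreSQL source.  All types and helpers
 * here are hypothetical stand-ins for the buffer manager and page layer.
 */
#include <pthread.h>
#include <stdio.h>

typedef struct Page
{
    int             pageno;     /* position of the page in the relation */
    size_t          freespace;  /* bytes currently free on the page */
    pthread_mutex_t lock;       /* stands in for the buffer content lock */
} Page;

typedef struct Relation
{
    Page   *pages;
    int     npages;
} Relation;

static void   lock_page(Page *p)   { pthread_mutex_lock(&p->lock); }
static void   unlock_page(Page *p) { pthread_mutex_unlock(&p->lock); }
static size_t free_space(Page *p)  { return p->freespace; }

/*
 * Return the last (highest-numbered) page of the relation, locked.
 * Locking it before the lower-numbered old page obeys the ordering rule.
 */
static Page *
extend_relation(Relation *rel)
{
    Page   *p = &rel->pages[rel->npages - 1];

    lock_page(p);
    return p;
}

/*
 * Mirror of the patched control flow: pagefree is the free space seen on
 * oldpage before its lock was released, so it may be stale.  We trust it
 * only in the "does not fit" direction; otherwise we re-check after
 * reacquiring the lock.  On return, oldpage is locked and the returned
 * page (also locked, if different) is where the tuple should go.
 */
static Page *
choose_target_page(Relation *rel, Page *oldpage,
                   size_t newtupsize, size_t pagefree)
{
    Page   *newpage;

    if (newtupsize > pagefree)
    {
        /* Assume there's no chance to put the tuple on the same page. */
        newpage = extend_relation(rel);     /* higher-numbered page first */
        lock_page(oldpage);                 /* lower-numbered page second */
        return newpage;
    }

    /* It looked like it fits; reacquire the old lock and re-check. */
    lock_page(oldpage);
    if (newtupsize > free_space(oldpage))
    {
        /*
         * Space vanished while the lock was released: unlock, then redo
         * the acquisition in the deadlock-free order.
         */
        unlock_page(oldpage);
        newpage = extend_relation(rel);
        lock_page(oldpage);
        return newpage;
    }
    return oldpage;                         /* still fits on the old page */
}

int
main(void)
{
    Page        pages[2] = {{0, 32}, {1, 8192}};
    Relation    rel = {pages, 2};
    Page       *target;

    pthread_mutex_init(&pages[0].lock, NULL);
    pthread_mutex_init(&pages[1].lock, NULL);

    /* A 100-byte tuple; page 0 had only 32 bytes free when last seen. */
    target = choose_target_page(&rel, &pages[0], 100, 32);
    printf("tuple goes on page %d\n", target->pageno);
    return 0;
}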