Skip to content

Commit 7819ba1

Browse files
committed
Remove _hash_chgbufaccess().
This is basically for the same reasons I got rid of _hash_wrtbuf() in commit 25216c9: it's not convenient to have a function which encapsulates MarkBufferDirty(), especially as we move towards having hash indexes be WAL-logged. Patch by me, reviewed (but not entirely endorsed) by Amit Kapila.
1 parent 0a85c10 commit 7819ba1

File tree

6 files changed

+66
-85
lines changed

6 files changed

+66
-85
lines changed

src/backend/access/hash/hash.c

Lines changed: 10 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -274,7 +274,7 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
274274
* Reacquire the read lock here.
275275
*/
276276
if (BufferIsValid(so->hashso_curbuf))
277-
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_NOLOCK, HASH_READ);
277+
LockBuffer(so->hashso_curbuf, BUFFER_LOCK_SHARE);
278278

279279
/*
280280
* If we've already initialized this scan, we can just advance it in the
@@ -354,7 +354,7 @@ hashgettuple(IndexScanDesc scan, ScanDirection dir)
354354

355355
/* Release read lock on current buffer, but keep it pinned */
356356
if (BufferIsValid(so->hashso_curbuf))
357-
_hash_chgbufaccess(rel, so->hashso_curbuf, HASH_READ, HASH_NOLOCK);
357+
LockBuffer(so->hashso_curbuf, BUFFER_LOCK_UNLOCK);
358358

359359
/* Return current heap TID on success */
360360
scan->xs_ctup.t_self = so->hashso_heappos;
@@ -524,7 +524,7 @@ hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
524524
orig_ntuples = metap->hashm_ntuples;
525525
memcpy(&local_metapage, metap, sizeof(local_metapage));
526526
/* release the lock, but keep pin */
527-
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
527+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
528528

529529
/* Scan the buckets that we know exist */
530530
cur_bucket = 0;
@@ -576,9 +576,9 @@ hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
576576
* (and thus can't be further split), update our cached metapage
577577
* data.
578578
*/
579-
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
579+
LockBuffer(metabuf, BUFFER_LOCK_SHARE);
580580
memcpy(&local_metapage, metap, sizeof(local_metapage));
581-
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
581+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
582582
}
583583

584584
bucket_buf = buf;
@@ -597,15 +597,15 @@ hashbulkdelete(IndexVacuumInfo *info, IndexBulkDeleteResult *stats,
597597
}
598598

599599
/* Write-lock metapage and check for split since we started */
600-
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
600+
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
601601
metap = HashPageGetMeta(BufferGetPage(metabuf));
602602

603603
if (cur_maxbucket != metap->hashm_maxbucket)
604604
{
605605
/* There's been a split, so process the additional bucket(s) */
606606
cur_maxbucket = metap->hashm_maxbucket;
607607
memcpy(&local_metapage, metap, sizeof(local_metapage));
608-
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
608+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
609609
goto loop_top;
610610
}
611611

@@ -821,7 +821,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
821821
* page
822822
*/
823823
if (retain_pin)
824-
_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
824+
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
825825
else
826826
_hash_relbuf(rel, buf);
827827

@@ -836,7 +836,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
836836
if (buf != bucket_buf)
837837
{
838838
_hash_relbuf(rel, buf);
839-
_hash_chgbufaccess(rel, bucket_buf, HASH_NOLOCK, HASH_WRITE);
839+
LockBuffer(bucket_buf, BUFFER_LOCK_EXCLUSIVE);
840840
}
841841

842842
/*
@@ -866,7 +866,7 @@ hashbucketcleanup(Relation rel, Bucket cur_bucket, Buffer bucket_buf,
866866
_hash_squeezebucket(rel, cur_bucket, bucket_blkno, bucket_buf,
867867
bstrategy);
868868
else
869-
_hash_chgbufaccess(rel, bucket_buf, HASH_READ, HASH_NOLOCK);
869+
LockBuffer(bucket_buf, BUFFER_LOCK_UNLOCK);
870870
}
871871

872872
void

src/backend/access/hash/hashinsert.c

Lines changed: 8 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -104,7 +104,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
104104
lowmask = metap->hashm_lowmask;
105105

106106
/* Release metapage lock, but keep pin. */
107-
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
107+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
108108

109109
/*
110110
* If the previous iteration of this loop locked the primary page of
@@ -125,7 +125,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
125125
* Reacquire metapage lock and check that no bucket split has taken
126126
* place while we were awaiting the bucket lock.
127127
*/
128-
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_READ);
128+
LockBuffer(metabuf, BUFFER_LOCK_SHARE);
129129
oldblkno = blkno;
130130
retry = true;
131131
}
@@ -149,7 +149,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
149149
if (H_BUCKET_BEING_SPLIT(pageopaque) && IsBufferCleanupOK(buf))
150150
{
151151
/* release the lock on bucket buffer, before completing the split. */
152-
_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
152+
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
153153

154154
_hash_finish_split(rel, metabuf, buf, pageopaque->hasho_bucket,
155155
maxbucket, highmask, lowmask);
@@ -180,7 +180,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
180180
if (buf != bucket_buf)
181181
_hash_relbuf(rel, buf);
182182
else
183-
_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
183+
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
184184
buf = _hash_getbuf(rel, nextblkno, HASH_WRITE, LH_OVERFLOW_PAGE);
185185
page = BufferGetPage(buf);
186186
}
@@ -192,7 +192,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
192192
*/
193193

194194
/* release our write lock without modifying buffer */
195-
_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
195+
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
196196

197197
/* chain to a new overflow page */
198198
buf = _hash_addovflpage(rel, metabuf, buf, (buf == bucket_buf) ? true : false);
@@ -223,7 +223,7 @@ _hash_doinsert(Relation rel, IndexTuple itup)
223223
* Write-lock the metapage so we can increment the tuple count. After
224224
* incrementing it, check to see if it's time for a split.
225225
*/
226-
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
226+
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
227227

228228
metap->hashm_ntuples += 1;
229229

@@ -232,7 +232,8 @@ _hash_doinsert(Relation rel, IndexTuple itup)
232232
(double) metap->hashm_ffactor * (metap->hashm_maxbucket + 1);
233233

234234
/* Write out the metapage and drop lock, but keep pin */
235-
_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
235+
MarkBufferDirty(metabuf);
236+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
236237

237238
/* Attempt to split if a split is needed */
238239
if (do_expand)

src/backend/access/hash/hashovfl.c

Lines changed: 17 additions & 15 deletions
Original file line number | Diff line number | Diff line change
@@ -110,7 +110,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
110110
* Write-lock the tail page. It is okay to hold two buffer locks here
111111
* since there cannot be anyone else contending for access to ovflbuf.
112112
*/
113-
_hash_chgbufaccess(rel, buf, HASH_NOLOCK, HASH_WRITE);
113+
LockBuffer(buf, BUFFER_LOCK_EXCLUSIVE);
114114

115115
/* probably redundant... */
116116
_hash_checkpage(rel, buf, LH_BUCKET_PAGE | LH_OVERFLOW_PAGE);
@@ -129,7 +129,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
129129

130130
/* we assume we do not need to write the unmodified page */
131131
if ((pageopaque->hasho_flag & LH_BUCKET_PAGE) && retain_pin)
132-
_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
132+
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
133133
else
134134
_hash_relbuf(rel, buf);
135135

@@ -151,7 +151,7 @@ _hash_addovflpage(Relation rel, Buffer metabuf, Buffer buf, bool retain_pin)
151151
pageopaque->hasho_nextblkno = BufferGetBlockNumber(ovflbuf);
152152
MarkBufferDirty(buf);
153153
if ((pageopaque->hasho_flag & LH_BUCKET_PAGE) && retain_pin)
154-
_hash_chgbufaccess(rel, buf, HASH_READ, HASH_NOLOCK);
154+
LockBuffer(buf, BUFFER_LOCK_UNLOCK);
155155
else
156156
_hash_relbuf(rel, buf);
157157

@@ -187,7 +187,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
187187
j;
188188

189189
/* Get exclusive lock on the meta page */
190-
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
190+
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
191191

192192
_hash_checkpage(rel, metabuf, LH_META_PAGE);
193193
metap = HashPageGetMeta(BufferGetPage(metabuf));
@@ -225,7 +225,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
225225
last_inpage = BMPGSZ_BIT(metap) - 1;
226226

227227
/* Release exclusive lock on metapage while reading bitmap page */
228-
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
228+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
229229

230230
mapbuf = _hash_getbuf(rel, mapblkno, HASH_WRITE, LH_BITMAP_PAGE);
231231
mappage = BufferGetPage(mapbuf);
@@ -244,7 +244,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
244244
bit = 0;
245245

246246
/* Reacquire exclusive lock on the meta page */
247-
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
247+
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
248248
}
249249

250250
/*
@@ -295,7 +295,8 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
295295
metap->hashm_firstfree = bit + 1;
296296

297297
/* Write updated metapage and release lock, but not pin */
298-
_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
298+
MarkBufferDirty(metabuf);
299+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
299300

300301
return newbuf;
301302

@@ -309,7 +310,7 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
309310
_hash_relbuf(rel, mapbuf);
310311

311312
/* Reacquire exclusive lock on the meta page */
312-
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
313+
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
313314

314315
/* convert bit to absolute bit number */
315316
bit += (i << BMPG_SHIFT(metap));
@@ -326,12 +327,13 @@ _hash_getovflpage(Relation rel, Buffer metabuf)
326327
metap->hashm_firstfree = bit + 1;
327328

328329
/* Write updated metapage and release lock, but not pin */
329-
_hash_chgbufaccess(rel, metabuf, HASH_WRITE, HASH_NOLOCK);
330+
MarkBufferDirty(metabuf);
331+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
330332
}
331333
else
332334
{
333335
/* We didn't change the metapage, so no need to write */
334-
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
336+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
335337
}
336338

337339
/* Fetch, init, and return the recycled page */
@@ -483,7 +485,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
483485
blkno = metap->hashm_mapp[bitmappage];
484486

485487
/* Release metapage lock while we access the bitmap page */
486-
_hash_chgbufaccess(rel, metabuf, HASH_READ, HASH_NOLOCK);
488+
LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
487489

488490
/* Clear the bitmap bit to indicate that this overflow page is free */
489491
mapbuf = _hash_getbuf(rel, blkno, HASH_WRITE, LH_BITMAP_PAGE);
@@ -495,7 +497,7 @@ _hash_freeovflpage(Relation rel, Buffer ovflbuf, Buffer wbuf,
495497
_hash_relbuf(rel, mapbuf);
496498

497499
/* Get write-lock on metapage to update firstfree */
498-
_hash_chgbufaccess(rel, metabuf, HASH_NOLOCK, HASH_WRITE);
500+
LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
499501

500502
/* if this is now the first free page, update hashm_firstfree */
501503
if (ovflbitno < metap->hashm_firstfree)
@@ -633,7 +635,7 @@ _hash_squeezebucket(Relation rel,
633635
*/
634636
if (!BlockNumberIsValid(wopaque->hasho_nextblkno))
635637
{
636-
_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
638+
LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
637639
return;
638640
}
639641

@@ -721,7 +723,7 @@ _hash_squeezebucket(Relation rel,
721723
if (wbuf_dirty)
722724
MarkBufferDirty(wbuf);
723725
if (retain_pin)
724-
_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
726+
LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
725727
else
726728
_hash_relbuf(rel, wbuf);
727729

@@ -784,7 +786,7 @@ _hash_squeezebucket(Relation rel,
784786
{
785787
/* retain the pin on primary bucket page till end of bucket scan */
786788
if (wblkno == bucket_blkno)
787-
_hash_chgbufaccess(rel, wbuf, HASH_READ, HASH_NOLOCK);
789+
LockBuffer(wbuf, BUFFER_LOCK_UNLOCK);
788790
else
789791
_hash_relbuf(rel, wbuf);
790792
return;

0 commit comments

Comments (0)