@@ -156,6 +156,36 @@ _hash_getinitbuf(Relation rel, BlockNumber blkno)
 	return buf;
 }
 
+/*
+ * _hash_initbuf() -- Get and initialize a buffer by bucket number.
+ */
+void
+_hash_initbuf(Buffer buf, uint32 max_bucket, uint32 num_bucket, uint32 flag,
+              bool initpage)
+{
+    HashPageOpaque pageopaque;
+    Page        page;
+
+    page = BufferGetPage(buf);
+
+    /* initialize the page */
+    if (initpage)
+        _hash_pageinit(page, BufferGetPageSize(buf));
+
+    pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
+
+    /*
+     * Set hasho_prevblkno with current hashm_maxbucket. This value will
+     * be used to validate cached HashMetaPageData. See
+     * _hash_getbucketbuf_from_hashkey().
+     */
+    pageopaque->hasho_prevblkno = max_bucket;
+    pageopaque->hasho_nextblkno = InvalidBlockNumber;
+    pageopaque->hasho_bucket = num_bucket;
+    pageopaque->hasho_flag = flag;
+    pageopaque->hasho_page_id = HASHO_PAGE_ID;
+}
+
 /*
  * _hash_getnewbuf() -- Get a new page at the end of the index.
  *
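The hasho_prevblkno stamp written by _hash_initbuf() is what lets scans trust a privately cached copy of the metapage: a reader that computed its bucket number from cached HashMetaPageData compares the bucket page's stamp against its cached hashm_maxbucket, and re-reads the metapage only if the stamp is newer. The helper below is a simplified sketch of that comparison, not the body of _hash_getbucketbuf_from_hashkey(), which is outside this diff; the parameter names are illustrative.

/*
 * Illustration only: validate a cached metapage against the stamp that
 * _hash_initbuf() stores in hasho_prevblkno.  "cachedmetap" is a cached
 * copy of HashMetaPageData; "opaque" is the special space of the primary
 * bucket page chosen using that cache.
 */
static bool
cached_metap_still_valid(HashMetaPage cachedmetap, HashPageOpaque opaque)
{
    /*
     * The stamp records hashm_maxbucket as of the moment the bucket page
     * was initialized or last split.  If it does not exceed the cached
     * hashm_maxbucket, no split has happened that the cache missed, so the
     * hashkey-to-bucket mapping computed from the cache is still good.
     */
    return opaque->hasho_prevblkno <= cachedmetap->hashm_maxbucket;
}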
@@ -288,7 +318,7 @@ _hash_dropscanbuf(Relation rel, HashScanOpaque so)
 
 
 /*
- * _hash_metapinit() -- Initialize the metadata page of a hash index,
+ * _hash_init() -- Initialize the metadata page of a hash index,
  *        the initial buckets, and the initial bitmap page.
  *
  * The initial number of buckets is dependent on num_tuples, an estimate
@@ -300,19 +330,18 @@ _hash_dropscanbuf(Relation rel, HashScanOpaque so)
  * multiple buffer locks is ignored.
  */
 uint32
-_hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
+_hash_init(Relation rel, double num_tuples, ForkNumber forkNum)
 {
-    HashMetaPage metap;
-    HashPageOpaque pageopaque;
     Buffer      metabuf;
     Buffer      buf;
+    Buffer      bitmapbuf;
     Page        pg;
+    HashMetaPage metap;
+    RegProcedure procid;
     int32       data_width;
     int32       item_width;
     int32       ffactor;
-    double      dnumbuckets;
     uint32      num_buckets;
-    uint32      log2_num_buckets;
     uint32      i;
 
     /* safety check */
@@ -334,6 +363,96 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
     if (ffactor < 10)
         ffactor = 10;
 
+    procid = index_getprocid(rel, 1, HASHPROC);
+
+    /*
+     * We initialize the metapage, the first N bucket pages, and the first
+     * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
+     * calls to occur.  This ensures that the smgr level has the right idea
+     * of the physical index length.
+     *
+     * Critical section not required, because on error the creation of the
+     * whole relation will be rolled back.
+     */
+    metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
+    _hash_init_metabuffer(metabuf, num_tuples, procid, ffactor, false);
+    MarkBufferDirty(metabuf);
+
+    pg = BufferGetPage(metabuf);
+    metap = HashPageGetMeta(pg);
+
+    num_buckets = metap->hashm_maxbucket + 1;
+
+    /*
+     * Release buffer lock on the metapage while we initialize buckets.
+     * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
+     * won't accomplish anything.  It's a bad idea to hold buffer locks for
+     * long intervals in any case, since that can block the bgwriter.
+     */
+    LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
+
+    /*
+     * Initialize and WAL Log the first N buckets
+     */
+    for (i = 0; i < num_buckets; i++)
+    {
+        BlockNumber blkno;
+
+        /* Allow interrupts, in case N is huge */
+        CHECK_FOR_INTERRUPTS();
+
+        blkno = BUCKET_TO_BLKNO(metap, i);
+        buf = _hash_getnewbuf(rel, blkno, forkNum);
+        _hash_initbuf(buf, metap->hashm_maxbucket, i, LH_BUCKET_PAGE, false);
+        MarkBufferDirty(buf);
+        _hash_relbuf(rel, buf);
+    }
+
+    /* Now reacquire buffer lock on metapage */
+    LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
+
+    /*
+     * Initialize bitmap page
+     */
+    bitmapbuf = _hash_getnewbuf(rel, num_buckets + 1, forkNum);
+    _hash_initbitmapbuffer(bitmapbuf, metap->hashm_bmsize, false);
+    MarkBufferDirty(bitmapbuf);
+
+    /* add the new bitmap page to the metapage's list of bitmaps */
+    /* metapage already has a write lock */
+    if (metap->hashm_nmaps >= HASH_MAX_BITMAPS)
+        ereport(ERROR,
+                (errcode(ERRCODE_PROGRAM_LIMIT_EXCEEDED),
+                 errmsg("out of overflow pages in hash index \"%s\"",
+                        RelationGetRelationName(rel))));
+
+    metap->hashm_mapp[metap->hashm_nmaps] = num_buckets + 1;
+
+    metap->hashm_nmaps++;
+    MarkBufferDirty(metabuf);
+
+    /* all done */
+    _hash_relbuf(rel, bitmapbuf);
+    _hash_relbuf(rel, metabuf);
+
+    return num_buckets;
+}
+
+/*
+ * _hash_init_metabuffer() -- Initialize the metadata page of a hash index.
+ */
+void
+_hash_init_metabuffer(Buffer buf, double num_tuples, RegProcedure procid,
+                      uint16 ffactor, bool initpage)
+{
+    HashMetaPage metap;
+    HashPageOpaque pageopaque;
+    Page        page;
+    double      dnumbuckets;
+    uint32      num_buckets;
+    uint32      log2_num_buckets;
+    uint32      i;
+
     /*
      * Choose the number of initial bucket pages to match the fill factor
      * given the estimated number of tuples.  We round up the result to the
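For orientation (this is not part of the patch): because the spare-page counters in hashm_spares[] are still all zeroes during the initial build, BUCKET_TO_BLKNO(metap, i) reduces to i + 1, which is why the bitmap page above can simply be placed at block num_buckets + 1. A compact sketch of the resulting fork layout, with the helper name chosen here purely for illustration:

/*
 * Illustration only: the physical layout produced by _hash_init() for an
 * index that starts with N = num_buckets primary buckets.
 *
 *   block 0          metapage           (HASH_METAPAGE)
 *   blocks 1 .. N    buckets 0 .. N-1   (LH_BUCKET_PAGE, via _hash_initbuf)
 *   block N + 1      first bitmap page  (via _hash_initbitmapbuffer)
 */
static BlockNumber
initial_bucket_blkno(uint32 bucket)
{
    /* BUCKET_TO_BLKNO() with all spare counts still zero */
    return (BlockNumber) (bucket + 1);
}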
@@ -353,30 +472,25 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
     Assert(num_buckets == (((uint32) 1) << log2_num_buckets));
     Assert(log2_num_buckets < HASH_MAX_SPLITPOINTS);
 
-    /*
-     * We initialize the metapage, the first N bucket pages, and the first
-     * bitmap page in sequence, using _hash_getnewbuf to cause smgrextend()
-     * calls to occur.  This ensures that the smgr level has the right idea
-     * of the physical index length.
-     */
-    metabuf = _hash_getnewbuf(rel, HASH_METAPAGE, forkNum);
-    pg = BufferGetPage(metabuf);
+    page = BufferGetPage(buf);
+    if (initpage)
+        _hash_pageinit(page, BufferGetPageSize(buf));
 
-    pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
+    pageopaque = (HashPageOpaque) PageGetSpecialPointer(page);
     pageopaque->hasho_prevblkno = InvalidBlockNumber;
     pageopaque->hasho_nextblkno = InvalidBlockNumber;
     pageopaque->hasho_bucket = -1;
     pageopaque->hasho_flag = LH_META_PAGE;
     pageopaque->hasho_page_id = HASHO_PAGE_ID;
 
-    metap = HashPageGetMeta(pg);
+    metap = HashPageGetMeta(page);
 
     metap->hashm_magic = HASH_MAGIC;
     metap->hashm_version = HASH_VERSION;
     metap->hashm_ntuples = 0;
     metap->hashm_nmaps = 0;
     metap->hashm_ffactor = ffactor;
-    metap->hashm_bsize = HashGetMaxBitmapSize(pg);
+    metap->hashm_bsize = HashGetMaxBitmapSize(page);
     /* find largest bitmap array size that will fit in page size */
     for (i = _hash_log2(metap->hashm_bsize); i > 0; --i)
     {
@@ -393,7 +507,7 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
      * pretty useless for normal operation (in fact, hashm_procid is not used
      * anywhere), but it might be handy for forensic purposes so we keep it.
      */
-    metap->hashm_procid = index_getprocid(rel, 1, HASHPROC);
+    metap->hashm_procid = procid;
 
     /*
      * We initialize the index with N buckets, 0 .. N-1, occupying physical
@@ -411,54 +525,9 @@ _hash_metapinit(Relation rel, double num_tuples, ForkNumber forkNum)
     metap->hashm_ovflpoint = log2_num_buckets;
     metap->hashm_firstfree = 0;
 
-    /*
-     * Release buffer lock on the metapage while we initialize buckets.
-     * Otherwise, we'll be in interrupt holdoff and the CHECK_FOR_INTERRUPTS
-     * won't accomplish anything.  It's a bad idea to hold buffer locks for
-     * long intervals in any case, since that can block the bgwriter.
-     */
-    MarkBufferDirty(metabuf);
-    LockBuffer(metabuf, BUFFER_LOCK_UNLOCK);
-
-    /*
-     * Initialize the first N buckets
-     */
-    for (i = 0; i < num_buckets; i++)
-    {
-        /* Allow interrupts, in case N is huge */
-        CHECK_FOR_INTERRUPTS();
-
-        buf = _hash_getnewbuf(rel, BUCKET_TO_BLKNO(metap, i), forkNum);
-        pg = BufferGetPage(buf);
-        pageopaque = (HashPageOpaque) PageGetSpecialPointer(pg);
-
-        /*
-         * Set hasho_prevblkno with current hashm_maxbucket. This value will
-         * be used to validate cached HashMetaPageData. See
-         * _hash_getbucketbuf_from_hashkey().
-         */
-        pageopaque->hasho_prevblkno = metap->hashm_maxbucket;
-        pageopaque->hasho_nextblkno = InvalidBlockNumber;
-        pageopaque->hasho_bucket = i;
-        pageopaque->hasho_flag = LH_BUCKET_PAGE;
-        pageopaque->hasho_page_id = HASHO_PAGE_ID;
-        MarkBufferDirty(buf);
-        _hash_relbuf(rel, buf);
-    }
-
-    /* Now reacquire buffer lock on metapage */
-    LockBuffer(metabuf, BUFFER_LOCK_EXCLUSIVE);
-
-    /*
-     * Initialize first bitmap page
-     */
-    _hash_initbitmap(rel, metap, num_buckets + 1, forkNum);
-
-    /* all done */
-    MarkBufferDirty(metabuf);
-    _hash_relbuf(rel, metabuf);
-
-    return num_buckets;
+    /* Set pd_lower just past the end of the metadata. */
+    ((PageHeader) page)->pd_lower =
+        ((char *) metap + sizeof(HashMetaPageData)) - (char *) page;
 }
 
 /*
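The new pd_lower assignment is worth a note: HashPageGetMeta() points just past the page header, so raising pd_lower to the end of HashMetaPageData keeps the metadata out of the page's [pd_lower, pd_upper) "hole", the region a full-page image is allowed to omit. The following consistency sketch uses only standard page-layout fields; it is an illustration with a made-up function name, not code from the patch.

/*
 * Illustration only: after _hash_init_metabuffer() runs, the metadata must
 * lie entirely below pd_lower, so the hole that full-page-image handling
 * may drop never overlaps HashMetaPageData.
 */
static void
check_meta_below_pd_lower(Page page)
{
    PageHeader  phdr = (PageHeader) page;
    char       *meta_end = (char *) HashPageGetMeta(page) +
                           sizeof(HashMetaPageData);

    Assert(meta_end - (char *) page == phdr->pd_lower);
    Assert(phdr->pd_lower <= phdr->pd_upper);
}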
@@ -535,7 +604,7 @@ _hash_expandtable(Relation rel, Buffer metabuf)
      * than a disk block then this would be an independent constraint.
      *
      * If you change this, see also the maximum initial number of buckets in
-     * _hash_metapinit().
+     * _hash_init().
      */
     if (metap->hashm_maxbucket >= (uint32) 0x7FFFFFFE)
         goto fail;
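For callers the rename is mechanical: _hash_init() keeps _hash_metapinit()'s signature and still returns the chosen bucket count. The builder functions themselves are outside this diff, so treat the fragment below as an assumption about how the renamed entry point is invoked, not as part of the patch.

/*
 * Assumed caller-side usage (hashbuild()/hashbuildempty() are not shown in
 * this diff).  A regular build sizes the bucket count from an estimate of
 * the heap's row count; the init fork of an unlogged index passes zero to
 * get the minimum layout.
 */
num_buckets = _hash_init(index, reltuples, MAIN_FORKNUM);   /* hashbuild() */
_hash_init(index, 0, INIT_FORKNUM);                         /* hashbuildempty() */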