|
17 | 17 | */
|
18 | 18 | #include "postgres.h"
|
19 | 19 |
|
| 20 | +#include "access/xlog.h" |
20 | 21 | #include "lib/ilist.h"
|
21 | 22 | #include "storage/bufmgr.h"
|
22 | 23 | #include "storage/ipc.h"
|
@@ -174,8 +175,8 @@ smgropen(RelFileNode rnode, BackendId backend)
|
174 | 175 | /* hash_search already filled in the lookup key */
|
175 | 176 | reln->smgr_owner = NULL;
|
176 | 177 | reln->smgr_targblock = InvalidBlockNumber;
|
177 |
| - reln->smgr_fsm_nblocks = InvalidBlockNumber; |
178 |
| - reln->smgr_vm_nblocks = InvalidBlockNumber; |
| 178 | + for (int i = 0; i <= MAX_FORKNUM; ++i) |
| 179 | + reln->smgr_cached_nblocks[i] = InvalidBlockNumber; |
179 | 180 | reln->smgr_which = 0; /* we only have md.c at present */
|
180 | 181 |
|
181 | 182 | /* implementation-specific initialization */
|
@@ -464,6 +465,16 @@ smgrextend(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
|
464 | 465 | {
|
465 | 466 | smgrsw[reln->smgr_which].smgr_extend(reln, forknum, blocknum,
|
466 | 467 | buffer, skipFsync);
|
| 468 | + |
| 469 | + /* |
| 470 | + * Normally we expect this to increase nblocks by one, but if the cached |
| 471 | + * value isn't as expected, just invalidate it so the next call asks the |
| 472 | + * kernel. |
| 473 | + */ |
| 474 | + if (reln->smgr_cached_nblocks[forknum] == blocknum) |
| 475 | + reln->smgr_cached_nblocks[forknum] = blocknum + 1; |
| 476 | + else |
| 477 | + reln->smgr_cached_nblocks[forknum] = InvalidBlockNumber; |
467 | 478 | }
|
468 | 479 |
|
469 | 480 | /*
|
@@ -537,7 +548,20 @@ smgrwriteback(SMgrRelation reln, ForkNumber forknum, BlockNumber blocknum,
|
537 | 548 | BlockNumber
|
538 | 549 | smgrnblocks(SMgrRelation reln, ForkNumber forknum)
|
539 | 550 | {
|
540 |
| - return smgrsw[reln->smgr_which].smgr_nblocks(reln, forknum); |
| 551 | + BlockNumber result; |
| 552 | + |
| 553 | + /* |
| 554 | + * For now, we only use cached values in recovery due to lack of a shared |
| 555 | + * invalidation mechanism for changes in file size. |
| 556 | + */ |
| 557 | + if (InRecovery && reln->smgr_cached_nblocks[forknum] != InvalidBlockNumber) |
| 558 | + return reln->smgr_cached_nblocks[forknum]; |
| 559 | + |
| 560 | + result = smgrsw[reln->smgr_which].smgr_nblocks(reln, forknum); |
| 561 | + |
| 562 | + reln->smgr_cached_nblocks[forknum] = result; |
| 563 | + |
| 564 | + return result; |
541 | 565 | }
|
542 | 566 |
|
543 | 567 | /*
|
@@ -576,20 +600,19 @@ smgrtruncate(SMgrRelation reln, ForkNumber *forknum, int nforks, BlockNumber *nb
|
576 | 600 | /* Do the truncation */
|
577 | 601 | for (i = 0; i < nforks; i++)
|
578 | 602 | {
|
| 603 | +		/* Make sure the cached size is invalid if we encounter an error. */ |
| 604 | + reln->smgr_cached_nblocks[forknum[i]] = InvalidBlockNumber; |
| 605 | + |
579 | 606 | smgrsw[reln->smgr_which].smgr_truncate(reln, forknum[i], nblocks[i]);
|
580 | 607 |
|
581 | 608 | /*
|
582 |
| - * We might as well update the local smgr_fsm_nblocks and |
583 |
| - * smgr_vm_nblocks settings. The smgr cache inval message that this |
584 |
| - * function sent will cause other backends to invalidate their copies |
585 |
| - * of smgr_fsm_nblocks and smgr_vm_nblocks, and these ones too at the |
586 |
| - * next command boundary. But these ensure they aren't outright wrong |
587 |
| - * until then. |
| 609 | +	 * We might as well update the local smgr_cached_nblocks values. The |
| 610 | +	 * smgr cache inval message that this function sent will cause other |
| 611 | +	 * backends to invalidate their copies of smgr_cached_nblocks, and |
| 612 | +	 * these ones too at the next command boundary.  But these ensure |
| 613 | +	 * they aren't outright wrong until then. |
588 | 614 | */
|
589 |
| - if (forknum[i] == FSM_FORKNUM) |
590 |
| - reln->smgr_fsm_nblocks = nblocks[i]; |
591 |
| - if (forknum[i] == VISIBILITYMAP_FORKNUM) |
592 |
| - reln->smgr_vm_nblocks = nblocks[i]; |
| 615 | + reln->smgr_cached_nblocks[forknum[i]] = nblocks[i]; |
593 | 616 | }
|
594 | 617 | }
|
595 | 618 |
|
|
0 commit comments