85
85
* relation is visible yet (its xact may have started before the xact that
86
86
* created the rel). The storage manager must be able to cope anyway.
87
87
*
88
- * Note: if there's any pad bytes in the struct, INIT_BUFFERTAG will have
88
+ * Note: if there are any pad bytes in the struct, InitBufferTag will have
89
89
* to be fixed to zero them, since this struct is used as a hash key.
90
90
*/
91
91
typedef struct buftag
@@ -95,42 +95,57 @@ typedef struct buftag
95
95
BlockNumber blockNum ; /* blknum relative to begin of reln */
96
96
} BufferTag ;
97
97
98
- #define CLEAR_BUFFERTAG (a ) \
99
- ( \
100
- (a).rlocator.spcOid = InvalidOid, \
101
- (a).rlocator.dbOid = InvalidOid, \
102
- (a).rlocator.relNumber = InvalidRelFileNumber, \
103
- (a).forkNum = InvalidForkNumber, \
104
- (a).blockNum = InvalidBlockNumber \
105
- )
106
-
107
- #define INIT_BUFFERTAG (a ,xx_rlocator ,xx_forkNum ,xx_blockNum ) \
108
- ( \
109
- (a).rlocator = (xx_rlocator), \
110
- (a).forkNum = (xx_forkNum), \
111
- (a).blockNum = (xx_blockNum) \
112
- )
113
-
114
- #define BUFFERTAGS_EQUAL (a ,b ) \
115
- ( \
116
- RelFileLocatorEquals((a).rlocator, (b).rlocator) && \
117
- (a).blockNum == (b).blockNum && \
118
- (a).forkNum == (b).forkNum \
119
- )
98
+ static inline void
99
+ ClearBufferTag (BufferTag * tag )
100
+ {
101
+ tag -> rlocator .spcOid = InvalidOid ;
102
+ tag -> rlocator .dbOid = InvalidOid ;
103
+ tag -> rlocator .relNumber = InvalidRelFileNumber ;
104
+ tag -> forkNum = InvalidForkNumber ;
105
+ tag -> blockNum = InvalidBlockNumber ;
106
+ }
107
+
108
+ static inline void
109
+ InitBufferTag (BufferTag * tag , const RelFileLocator * rlocator ,
110
+ ForkNumber forkNum , BlockNumber blockNum )
111
+ {
112
+ tag -> rlocator = * rlocator ;
113
+ tag -> forkNum = forkNum ;
114
+ tag -> blockNum = blockNum ;
115
+ }
116
+
117
+ static inline bool
118
+ BufferTagsEqual (const BufferTag * tag1 , const BufferTag * tag2 )
119
+ {
120
+ return RelFileLocatorEquals (tag1 -> rlocator , tag2 -> rlocator ) &&
121
+ (tag1 -> blockNum == tag2 -> blockNum ) &&
122
+ (tag1 -> forkNum == tag2 -> forkNum );
123
+ }
120
124
121
125
/*
122
126
* The shared buffer mapping table is partitioned to reduce contention.
123
127
* To determine which partition lock a given tag requires, compute the tag's
124
128
* hash code with BufTableHashCode(), then apply BufMappingPartitionLock().
125
129
* NB: NUM_BUFFER_PARTITIONS must be a power of 2!
126
130
*/
127
- #define BufTableHashPartition (hashcode ) \
128
- ((hashcode) % NUM_BUFFER_PARTITIONS)
129
- #define BufMappingPartitionLock (hashcode ) \
130
- (&MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + \
131
- BufTableHashPartition(hashcode)].lock)
132
- #define BufMappingPartitionLockByIndex (i ) \
133
- (&MainLWLockArray[BUFFER_MAPPING_LWLOCK_OFFSET + (i)].lock)
131
+ static inline uint32
132
+ BufTableHashPartition (uint32 hashcode )
133
+ {
134
+ return hashcode % NUM_BUFFER_PARTITIONS ;
135
+ }
136
+
137
+ static inline LWLock *
138
+ BufMappingPartitionLock (uint32 hashcode )
139
+ {
140
+ return & MainLWLockArray [BUFFER_MAPPING_LWLOCK_OFFSET +
141
+ BufTableHashPartition (hashcode )].lock ;
142
+ }
143
+
144
+ static inline LWLock *
145
+ BufMappingPartitionLockByIndex (uint32 index )
146
+ {
147
+ return & MainLWLockArray [BUFFER_MAPPING_LWLOCK_OFFSET + index ].lock ;
148
+ }
134
149
135
150
/*
136
151
* BufferDesc -- shared descriptor/state data for a single shared buffer.
@@ -220,37 +235,6 @@ typedef union BufferDescPadded
220
235
char pad [BUFFERDESC_PAD_TO_SIZE ];
221
236
} BufferDescPadded ;
222
237
223
- #define GetBufferDescriptor (id ) (&BufferDescriptors[(id)].bufferdesc)
224
- #define GetLocalBufferDescriptor (id ) (&LocalBufferDescriptors[(id)])
225
-
226
- #define BufferDescriptorGetBuffer (bdesc ) ((bdesc)->buf_id + 1)
227
-
228
- #define BufferDescriptorGetIOCV (bdesc ) \
229
- (&(BufferIOCVArray[(bdesc)->buf_id]).cv)
230
- #define BufferDescriptorGetContentLock (bdesc ) \
231
- ((LWLock*) (&(bdesc)->content_lock))
232
-
233
- extern PGDLLIMPORT ConditionVariableMinimallyPadded * BufferIOCVArray ;
234
-
235
- /*
236
- * The freeNext field is either the index of the next freelist entry,
237
- * or one of these special values:
238
- */
239
- #define FREENEXT_END_OF_LIST (-1)
240
- #define FREENEXT_NOT_IN_LIST (-2)
241
-
242
- /*
243
- * Functions for acquiring/releasing a shared buffer header's spinlock. Do
244
- * not apply these to local buffers!
245
- */
246
- extern uint32 LockBufHdr (BufferDesc * desc );
247
- #define UnlockBufHdr (desc , s ) \
248
- do { \
249
- pg_write_barrier(); \
250
- pg_atomic_write_u32(&(desc)->state, (s) & (~BM_LOCKED)); \
251
- } while (0)
252
-
253
-
254
238
/*
255
239
* The PendingWriteback & WritebackContext structure are used to keep
256
240
* information about pending flush requests to be issued to the OS.
@@ -276,11 +260,63 @@ typedef struct WritebackContext
276
260
277
261
/* in buf_init.c */
278
262
extern PGDLLIMPORT BufferDescPadded * BufferDescriptors ;
263
+ extern PGDLLIMPORT ConditionVariableMinimallyPadded * BufferIOCVArray ;
279
264
extern PGDLLIMPORT WritebackContext BackendWritebackContext ;
280
265
281
266
/* in localbuf.c */
282
267
extern PGDLLIMPORT BufferDesc * LocalBufferDescriptors ;
283
268
269
+
270
+ static inline BufferDesc *
271
+ GetBufferDescriptor (uint32 id )
272
+ {
273
+ return & (BufferDescriptors [id ]).bufferdesc ;
274
+ }
275
+
276
+ static inline BufferDesc *
277
+ GetLocalBufferDescriptor (uint32 id )
278
+ {
279
+ return & LocalBufferDescriptors [id ];
280
+ }
281
+
282
+ static inline Buffer
283
+ BufferDescriptorGetBuffer (const BufferDesc * bdesc )
284
+ {
285
+ return (Buffer ) (bdesc -> buf_id + 1 );
286
+ }
287
+
288
+ static inline ConditionVariable *
289
+ BufferDescriptorGetIOCV (const BufferDesc * bdesc )
290
+ {
291
+ return & (BufferIOCVArray [bdesc -> buf_id ]).cv ;
292
+ }
293
+
294
+ static inline LWLock *
295
+ BufferDescriptorGetContentLock (const BufferDesc * bdesc )
296
+ {
297
+ return (LWLock * ) (& bdesc -> content_lock );
298
+ }
299
+
300
+ /*
301
+ * The freeNext field is either the index of the next freelist entry,
302
+ * or one of these special values:
303
+ */
304
+ #define FREENEXT_END_OF_LIST (-1)
305
+ #define FREENEXT_NOT_IN_LIST (-2)
306
+
307
+ /*
308
+ * Functions for acquiring/releasing a shared buffer header's spinlock. Do
309
+ * not apply these to local buffers!
310
+ */
311
+ extern uint32 LockBufHdr (BufferDesc * desc );
312
+
313
/*
 * UnlockBufHdr
 *		Release a shared buffer header's spinlock, installing `buf_state`
 *		(with BM_LOCKED cleared) as the new state word.
 *
 * Per the note above, this must not be applied to local buffers.
 *
 * The write barrier must precede the state store: it ensures all header
 * changes made while the lock was held become visible to other backends
 * no later than the cleared lock bit.  Do not reorder these two calls.
 */
static inline void
UnlockBufHdr(BufferDesc *desc, uint32 buf_state)
{
	pg_write_barrier();
	pg_atomic_write_u32(&desc->state, buf_state & (~BM_LOCKED));
}
319
+
284
320
/* in bufmgr.c */
285
321
286
322
/*
0 commit comments