@@ -245,8 +245,8 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
 	if (!RelationUsesLocalBuffers(scan->rs_base.rs_rd) &&
 		scan->rs_nblocks > NBuffers / 4)
 	{
-		allow_strat = scan->rs_base.rs_allow_strat;
-		allow_sync = scan->rs_base.rs_allow_sync;
+		allow_strat = (scan->rs_base.rs_flags & SO_ALLOW_STRAT) != 0;
+		allow_sync = (scan->rs_base.rs_flags & SO_ALLOW_SYNC) != 0;
 	}
 	else
 		allow_strat = allow_sync = false;
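
The SO_* constants used throughout this diff belong to the new ScanOptions bitmask that replaces the individual booleans in TableScanDesc. A minimal sketch of the enum this commit presumably adds to src/include/access/tableam.h (the names match the diff; the exact bit positions and comments are assumptions, not taken from this page):

    /* Sketch only: bit assignments here are assumed. */
    typedef enum ScanOptions
    {
        /* exactly one of the SO_TYPE_* bits is set per scan */
        SO_TYPE_SEQSCAN = 1 << 0,
        SO_TYPE_BITMAPSCAN = 1 << 1,
        SO_TYPE_SAMPLESCAN = 1 << 2,

        /* any combination of the SO_ALLOW_* bits may be set */
        SO_ALLOW_STRAT = 1 << 3,    /* allow use of a buffer access strategy */
        SO_ALLOW_SYNC = 1 << 4,     /* allow synchronized scanning */
        SO_ALLOW_PAGEMODE = 1 << 5, /* allow page-at-a-time visibility checks */

        SO_TEMP_SNAPSHOT = 1 << 6   /* unregister the snapshot at scan end */
    } ScanOptions;

With any such layout, (rs_flags & SO_ALLOW_STRAT) != 0 recovers the old rs_allow_strat boolean, which is exactly what the hunk above does.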
@@ -267,7 +267,10 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
 	if (scan->rs_base.rs_parallel != NULL)
 	{
 		/* For parallel scan, believe whatever ParallelTableScanDesc says. */
-		scan->rs_base.rs_syncscan = scan->rs_base.rs_parallel->phs_syncscan;
+		if (scan->rs_base.rs_parallel->phs_syncscan)
+			scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
+		else
+			scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
 	}
 	else if (keep_startblock)
 	{
@@ -276,16 +279,19 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
 		 * so that rewinding a cursor doesn't generate surprising results.
 		 * Reset the active syncscan setting, though.
 		 */
-		scan->rs_base.rs_syncscan = (allow_sync && synchronize_seqscans);
+		if (allow_sync && synchronize_seqscans)
+			scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
+		else
+			scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
 	}
 	else if (allow_sync && synchronize_seqscans)
 	{
-		scan->rs_base.rs_syncscan = true;
+		scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
 		scan->rs_startblock = ss_get_location(scan->rs_base.rs_rd, scan->rs_nblocks);
 	}
 	else
 	{
-		scan->rs_base.rs_syncscan = false;
+		scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
 		scan->rs_startblock = 0;
 	}
 
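
The if/else pairs above are the standard idiom for forcing a single flag bit to match a boolean. A hypothetical helper (not part of this patch) makes the idiom explicit:

    /* Hypothetical helper, not in the patch: force one rs_flags bit on or off. */
    static inline void
    scan_set_flag(TableScanDesc sscan, uint32 flag, bool value)
    {
        if (value)
            sscan->rs_flags |= flag;
        else
            sscan->rs_flags &= ~flag;
    }

For example, scan_set_flag(&scan->rs_base, SO_ALLOW_SYNC, allow_sync && synchronize_seqscans) would replace the four-line branch above; the patch keeps the explicit form instead.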
@@ -305,11 +311,11 @@ initscan(HeapScanDesc scan, ScanKey key, bool keep_startblock)
 		memcpy(scan->rs_base.rs_key, key, scan->rs_base.rs_nkeys * sizeof(ScanKeyData));
 
 	/*
-	 * Currently, we don't have a stats counter for bitmap heap scans (but the
-	 * underlying bitmap index scans will be counted) or sample scans (we only
-	 * update stats for tuple fetches there)
+	 * Currently, we only have a stats counter for sequential heap scans (but
+	 * e.g. for bitmap scans the underlying bitmap index scans will be counted,
+	 * and for sample scans we update stats for tuple fetches).
 	 */
-	if (!scan->rs_base.rs_bitmapscan && !scan->rs_base.rs_samplescan)
+	if (scan->rs_base.rs_flags & SO_TYPE_SEQSCAN)
 		pgstat_count_heap_scan(scan->rs_base.rs_rd);
 }
 
@@ -325,7 +331,8 @@ heap_setscanlimits(TableScanDesc sscan, BlockNumber startBlk, BlockNumber numBlks)
 	HeapScanDesc scan = (HeapScanDesc) sscan;
 
 	Assert(!scan->rs_inited);	/* else too late to change */
-	Assert(!scan->rs_base.rs_syncscan); /* else rs_startblock is significant */
+	/* else rs_startblock is significant */
+	Assert(!(scan->rs_base.rs_flags & SO_ALLOW_SYNC));
 
 	/* Check startBlk is valid (but allow case of zero blocks...) */
 	Assert(startBlk == 0 || startBlk < scan->rs_nblocks);
@@ -375,7 +382,7 @@ heapgetpage(TableScanDesc sscan, BlockNumber page)
 									   RBM_NORMAL, scan->rs_strategy);
 	scan->rs_cblock = page;
 
-	if (!scan->rs_base.rs_pageatatime)
+	if (!(scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE))
 		return;
 
 	buffer = scan->rs_cbuf;
@@ -574,7 +581,7 @@ heapgettup(HeapScanDesc scan,
 			 * time, and much more likely that we'll just bollix things for
 			 * forward scanners.
 			 */
-			scan->rs_base.rs_syncscan = false;
+			scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
 			/* start from last page of the scan */
 			if (scan->rs_startblock > 0)
 				page = scan->rs_startblock - 1;
@@ -738,7 +745,7 @@ heapgettup(HeapScanDesc scan,
 		 * a little bit backwards on every invocation, which is confusing.
 		 * We don't guarantee any specific ordering in general, though.
 		 */
-		if (scan->rs_base.rs_syncscan)
+		if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
 			ss_report_location(scan->rs_base.rs_rd, page);
 	}
 
@@ -885,7 +892,7 @@ heapgettup_pagemode(HeapScanDesc scan,
 			 * time, and much more likely that we'll just bollix things for
 			 * forward scanners.
 			 */
-			scan->rs_base.rs_syncscan = false;
+			scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
 			/* start from last page of the scan */
 			if (scan->rs_startblock > 0)
 				page = scan->rs_startblock - 1;
@@ -1037,7 +1044,7 @@ heapgettup_pagemode(HeapScanDesc scan,
 		 * a little bit backwards on every invocation, which is confusing.
 		 * We don't guarantee any specific ordering in general, though.
 		 */
-		if (scan->rs_base.rs_syncscan)
+		if (scan->rs_base.rs_flags & SO_ALLOW_SYNC)
 			ss_report_location(scan->rs_base.rs_rd, page);
 	}
 
@@ -1125,12 +1132,7 @@ TableScanDesc
 heap_beginscan(Relation relation, Snapshot snapshot,
 			   int nkeys, ScanKey key,
 			   ParallelTableScanDesc parallel_scan,
-			   bool allow_strat,
-			   bool allow_sync,
-			   bool allow_pagemode,
-			   bool is_bitmapscan,
-			   bool is_samplescan,
-			   bool temp_snap)
+			   uint32 flags)
 {
 	HeapScanDesc scan;
 
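
With six boolean parameters folded into one uint32, callers now build the mask up front and pass it down. A sketch of how a plain sequential scan would start under the new signature (calling heap_beginscan directly for illustration; real callers presumably go through tableam wrappers, and the flag values are the assumed ones from the enum sketch above):

    uint32      flags = SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
                        SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE;
    TableScanDesc sscan = heap_beginscan(relation, snapshot, nkeys, key,
                                         NULL,   /* not a parallel scan */
                                         flags);

A bitmap heap scan would pass SO_TYPE_BITMAPSCAN instead, which is what lets the predicate-lock and stats-counter code key off rs_flags rather than separate is_bitmapscan/is_samplescan arguments.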
@@ -1151,33 +1153,39 @@ heap_beginscan(Relation relation, Snapshot snapshot,
 	scan->rs_base.rs_rd = relation;
 	scan->rs_base.rs_snapshot = snapshot;
 	scan->rs_base.rs_nkeys = nkeys;
-	scan->rs_base.rs_bitmapscan = is_bitmapscan;
-	scan->rs_base.rs_samplescan = is_samplescan;
-	scan->rs_strategy = NULL;	/* set in initscan */
-	scan->rs_base.rs_allow_strat = allow_strat;
-	scan->rs_base.rs_allow_sync = allow_sync;
-	scan->rs_base.rs_temp_snap = temp_snap;
+	scan->rs_base.rs_flags = flags;
 	scan->rs_base.rs_parallel = parallel_scan;
+	scan->rs_strategy = NULL;	/* set in initscan */
 
 	/*
-	 * we can use page-at-a-time mode if it's an MVCC-safe snapshot
+	 * Disable page-at-a-time mode if it's not an MVCC-safe snapshot.
 	 */
-	scan->rs_base.rs_pageatatime =
-		allow_pagemode && snapshot && IsMVCCSnapshot(snapshot);
+	if (!(snapshot && IsMVCCSnapshot(snapshot)))
+		scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
 
 	/*
-	 * For a seqscan in a serializable transaction, acquire a predicate lock
-	 * on the entire relation. This is required not only to lock all the
-	 * matching tuples, but also to conflict with new insertions into the
-	 * table. In an indexscan, we take page locks on the index pages covering
-	 * the range specified in the scan qual, but in a heap scan there is
-	 * nothing more fine-grained to lock. A bitmap scan is a different story,
-	 * there we have already scanned the index and locked the index pages
-	 * covering the predicate. But in that case we still have to lock any
-	 * matching heap tuples.
+	 * For seqscans and sample scans in a serializable transaction, acquire a
+	 * predicate lock on the entire relation. This is required not only to
+	 * lock all the matching tuples, but also to conflict with new insertions
+	 * into the table. In an indexscan, we take page locks on the index pages
+	 * covering the range specified in the scan qual, but in a heap scan there
+	 * is nothing more fine-grained to lock. A bitmap scan is a different
+	 * story, there we have already scanned the index and locked the index
+	 * pages covering the predicate. But in that case we still have to lock
+	 * any matching heap tuples. For sample scans we could optimize the locking
+	 * to be at least page-level granularity, but we'd need to add per-tuple
+	 * locking for that.
 	 */
-	if (!is_bitmapscan)
+	if (scan->rs_base.rs_flags & (SO_TYPE_SEQSCAN | SO_TYPE_SAMPLESCAN))
+	{
+		/*
+		 * Ensure a missing snapshot is noticed reliably, even if the
+		 * isolation mode means predicate locking isn't performed (and
+		 * therefore the snapshot isn't used here).
+		 */
+		Assert(snapshot);
 		PredicateLockRelation(relation, snapshot);
+	}
 
 	/* we only need to set this up once */
 	scan->rs_ctup.t_tableOid = RelationGetRelid(relation);
@@ -1204,10 +1212,21 @@ heap_rescan(TableScanDesc sscan, ScanKey key, bool set_params,
 
 	if (set_params)
 	{
-		scan->rs_base.rs_allow_strat = allow_strat;
-		scan->rs_base.rs_allow_sync = allow_sync;
-		scan->rs_base.rs_pageatatime =
-			allow_pagemode && IsMVCCSnapshot(scan->rs_base.rs_snapshot);
+		if (allow_strat)
+			scan->rs_base.rs_flags |= SO_ALLOW_STRAT;
+		else
+			scan->rs_base.rs_flags &= ~SO_ALLOW_STRAT;
+
+		if (allow_sync)
+			scan->rs_base.rs_flags |= SO_ALLOW_SYNC;
+		else
+			scan->rs_base.rs_flags &= ~SO_ALLOW_SYNC;
+
+		if (allow_pagemode && scan->rs_base.rs_snapshot &&
+			IsMVCCSnapshot(scan->rs_base.rs_snapshot))
+			scan->rs_base.rs_flags |= SO_ALLOW_PAGEMODE;
+		else
+			scan->rs_base.rs_flags &= ~SO_ALLOW_PAGEMODE;
 	}
 
 	/*
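
Note that the rewritten pagemode test also guards against a NULL snapshot, which the old code did not. Using the hypothetical scan_set_flag helper sketched earlier (not part of the patch), the whole set_params block would collapse to three calls:

    /* Equivalent to the set_params block above, via the hypothetical helper. */
    scan_set_flag(&scan->rs_base, SO_ALLOW_STRAT, allow_strat);
    scan_set_flag(&scan->rs_base, SO_ALLOW_SYNC, allow_sync);
    scan_set_flag(&scan->rs_base, SO_ALLOW_PAGEMODE,
                  allow_pagemode && scan->rs_base.rs_snapshot &&
                  IsMVCCSnapshot(scan->rs_base.rs_snapshot));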
@@ -1246,7 +1265,7 @@ heap_endscan(TableScanDesc sscan)
 	if (scan->rs_strategy != NULL)
 		FreeAccessStrategy(scan->rs_strategy);
 
-	if (scan->rs_base.rs_temp_snap)
+	if (scan->rs_base.rs_flags & SO_TEMP_SNAPSHOT)
 		UnregisterSnapshot(scan->rs_base.rs_snapshot);
 
 	pfree(scan);
@@ -1288,7 +1307,7 @@ heap_getnext(TableScanDesc sscan, ScanDirection direction)
 
 	HEAPDEBUG_1;				/* heap_getnext( info ) */
 
-	if (scan->rs_base.rs_pageatatime)
+	if (scan->rs_base.rs_flags & SO_ALLOW_PAGEMODE)
 		heapgettup_pagemode(scan, direction,
 							scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
 	else
@@ -1335,11 +1354,10 @@ heap_getnextslot(TableScanDesc sscan, ScanDirection direction, TupleTableSlot *slot)
 
 	HEAPAMSLOTDEBUG_1;			/* heap_getnextslot( info ) */
 
-	if (scan->rs_base.rs_pageatatime)
-		heapgettup_pagemode(scan, direction,
-							scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
+	if (sscan->rs_flags & SO_ALLOW_PAGEMODE)
+		heapgettup_pagemode(scan, direction, sscan->rs_nkeys, sscan->rs_key);
 	else
-		heapgettup(scan, direction, scan->rs_base.rs_nkeys, scan->rs_base.rs_key);
+		heapgettup(scan, direction, sscan->rs_nkeys, sscan->rs_key);
 
 	if (scan->rs_ctup.t_data == NULL)
 	{
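
From a caller's perspective nothing changes except scan setup. A minimal end-to-end sketch under the assumptions above (the flag values come from the assumed enum, and table_slot_create/ExecDropSingleTupleTableSlot are the slot helpers from the surrounding tableam work):

    TupleTableSlot *slot = table_slot_create(relation, NULL);
    TableScanDesc sscan = heap_beginscan(relation, snapshot, 0, NULL, NULL,
                                         SO_TYPE_SEQSCAN | SO_ALLOW_STRAT |
                                         SO_ALLOW_SYNC | SO_ALLOW_PAGEMODE);

    /* heap_getnextslot consults sscan->rs_flags to pick page-at-a-time mode */
    while (heap_getnextslot(sscan, ForwardScanDirection, slot))
    {
        /* process the tuple in slot */
    }

    heap_endscan(sscan);
    ExecDropSingleTupleTableSlot(slot);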