@@ -221,20 +221,22 @@ xfs_alloc_get_rec(
  * Compute aligned version of the found extent.
  * Takes alignment and min length into account.
  */
-STATIC void
+STATIC bool
 xfs_alloc_compute_aligned(
 	xfs_alloc_arg_t	*args,		/* allocation argument structure */
 	xfs_agblock_t	foundbno,	/* starting block in found extent */
 	xfs_extlen_t	foundlen,	/* length in found extent */
 	xfs_agblock_t	*resbno,	/* result block number */
-	xfs_extlen_t	*reslen)	/* result length */
+	xfs_extlen_t	*reslen,	/* result length */
+	unsigned	*busy_gen)
 {
-	xfs_agblock_t	bno;
-	xfs_extlen_t	len;
+	xfs_agblock_t	bno = foundbno;
+	xfs_extlen_t	len = foundlen;
 	xfs_extlen_t	diff;
+	bool		busy;
 
 	/* Trim busy sections out of found extent */
-	xfs_extent_busy_trim(args, foundbno, foundlen, &bno, &len);
+	busy = xfs_extent_busy_trim(args, &bno, &len, busy_gen);
 
 	/*
 	 * If we have a largish extent that happens to start before min_agbno,
@@ -259,6 +261,8 @@ xfs_alloc_compute_aligned(
 		*resbno = bno;
 		*reslen = len;
 	}
+
+	return busy;
 }
 
 /*
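The two hunks above change the contract of xfs_alloc_compute_aligned(): busy trimming now works on in/out bno/len values, the function returns whether anything was trimmed because of busy extents, and it reports the busy-list generation it observed through the new busy_gen out-parameter. The callers patched below OR the boolean together across candidates and keep busy_gen so a failed search can later wait for exactly those busy extents. The standalone sketch that follows is not part of the patch; it is a user-space toy with hypothetical names (trim_busy, compute_aligned) that only mirrors the trim-then-align shape and the new bool-plus-generation calling convention.

/* Toy model of the trim-then-align contract; all names are illustrative. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-in for xfs_extent_busy_trim(): blocks 100-109 are "busy". */
static bool trim_busy(uint32_t *bno, uint32_t *len, unsigned *busy_gen)
{
	const uint32_t busy_start = 100, busy_end = 110;
	uint32_t end = *bno + *len;
	bool busy = false;

	if (*bno < busy_end && end > busy_start) {
		/* keep only the part after the busy range (simplified) */
		*bno = busy_end;
		*len = end > busy_end ? end - busy_end : 0;
		busy = true;
	}
	*busy_gen = 42;		/* would be the per-AG busy-list generation */
	return busy;
}

/* Mirrors the new calling convention: bool return plus busy_gen out-parameter. */
static bool compute_aligned(uint32_t foundbno, uint32_t foundlen,
			    uint32_t alignment, uint32_t minlen,
			    uint32_t *resbno, uint32_t *reslen,
			    unsigned *busy_gen)
{
	uint32_t bno = foundbno;
	uint32_t len = foundlen;
	bool busy = trim_busy(&bno, &len, busy_gen);
	uint32_t aligned = (bno + alignment - 1) / alignment * alignment;
	uint32_t diff = aligned - bno;

	if (len <= diff || len - diff < minlen) {
		*resbno = 0;
		*reslen = 0;
	} else {
		*resbno = aligned;
		*reslen = len - diff;
	}
	return busy;
}

int main(void)
{
	uint32_t rbno, rlen;
	unsigned busy_gen;
	bool busy = compute_aligned(96, 32, 8, 4, &rbno, &rlen, &busy_gen);

	printf("busy=%d gen=%u result=[%u,%u)\n",
	       busy, busy_gen, rbno, rbno + rlen);
	return 0;
}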
@@ -737,10 +741,11 @@ xfs_alloc_ag_vextent_exact(
 	int		error;
 	xfs_agblock_t	fbno;	/* start block of found extent */
 	xfs_extlen_t	flen;	/* length of found extent */
-	xfs_agblock_t	tbno;	/* start block of trimmed extent */
-	xfs_extlen_t	tlen;	/* length of trimmed extent */
-	xfs_agblock_t	tend;	/* end block of trimmed extent */
+	xfs_agblock_t	tbno;	/* start block of busy extent */
+	xfs_extlen_t	tlen;	/* length of busy extent */
+	xfs_agblock_t	tend;	/* end block of busy extent */
 	int		i;	/* success/failure of operation */
+	unsigned	busy_gen;
 
 	ASSERT(args->alignment == 1);
 
@@ -773,7 +778,9 @@ xfs_alloc_ag_vextent_exact(
 	/*
 	 * Check for overlapping busy extents.
 	 */
-	xfs_extent_busy_trim(args, fbno, flen, &tbno, &tlen);
+	tbno = fbno;
+	tlen = flen;
+	xfs_extent_busy_trim(args, &tbno, &tlen, &busy_gen);
 
 	/*
 	 * Give up if the start of the extent is busy, or the freespace isn't
@@ -853,6 +860,7 @@ xfs_alloc_find_best_extent(
 	xfs_agblock_t	sdiff;
 	int		error;
 	int		i;
+	unsigned	busy_gen;
 
 	/* The good extent is perfect, no need to search. */
 	if (!gdiff)
@@ -866,7 +874,8 @@ xfs_alloc_find_best_extent(
 		if (error)
 			goto error0;
 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
-		xfs_alloc_compute_aligned(args, *sbno, *slen, sbnoa, slena);
+		xfs_alloc_compute_aligned(args, *sbno, *slen,
+				sbnoa, slena, &busy_gen);
 
 		/*
 		 * The good extent is closer than this one.
@@ -955,7 +964,8 @@ xfs_alloc_ag_vextent_near(
 	xfs_extlen_t	ltlena;		/* aligned ... */
 	xfs_agblock_t	ltnew;		/* useful start bno of left side */
 	xfs_extlen_t	rlen;		/* length of returned extent */
-	int		forced = 0;
+	bool		busy;
+	unsigned	busy_gen;
 #ifdef DEBUG
 	/*
 	 * Randomly don't execute the first algorithm.
@@ -982,6 +992,7 @@ xfs_alloc_ag_vextent_near(
 	ltlen = 0;
 	gtlena = 0;
 	ltlena = 0;
+	busy = false;
 
 	/*
 	 * Get a cursor for the by-size btree.
@@ -1064,8 +1075,8 @@ xfs_alloc_ag_vextent_near(
 			if ((error = xfs_alloc_get_rec(cnt_cur, &ltbno, &ltlen, &i)))
 				goto error0;
 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
-			xfs_alloc_compute_aligned(args, ltbno, ltlen,
-						  &ltbnoa, &ltlena);
+			busy = xfs_alloc_compute_aligned(args, ltbno, ltlen,
+					&ltbnoa, &ltlena, &busy_gen);
 			if (ltlena < args->minlen)
 				continue;
 			if (ltbnoa < args->min_agbno || ltbnoa > args->max_agbno)
@@ -1183,8 +1194,8 @@ xfs_alloc_ag_vextent_near(
 			if ((error = xfs_alloc_get_rec(bno_cur_lt, &ltbno, &ltlen, &i)))
 				goto error0;
 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
-			xfs_alloc_compute_aligned(args, ltbno, ltlen,
-						  &ltbnoa, &ltlena);
+			busy |= xfs_alloc_compute_aligned(args, ltbno, ltlen,
+					&ltbnoa, &ltlena, &busy_gen);
 			if (ltlena >= args->minlen && ltbnoa >= args->min_agbno)
 				break;
 			if ((error = xfs_btree_decrement(bno_cur_lt, 0, &i)))
@@ -1199,8 +1210,8 @@ xfs_alloc_ag_vextent_near(
 			if ((error = xfs_alloc_get_rec(bno_cur_gt, &gtbno, &gtlen, &i)))
 				goto error0;
 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
-			xfs_alloc_compute_aligned(args, gtbno, gtlen,
-						  &gtbnoa, &gtlena);
+			busy |= xfs_alloc_compute_aligned(args, gtbno, gtlen,
+					&gtbnoa, &gtlena, &busy_gen);
 			if (gtlena >= args->minlen && gtbnoa <= args->max_agbno)
 				break;
 			if ((error = xfs_btree_increment(bno_cur_gt, 0, &i)))
@@ -1261,9 +1272,9 @@ xfs_alloc_ag_vextent_near(
 	if (bno_cur_lt == NULL && bno_cur_gt == NULL) {
 		xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 
-		if (!forced++) {
+		if (busy) {
 			trace_xfs_alloc_near_busy(args);
-			xfs_log_force(args->mp, XFS_LOG_SYNC);
+			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
 			goto restart;
 		}
 		trace_xfs_alloc_size_neither(args);
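The hunk above replaces the old forced counter, which permitted at most one synchronous log force before giving up, with the busy flag and generation accumulated while scanning candidates: if both cursors are exhausted and at least one candidate was trimmed for busy blocks, the allocator flushes busy extents up to busy_gen and restarts the search. The runnable toy below, which is not the kernel code and uses purely illustrative helpers (trim_candidate, flush_busy_extents), models only that flush-and-restart shape.

/* Toy model of the flush-and-restart retry policy; all names are illustrative. */
#include <stdbool.h>
#include <stdio.h>

static unsigned current_gen = 1;	/* stand-in for a per-AG busy generation */
static bool blocks_busy = true;		/* pretend the only free extent starts out busy */

/* Stand-in for xfs_extent_busy_flush(): retire busy state seen at busy_gen. */
static void flush_busy_extents(unsigned busy_gen)
{
	if (current_gen == busy_gen) {
		current_gen++;		/* log forced, busy extents retired */
		blocks_busy = false;
	}
}

/* Returns true if the candidate survives trimming; reports busy state and gen. */
static bool trim_candidate(unsigned *len, bool *busy, unsigned *busy_gen)
{
	*busy_gen = current_gen;
	if (blocks_busy) {
		*busy = true;		/* candidate trimmed away entirely */
		*len = 0;
		return false;
	}
	return true;
}

static bool find_extent(unsigned *len)
{
	unsigned busy_gen = 0;
	bool busy;

restart:
	busy = false;
	*len = 16;			/* the single candidate in this toy */
	if (trim_candidate(len, &busy, &busy_gen))
		return true;

	if (busy) {
		/* The search failed only because of busy extents: wait and retry. */
		flush_busy_extents(busy_gen);
		goto restart;
	}
	return false;			/* genuinely out of space */
}

int main(void)
{
	unsigned len;

	printf("allocated=%d len=%u\n", find_extent(&len), len);
	return 0;
}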
@@ -1344,7 +1355,8 @@ xfs_alloc_ag_vextent_size(
 	int		i;		/* temp status variable */
 	xfs_agblock_t	rbno;		/* returned block number */
 	xfs_extlen_t	rlen;		/* length of returned extent */
-	int		forced = 0;
+	bool		busy;
+	unsigned	busy_gen;
 
 restart:
 	/*
@@ -1353,6 +1365,7 @@ xfs_alloc_ag_vextent_size(
 	cnt_cur = xfs_allocbt_init_cursor(args->mp, args->tp, args->agbp,
 		args->agno, XFS_BTNUM_CNT);
 	bno_cur = NULL;
+	busy = false;
 
 	/*
 	 * Look for an entry >= maxlen+alignment-1 blocks.
@@ -1362,14 +1375,13 @@ xfs_alloc_ag_vextent_size(
 		goto error0;
 
 	/*
-	 * If none or we have busy extents that we cannot allocate from, then
-	 * we have to settle for a smaller extent. In the case that there are
-	 * no large extents, this will return the last entry in the tree unless
-	 * the tree is empty. In the case that there are only busy large
-	 * extents, this will return the largest small extent unless there
+	 * If none then we have to settle for a smaller extent. In the case that
+	 * there are no large extents, this will return the last entry in the
+	 * tree unless the tree is empty. In the case that there are only busy
+	 * large extents, this will return the largest small extent unless there
 	 * are no smaller extents available.
 	 */
-	if (!i || forced > 1) {
+	if (!i) {
 		error = xfs_alloc_ag_vextent_small(args, cnt_cur,
 				&fbno, &flen, &i);
 		if (error)
@@ -1380,22 +1392,20 @@ xfs_alloc_ag_vextent_size(
 			return 0;
 		}
 		ASSERT(i == 1);
-		xfs_alloc_compute_aligned(args, fbno, flen, &rbno, &rlen);
+		busy = xfs_alloc_compute_aligned(args, fbno, flen, &rbno,
+				&rlen, &busy_gen);
 	} else {
 		/*
 		 * Search for a non-busy extent that is large enough.
-		 * If we are at low space, don't check, or if we fall of
-		 * the end of the btree, turn off the busy check and
-		 * restart.
 		 */
 		for (;;) {
 			error = xfs_alloc_get_rec(cnt_cur, &fbno, &flen, &i);
 			if (error)
 				goto error0;
 			XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 
-			xfs_alloc_compute_aligned(args, fbno, flen,
-						  &rbno, &rlen);
+			busy = xfs_alloc_compute_aligned(args, fbno, flen,
+					&rbno, &rlen, &busy_gen);
 
 			if (rlen >= args->maxlen)
 				break;
@@ -1407,18 +1417,13 @@ xfs_alloc_ag_vextent_size(
 			/*
 			 * Our only valid extents must have been busy.
 			 * Make it unbusy by forcing the log out and
-			 * retrying. If we've been here before, forcing
-			 * the log isn't making the extents available,
-			 * which means they have probably been freed in
-			 * this transaction. In that case, we have to
-			 * give up on them and we'll attempt a minlen
-			 * allocation the next time around.
+			 * retrying.
 			 */
 			xfs_btree_del_cursor(cnt_cur,
 					     XFS_BTREE_NOERROR);
 			trace_xfs_alloc_size_busy(args);
-			if (!forced++)
-				xfs_log_force(args->mp, XFS_LOG_SYNC);
+			xfs_extent_busy_flush(args->mp,
+					args->pag, busy_gen);
 			goto restart;
 		}
 	}
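xfs_extent_busy_flush(), which replaces the unconditional xfs_log_force(..., XFS_LOG_SYNC) in these paths, is not shown in this diff. The pthreads toy below only illustrates the contract the call sites above appear to rely on, namely blocking until the per-AG busy generation moves past the value recorded while trimming; every name in it is an assumption for illustration, not the XFS implementation.

/* Illustrative "wait for the busy generation to advance" primitive (pthreads toy). */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  gen_advanced = PTHREAD_COND_INITIALIZER;
static unsigned pag_busy_gen = 7;	/* stand-in for a per-AG busy generation */

/* Assumed contract: push the log (omitted here), then block until the gen moves on. */
static void busy_flush(unsigned busy_gen)
{
	pthread_mutex_lock(&lock);
	while (pag_busy_gen == busy_gen)
		pthread_cond_wait(&gen_advanced, &lock);
	pthread_mutex_unlock(&lock);
}

/* Simulates log completion retiring busy extents and bumping the generation. */
static void *log_completion(void *arg)
{
	(void)arg;
	usleep(100 * 1000);
	pthread_mutex_lock(&lock);
	pag_busy_gen++;
	pthread_cond_broadcast(&gen_advanced);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t t;
	unsigned seen_gen = pag_busy_gen;	/* generation recorded while trimming */

	pthread_create(&t, NULL, log_completion, NULL);
	busy_flush(seen_gen);			/* returns once the generation advances */
	printf("busy generation advanced past %u; the caller would now restart\n",
	       seen_gen);
	pthread_join(t, NULL);
	return 0;
}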
@@ -1454,8 +1459,8 @@ xfs_alloc_ag_vextent_size(
 		XFS_WANT_CORRUPTED_GOTO(args->mp, i == 1, error0);
 		if (flen < bestrlen)
 			break;
-		xfs_alloc_compute_aligned(args, fbno, flen,
-					  &rbno, &rlen);
+		busy = xfs_alloc_compute_aligned(args, fbno, flen,
+				&rbno, &rlen, &busy_gen);
 		rlen = XFS_EXTLEN_MIN(args->maxlen, rlen);
 		XFS_WANT_CORRUPTED_GOTO(args->mp, rlen == 0 ||
 			(rlen <= flen && rbno + rlen <= fbno + flen),
@@ -1484,10 +1489,10 @@ xfs_alloc_ag_vextent_size(
 	 */
 	args->len = rlen;
 	if (rlen < args->minlen) {
-		if (!forced++) {
+		if (busy) {
 			xfs_btree_del_cursor(cnt_cur, XFS_BTREE_NOERROR);
 			trace_xfs_alloc_size_busy(args);
-			xfs_log_force(args->mp, XFS_LOG_SYNC);
+			xfs_extent_busy_flush(args->mp, args->pag, busy_gen);
 			goto restart;
 		}
 		goto out_nominleft;