@@ -5433,6 +5433,20 @@ static int update_block_group(struct btrfs_root *root,
 			spin_unlock(&cache->space_info->lock);
 		} else {
 			old_val -= num_bytes;
+
+			/*
+			 * No longer have used bytes in this block group, queue
+			 * it for deletion.
+			 */
+			if (old_val == 0) {
+				spin_lock(&info->unused_bgs_lock);
+				if (list_empty(&cache->bg_list)) {
+					btrfs_get_block_group(cache);
+					list_add_tail(&cache->bg_list,
+						      &info->unused_bgs);
+				}
+				spin_unlock(&info->unused_bgs_lock);
+			}
 			btrfs_set_block_group_used(&cache->item, old_val);
 			cache->pinned += num_bytes;
 			cache->space_info->bytes_pinned += num_bytes;
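Side note, not part of the patch itself: the hunk above relies on two invariants. A block group is queued on unused_bgs at most once (the list_empty() check on its bg_list node), and the list takes its own reference so the group cannot be freed while it waits to be processed. A minimal userspace sketch of that queue-once-with-a-reference pattern, with invented names, might look like this:

/*
 * Illustrative userspace analogue only, not btrfs code; all names here
 * (struct group, unused, unused_lock, queue_unused) are invented.
 */
#include <pthread.h>
#include <stdatomic.h>

struct group {
	atomic_int refs;		/* plays the role of the block group refcount */
	struct group *next, *prev;	/* list node; points at itself when unqueued  */
};

/* list head, like fs_info->unused_bgs; a LIST_HEAD()-style self reference */
static struct group unused = { .next = &unused, .prev = &unused };
static pthread_mutex_t unused_lock = PTHREAD_MUTEX_INITIALIZER;

static void queue_unused(struct group *g)
{
	pthread_mutex_lock(&unused_lock);
	if (g->next == g) {			/* like list_empty(&cache->bg_list) */
		atomic_fetch_add(&g->refs, 1);	/* like btrfs_get_block_group()     */
		g->prev = unused.prev;		/* tail insertion,                  */
		g->next = &unused;		/* like list_add_tail()             */
		unused.prev->next = g;
		unused.prev = g;
	}
	pthread_mutex_unlock(&unused_lock);
}

The consumers added later in this patch, btrfs_free_block_groups() and btrfs_delete_unused_bgs(), drop that reference with btrfs_put_block_group() after taking the group off the list.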
@@ -8855,6 +8869,16 @@ int btrfs_free_block_groups(struct btrfs_fs_info *info)
 	}
 	up_write(&info->commit_root_sem);
 
+	spin_lock(&info->unused_bgs_lock);
+	while (!list_empty(&info->unused_bgs)) {
+		block_group = list_first_entry(&info->unused_bgs,
+					       struct btrfs_block_group_cache,
+					       bg_list);
+		list_del_init(&block_group->bg_list);
+		btrfs_put_block_group(block_group);
+	}
+	spin_unlock(&info->unused_bgs_lock);
+
 	spin_lock(&info->block_group_cache_lock);
 	while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
 		block_group = rb_entry(n, struct btrfs_block_group_cache,
@@ -8989,7 +9013,7 @@ btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
 	init_rwsem(&cache->data_rwsem);
 	INIT_LIST_HEAD(&cache->list);
 	INIT_LIST_HEAD(&cache->cluster_list);
-	INIT_LIST_HEAD(&cache->new_bg_list);
+	INIT_LIST_HEAD(&cache->bg_list);
 	btrfs_init_free_space_ctl(cache);
 
 	return cache;
@@ -9130,8 +9154,18 @@ int btrfs_read_block_groups(struct btrfs_root *root)
 		__link_block_group(space_info, cache);
 
 		set_avail_alloc_bits(root->fs_info, cache->flags);
-		if (btrfs_chunk_readonly(root, cache->key.objectid))
+		if (btrfs_chunk_readonly(root, cache->key.objectid)) {
 			set_block_group_ro(cache, 1);
+		} else if (btrfs_block_group_used(&cache->item) == 0) {
+			spin_lock(&info->unused_bgs_lock);
+			/* Should always be true but just in case. */
+			if (list_empty(&cache->bg_list)) {
+				btrfs_get_block_group(cache);
+				list_add_tail(&cache->bg_list,
+					      &info->unused_bgs);
+			}
+			spin_unlock(&info->unused_bgs_lock);
+		}
 	}
 
 	list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
@@ -9172,10 +9206,8 @@ void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
 	struct btrfs_key key;
 	int ret = 0;
 
-	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs,
-				 new_bg_list) {
-		list_del_init(&block_group->new_bg_list);
-
+	list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
+		list_del_init(&block_group->bg_list);
 		if (ret)
 			continue;
 
@@ -9261,7 +9293,7 @@ int btrfs_make_block_group(struct btrfs_trans_handle *trans,
 
 	__link_block_group(cache->space_info, cache);
 
-	list_add_tail(&cache->new_bg_list, &trans->new_bgs);
+	list_add_tail(&cache->bg_list, &trans->new_bgs);
 
 	set_avail_alloc_bits(extent_root->fs_info, type);
 
@@ -9430,6 +9462,101 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
 	return ret;
 }
 
+/*
+ * Process the unused_bgs list and remove any that don't have any allocated
+ * space inside of them.
+ */
+void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_block_group_cache *block_group;
+	struct btrfs_space_info *space_info;
+	struct btrfs_root *root = fs_info->extent_root;
+	struct btrfs_trans_handle *trans;
+	int ret = 0;
+
+	if (!fs_info->open)
+		return;
+
+	spin_lock(&fs_info->unused_bgs_lock);
+	while (!list_empty(&fs_info->unused_bgs)) {
+		u64 start, end;
+
+		block_group = list_first_entry(&fs_info->unused_bgs,
+					       struct btrfs_block_group_cache,
+					       bg_list);
+		space_info = block_group->space_info;
+		list_del_init(&block_group->bg_list);
+		if (ret || btrfs_mixed_space_info(space_info)) {
+			btrfs_put_block_group(block_group);
+			continue;
+		}
+		spin_unlock(&fs_info->unused_bgs_lock);
+
+		/* Don't want to race with allocators so take the groups_sem */
+		down_write(&space_info->groups_sem);
+		spin_lock(&block_group->lock);
+		if (block_group->reserved ||
+		    btrfs_block_group_used(&block_group->item) ||
+		    block_group->ro) {
+			/*
+			 * We want to bail if we made new allocations or have
+			 * outstanding allocations in this block group. We do
+			 * the ro check in case balance is currently acting on
+			 * this block group.
+			 */
+			spin_unlock(&block_group->lock);
+			up_write(&space_info->groups_sem);
+			goto next;
+		}
+		spin_unlock(&block_group->lock);
+
+		/* We don't want to force the issue, only flip if it's ok. */
+		ret = set_block_group_ro(block_group, 0);
+		up_write(&space_info->groups_sem);
+		if (ret < 0) {
+			ret = 0;
+			goto next;
+		}
+
+		/*
+		 * Want to do this before we do anything else so we can recover
+		 * properly if we fail to join the transaction.
+		 */
+		trans = btrfs_join_transaction(root);
+		if (IS_ERR(trans)) {
+			btrfs_set_block_group_rw(root, block_group);
+			ret = PTR_ERR(trans);
+			goto next;
+		}
+
+		/*
+		 * We could have pending pinned extents for this block group,
+		 * just delete them, we don't care about them anymore.
+		 */
+		start = block_group->key.objectid;
+		end = start + block_group->key.offset - 1;
+		clear_extent_bits(&fs_info->freed_extents[0], start, end,
+				  EXTENT_DIRTY, GFP_NOFS);
+		clear_extent_bits(&fs_info->freed_extents[1], start, end,
+				  EXTENT_DIRTY, GFP_NOFS);
+
+		/* Reset pinned so btrfs_put_block_group doesn't complain */
+		block_group->pinned = 0;
+
+		/*
+		 * Btrfs_remove_chunk will abort the transaction if things go
+		 * horribly wrong.
+		 */
+		ret = btrfs_remove_chunk(trans, root,
+					 block_group->key.objectid);
+		btrfs_end_transaction(trans, root);
+next:
+		btrfs_put_block_group(block_group);
+		spin_lock(&fs_info->unused_bgs_lock);
+	}
+	spin_unlock(&fs_info->unused_bgs_lock);
+}
+
 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
 {
 	struct btrfs_space_info *space_info;
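For context, also not part of the diff: btrfs_delete_unused_bgs() above only holds unused_bgs_lock while it manipulates the list. The expensive work (flipping the group read-only, joining the transaction, removing the chunk) happens with the lock dropped, and the lock is retaken before the next list_empty() check. A compilable userspace sketch of just that locking shape, with invented names, is:

/* Illustrative userspace analogue only, not btrfs code; names are invented. */
#include <pthread.h>
#include <stddef.h>

struct item { struct item *next; };

static struct item *queue;			/* singly linked, LIFO for brevity */
static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;

static void process(struct item *it)
{
	(void)it;	/* expensive per-item work, done without the lock held */
}

static void drain_queue(void)
{
	pthread_mutex_lock(&queue_lock);
	while (queue != NULL) {
		struct item *it = queue;		/* like list_first_entry()      */
		queue = it->next;			/* like list_del_init()         */
		pthread_mutex_unlock(&queue_lock);	/* drop lock for the slow part  */

		process(it);				/* like removing the chunk      */

		pthread_mutex_lock(&queue_lock);	/* retake before the next check */
	}
	pthread_mutex_unlock(&queue_lock);
}

How and when btrfs_delete_unused_bgs() itself gets called (for example from a periodic cleanup context) is not shown in this section of the diff.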