@@ -24,6 +24,7 @@
 #include "free-space-cache.h"
 #include "transaction.h"
 #include "disk-io.h"
+#include "extent_io.h"
 
 #define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
 #define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
@@ -224,6 +225,7 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 	u64 num_entries;
 	u64 num_bitmaps;
 	u64 generation;
+	u64 used = btrfs_block_group_used(&block_group->item);
 	u32 cur_crc = ~(u32)0;
 	pgoff_t index = 0;
 	unsigned long first_page_offset;
@@ -469,6 +471,17 @@ int load_free_space_cache(struct btrfs_fs_info *fs_info,
 		index++;
 	}
 
+	spin_lock(&block_group->tree_lock);
+	if (block_group->free_space != (block_group->key.offset - used -
+					block_group->bytes_super)) {
+		spin_unlock(&block_group->tree_lock);
+		printk(KERN_ERR "block group %llu has wrong amount of free "
+		       "space\n", block_group->key.objectid);
+		ret = 0;
+		goto free_cache;
+	}
+	spin_unlock(&block_group->tree_lock);
+
 	ret = 1;
 out:
 	kfree(checksums);
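
For readers skimming the hunk above: the new check compares the cache's free-space total against what the block-group item implies, namely group size minus allocated bytes minus the superblock reservation, and discards the cache on any mismatch. Below is a minimal standalone sketch of that arithmetic; the struct and field names are hypothetical stand-ins, not the btrfs structures.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the fields the patch reads. */
struct bg_summary {
	uint64_t size;		/* block_group->key.offset */
	uint64_t used;		/* btrfs_block_group_used(&block_group->item) */
	uint64_t bytes_super;	/* bytes reserved for superblock mirrors */
	uint64_t free_space;	/* total tracked by the free-space cache */
};

/* Returns true when the cached total is self-consistent. */
static bool cache_consistent(const struct bg_summary *bg)
{
	uint64_t expected = bg->size - bg->used - bg->bytes_super;

	if (bg->free_space != expected) {
		fprintf(stderr, "free space mismatch: %llu != %llu\n",
			(unsigned long long)bg->free_space,
			(unsigned long long)expected);
		return false;	/* caller drops the cache and rebuilds it */
	}
	return true;
}

int main(void)
{
	struct bg_summary bg = { .size = 1 << 30, .used = 700 << 20,
				 .bytes_super = 1 << 20,
				 .free_space = (1 << 30) - (700 << 20) - (1 << 20) };

	return cache_consistent(&bg) ? 0 : 1;
}
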
@@ -497,8 +510,11 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	struct list_head *pos, *n;
 	struct page *page;
 	struct extent_state *cached_state = NULL;
+	struct btrfs_free_cluster *cluster = NULL;
+	struct extent_io_tree *unpin = NULL;
 	struct list_head bitmap_list;
 	struct btrfs_key key;
+	u64 start, end, len;
 	u64 bytes = 0;
 	u32 *crc, *checksums;
 	pgoff_t index = 0, last_index = 0;
@@ -507,6 +523,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	int entries = 0;
 	int bitmaps = 0;
 	int ret = 0;
+	bool next_page = false;
 
 	root = root->fs_info->tree_root;
@@ -553,6 +570,18 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	 */
 	first_page_offset = (sizeof(u32) * num_checksums) + sizeof(u64);
 
+	/* Get the cluster for this block_group if it exists */
+	if (!list_empty(&block_group->cluster_list))
+		cluster = list_entry(block_group->cluster_list.next,
+				     struct btrfs_free_cluster,
+				     block_group_list);
+
+	/*
+	 * We shouldn't have switched the pinned extents yet so this is the
+	 * right one
+	 */
+	unpin = root->fs_info->pinned_extents;
+
 	/*
 	 * Lock all pages first so we can lock the extent safely.
 	 *
@@ -582,13 +611,21 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
 			 0, &cached_state, GFP_NOFS);
 
+	/*
+	 * When searching for pinned extents, we need to start at our start
+	 * offset.
+	 */
+	start = block_group->key.objectid;
+
 	/* Write out the extent entries */
 	do {
 		struct btrfs_free_space_entry *entry;
 		void *addr;
 		unsigned long offset = 0;
 		unsigned long start_offset = 0;
 
+		next_page = false;
+
 		if (index == 0) {
 			start_offset = first_page_offset;
 			offset = start_offset;
@@ -600,7 +637,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		entry = addr + start_offset;
 
 		memset(addr, 0, PAGE_CACHE_SIZE);
-		while (1) {
+		while (node && !next_page) {
 			struct btrfs_free_space *e;
 
 			e = rb_entry(node, struct btrfs_free_space, offset_index);
@@ -616,12 +653,49 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 				entry->type = BTRFS_FREE_SPACE_EXTENT;
 			}
 			node = rb_next(node);
-			if (!node)
-				break;
+			if (!node && cluster) {
+				node = rb_first(&cluster->root);
+				cluster = NULL;
+			}
 			offset += sizeof(struct btrfs_free_space_entry);
 			if (offset + sizeof(struct btrfs_free_space_entry) >=
 			    PAGE_CACHE_SIZE)
+				next_page = true;
+			entry++;
+		}
+
+		/*
+		 * We want to add any pinned extents to our free space cache
+		 * so we don't leak the space
+		 */
+		while (!next_page && (start < block_group->key.objectid +
+				      block_group->key.offset)) {
+			ret = find_first_extent_bit(unpin, start, &start, &end,
+						    EXTENT_DIRTY);
+			if (ret) {
+				ret = 0;
+				break;
+			}
+
+			/* This pinned extent is out of our range */
+			if (start >= block_group->key.objectid +
+			    block_group->key.offset)
 				break;
+
+			len = block_group->key.objectid +
+				block_group->key.offset - start;
+			len = min(len, end + 1 - start);
+
+			entries++;
+			entry->offset = cpu_to_le64(start);
+			entry->bytes = cpu_to_le64(len);
+			entry->type = BTRFS_FREE_SPACE_EXTENT;
+
+			start = end + 1;
+			offset += sizeof(struct btrfs_free_space_entry);
+			if (offset + sizeof(struct btrfs_free_space_entry) >=
+			    PAGE_CACHE_SIZE)
+				next_page = true;
 			entry++;
 		}
 		*crc = ~(u32)0;
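
The second while loop added above walks the pinned (EXTENT_DIRTY) ranges that still fall inside this block group and records them as free-space entries, clamping each range to whichever comes first: the end of the dirty range or the end of the block group. A rough standalone sketch of that clamping, with hypothetical ranges in place of find_first_extent_bit():

#include <stdint.h>
#include <stdio.h>

/* Hypothetical pinned byte range, inclusive on both ends, standing in
 * for what find_first_extent_bit() reports for EXTENT_DIRTY bits. */
struct range { uint64_t start, end; };

int main(void)
{
	/* Hypothetical block group window [bg_start, bg_start + bg_len). */
	const uint64_t bg_start = 1024ULL * 1024, bg_len = 256 * 1024;
	const struct range pinned[] = {
		{ bg_start + 4096, bg_start + 8191 },
		{ bg_start + 200 * 1024, bg_start + 512 * 1024 },	/* runs past the group */
	};

	for (unsigned i = 0; i < sizeof(pinned) / sizeof(pinned[0]); i++) {
		uint64_t start = pinned[i].start;
		uint64_t end = pinned[i].end;
		uint64_t len;

		/* A range that begins beyond the group is ignored. */
		if (start >= bg_start + bg_len)
			break;

		/* Clamp to the end of the group or the end of the range. */
		len = bg_start + bg_len - start;
		if (end + 1 - start < len)
			len = end + 1 - start;

		printf("entry: offset=%llu bytes=%llu\n",
		       (unsigned long long)start, (unsigned long long)len);
		/* The patch then resumes its search at end + 1. */
	}
	return 0;
}
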
@@ -652,7 +726,7 @@ int btrfs_write_out_cache(struct btrfs_root *root,
 		page_cache_release(page);
 
 		index++;
-	} while (node);
+	} while (node || next_page);
 
 	/* Write out the bitmaps */
 	list_for_each_safe(pos, n, &bitmap_list) {
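
The loop-condition change in the last hunk is the other half of the scheme: entries are packed into PAGE_CACHE_SIZE pages, next_page is raised when the next entry would cross a page boundary, and the outer do/while keeps going while either rbtree nodes remain or a page spilled, so pinned ranges still get written after the tree is exhausted. A rough standalone sketch of that pagination, with made-up sizes rather than the btrfs on-disk format:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SZ		4096	/* stand-in for PAGE_CACHE_SIZE */
#define ENTRY_SZ	16	/* stand-in for sizeof(struct btrfs_free_space_entry) */

int main(void)
{
	int tree_entries = 300;		/* entries coming from the free-space rbtree */
	int pinned_entries = 20;	/* pinned ranges appended afterwards */
	bool next_page = false;
	int page_index = 0;

	do {
		size_t offset = 0;
		int written = 0;

		next_page = false;

		/* Drain rbtree entries into the current page. */
		while (tree_entries && !next_page) {
			tree_entries--;
			written++;
			offset += ENTRY_SZ;
			if (offset + ENTRY_SZ >= PAGE_SZ)
				next_page = true;	/* page is full, start another */
		}

		/* Then append pinned ranges, as the patch does. */
		while (pinned_entries && !next_page) {
			pinned_entries--;
			written++;
			offset += ENTRY_SZ;
			if (offset + ENTRY_SZ >= PAGE_SZ)
				next_page = true;
		}

		printf("page %d: %d entries (%zu bytes)\n",
		       page_index++, written, offset);
	} while (tree_entries || next_page);

	return 0;
}
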