@@ -6452,6 +6452,7 @@ static int read_one_chunk(struct btrfs_fs_info *fs_info, struct btrfs_key *key,
 	map->stripe_len = btrfs_chunk_stripe_len(leaf, chunk);
 	map->type = btrfs_chunk_type(leaf, chunk);
 	map->sub_stripes = btrfs_chunk_sub_stripes(leaf, chunk);
+	map->verified_stripes = 0;
 	for (i = 0; i < num_stripes; i++) {
 		map->stripes[i].physical =
 			btrfs_stripe_offset_nr(leaf, chunk, i);
@@ -7318,3 +7319,186 @@ int btrfs_bg_type_to_factor(u64 flags)
 		return 2;
 	return 1;
 }
+
+
+static u64 calc_stripe_length(u64 type, u64 chunk_len, int num_stripes)
+{
+	int index = btrfs_bg_flags_to_raid_index(type);
+	int ncopies = btrfs_raid_array[index].ncopies;
+	int data_stripes;
+
+	switch (type & BTRFS_BLOCK_GROUP_PROFILE_MASK) {
+	case BTRFS_BLOCK_GROUP_RAID5:
+		data_stripes = num_stripes - 1;
+		break;
+	case BTRFS_BLOCK_GROUP_RAID6:
+		data_stripes = num_stripes - 2;
+		break;
+	default:
+		data_stripes = num_stripes / ncopies;
+		break;
+	}
+	return div_u64(chunk_len, data_stripes);
+}
+
+static int verify_one_dev_extent(struct btrfs_fs_info *fs_info,
+				 u64 chunk_offset, u64 devid,
+				 u64 physical_offset, u64 physical_len)
+{
+	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+	struct extent_map *em;
+	struct map_lookup *map;
+	u64 stripe_len;
+	bool found = false;
+	int ret = 0;
+	int i;
+
+	read_lock(&em_tree->lock);
+	em = lookup_extent_mapping(em_tree, chunk_offset, 1);
+	read_unlock(&em_tree->lock);
+
+	if (!em) {
+		btrfs_err(fs_info,
+"dev extent physical offset %llu on devid %llu doesn't have corresponding chunk",
+			  physical_offset, devid);
+		ret = -EUCLEAN;
+		goto out;
+	}
+
+	map = em->map_lookup;
+	stripe_len = calc_stripe_length(map->type, em->len, map->num_stripes);
+	if (physical_len != stripe_len) {
+		btrfs_err(fs_info,
+"dev extent physical offset %llu on devid %llu length doesn't match chunk %llu, have %llu expect %llu",
+			  physical_offset, devid, em->start, physical_len,
+			  stripe_len);
+		ret = -EUCLEAN;
+		goto out;
+	}
+
+	for (i = 0; i < map->num_stripes; i++) {
+		if (map->stripes[i].dev->devid == devid &&
+		    map->stripes[i].physical == physical_offset) {
+			found = true;
+			if (map->verified_stripes >= map->num_stripes) {
+				btrfs_err(fs_info,
+				"too many dev extents for chunk %llu found",
+					  em->start);
+				ret = -EUCLEAN;
+				goto out;
+			}
+			map->verified_stripes++;
+			break;
+		}
+	}
+	if (!found) {
+		btrfs_err(fs_info,
+	"dev extent physical offset %llu devid %llu has no corresponding chunk",
+			  physical_offset, devid);
+		ret = -EUCLEAN;
+	}
+out:
+	free_extent_map(em);
+	return ret;
+}
+
+static int verify_chunk_dev_extent_mapping(struct btrfs_fs_info *fs_info)
+{
+	struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
+	struct extent_map *em;
+	struct rb_node *node;
+	int ret = 0;
+
+	read_lock(&em_tree->lock);
+	for (node = rb_first(&em_tree->map); node; node = rb_next(node)) {
+		em = rb_entry(node, struct extent_map, rb_node);
+		if (em->map_lookup->num_stripes !=
+		    em->map_lookup->verified_stripes) {
+			btrfs_err(fs_info,
+			"chunk %llu has missing dev extent, have %d expect %d",
+				  em->start, em->map_lookup->verified_stripes,
+				  em->map_lookup->num_stripes);
+			ret = -EUCLEAN;
+			goto out;
+		}
+	}
+out:
+	read_unlock(&em_tree->lock);
+	return ret;
+}
+
+/*
+ * Ensure that all dev extents are mapped to correct chunk, otherwise
+ * later chunk allocation/free would cause unexpected behavior.
+ *
+ * NOTE: This will iterate through the whole device tree, which should be of
+ * the same size level as the chunk tree.  This slightly increases mount time.
+ */
+int btrfs_verify_dev_extents(struct btrfs_fs_info *fs_info)
+{
+	struct btrfs_path *path;
+	struct btrfs_root *root = fs_info->dev_root;
+	struct btrfs_key key;
+	int ret = 0;
+
+	key.objectid = 1;
+	key.type = BTRFS_DEV_EXTENT_KEY;
+	key.offset = 0;
+
+	path = btrfs_alloc_path();
+	if (!path)
+		return -ENOMEM;
+
+	path->reada = READA_FORWARD;
+	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
+	if (ret < 0)
+		goto out;
+
+	if (path->slots[0] >= btrfs_header_nritems(path->nodes[0])) {
+		ret = btrfs_next_item(root, path);
+		if (ret < 0)
+			goto out;
+		/* No dev extents at all? Not good */
+		if (ret > 0) {
+			ret = -EUCLEAN;
+			goto out;
+		}
+	}
+	while (1) {
+		struct extent_buffer *leaf = path->nodes[0];
+		struct btrfs_dev_extent *dext;
+		int slot = path->slots[0];
+		u64 chunk_offset;
+		u64 physical_offset;
+		u64 physical_len;
+		u64 devid;
+
+		btrfs_item_key_to_cpu(leaf, &key, slot);
+		if (key.type != BTRFS_DEV_EXTENT_KEY)
+			break;
+		devid = key.objectid;
+		physical_offset = key.offset;
+
+		dext = btrfs_item_ptr(leaf, slot, struct btrfs_dev_extent);
+		chunk_offset = btrfs_dev_extent_chunk_offset(leaf, dext);
+		physical_len = btrfs_dev_extent_length(leaf, dext);
+
+		ret = verify_one_dev_extent(fs_info, chunk_offset, devid,
+					    physical_offset, physical_len);
+		if (ret < 0)
+			goto out;
+		ret = btrfs_next_item(root, path);
+		if (ret < 0)
+			goto out;
+		if (ret > 0) {
+			ret = 0;
+			break;
+		}
+	}
+
+	/* Ensure all chunks have corresponding dev extents */
+	ret = verify_chunk_dev_extent_mapping(fs_info);
+out:
+	btrfs_free_path(path);
+	return ret;
+}
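As a reading aid (not part of the patch): a minimal userspace sketch of the arithmetic calc_stripe_length() performs, assuming the usual profile parameters (2 copies for RAID1/RAID10/DUP, 1 otherwise; 1 or 2 parity stripes for RAID5/RAID6). It shows why every dev extent belonging to a chunk must be exactly chunk_len / data_stripes bytes long, which is the length check applied in verify_one_dev_extent().

/* Illustration only: mirrors calc_stripe_length() from the patch above. */
#include <stdint.h>
#include <stdio.h>

static uint64_t stripe_length(uint64_t chunk_len, int num_stripes,
			      int ncopies, int nparity)
{
	int data_stripes;

	if (nparity)			/* RAID5 -> 1, RAID6 -> 2 */
		data_stripes = num_stripes - nparity;
	else				/* SINGLE/DUP/RAID0/RAID1/RAID10 */
		data_stripes = num_stripes / ncopies;
	/* Parity and extra copies add no logical length, so each dev
	 * extent carries chunk_len spread over the data stripes only. */
	return chunk_len / data_stripes;
}

int main(void)
{
	/* 4GiB RAID6 chunk over 6 devices: 4 data stripes -> 1GiB per device */
	printf("%llu\n", (unsigned long long)stripe_length(4ULL << 30, 6, 1, 2));
	/* 2GiB RAID10 chunk over 4 devices, 2 copies -> 1GiB per device */
	printf("%llu\n", (unsigned long long)stripe_length(2ULL << 30, 4, 2, 0));
	return 0;
}

Any DEV_EXTENT item whose length differs from this value is rejected with -EUCLEAN, and the verified_stripes counter incremented per matching dev extent is what lets verify_chunk_dev_extent_mapping() later flag chunks that are missing dev extents.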