@@ -324,18 +324,6 @@ qsize_t *ext4_get_reserved_space(struct inode *inode)
 }
 #endif
 
-/*
- * Calculate the number of metadata blocks need to reserve
- * to allocate a block located at @lblock
- */
-static int ext4_calc_metadata_amount(struct inode *inode, ext4_lblk_t lblock)
-{
-	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
-		return ext4_ext_calc_metadata_amount(inode, lblock);
-
-	return ext4_ind_calc_metadata_amount(inode, lblock);
-}
-
 /*
  * Called with i_data_sem down, which is important since we can call
  * ext4_discard_preallocations() from here.
@@ -357,35 +345,10 @@ void ext4_da_update_reserve_space(struct inode *inode,
 		used = ei->i_reserved_data_blocks;
 	}
 
-	if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
-		ext4_warning(inode->i_sb, "ino %lu, allocated %d "
-			 "with only %d reserved metadata blocks "
-			 "(releasing %d blocks with reserved %d data blocks)",
-			 inode->i_ino, ei->i_allocated_meta_blocks,
-			 ei->i_reserved_meta_blocks, used,
-			 ei->i_reserved_data_blocks);
-		WARN_ON(1);
-		ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
-	}
-
 	/* Update per-inode reservations */
 	ei->i_reserved_data_blocks -= used;
-	ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
-	percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-			   used + ei->i_allocated_meta_blocks);
-	ei->i_allocated_meta_blocks = 0;
+	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
 	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
 
 	/* Update quota subsystem for data blocks */
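
With both removed blocks gone, the reservation-accounting section of ext4_da_update_reserve_space() reads straight through. The following is pieced together purely from the context and added lines of the hunk above (code outside the hunk is elided), as a reading aid rather than a verbatim copy of the post-patch file:

		used = ei->i_reserved_data_blocks;
	}

	/* Update per-inode reservations */
	ei->i_reserved_data_blocks -= used;
	percpu_counter_sub(&sbi->s_dirtyclusters_counter, used);

	spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);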
@@ -1221,49 +1184,6 @@ static int ext4_journalled_write_end(struct file *file,
 	return ret ? ret : copied;
 }
 
-/*
- * Reserve a metadata for a single block located at lblock
- */
-static int ext4_da_reserve_metadata(struct inode *inode, ext4_lblk_t lblock)
-{
-	struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
-	struct ext4_inode_info *ei = EXT4_I(inode);
-	unsigned int md_needed;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
-
-	/*
-	 * recalculate the amount of metadata blocks to reserve
-	 * in order to allocate nrblocks
-	 * worse case is one extent per block
-	 */
-	spin_lock(&ei->i_block_reservation_lock);
-	/*
-	 * ext4_calc_metadata_amount() has side effects, which we have
-	 * to be prepared undo if we fail to claim space.
-	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
-
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
-		spin_unlock(&ei->i_block_reservation_lock);
-		return -ENOSPC;
-	}
-	ei->i_reserved_meta_blocks += md_needed;
-	spin_unlock(&ei->i_block_reservation_lock);
-
-	return 0;	/* success */
-}
-
 /*
  * Reserve a single cluster located at lblock
  */
@@ -1273,8 +1193,6 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	struct ext4_inode_info *ei = EXT4_I(inode);
 	unsigned int md_needed;
 	int ret;
-	ext4_lblk_t save_last_lblock;
-	int save_len;
 
 	/*
 	 * We will charge metadata quota at writeout time; this saves
@@ -1295,25 +1213,15 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
 	 * ext4_calc_metadata_amount() has side effects, which we have
 	 * to be prepared undo if we fail to claim space.
 	 */
-	save_len = ei->i_da_metadata_calc_len;
-	save_last_lblock = ei->i_da_metadata_calc_last_lblock;
-	md_needed = EXT4_NUM_B2C(sbi,
-				 ext4_calc_metadata_amount(inode, lblock));
-	trace_ext4_da_reserve_space(inode, md_needed);
+	md_needed = 0;
+	trace_ext4_da_reserve_space(inode, 0);
 
-	/*
-	 * We do still charge estimated metadata to the sb though;
-	 * we cannot afford to run out of free blocks.
-	 */
-	if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
-		ei->i_da_metadata_calc_len = save_len;
-		ei->i_da_metadata_calc_last_lblock = save_last_lblock;
+	if (ext4_claim_free_clusters(sbi, 1, 0)) {
 		spin_unlock(&ei->i_block_reservation_lock);
 		dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
 		return -ENOSPC;
 	}
 	ei->i_reserved_data_blocks++;
-	ei->i_reserved_meta_blocks += md_needed;
 	spin_unlock(&ei->i_block_reservation_lock);
 
 	return 0;	/* success */
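
For reference, here is roughly how ext4_da_reserve_space() reads after the two hunks above, pieced together from their context and added lines. The quota-reservation step between the hunks is not shown in this diff; it is paraphrased here from the failure path (dquot_release_reservation_block), so treat this as a sketch, not the verbatim post-patch function:

	/*
	 * Reserve a single cluster located at lblock
	 */
	static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
	{
		struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
		struct ext4_inode_info *ei = EXT4_I(inode);
		unsigned int md_needed;	/* kept by this patch, now always 0 */
		int ret;

		/*
		 * Metadata quota is charged at writeout time, so only the
		 * one data cluster is reserved against quota here
		 * (paraphrased; this part lies outside the hunks above).
		 */
		ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
		if (ret)
			return ret;

		spin_lock(&ei->i_block_reservation_lock);
		md_needed = 0;
		trace_ext4_da_reserve_space(inode, 0);

		/* Claim exactly one free cluster for the data block */
		if (ext4_claim_free_clusters(sbi, 1, 0)) {
			spin_unlock(&ei->i_block_reservation_lock);
			dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
			return -ENOSPC;
		}
		ei->i_reserved_data_blocks++;
		spin_unlock(&ei->i_block_reservation_lock);

		return 0;	/* success */
	}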
@@ -1346,20 +1254,6 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
 	}
 	ei->i_reserved_data_blocks -= to_free;
 
-	if (ei->i_reserved_data_blocks == 0) {
-		/*
-		 * We can release all of the reserved metadata blocks
-		 * only when we have written all of the delayed
-		 * allocation blocks.
-		 * Note that in case of bigalloc, i_reserved_meta_blocks,
-		 * i_reserved_data_blocks, etc. refer to number of clusters.
-		 */
-		percpu_counter_sub(&sbi->s_dirtyclusters_counter,
-				   ei->i_reserved_meta_blocks);
-		ei->i_reserved_meta_blocks = 0;
-		ei->i_da_metadata_calc_len = 0;
-	}
-
 	/* update fs dirty data blocks counter */
 	percpu_counter_sub(&sbi->s_dirtyclusters_counter, to_free);
 
@@ -1500,10 +1394,6 @@ static void ext4_print_free_blocks(struct inode *inode)
 	ext4_msg(sb, KERN_CRIT, "Block reservation details");
 	ext4_msg(sb, KERN_CRIT, "i_reserved_data_blocks=%u",
 		 ei->i_reserved_data_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_reserved_meta_blocks=%u",
-		 ei->i_reserved_meta_blocks);
-	ext4_msg(sb, KERN_CRIT, "i_allocated_meta_blocks=%u",
-		 ei->i_allocated_meta_blocks);
 	return;
 }
 
@@ -1620,13 +1510,6 @@ static int ext4_da_map_blocks(struct inode *inode, sector_t iblock,
 			retval = ret;
 			goto out_unlock;
 		}
-	} else {
-		ret = ext4_da_reserve_metadata(inode, iblock);
-		if (ret) {
-			/* not enough space to reserve */
-			retval = ret;
-			goto out_unlock;
-		}
 	}
 
 	ret = ext4_es_insert_extent(inode, map->m_lblk, map->m_len,
@@ -2843,8 +2726,7 @@ int ext4_alloc_da_blocks(struct inode *inode)
 {
 	trace_ext4_alloc_da_blocks(inode);
 
-	if (!EXT4_I(inode)->i_reserved_data_blocks &&
-	    !EXT4_I(inode)->i_reserved_meta_blocks)
+	if (!EXT4_I(inode)->i_reserved_data_blocks)
 		return 0;
 
 	/*
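
Taken together, the hunks leave delayed-allocation accounting with a single per-inode counter (i_reserved_data_blocks) mirrored into the filesystem-wide s_dirtyclusters_counter; the i_reserved_meta_blocks / i_allocated_meta_blocks bookkeeping disappears entirely. A minimal, self-contained userspace model of the invariant that remains; all names here (model_inode, model_reserve, and so on) are illustrative stand-ins, not kernel APIs:

	#include <assert.h>

	struct model_inode { unsigned int reserved_data_blocks; };
	struct model_sb    { long dirtyclusters; };

	/* Analogue of ext4_da_reserve_space(): claim one cluster, or fail. */
	static int model_reserve(struct model_sb *sb, struct model_inode *inode,
				 long free_clusters)
	{
		if (sb->dirtyclusters + 1 > free_clusters)
			return -1;			/* like -ENOSPC */
		inode->reserved_data_blocks++;
		sb->dirtyclusters++;
		return 0;
	}

	/*
	 * Analogue of ext4_da_release_space() / ext4_da_update_reserve_space():
	 * after this patch, both simply shrink the two counters in lockstep.
	 */
	static void model_release(struct model_sb *sb, struct model_inode *inode,
				  unsigned int n)
	{
		assert(n <= inode->reserved_data_blocks);
		inode->reserved_data_blocks -= n;
		sb->dirtyclusters -= n;
	}

	int main(void)
	{
		struct model_sb sb = { 0 };
		struct model_inode inode = { 0 };

		assert(model_reserve(&sb, &inode, 8) == 0);	/* delalloc write */
		model_release(&sb, &inode, 1);			/* block written out */
		assert(inode.reserved_data_blocks == 0);
		assert(sb.dirtyclusters == 0);
		return 0;
	}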