Skip to content

Commit c1a198d

Browse files
committed
Merge branch 'for-linus-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull btrfs updates from Chris Mason: "This has our usual assortment of fixes and cleanups, but the biggest change included is Omar Sandoval's free space tree. It's not the default yet, mounting -o space_cache=v2 enables it and sets a readonly compat bit. The tree can actually be deleted and regenerated if there are any problems, but it has held up really well in testing so far. For very large filesystems (30T+) our existing free space caching code can end up taking a huge amount of time during commits. The new tree based code is faster and less work overall to update as the commit progresses. Omar worked on this during the summer and we'll hammer on it in production here at FB over the next few months" * 'for-linus-4.5' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs: (73 commits) Btrfs: fix fitrim discarding device area reserved for boot loader's use Btrfs: Check metadata redundancy on balance btrfs: statfs: report zero available if metadata are exhausted btrfs: preallocate path for snapshot creation at ioctl time btrfs: allocate root item at snapshot ioctl time btrfs: do an allocation earlier during snapshot creation btrfs: use smaller type for btrfs_path locks btrfs: use smaller type for btrfs_path lowest_level btrfs: use smaller type for btrfs_path reada btrfs: cleanup, use enum values for btrfs_path reada btrfs: constify static arrays btrfs: constify remaining structs with function pointers btrfs tests: replace whole ops structure for free space tests btrfs: use list_for_each_entry* in backref.c btrfs: use list_for_each_entry_safe in free-space-cache.c btrfs: use list_for_each_entry* in check-integrity.c Btrfs: use linux/sizes.h to represent constants btrfs: cleanup, remove stray return statements btrfs: zero out delayed node upon allocation btrfs: pass proper enum type to start_transaction() ...
2 parents 48f58ba + 988f1f5 commit c1a198d

Some content is hidden

Large commits have some content hidden by default. Use the search box below to find content that may be hidden.

47 files changed

+3690
-829
lines changed

fs/btrfs/Makefile

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -9,11 +9,12 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
99
export.o tree-log.o free-space-cache.o zlib.o lzo.o \
1010
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
1111
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
12-
uuid-tree.o props.o hash.o
12+
uuid-tree.o props.o hash.o free-space-tree.o
1313

1414
btrfs-$(CONFIG_BTRFS_FS_POSIX_ACL) += acl.o
1515
btrfs-$(CONFIG_BTRFS_FS_CHECK_INTEGRITY) += check-integrity.o
1616

1717
btrfs-$(CONFIG_BTRFS_FS_RUN_SANITY_TESTS) += tests/free-space-tests.o \
1818
tests/extent-buffer-tests.o tests/btrfs-tests.o \
19-
tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o
19+
tests/extent-io-tests.o tests/inode-tests.o tests/qgroup-tests.o \
20+
tests/free-space-tree-tests.o

fs/btrfs/acl.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@ struct posix_acl *btrfs_get_acl(struct inode *inode, int type)
4848

4949
size = __btrfs_getxattr(inode, name, "", 0);
5050
if (size > 0) {
51-
value = kzalloc(size, GFP_NOFS);
51+
value = kzalloc(size, GFP_KERNEL);
5252
if (!value)
5353
return ERR_PTR(-ENOMEM);
5454
size = __btrfs_getxattr(inode, name, value, size);
@@ -102,7 +102,7 @@ static int __btrfs_set_acl(struct btrfs_trans_handle *trans,
102102

103103
if (acl) {
104104
size = posix_acl_xattr_size(acl->a_count);
105-
value = kmalloc(size, GFP_NOFS);
105+
value = kmalloc(size, GFP_KERNEL);
106106
if (!value) {
107107
ret = -ENOMEM;
108108
goto out;

fs/btrfs/async-thread.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -97,7 +97,7 @@ static struct __btrfs_workqueue *
9797
__btrfs_alloc_workqueue(const char *name, unsigned int flags, int limit_active,
9898
int thresh)
9999
{
100-
struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
100+
struct __btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
101101

102102
if (!ret)
103103
return NULL;
@@ -148,7 +148,7 @@ struct btrfs_workqueue *btrfs_alloc_workqueue(const char *name,
148148
int limit_active,
149149
int thresh)
150150
{
151-
struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_NOFS);
151+
struct btrfs_workqueue *ret = kzalloc(sizeof(*ret), GFP_KERNEL);
152152

153153
if (!ret)
154154
return NULL;

fs/btrfs/backref.c

Lines changed: 6 additions & 17 deletions
Original file line numberDiff line numberDiff line change
@@ -520,13 +520,10 @@ static inline int ref_for_same_block(struct __prelim_ref *ref1,
520520
static int __add_missing_keys(struct btrfs_fs_info *fs_info,
521521
struct list_head *head)
522522
{
523-
struct list_head *pos;
523+
struct __prelim_ref *ref;
524524
struct extent_buffer *eb;
525525

526-
list_for_each(pos, head) {
527-
struct __prelim_ref *ref;
528-
ref = list_entry(pos, struct __prelim_ref, list);
529-
526+
list_for_each_entry(ref, head, list) {
530527
if (ref->parent)
531528
continue;
532529
if (ref->key_for_search.type)
@@ -563,23 +560,15 @@ static int __add_missing_keys(struct btrfs_fs_info *fs_info,
563560
*/
564561
static void __merge_refs(struct list_head *head, int mode)
565562
{
566-
struct list_head *pos1;
563+
struct __prelim_ref *ref1;
567564

568-
list_for_each(pos1, head) {
569-
struct list_head *n2;
570-
struct list_head *pos2;
571-
struct __prelim_ref *ref1;
565+
list_for_each_entry(ref1, head, list) {
566+
struct __prelim_ref *ref2 = ref1, *tmp;
572567

573-
ref1 = list_entry(pos1, struct __prelim_ref, list);
574-
575-
for (pos2 = pos1->next, n2 = pos2->next; pos2 != head;
576-
pos2 = n2, n2 = pos2->next) {
577-
struct __prelim_ref *ref2;
568+
list_for_each_entry_safe_continue(ref2, tmp, head, list) {
578569
struct __prelim_ref *xchg;
579570
struct extent_inode_elem *eie;
580571

581-
ref2 = list_entry(pos2, struct __prelim_ref, list);
582-
583572
if (!ref_for_same_block(ref1, ref2))
584573
continue;
585574
if (mode == 1) {

fs/btrfs/btrfs_inode.h

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -192,6 +192,10 @@ struct btrfs_inode {
192192
/* File creation time. */
193193
struct timespec i_otime;
194194

195+
/* Hook into fs_info->delayed_iputs */
196+
struct list_head delayed_iput;
197+
long delayed_iput_count;
198+
195199
struct inode vfs_inode;
196200
};
197201

fs/btrfs/check-integrity.c

Lines changed: 26 additions & 79 deletions
Original file line numberDiff line numberDiff line change
@@ -531,13 +531,9 @@ static struct btrfsic_block *btrfsic_block_hashtable_lookup(
531531
(((unsigned int)(dev_bytenr >> 16)) ^
532532
((unsigned int)((uintptr_t)bdev))) &
533533
(BTRFSIC_BLOCK_HASHTABLE_SIZE - 1);
534-
struct list_head *elem;
535-
536-
list_for_each(elem, h->table + hashval) {
537-
struct btrfsic_block *const b =
538-
list_entry(elem, struct btrfsic_block,
539-
collision_resolving_node);
534+
struct btrfsic_block *b;
540535

536+
list_for_each_entry(b, h->table + hashval, collision_resolving_node) {
541537
if (b->dev_state->bdev == bdev && b->dev_bytenr == dev_bytenr)
542538
return b;
543539
}
@@ -588,13 +584,9 @@ static struct btrfsic_block_link *btrfsic_block_link_hashtable_lookup(
588584
((unsigned int)((uintptr_t)bdev_ref_to)) ^
589585
((unsigned int)((uintptr_t)bdev_ref_from))) &
590586
(BTRFSIC_BLOCK_LINK_HASHTABLE_SIZE - 1);
591-
struct list_head *elem;
592-
593-
list_for_each(elem, h->table + hashval) {
594-
struct btrfsic_block_link *const l =
595-
list_entry(elem, struct btrfsic_block_link,
596-
collision_resolving_node);
587+
struct btrfsic_block_link *l;
597588

589+
list_for_each_entry(l, h->table + hashval, collision_resolving_node) {
598590
BUG_ON(NULL == l->block_ref_to);
599591
BUG_ON(NULL == l->block_ref_from);
600592
if (l->block_ref_to->dev_state->bdev == bdev_ref_to &&
@@ -639,13 +631,9 @@ static struct btrfsic_dev_state *btrfsic_dev_state_hashtable_lookup(
639631
const unsigned int hashval =
640632
(((unsigned int)((uintptr_t)bdev)) &
641633
(BTRFSIC_DEV2STATE_HASHTABLE_SIZE - 1));
642-
struct list_head *elem;
643-
644-
list_for_each(elem, h->table + hashval) {
645-
struct btrfsic_dev_state *const ds =
646-
list_entry(elem, struct btrfsic_dev_state,
647-
collision_resolving_node);
634+
struct btrfsic_dev_state *ds;
648635

636+
list_for_each_entry(ds, h->table + hashval, collision_resolving_node) {
649637
if (ds->bdev == bdev)
650638
return ds;
651639
}
@@ -1720,29 +1708,20 @@ static int btrfsic_read_block(struct btrfsic_state *state,
17201708

17211709
static void btrfsic_dump_database(struct btrfsic_state *state)
17221710
{
1723-
struct list_head *elem_all;
1711+
const struct btrfsic_block *b_all;
17241712

17251713
BUG_ON(NULL == state);
17261714

17271715
printk(KERN_INFO "all_blocks_list:\n");
1728-
list_for_each(elem_all, &state->all_blocks_list) {
1729-
const struct btrfsic_block *const b_all =
1730-
list_entry(elem_all, struct btrfsic_block,
1731-
all_blocks_node);
1732-
struct list_head *elem_ref_to;
1733-
struct list_head *elem_ref_from;
1716+
list_for_each_entry(b_all, &state->all_blocks_list, all_blocks_node) {
1717+
const struct btrfsic_block_link *l;
17341718

17351719
printk(KERN_INFO "%c-block @%llu (%s/%llu/%d)\n",
17361720
btrfsic_get_block_type(state, b_all),
17371721
b_all->logical_bytenr, b_all->dev_state->name,
17381722
b_all->dev_bytenr, b_all->mirror_num);
17391723

1740-
list_for_each(elem_ref_to, &b_all->ref_to_list) {
1741-
const struct btrfsic_block_link *const l =
1742-
list_entry(elem_ref_to,
1743-
struct btrfsic_block_link,
1744-
node_ref_to);
1745-
1724+
list_for_each_entry(l, &b_all->ref_to_list, node_ref_to) {
17461725
printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
17471726
" refers %u* to"
17481727
" %c @%llu (%s/%llu/%d)\n",
@@ -1757,12 +1736,7 @@ static void btrfsic_dump_database(struct btrfsic_state *state)
17571736
l->block_ref_to->mirror_num);
17581737
}
17591738

1760-
list_for_each(elem_ref_from, &b_all->ref_from_list) {
1761-
const struct btrfsic_block_link *const l =
1762-
list_entry(elem_ref_from,
1763-
struct btrfsic_block_link,
1764-
node_ref_from);
1765-
1739+
list_for_each_entry(l, &b_all->ref_from_list, node_ref_from) {
17661740
printk(KERN_INFO " %c @%llu (%s/%llu/%d)"
17671741
" is ref %u* from"
17681742
" %c @%llu (%s/%llu/%d)\n",
@@ -1845,8 +1819,7 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
18451819
&state->block_hashtable);
18461820
if (NULL != block) {
18471821
u64 bytenr = 0;
1848-
struct list_head *elem_ref_to;
1849-
struct list_head *tmp_ref_to;
1822+
struct btrfsic_block_link *l, *tmp;
18501823

18511824
if (block->is_superblock) {
18521825
bytenr = btrfs_super_bytenr((struct btrfs_super_block *)
@@ -1967,13 +1940,8 @@ static void btrfsic_process_written_block(struct btrfsic_dev_state *dev_state,
19671940
* because it still carries valuable information
19681941
* like whether it was ever written and IO completed.
19691942
*/
1970-
list_for_each_safe(elem_ref_to, tmp_ref_to,
1971-
&block->ref_to_list) {
1972-
struct btrfsic_block_link *const l =
1973-
list_entry(elem_ref_to,
1974-
struct btrfsic_block_link,
1975-
node_ref_to);
1976-
1943+
list_for_each_entry_safe(l, tmp, &block->ref_to_list,
1944+
node_ref_to) {
19771945
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
19781946
btrfsic_print_rem_link(state, l);
19791947
l->ref_cnt--;
@@ -2436,7 +2404,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
24362404
struct btrfsic_block *const block,
24372405
int recursion_level)
24382406
{
2439-
struct list_head *elem_ref_to;
2407+
const struct btrfsic_block_link *l;
24402408
int ret = 0;
24412409

24422410
if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
@@ -2464,11 +2432,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
24642432
* This algorithm is recursive because the amount of used stack
24652433
* space is very small and the max recursion depth is limited.
24662434
*/
2467-
list_for_each(elem_ref_to, &block->ref_to_list) {
2468-
const struct btrfsic_block_link *const l =
2469-
list_entry(elem_ref_to, struct btrfsic_block_link,
2470-
node_ref_to);
2471-
2435+
list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
24722436
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
24732437
printk(KERN_INFO
24742438
"rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2561,7 +2525,7 @@ static int btrfsic_is_block_ref_by_superblock(
25612525
const struct btrfsic_block *block,
25622526
int recursion_level)
25632527
{
2564-
struct list_head *elem_ref_from;
2528+
const struct btrfsic_block_link *l;
25652529

25662530
if (recursion_level >= 3 + BTRFS_MAX_LEVEL) {
25672531
/* refer to comment at "abort cyclic linkage (case 1)" */
@@ -2576,11 +2540,7 @@ static int btrfsic_is_block_ref_by_superblock(
25762540
* This algorithm is recursive because the amount of used stack space
25772541
* is very small and the max recursion depth is limited.
25782542
*/
2579-
list_for_each(elem_ref_from, &block->ref_from_list) {
2580-
const struct btrfsic_block_link *const l =
2581-
list_entry(elem_ref_from, struct btrfsic_block_link,
2582-
node_ref_from);
2583-
2543+
list_for_each_entry(l, &block->ref_from_list, node_ref_from) {
25842544
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
25852545
printk(KERN_INFO
25862546
"rl=%d, %c @%llu (%s/%llu/%d)"
@@ -2669,7 +2629,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
26692629
const struct btrfsic_block *block,
26702630
int indent_level)
26712631
{
2672-
struct list_head *elem_ref_to;
2632+
const struct btrfsic_block_link *l;
26732633
int indent_add;
26742634
static char buf[80];
26752635
int cursor_position;
@@ -2704,11 +2664,7 @@ static void btrfsic_dump_tree_sub(const struct btrfsic_state *state,
27042664
}
27052665

27062666
cursor_position = indent_level;
2707-
list_for_each(elem_ref_to, &block->ref_to_list) {
2708-
const struct btrfsic_block_link *const l =
2709-
list_entry(elem_ref_to, struct btrfsic_block_link,
2710-
node_ref_to);
2711-
2667+
list_for_each_entry(l, &block->ref_to_list, node_ref_to) {
27122668
while (cursor_position < indent_level) {
27132669
printk(" ");
27142670
cursor_position++;
@@ -3165,8 +3121,7 @@ int btrfsic_mount(struct btrfs_root *root,
31653121
void btrfsic_unmount(struct btrfs_root *root,
31663122
struct btrfs_fs_devices *fs_devices)
31673123
{
3168-
struct list_head *elem_all;
3169-
struct list_head *tmp_all;
3124+
struct btrfsic_block *b_all, *tmp_all;
31703125
struct btrfsic_state *state;
31713126
struct list_head *dev_head = &fs_devices->devices;
31723127
struct btrfs_device *device;
@@ -3206,20 +3161,12 @@ void btrfsic_unmount(struct btrfs_root *root,
32063161
* just free all memory that was allocated dynamically.
32073162
* Free the blocks and the block_links.
32083163
*/
3209-
list_for_each_safe(elem_all, tmp_all, &state->all_blocks_list) {
3210-
struct btrfsic_block *const b_all =
3211-
list_entry(elem_all, struct btrfsic_block,
3212-
all_blocks_node);
3213-
struct list_head *elem_ref_to;
3214-
struct list_head *tmp_ref_to;
3215-
3216-
list_for_each_safe(elem_ref_to, tmp_ref_to,
3217-
&b_all->ref_to_list) {
3218-
struct btrfsic_block_link *const l =
3219-
list_entry(elem_ref_to,
3220-
struct btrfsic_block_link,
3221-
node_ref_to);
3164+
list_for_each_entry_safe(b_all, tmp_all, &state->all_blocks_list,
3165+
all_blocks_node) {
3166+
struct btrfsic_block_link *l, *tmp;
32223167

3168+
list_for_each_entry_safe(l, tmp, &b_all->ref_to_list,
3169+
node_ref_to) {
32233170
if (state->print_mask & BTRFSIC_PRINT_MASK_VERBOSE)
32243171
btrfsic_print_rem_link(state, l);
32253172

fs/btrfs/ctree.c

Lines changed: 5 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1555,7 +1555,7 @@ noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
15551555
return 0;
15561556
}
15571557

1558-
search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1558+
search_start = buf->start & ~((u64)SZ_1G - 1);
15591559

15601560
if (parent)
15611561
btrfs_set_lock_blocking(parent);
@@ -2248,7 +2248,6 @@ static void reada_for_search(struct btrfs_root *root,
22482248
u64 target;
22492249
u64 nread = 0;
22502250
u64 gen;
2251-
int direction = path->reada;
22522251
struct extent_buffer *eb;
22532252
u32 nr;
22542253
u32 blocksize;
@@ -2276,16 +2275,16 @@ static void reada_for_search(struct btrfs_root *root,
22762275
nr = slot;
22772276

22782277
while (1) {
2279-
if (direction < 0) {
2278+
if (path->reada == READA_BACK) {
22802279
if (nr == 0)
22812280
break;
22822281
nr--;
2283-
} else if (direction > 0) {
2282+
} else if (path->reada == READA_FORWARD) {
22842283
nr++;
22852284
if (nr >= nritems)
22862285
break;
22872286
}
2288-
if (path->reada < 0 && objectid) {
2287+
if (path->reada == READA_BACK && objectid) {
22892288
btrfs_node_key(node, &disk_key, nr);
22902289
if (btrfs_disk_key_objectid(&disk_key) != objectid)
22912290
break;
@@ -2493,7 +2492,7 @@ read_block_for_search(struct btrfs_trans_handle *trans,
24932492
btrfs_set_path_blocking(p);
24942493

24952494
free_extent_buffer(tmp);
2496-
if (p->reada)
2495+
if (p->reada != READA_NONE)
24972496
reada_for_search(root, p, level, slot, key->objectid);
24982497

24992498
btrfs_release_path(p);

0 commit comments

Comments
 (0)