
Commit 52042d8

Gelma authored and kdave committed
btrfs: Fix typos in comments and strings
The typos accumulate over time so once in a while they get fixed in a large patch.

Signed-off-by: Andrea Gelmini <andrea.gelmini@gelma.net>
Reviewed-by: David Sterba <dsterba@suse.com>
Signed-off-by: David Sterba <dsterba@suse.com>
1 parent 1690dd4 commit 52042d8

25 files changed, +70 -69 lines changed

fs/btrfs/backref.c

Lines changed: 2 additions & 2 deletions
@@ -591,7 +591,7 @@ unode_aux_to_inode_list(struct ulist_node *node)
 }

 /*
- * We maintain three seperate rbtrees: one for direct refs, one for
+ * We maintain three separate rbtrees: one for direct refs, one for
 * indirect refs which have a key, and one for indirect refs which do not
 * have a key. Each tree does merge on insertion.
 *
@@ -695,7 +695,7 @@ static int resolve_indirect_refs(struct btrfs_fs_info *fs_info,
 }

 /*
- * Now it's a direct ref, put it in the the direct tree. We must
+ * Now it's a direct ref, put it in the direct tree. We must
 * do this last because the ref could be merged/freed here.
 */
 prelim_ref_insert(fs_info, &preftrees->direct, ref, NULL);

fs/btrfs/check-integrity.c

Lines changed: 1 addition & 1 deletion
@@ -2327,7 +2327,7 @@ static int btrfsic_check_all_ref_blocks(struct btrfsic_state *state,
 * write operations. Therefore it keeps the linkage
 * information for a block until a block is
 * rewritten. This can temporarily cause incorrect
- * and even circular linkage informations. This
+ * and even circular linkage information. This
 * causes no harm unless such blocks are referenced
 * by the most recent super block.
 */

fs/btrfs/compression.c

Lines changed: 2 additions & 2 deletions
@@ -1203,7 +1203,7 @@ int btrfs_decompress_buf2page(const char *buf, unsigned long buf_start,
 /*
 * Shannon Entropy calculation
 *
- * Pure byte distribution analysis fails to determine compressiability of data.
+ * Pure byte distribution analysis fails to determine compressibility of data.
 * Try calculating entropy to estimate the average minimum number of bits
 * needed to encode the sampled data.
 *
@@ -1267,7 +1267,7 @@ static u8 get4bits(u64 num, int shift) {

 /*
 * Use 4 bits as radix base
- * Use 16 u32 counters for calculating new possition in buf array
+ * Use 16 u32 counters for calculating new position in buf array
 *
 * @array - array that will be sorted
 * @array_buf - buffer array to store sorting results
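
The two hunks above sit in the comments for btrfs's compression heuristic: it samples the input, builds a byte-frequency histogram, and estimates Shannon entropy as the average number of bits needed per byte; data near 8 bits/byte is unlikely to be worth compressing. A rough userspace illustration of that idea only (the kernel version works on a fixed sample buffer and avoids floating point; the function name below is invented):

#include <math.h>
#include <stddef.h>
#include <stdint.h>

/*
 * Estimate Shannon entropy of a data sample, in bits per byte.
 * Values close to 8 suggest the data will not compress well; low
 * values suggest it is worth handing to the real compressor.
 */
static double sample_entropy(const uint8_t *buf, size_t len)
{
	uint32_t bucket[256] = { 0 };
	double entropy = 0.0;
	size_t i;

	if (!len)
		return 0.0;

	for (i = 0; i < len; i++)
		bucket[buf[i]]++;

	for (i = 0; i < 256; i++) {
		double p;

		if (!bucket[i])
			continue;
		p = (double)bucket[i] / (double)len;
		entropy -= p * log2(p);
	}
	return entropy;
}

The second hunk's comment (4-bit radix base, 16 u32 counters) appears to describe how a helper sorts those bucket counts for a related check; the entropy estimate itself only needs the histogram shown here.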

fs/btrfs/ctree.c

Lines changed: 2 additions & 2 deletions
@@ -1414,7 +1414,7 @@ static inline int should_cow_block(struct btrfs_trans_handle *trans,
 *
 * What is forced COW:
 * when we create snapshot during committing the transaction,
- * after we've finished coping src root, we must COW the shared
+ * after we've finished copying src root, we must COW the shared
 * block to ensure the metadata consistency.
 */
 if (btrfs_header_generation(buf) == trans->transid &&
@@ -3771,7 +3771,7 @@ static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
 /* Key greater than all keys in the leaf, right neighbor has
 * enough room for it and we're not emptying our leaf to delete
 * it, therefore use right neighbor to insert the new item and
- * no need to touch/dirty our left leaft. */
+ * no need to touch/dirty our left leaf. */
 btrfs_tree_unlock(left);
 free_extent_buffer(left);
 path->nodes[0] = right;

fs/btrfs/dev-replace.c

Lines changed: 1 addition & 1 deletion
@@ -991,7 +991,7 @@ int btrfs_dev_replace_is_ongoing(struct btrfs_dev_replace *dev_replace)
 * something that can happen if the dev_replace
 * procedure is suspended by an umount and then
 * the tgtdev is missing (or "btrfs dev scan") was
- * not called and the the filesystem is remounted
+ * not called and the filesystem is remounted
 * in degraded state. This does not stop the
 * dev_replace procedure. It needs to be canceled
 * manually if the cancellation is wanted.

fs/btrfs/disk-io.c

Lines changed: 2 additions & 2 deletions
@@ -3100,7 +3100,7 @@ int open_ctree(struct super_block *sb,

 if (!sb_rdonly(sb) && !btrfs_check_rw_degradable(fs_info, NULL)) {
 btrfs_warn(fs_info,
- "writeable mount is not allowed due to too many missing devices");
+ "writable mount is not allowed due to too many missing devices");
 goto fail_sysfs;
 }

@@ -4077,7 +4077,7 @@ void btrfs_mark_buffer_dirty(struct extent_buffer *buf)
 #ifdef CONFIG_BTRFS_FS_RUN_SANITY_TESTS
 /*
 * This is a fast path so only do this check if we have sanity tests
- * enabled. Normal people shouldn't be using umapped buffers as dirty
+ * enabled. Normal people shouldn't be using unmapped buffers as dirty
 * outside of the sanity tests.
 */
 if (unlikely(test_bit(EXTENT_BUFFER_UNMAPPED, &buf->bflags)))

fs/btrfs/extent-tree.c

Lines changed: 11 additions & 11 deletions
@@ -1055,7 +1055,7 @@ int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,

 /*
 * is_data == BTRFS_REF_TYPE_BLOCK, tree block type is required,
- * is_data == BTRFS_REF_TYPE_DATA, data type is requried,
+ * is_data == BTRFS_REF_TYPE_DATA, data type is required,
 * is_data == BTRFS_REF_TYPE_ANY, either type is OK.
 */
 int btrfs_get_extent_inline_ref_type(const struct extent_buffer *eb,
@@ -3705,7 +3705,7 @@ int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans)
 }
 }

-/* if its not on the io list, we need to put the block group */
+/* if it's not on the io list, we need to put the block group */
 if (should_put)
 btrfs_put_block_group(cache);
 if (drop_reserve)
@@ -4675,7 +4675,7 @@ static int can_overcommit(struct btrfs_fs_info *fs_info,

 /*
 * If we have dup, raid1 or raid10 then only half of the free
- * space is actually useable. For raid56, the space info used
+ * space is actually usable. For raid56, the space info used
 * doesn't include the parity drive, so we don't have to
 * change the math
 */
@@ -5302,7 +5302,7 @@ static int __reserve_metadata_bytes(struct btrfs_fs_info *fs_info,
 * @orig_bytes - the number of bytes we want
 * @flush - whether or not we can flush to make our reservation
 *
- * This will reserve orgi_bytes number of bytes from the space info associated
+ * This will reserve orig_bytes number of bytes from the space info associated
 * with the block_rsv. If there is not enough space it will make an attempt to
 * flush out space to make room. It will do this by flushing delalloc if
 * possible or committing the transaction. If flush is 0 then no attempts to
@@ -5771,11 +5771,11 @@ int btrfs_block_rsv_refill(struct btrfs_root *root,
 /**
 * btrfs_inode_rsv_refill - refill the inode block rsv.
 * @inode - the inode we are refilling.
- * @flush - the flusing restriction.
+ * @flush - the flushing restriction.
 *
 * Essentially the same as btrfs_block_rsv_refill, except it uses the
 * block_rsv->size as the minimum size. We'll either refill the missing amount
- * or return if we already have enough space. This will also handle the resreve
+ * or return if we already have enough space. This will also handle the reserve
 * tracepoint for the reserved amount.
 */
 static int btrfs_inode_rsv_refill(struct btrfs_inode *inode,
@@ -8500,7 +8500,7 @@ btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
 buf->log_index = root->log_transid % 2;
 /*
 * we allow two log transactions at a time, use different
- * EXENT bit to differentiate dirty pages.
+ * EXTENT bit to differentiate dirty pages.
 */
 if (buf->log_index == 0)
 set_extent_dirty(&root->dirty_log_pages, buf->start,
@@ -9762,7 +9762,7 @@ void btrfs_dec_block_group_ro(struct btrfs_block_group_cache *cache)
 }

 /*
- * checks to see if its even possible to relocate this block group.
+ * Checks to see if it's even possible to relocate this block group.
 *
 * @return - -1 if it's not a good idea to relocate this block group, 0 if its
 * ok to go ahead and try.
@@ -10390,7 +10390,7 @@ int btrfs_read_block_groups(struct btrfs_fs_info *info)
 * check for two cases, either we are full, and therefore
 * don't need to bother with the caching work since we won't
 * find any space, or we are empty, and we can just add all
- * the space in and be done with it. This saves us _alot_ of
+ * the space in and be done with it. This saves us _a_lot_ of
 * time, particularly in the full case.
 */
 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
@@ -10660,7 +10660,7 @@ int btrfs_remove_block_group(struct btrfs_trans_handle *trans,

 mutex_lock(&trans->transaction->cache_write_mutex);
 /*
- * make sure our free spache cache IO is done before remove the
+ * Make sure our free space cache IO is done before removing the
 * free space inode
 */
 spin_lock(&trans->transaction->dirty_bgs_lock);
@@ -11177,7 +11177,7 @@ static int btrfs_trim_free_extents(struct btrfs_device *device,
 if (!blk_queue_discard(bdev_get_queue(device->bdev)))
 return 0;

-/* Not writeable = nothing to do. */
+/* Not writable = nothing to do. */
 if (!test_bit(BTRFS_DEV_STATE_WRITEABLE, &device->dev_state))
 return 0;
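
The can_overcommit() comment in one of the hunks above encodes a simple space rule: with dup, raid1 or raid10 every byte of data takes two bytes of raw space, so only half of the reported free space is usable, while for raid5/6 the accounted space already excludes the parity drives. A toy expression of that rule (the enum and function here are hypothetical, not the real btrfs block-group flags or API):

#include <stdint.h>

enum profile { SINGLE, DUP, RAID1, RAID10, RAID5, RAID6 };

/* How much of the raw free space can actually hold new data. */
static uint64_t usable_free_space(enum profile p, uint64_t free_bytes)
{
	switch (p) {
	case DUP:
	case RAID1:
	case RAID10:
		/* two copies of every byte: only half is usable */
		return free_bytes >> 1;
	default:
		/* raid5/6 free space already excludes parity; single is 1:1 */
		return free_bytes;
	}
}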

fs/btrfs/extent_io.c

Lines changed: 2 additions & 2 deletions
@@ -492,7 +492,7 @@ static struct extent_state *next_state(struct extent_state *state)

 /*
 * utility function to clear some bits in an extent state struct.
- * it will optionally wake up any one waiting on this state (wake == 1).
+ * it will optionally wake up anyone waiting on this state (wake == 1).
 *
 * If no bits are set on the state struct after clearing things, the
 * struct is freed and removed from the tree
@@ -4312,7 +4312,7 @@ static int emit_fiemap_extent(struct fiemap_extent_info *fieinfo,

 /*
 * Sanity check, extent_fiemap() should have ensured that new
- * fiemap extent won't overlap with cahced one.
+ * fiemap extent won't overlap with cached one.
 * Not recoverable.
 *
 * NOTE: Physical address can overlap, due to compression

fs/btrfs/extent_io.h

Lines changed: 1 addition & 1 deletion
@@ -98,7 +98,7 @@ typedef blk_status_t (extent_submit_bio_start_t)(void *private_data,

 struct extent_io_ops {
 /*
- * The following callbacks must be allways defined, the function
+ * The following callbacks must be always defined, the function
 * pointer will be called unconditionally.
 */
 extent_submit_bio_hook_t *submit_bio_hook;

fs/btrfs/extent_map.c

Lines changed: 2 additions & 1 deletion
@@ -475,7 +475,8 @@ static struct extent_map *prev_extent_map(struct extent_map *em)
 return container_of(prev, struct extent_map, rb_node);
 }

-/* helper for btfs_get_extent. Given an existing extent in the tree,
+/*
+ * Helper for btrfs_get_extent. Given an existing extent in the tree,
 * the existing extent is the nearest extent to map_start,
 * and an extent that you want to insert, deal with overlap and insert
 * the best fitted new extent into the tree.

fs/btrfs/file.c

Lines changed: 3 additions & 3 deletions
@@ -2005,7 +2005,7 @@ int btrfs_release_file(struct inode *inode, struct file *filp)
 filp->private_data = NULL;

 /*
- * ordered_data_close is set by settattr when we are about to truncate
+ * ordered_data_close is set by setattr when we are about to truncate
 * a file from a non-zero size to a zero size. This tries to
 * flush down new bytes that may have been written if the
 * application were using truncate to replace a file in place.
@@ -2114,7 +2114,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)

 /*
 * We have to do this here to avoid the priority inversion of waiting on
- * IO of a lower priority task while holding a transaciton open.
+ * IO of a lower priority task while holding a transaction open.
 */
 ret = btrfs_wait_ordered_range(inode, start, len);
 if (ret) {
@@ -2154,7 +2154,7 @@ int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
 * here we could get into a situation where we're waiting on IO to
 * happen that is blocked on a transaction trying to commit. With start
 * we inc the extwriter counter, so we wait for all extwriters to exit
- * before we start blocking join'ers. This comment is to keep somebody
+ * before we start blocking joiners. This comment is to keep somebody
 * from thinking they are super smart and changing this to
 * btrfs_join_transaction *cough*Josef*cough*.
 */

fs/btrfs/inode.c

Lines changed: 5 additions & 5 deletions
@@ -104,7 +104,7 @@ static void __endio_write_update_ordered(struct inode *inode,

 /*
 * Cleanup all submitted ordered extents in specified range to handle errors
- * from the fill_dellaloc() callback.
+ * from the btrfs_run_delalloc_range() callback.
 *
 * NOTE: caller must ensure that when an error happens, it can not call
 * extent_clear_unlock_delalloc() to clear both the bits EXTENT_DO_ACCOUNTING
@@ -1842,7 +1842,7 @@ void btrfs_clear_delalloc_extent(struct inode *vfs_inode,

 /*
 * We don't reserve metadata space for space cache inodes so we
- * don't need to call dellalloc_release_metadata if there is an
+ * don't need to call delalloc_release_metadata if there is an
 * error.
 */
 if (*bits & EXTENT_CLEAR_META_RESV &&
@@ -4516,7 +4516,7 @@ int btrfs_truncate_inode_items(struct btrfs_trans_handle *trans,
 /*
 * This function is also used to drop the items in the log tree before
 * we relog the inode, so if root != BTRFS_I(inode)->root, it means
- * it is used to drop the loged items. So we shouldn't kill the delayed
+ * it is used to drop the logged items. So we shouldn't kill the delayed
 * items.
 */
 if (min_type == 0 && root == BTRFS_I(inode)->root)
@@ -5108,7 +5108,7 @@ static int btrfs_setsize(struct inode *inode, struct iattr *attr)

 truncate_setsize(inode, newsize);

-/* Disable nonlocked read DIO to avoid the end less truncate */
+/* Disable nonlocked read DIO to avoid the endless truncate */
 btrfs_inode_block_unlocked_dio(BTRFS_I(inode));
 inode_dio_wait(inode);
 btrfs_inode_resume_unlocked_dio(BTRFS_I(inode));
@@ -8052,7 +8052,7 @@ static void __endio_write_update_ordered(struct inode *inode,
 return;
 /*
 * Our bio might span multiple ordered extents. In this case
- * we keep goin until we have accounted the whole dio.
+ * we keep going until we have accounted the whole dio.
 */
 if (ordered_offset < offset + bytes) {
 ordered_bytes = offset + bytes - ordered_offset;

fs/btrfs/lzo.c

Lines changed: 1 addition & 1 deletion
@@ -27,7 +27,7 @@
 * Records the total size (including the header) of compressed data.
 *
 * 2. Segment(s)
- * Variable size. Each segment includes one segment header, followd by data
+ * Variable size. Each segment includes one segment header, followed by data
 * payload.
 * One regular LZO compressed extent can have one or more segments.
 * For inlined LZO compressed extent, only one segment is allowed.
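
The comment fixed above documents the layout of an LZO-compressed btrfs extent: a fixed-size header recording the total compressed size (including the header itself), then one or more segments, each a segment header followed by its compressed payload. A minimal sketch of walking such a layout, assuming 4-byte little-endian length fields and a little-endian host (both assumptions for illustration; the helper names are invented):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define HDR_LEN 4	/* assumed fixed header size */

static uint32_t read_le32(const uint8_t *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));
	return v;	/* little-endian host assumed for brevity */
}

/*
 * Walk the segments of one compressed extent: the leading header
 * gives the total size (header included), then each segment is a
 * length header followed by that many bytes of compressed payload.
 */
static int walk_lzo_segments(const uint8_t *buf, size_t buf_len)
{
	uint32_t total;
	size_t off = HDR_LEN;

	if (buf_len < HDR_LEN)
		return -1;
	total = read_le32(buf);
	if (total > buf_len)
		return -1;

	while (off + HDR_LEN <= total) {
		uint32_t seg_len = read_le32(buf + off);

		off += HDR_LEN;
		if (seg_len == 0 || seg_len > total - off)
			return -1;
		/* decompress seg_len bytes at buf + off here */
		off += seg_len;
	}
	return 0;
}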

fs/btrfs/qgroup.c

Lines changed: 7 additions & 7 deletions
@@ -30,7 +30,7 @@
 * - sync
 * - copy also limits on subvol creation
 * - limit
- * - caches fuer ulists
+ * - caches for ulists
 * - performance benchmarks
 * - check all ioctl parameters
 */
@@ -522,7 +522,7 @@ void btrfs_free_qgroup_config(struct btrfs_fs_info *fs_info)
 __del_qgroup_rb(qgroup);
 }
 /*
- * we call btrfs_free_qgroup_config() when umounting
+ * We call btrfs_free_qgroup_config() when unmounting
 * filesystem and disabling quota, so we set qgroup_ulist
 * to be null here to avoid double free.
 */
@@ -1128,7 +1128,7 @@ static void qgroup_dirty(struct btrfs_fs_info *fs_info,
 * The easy accounting, we're updating qgroup relationship whose child qgroup
 * only has exclusive extents.
 *
- * In this case, all exclsuive extents will also be exlusive for parent, so
+ * In this case, all exclusive extents will also be exclusive for parent, so
 * excl/rfer just get added/removed.
 *
 * So is qgroup reservation space, which should also be added/removed to
@@ -1755,14 +1755,14 @@ static int adjust_slots_upwards(struct btrfs_path *path, int root_level)
 *
 * 2) Mark the final tree blocks in @src_path and @dst_path qgroup dirty
 * NOTE: In above case, OO(a) and NN(a) won't be marked qgroup dirty.
- * They should be marked during preivous (@dst_level = 1) iteration.
+ * They should be marked during previous (@dst_level = 1) iteration.
 *
 * 3) Mark file extents in leaves dirty
 * We don't have good way to pick out new file extents only.
 * So we still follow the old method by scanning all file extents in
 * the leave.
 *
- * This function can free us from keeping two pathes, thus later we only need
+ * This function can free us from keeping two paths, thus later we only need
 * to care about how to iterate all new tree blocks in reloc tree.
 */
 static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
@@ -1901,7 +1901,7 @@ static int qgroup_trace_extent_swap(struct btrfs_trans_handle* trans,
 *
 * We will iterate through tree blocks NN(b), NN(d) and info qgroup to trace
 * above tree blocks along with their counter parts in file tree.
- * While during search, old tree blocsk OO(c) will be skiped as tree block swap
+ * While during search, old tree blocks OO(c) will be skipped as tree block swap
 * won't affect OO(c).
 */
 static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
@@ -2026,7 +2026,7 @@ static int qgroup_trace_new_subtree_blocks(struct btrfs_trans_handle* trans,
 * Will go down the tree block pointed by @dst_eb (pointed by @dst_parent and
 * @dst_slot), and find any tree blocks whose generation is at @last_snapshot,
 * and then go down @src_eb (pointed by @src_parent and @src_slot) to find
- * the conterpart of the tree block, then mark both tree blocks as qgroup dirty,
+ * the counterpart of the tree block, then mark both tree blocks as qgroup dirty,
 * and skip all tree blocks whose generation is smaller than last_snapshot.
 *
 * This would skip tons of tree blocks of original btrfs_qgroup_trace_subtree(),

fs/btrfs/qgroup.h

Lines changed: 2 additions & 2 deletions
@@ -81,10 +81,10 @@ enum btrfs_qgroup_rsv_type {
 *
 * Each type should have different reservation behavior.
 * E.g, data follows its io_tree flag modification, while
- * *currently* meta is just reserve-and-clear during transcation.
+ * *currently* meta is just reserve-and-clear during transaction.
 *
 * TODO: Add new type for reservation which can survive transaction commit.
- * Currect metadata reservation behavior is not suitable for such case.
+ * Current metadata reservation behavior is not suitable for such case.
 */
 struct btrfs_qgroup_rsv {
 u64 values[BTRFS_QGROUP_RSV_LAST];

fs/btrfs/raid56.c

Lines changed: 1 addition & 1 deletion
@@ -1980,7 +1980,7 @@ static void __raid_recover_end_io(struct btrfs_raid_bio *rbio)
 * - In case of single failure, where rbio->failb == -1:
 *
 * Cache this rbio iff the above read reconstruction is
- * excuted without problems.
+ * executed without problems.
 */
 if (err == BLK_STS_OK && rbio->failb < 0)
 cache_rbio_pages(rbio);
