Skip to content

Commit 937621c

Browse files
liu-song-6 authored and shli committed
md/r5cache: move some code to raid5.h
Move some defines and inline functions to raid5.h, so they can be used in raid5-cache.c.

Signed-off-by: Song Liu <songliubraving@fb.com>
Signed-off-by: Shaohua Li <shli@fb.com>
1 parent c757ec9 commit 937621c

File tree

2 files changed

+77
-71
lines changed

2 files changed

+77
-71
lines changed

drivers/md/raid5.c

Lines changed: 0 additions & 71 deletions
Original file line numberDiff line numberDiff line change
@@ -70,19 +70,6 @@ module_param(devices_handle_discard_safely, bool, 0644);
7070
MODULE_PARM_DESC(devices_handle_discard_safely,
7171
"Set to Y if all devices in each array reliably return zeroes on reads from discarded regions");
7272
static struct workqueue_struct *raid5_wq;
73-
/*
74-
* Stripe cache
75-
*/
76-
77-
#define NR_STRIPES 256
78-
#define STRIPE_SIZE PAGE_SIZE
79-
#define STRIPE_SHIFT (PAGE_SHIFT - 9)
80-
#define STRIPE_SECTORS (STRIPE_SIZE>>9)
81-
#define IO_THRESHOLD 1
82-
#define BYPASS_THRESHOLD 1
83-
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
84-
#define HASH_MASK (NR_HASH - 1)
85-
#define MAX_STRIPE_BATCH 8
8673

8774
static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
8875
{
@@ -126,64 +113,6 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
126113
local_irq_enable();
127114
}
128115

129-
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
130-
* order without overlap. There may be several bio's per stripe+device, and
131-
* a bio could span several devices.
132-
* When walking this list for a particular stripe+device, we must never proceed
133-
* beyond a bio that extends past this device, as the next bio might no longer
134-
* be valid.
135-
* This function is used to determine the 'next' bio in the list, given the sector
136-
* of the current stripe+device
137-
*/
138-
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
139-
{
140-
int sectors = bio_sectors(bio);
141-
if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
142-
return bio->bi_next;
143-
else
144-
return NULL;
145-
}
146-
147-
/*
148-
* We maintain a biased count of active stripes in the bottom 16 bits of
149-
* bi_phys_segments, and a count of processed stripes in the upper 16 bits
150-
*/
151-
static inline int raid5_bi_processed_stripes(struct bio *bio)
152-
{
153-
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
154-
return (atomic_read(segments) >> 16) & 0xffff;
155-
}
156-
157-
static inline int raid5_dec_bi_active_stripes(struct bio *bio)
158-
{
159-
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
160-
return atomic_sub_return(1, segments) & 0xffff;
161-
}
162-
163-
static inline void raid5_inc_bi_active_stripes(struct bio *bio)
164-
{
165-
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
166-
atomic_inc(segments);
167-
}
168-
169-
static inline void raid5_set_bi_processed_stripes(struct bio *bio,
170-
unsigned int cnt)
171-
{
172-
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
173-
int old, new;
174-
175-
do {
176-
old = atomic_read(segments);
177-
new = (old & 0xffff) | (cnt << 16);
178-
} while (atomic_cmpxchg(segments, old, new) != old);
179-
}
180-
181-
static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
182-
{
183-
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
184-
atomic_set(segments, cnt);
185-
}
186-
187116
/* Find first data disk in a raid6 stripe */
188117
static inline int raid6_d0(struct stripe_head *sh)
189118
{

drivers/md/raid5.h

Lines changed: 77 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -410,6 +410,83 @@ struct disk_info {
410410
struct md_rdev *rdev, *replacement;
411411
};
412412

413+
/*
414+
* Stripe cache
415+
*/
416+
417+
#define NR_STRIPES 256
418+
#define STRIPE_SIZE PAGE_SIZE
419+
#define STRIPE_SHIFT (PAGE_SHIFT - 9)
420+
#define STRIPE_SECTORS (STRIPE_SIZE>>9)
421+
#define IO_THRESHOLD 1
422+
#define BYPASS_THRESHOLD 1
423+
#define NR_HASH (PAGE_SIZE / sizeof(struct hlist_head))
424+
#define HASH_MASK (NR_HASH - 1)
425+
#define MAX_STRIPE_BATCH 8
426+
427+
/* bio's attached to a stripe+device for I/O are linked together in bi_sector
428+
* order without overlap. There may be several bio's per stripe+device, and
429+
* a bio could span several devices.
430+
* When walking this list for a particular stripe+device, we must never proceed
431+
* beyond a bio that extends past this device, as the next bio might no longer
432+
* be valid.
433+
* This function is used to determine the 'next' bio in the list, given the
434+
* sector of the current stripe+device
435+
*/
436+
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
437+
{
438+
int sectors = bio_sectors(bio);
439+
440+
if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
441+
return bio->bi_next;
442+
else
443+
return NULL;
444+
}
445+
446+
/*
447+
* We maintain a biased count of active stripes in the bottom 16 bits of
448+
* bi_phys_segments, and a count of processed stripes in the upper 16 bits
449+
*/
450+
static inline int raid5_bi_processed_stripes(struct bio *bio)
451+
{
452+
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
453+
454+
return (atomic_read(segments) >> 16) & 0xffff;
455+
}
456+
457+
static inline int raid5_dec_bi_active_stripes(struct bio *bio)
458+
{
459+
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
460+
461+
return atomic_sub_return(1, segments) & 0xffff;
462+
}
463+
464+
static inline void raid5_inc_bi_active_stripes(struct bio *bio)
465+
{
466+
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
467+
468+
atomic_inc(segments);
469+
}
470+
471+
static inline void raid5_set_bi_processed_stripes(struct bio *bio,
472+
unsigned int cnt)
473+
{
474+
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
475+
int old, new;
476+
477+
do {
478+
old = atomic_read(segments);
479+
new = (old & 0xffff) | (cnt << 16);
480+
} while (atomic_cmpxchg(segments, old, new) != old);
481+
}
482+
483+
static inline void raid5_set_bi_stripes(struct bio *bio, unsigned int cnt)
484+
{
485+
atomic_t *segments = (atomic_t *)&bio->bi_phys_segments;
486+
487+
atomic_set(segments, cnt);
488+
}
489+
413490
/* NOTE NR_STRIPE_HASH_LOCKS must remain below 64.
414491
* This is because we sometimes take all the spinlocks
415492
* and creating that much locking depth can cause

0 commit comments

Comments (0)