54
54
#include <linux/slab.h>
55
55
#include <linux/ratelimit.h>
56
56
#include <linux/nodemask.h>
57
- #include <linux/flex_array.h>
58
57
59
58
#include <trace/events/block.h>
60
59
#include <linux/list_sort.h>
@@ -1394,22 +1393,16 @@ static void ops_complete_compute(void *stripe_head_ref)
1394
1393
}
1395
1394
1396
1395
/* return a pointer to the address conversion region of the scribble buffer */
1397
- static addr_conv_t * to_addr_conv (struct stripe_head * sh ,
1398
- struct raid5_percpu * percpu , int i )
1396
+ static struct page * * to_addr_page (struct raid5_percpu * percpu , int i )
1399
1397
{
1400
- void * addr ;
1401
-
1402
- addr = flex_array_get (percpu -> scribble , i );
1403
- return addr + sizeof (struct page * ) * (sh -> disks + 2 );
1398
+ return percpu -> scribble + i * percpu -> scribble_obj_size ;
1404
1399
}
1405
1400
1406
1401
/* return a pointer to the address conversion region of the scribble buffer */
1407
- static struct page * * to_addr_page (struct raid5_percpu * percpu , int i )
1402
+ static addr_conv_t * to_addr_conv (struct stripe_head * sh ,
1403
+ struct raid5_percpu * percpu , int i )
1408
1404
{
1409
- void * addr ;
1410
-
1411
- addr = flex_array_get (percpu -> scribble , i );
1412
- return addr ;
1405
+ return (void * ) (to_addr_page (percpu , i ) + sh -> disks + 2 );
1413
1406
}
1414
1407
1415
1408
static struct dma_async_tx_descriptor *
@@ -2238,21 +2231,23 @@ static int grow_stripes(struct r5conf *conf, int num)
2238
2231
* calculate over all devices (not just the data blocks), using zeros in place
2239
2232
* of the P and Q blocks.
2240
2233
*/
2241
static int scribble_alloc(struct raid5_percpu *percpu,
			  int num, int cnt, gfp_t flags)
{
	/*
	 * One scribble object per stripe: (num + 2) page pointers followed
	 * by (num + 2) address-conversion entries (data disks plus P and Q).
	 * to_addr_page()/to_addr_conv() rely on exactly this layout and on
	 * scribble_obj_size recorded below.
	 */
	size_t obj_size =
		sizeof(struct page *) * (num + 2) +
		sizeof(addr_conv_t) * (num + 2);
	void *scribble;

	/*
	 * NOTE(review): the kvmalloc family only supports GFP_KERNEL-
	 * compatible flags, but the resize path calls this with GFP_NOIO;
	 * that caller should probably use memalloc_noio_save() around a
	 * GFP_KERNEL allocation instead — TODO confirm against the
	 * kvmalloc_array() documentation.
	 */
	scribble = kvmalloc_array(cnt, obj_size, flags);
	if (!scribble)
		return -ENOMEM;

	/* Free the old buffer only after the new one succeeded. */
	kvfree(percpu->scribble);

	percpu->scribble = scribble;
	percpu->scribble_obj_size = obj_size;
	return 0;
}
2257
2252
2258
2253
static int resize_chunks (struct r5conf * conf , int new_disks , int new_sectors )
@@ -2270,23 +2265,18 @@ static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
2270
2265
return 0 ;
2271
2266
mddev_suspend (conf -> mddev );
2272
2267
get_online_cpus ();
2268
+
2273
2269
for_each_present_cpu (cpu ) {
2274
2270
struct raid5_percpu * percpu ;
2275
- struct flex_array * scribble ;
2276
2271
2277
2272
percpu = per_cpu_ptr (conf -> percpu , cpu );
2278
- scribble = scribble_alloc (new_disks ,
2279
- new_sectors / STRIPE_SECTORS ,
2280
- GFP_NOIO );
2281
-
2282
- if (scribble ) {
2283
- flex_array_free (percpu -> scribble );
2284
- percpu -> scribble = scribble ;
2285
- } else {
2286
- err = - ENOMEM ;
2273
+ err = scribble_alloc (percpu , new_disks ,
2274
+ new_sectors / STRIPE_SECTORS ,
2275
+ GFP_NOIO );
2276
+ if (err )
2287
2277
break ;
2288
- }
2289
2278
}
2279
+
2290
2280
put_online_cpus ();
2291
2281
mddev_resume (conf -> mddev );
2292
2282
if (!err ) {
@@ -6742,25 +6732,26 @@ raid5_size(struct mddev *mddev, sector_t sectors, int raid_disks)
6742
6732
static void free_scratch_buffer (struct r5conf * conf , struct raid5_percpu * percpu )
6743
6733
{
6744
6734
safe_put_page (percpu -> spare_page );
6745
- if (percpu -> scribble )
6746
- flex_array_free (percpu -> scribble );
6747
6735
percpu -> spare_page = NULL ;
6736
+ kvfree (percpu -> scribble );
6748
6737
percpu -> scribble = NULL ;
6749
6738
}
6750
6739
6751
6740
static int alloc_scratch_buffer (struct r5conf * conf , struct raid5_percpu * percpu )
6752
6741
{
6753
- if (conf -> level == 6 && !percpu -> spare_page )
6742
+ if (conf -> level == 6 && !percpu -> spare_page ) {
6754
6743
percpu -> spare_page = alloc_page (GFP_KERNEL );
6755
- if (!percpu -> scribble )
6756
- percpu -> scribble = scribble_alloc (max (conf -> raid_disks ,
6757
- conf -> previous_raid_disks ),
6758
- max (conf -> chunk_sectors ,
6759
- conf -> prev_chunk_sectors )
6760
- / STRIPE_SECTORS ,
6761
- GFP_KERNEL );
6762
-
6763
- if (!percpu -> scribble || (conf -> level == 6 && !percpu -> spare_page )) {
6744
+ if (!percpu -> spare_page )
6745
+ return - ENOMEM ;
6746
+ }
6747
+
6748
+ if (scribble_alloc (percpu ,
6749
+ max (conf -> raid_disks ,
6750
+ conf -> previous_raid_disks ),
6751
+ max (conf -> chunk_sectors ,
6752
+ conf -> prev_chunk_sectors )
6753
+ / STRIPE_SECTORS ,
6754
+ GFP_KERNEL )) {
6764
6755
free_scratch_buffer (conf , percpu );
6765
6756
return - ENOMEM ;
6766
6757
}
0 commit comments