@@ -32,6 +32,29 @@
 		n = wanted - n;				\
 }
 
+#define iterate_kvec(i, n, __v, __p, skip, STEP) {	\
+	size_t wanted = n;				\
+	__p = i->kvec;					\
+	__v.iov_len = min(n, __p->iov_len - skip);	\
+	if (likely(__v.iov_len)) {			\
+		__v.iov_base = __p->iov_base + skip;	\
+		(void)(STEP);				\
+		skip += __v.iov_len;			\
+		n -= __v.iov_len;			\
+	}						\
+	while (unlikely(n)) {				\
+		__p++;					\
+		__v.iov_len = min(n, __p->iov_len);	\
+		if (unlikely(!__v.iov_len))		\
+			continue;			\
+		__v.iov_base = __p->iov_base;		\
+		(void)(STEP);				\
+		skip = __v.iov_len;			\
+		n -= __v.iov_len;			\
+	}						\
+	n = wanted;					\
+}
+
 #define iterate_bvec(i, n, __v, __p, skip, STEP) {	\
 	size_t wanted = n;				\
 	__p = i->bvec;					\
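The new iterate_kvec walks a kvec array the same way iterate_iovec walks user iovecs, except that STEP cannot fail here (kernel pointers do not fault), so its value is simply discarded via (void)(STEP). As a rough sketch of how a helper in the style of this file might drive it (hypothetical function, not part of the patch):

/* Hypothetical sketch: copy out of a kvec-backed iterator with the new
 * macro.  Mirrors the shape of copy_from_iter() below. */
static size_t sketch_copy_from_kvec(void *addr, size_t bytes, struct iov_iter *i)
{
	char *to = addr;
	size_t skip = i->iov_offset;
	const struct kvec *kvec;
	struct kvec v;

	iterate_kvec(i, bytes, v, kvec, skip,
		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len))
	return bytes;	/* kernel-to-kernel copies cannot come up short */
}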
@@ -57,20 +80,24 @@
 	n = wanted;					\
 }
 
-#define iterate_all_kinds(i, n, v, I, B) {		\
+#define iterate_all_kinds(i, n, v, I, B, K) {		\
 	size_t skip = i->iov_offset;			\
 	if (unlikely(i->type & ITER_BVEC)) {		\
 		const struct bio_vec *bvec;		\
 		struct bio_vec v;			\
 		iterate_bvec(i, n, v, bvec, skip, (B))	\
+	} else if (unlikely(i->type & ITER_KVEC)) {	\
+		const struct kvec *kvec;		\
+		struct kvec v;				\
+		iterate_kvec(i, n, v, kvec, skip, (K))	\
 	} else {					\
 		const struct iovec *iov;		\
 		struct iovec v;				\
 		iterate_iovec(i, n, v, iov, skip, (I))	\
 	}						\
 }
 
-#define iterate_and_advance(i, n, v, I, B) {		\
+#define iterate_and_advance(i, n, v, I, B, K) {		\
 	size_t skip = i->iov_offset;			\
 	if (unlikely(i->type & ITER_BVEC)) {		\
 		const struct bio_vec *bvec;		\
@@ -82,6 +109,16 @@
 		}					\
 		i->nr_segs -= bvec - i->bvec;		\
 		i->bvec = bvec;				\
+	} else if (unlikely(i->type & ITER_KVEC)) {	\
+		const struct kvec *kvec;		\
+		struct kvec v;				\
+		iterate_kvec(i, n, v, kvec, skip, (K))	\
+		if (skip == kvec->iov_len) {		\
+			kvec++;				\
+			skip = 0;			\
+		}					\
+		i->nr_segs -= kvec - i->kvec;		\
+		i->kvec = kvec;				\
 	} else {					\
 		const struct iovec *iov;		\
 		struct iovec v;				\
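With the new K argument, every iterate_all_kinds()/iterate_and_advance() caller supplies one STEP expression per iterator flavour: I for user iovecs (its value is the number of bytes left uncopied), B for bio_vec pages, K for kernel kvecs. A minimal sketch of the resulting call shape (hypothetical function, for illustration only):

/* Hypothetical sketch of the three-way dispatch after this patch. */
static size_t sketch_count_bytes(struct iov_iter *i, size_t size)
{
	size_t seen = 0;

	iterate_all_kinds(i, size, v,
		(seen += v.iov_len, 0),	/* I: ITER_IOVEC, must yield "left" */
		seen += v.bv_len,	/* B: ITER_BVEC */
		seen += v.iov_len	/* K: ITER_KVEC */
	)
	return seen;
}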
@@ -270,7 +307,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
  */
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 {
-	if (!(i->type & ITER_BVEC)) {
+	if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
 		char __user *buf = i->iov->iov_base + i->iov_offset;
 		bytes = min(bytes, i->iov->iov_len - i->iov_offset);
 		return fault_in_pages_readable(buf, bytes);
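Faulting pages in only makes sense for user memory, so kvec-backed iterators (like bvec ones) now short-circuit to success. For context, the usual caller pattern looks roughly like the loop step in generic_perform_write() (simplified sketch, error paths elided):

/* Sketch: fault user pages in outside the atomic section, then do the
 * no-fault copy while the page is locked and pagefaults are disabled. */
if (unlikely(iov_iter_fault_in_readable(i, bytes))) {
	status = -EFAULT;	/* only reachable for ITER_IOVEC now */
	break;
}
/* ... write_begin(), kmap, pagefault_disable() ... */
copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);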
@@ -284,10 +321,14 @@ void iov_iter_init(struct iov_iter *i, int direction,
 			size_t count)
 {
 	/* It will get better. Eventually... */
-	if (segment_eq(get_fs(), KERNEL_DS))
+	if (segment_eq(get_fs(), KERNEL_DS)) {
 		direction |= ITER_KVEC;
-	i->type = direction;
-	i->iov = iov;
+		i->type = direction;
+		i->kvec = (struct kvec *)iov;
+	} else {
+		i->type = direction;
+		i->iov = iov;
+	}
 	i->nr_segs = nr_segs;
 	i->iov_offset = 0;
 	i->count = count;
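Under set_fs(KERNEL_DS) the iterator is now tagged ITER_KVEC and the iovec pointer is stored as a kvec; the pun is safe because struct iovec and struct kvec have the same layout (kvec just lacks the __user annotation). A hypothetical setup sketch under that assumption:

/* Hypothetical sketch: building a kernel-space iterator under KERNEL_DS.
 * The kvec must outlive the iterator, which only stores a pointer to it;
 * kv->iov_len is assumed to equal len. */
static void sketch_kernel_read_iter(struct iov_iter *iter,
				    struct kvec *kv, size_t len)
{
	mm_segment_t old_fs = get_fs();

	set_fs(KERNEL_DS);
	/* with this patch, iov_iter_init() sets ITER_KVEC here */
	iov_iter_init(iter, READ, (const struct iovec *)kv, 1, len);
	set_fs(old_fs);
}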
@@ -328,7 +369,8 @@ size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
 		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
 			       v.iov_len),
 		memcpy_to_page(v.bv_page, v.bv_offset,
-			       (from += v.bv_len) - v.bv_len, v.bv_len)
+			       (from += v.bv_len) - v.bv_len, v.bv_len),
+		memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
 	)
 
 	return bytes;
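All three STEP expressions use the "(from += v.iov_len) - v.iov_len" idiom: advance the source cursor past the current segment and hand the pre-advance value to the copy, in one expression. Spelled out long-hand (equivalent, hypothetical helper for illustration):

/* Long-hand equivalent of one kvec STEP in copy_to_iter(). */
static void sketch_step(struct kvec v, char **fromp)
{
	char *src = *fromp;		/* cursor before this segment */
	*fromp = src + v.iov_len;	/* advance for the next segment */
	memcpy(v.iov_base, src, v.iov_len);
}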
@@ -348,7 +390,8 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 		__copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
 				 v.iov_len),
 		memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+				 v.bv_offset, v.bv_len),
+		memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 
 	return bytes;
@@ -371,7 +414,7 @@ EXPORT_SYMBOL(copy_page_to_iter);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			   struct iov_iter *i)
 {
-	if (i->type & ITER_BVEC) {
+	if (i->type & (ITER_BVEC|ITER_KVEC)) {
 		void *kaddr = kmap_atomic(page);
 		size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
 		kunmap_atomic(kaddr);
@@ -391,7 +434,8 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 
 	iterate_and_advance(i, bytes, v,
 		__clear_user(v.iov_base, v.iov_len),
-		memzero_page(v.bv_page, v.bv_offset, v.bv_len)
+		memzero_page(v.bv_page, v.bv_offset, v.bv_len),
+		memset(v.iov_base, 0, v.iov_len)
 	)
 
 	return bytes;
@@ -406,7 +450,8 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
 		__copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
 					  v.iov_base, v.iov_len),
 		memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
-				 v.bv_offset, v.bv_len)
+				 v.bv_offset, v.bv_len),
+		memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
 	)
 	kunmap_atomic(kaddr);
 	return bytes;
@@ -415,7 +460,7 @@ EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
 {
-	iterate_and_advance(i, size, v, 0, 0)
+	iterate_and_advance(i, size, v, 0, 0, 0)
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
@@ -443,7 +488,8 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 
 	iterate_all_kinds(i, size, v,
 		(res |= (unsigned long)v.iov_base | v.iov_len, 0),
-		res |= v.bv_offset | v.bv_len
+		res |= v.bv_offset | v.bv_len,
+		res |= (unsigned long)v.iov_base | v.iov_len
 	)
 	return res;
 }
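iov_iter_alignment() ORs together every segment's base address and length, so a misalignment anywhere in the iterator surfaces as a low bit in the result. A typical caller-side check might look like this (hypothetical 512-byte requirement, in the style of direct-I/O paths):

/* Sketch: reject iterators unsuitable for 512-byte-aligned direct I/O. */
static int sketch_check_dio_alignment(struct iov_iter *iter)
{
	if (iov_iter_alignment(iter) & 511)
		return -EINVAL;	/* some base or length not 512-aligned */
	return 0;
}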
@@ -478,6 +524,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
 		*start = v.bv_offset;
 		get_page(*pages = v.bv_page);
 		return v.bv_len;
+	}),({
+		return -EFAULT;
 	})
 	)
 	return 0;
@@ -530,6 +578,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 			return -ENOMEM;
 		get_page(*p = v.bv_page);
 		return v.bv_len;
+	}),({
+		return -EFAULT;
 	})
 	)
 	return 0;
@@ -554,6 +604,12 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
 		npages++;
 		if (npages >= maxpages)
 			return maxpages;
+	}),({
+		unsigned long p = (unsigned long)v.iov_base;
+		npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
+			- p / PAGE_SIZE;
+		if (npages >= maxpages)
+			return maxpages;
 	})
 	)
 	return npages;
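For a kvec segment spanning [p, p + iov_len), the page count is DIV_ROUND_UP(p + iov_len, PAGE_SIZE) - p / PAGE_SIZE: the exclusive last page index, rounded up, minus the first page index. For example, with 4096-byte pages a 200-byte segment starting at address 4000 gives DIV_ROUND_UP(4200, 4096) - 4000/4096 = 2 - 0 = 2 pages. A stand-alone check of that arithmetic (user-space, hypothetical values):

/* Stand-alone check of the page-count formula used above. */
#include <assert.h>

#define PAGE_SIZE	4096UL
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

int main(void)
{
	unsigned long p = 4000, len = 200;	/* straddles one page boundary */

	assert(DIV_ROUND_UP(p + len, PAGE_SIZE) - p / PAGE_SIZE == 2);
	return 0;
}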