
Commit a280455

Authored and committed by Al Viro

iov_iter.c: handle ITER_KVEC directly

... without bothering with copy_..._user()

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>

1 parent 3d4d3e4
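
This commit teaches the iov_iter iteration macros a third backing type, ITER_KVEC: an array of struct kvec describing kernel-space memory. The copy helpers (copy_to_iter(), copy_from_iter(), iov_iter_zero(), iov_iter_copy_from_user_atomic()) gain a plain memcpy()/memset() arm for that case instead of going through the __copy_*_user()/__clear_user() primitives. The sketch below is a userspace-compilable illustration of the idea, mirroring the new iterate_kvec() loop; the function and parameter names are made up for the example and are not part of the patch.

#include <stddef.h>
#include <string.h>

struct kvec {
        void *iov_base;
        size_t iov_len;
};

/* Copy 'bytes' of 'from' into an array of kernel-memory segments, starting
 * 'skip' bytes into the first one.  Kernel addresses cannot fault, so there
 * is no access_ok()/fault handling and the copy is never short. */
static size_t copy_to_kvec_segments(const struct kvec *p, size_t skip,
                                    const char *from, size_t bytes)
{
        size_t wanted = bytes;
        size_t len = bytes < p->iov_len - skip ? bytes : p->iov_len - skip;

        if (len) {
                memcpy((char *)p->iov_base + skip, from, len);
                from += len;
                bytes -= len;
        }
        while (bytes) {
                p++;
                len = bytes < p->iov_len ? bytes : p->iov_len;
                if (!len)
                        continue;       /* zero-length segment, try the next one */
                memcpy(p->iov_base, from, len);
                from += len;
                bytes -= len;
        }
        return wanted;
}

This is also why the new iterate_kvec() macro ends with "n = wanted;" rather than "n = wanted - n;": unlike the user-space iterate_iovec() case, a kvec walk can never stop early on a fault.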

File tree

2 files changed: +70 -13 lines


include/linux/uio.h

Lines changed: 1 addition & 0 deletions
@@ -31,6 +31,7 @@ struct iov_iter {
         size_t count;
         union {
                 const struct iovec *iov;
+                const struct kvec *kvec;
                 const struct bio_vec *bvec;
         };
         unsigned long nr_segs;
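
The new kvec pointer shares the existing union with iov and bvec, so struct iov_iter does not grow; which member is live is selected by the ITER_* bits in i->type.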

mm/iov_iter.c

Lines changed: 69 additions & 13 deletions
@@ -32,6 +32,29 @@
         n = wanted - n; \
 }
 
+#define iterate_kvec(i, n, __v, __p, skip, STEP) { \
+        size_t wanted = n; \
+        __p = i->kvec; \
+        __v.iov_len = min(n, __p->iov_len - skip); \
+        if (likely(__v.iov_len)) { \
+                __v.iov_base = __p->iov_base + skip; \
+                (void)(STEP); \
+                skip += __v.iov_len; \
+                n -= __v.iov_len; \
+        } \
+        while (unlikely(n)) { \
+                __p++; \
+                __v.iov_len = min(n, __p->iov_len); \
+                if (unlikely(!__v.iov_len)) \
+                        continue; \
+                __v.iov_base = __p->iov_base; \
+                (void)(STEP); \
+                skip = __v.iov_len; \
+                n -= __v.iov_len; \
+        } \
+        n = wanted; \
+}
+
 #define iterate_bvec(i, n, __v, __p, skip, STEP) { \
         size_t wanted = n; \
         __p = i->bvec; \
@@ -57,20 +80,24 @@
         n = wanted; \
 }
 
-#define iterate_all_kinds(i, n, v, I, B) { \
+#define iterate_all_kinds(i, n, v, I, B, K) { \
         size_t skip = i->iov_offset; \
         if (unlikely(i->type & ITER_BVEC)) { \
                 const struct bio_vec *bvec; \
                 struct bio_vec v; \
                 iterate_bvec(i, n, v, bvec, skip, (B)) \
+        } else if (unlikely(i->type & ITER_KVEC)) { \
+                const struct kvec *kvec; \
+                struct kvec v; \
+                iterate_kvec(i, n, v, kvec, skip, (K)) \
         } else { \
                 const struct iovec *iov; \
                 struct iovec v; \
                 iterate_iovec(i, n, v, iov, skip, (I)) \
         } \
 }
 
-#define iterate_and_advance(i, n, v, I, B) { \
+#define iterate_and_advance(i, n, v, I, B, K) { \
         size_t skip = i->iov_offset; \
         if (unlikely(i->type & ITER_BVEC)) { \
                 const struct bio_vec *bvec; \
@@ -82,6 +109,16 @@
                 } \
                 i->nr_segs -= bvec - i->bvec; \
                 i->bvec = bvec; \
+        } else if (unlikely(i->type & ITER_KVEC)) { \
+                const struct kvec *kvec; \
+                struct kvec v; \
+                iterate_kvec(i, n, v, kvec, skip, (K)) \
+                if (skip == kvec->iov_len) { \
+                        kvec++; \
+                        skip = 0; \
+                } \
+                i->nr_segs -= kvec - i->kvec; \
+                i->kvec = kvec; \
         } else { \
                 const struct iovec *iov; \
                 struct iovec v; \
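
Both iterate_all_kinds() and iterate_and_advance() now take a third step expression, K, evaluated for kvec segments. Every user of the macros therefore has to supply one, which is why even iov_iter_advance() below passes an extra 0 and why the get_pages helpers grow a ({ return -EFAULT; }) arm.
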
@@ -270,7 +307,7 @@ static size_t copy_page_from_iter_iovec(struct page *page, size_t offset, size_t
  */
 int iov_iter_fault_in_readable(struct iov_iter *i, size_t bytes)
 {
-        if (!(i->type & ITER_BVEC)) {
+        if (!(i->type & (ITER_BVEC|ITER_KVEC))) {
                 char __user *buf = i->iov->iov_base + i->iov_offset;
                 bytes = min(bytes, i->iov->iov_len - i->iov_offset);
                 return fault_in_pages_readable(buf, bytes);
@@ -284,10 +321,14 @@ void iov_iter_init(struct iov_iter *i, int direction,
                         size_t count)
 {
         /* It will get better. Eventually... */
-        if (segment_eq(get_fs(), KERNEL_DS))
+        if (segment_eq(get_fs(), KERNEL_DS)) {
                 direction |= ITER_KVEC;
-        i->type = direction;
-        i->iov = iov;
+                i->type = direction;
+                i->kvec = (struct kvec *)iov;
+        } else {
+                i->type = direction;
+                i->iov = iov;
+        }
         i->nr_segs = nr_segs;
         i->iov_offset = 0;
         i->count = count;
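
With this change, a caller running under the old set_fs(KERNEL_DS) convention gets its iovec array reinterpreted as a kvec array (the cast is safe only because such a caller really passed kernel pointers) and the iterator is tagged ITER_KVEC. A hypothetical caller might look like the sketch below; it only illustrates the effect, is not code from this patch, and fill_kernel_buffer/kbuf/len/src are made-up names.

#include <linux/uio.h>
#include <linux/uaccess.h>

static void fill_kernel_buffer(void *kbuf, size_t len, void *src)
{
        struct iovec iov = { .iov_base = kbuf, .iov_len = len };
        struct iov_iter iter;
        mm_segment_t old_fs = get_fs();

        set_fs(KERNEL_DS);
        /* iov_iter_init() sees KERNEL_DS, stores the array in i->kvec and
         * sets ITER_KVEC, so copy_to_iter() takes the memcpy() arm. */
        iov_iter_init(&iter, READ, &iov, 1, len);
        copy_to_iter(src, len, &iter);
        set_fs(old_fs);
}
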
@@ -328,7 +369,8 @@ size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
                 __copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
                                v.iov_len),
                 memcpy_to_page(v.bv_page, v.bv_offset,
-                               (from += v.bv_len) - v.bv_len, v.bv_len)
+                               (from += v.bv_len) - v.bv_len, v.bv_len),
+                memcpy(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len)
         )
 
         return bytes;
@@ -348,7 +390,8 @@ size_t copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
                 __copy_from_user((to += v.iov_len) - v.iov_len, v.iov_base,
                                  v.iov_len),
                 memcpy_from_page((to += v.bv_len) - v.bv_len, v.bv_page,
-                                 v.bv_offset, v.bv_len)
+                                 v.bv_offset, v.bv_len),
+                memcpy((to += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
         )
 
         return bytes;
@@ -371,7 +414,7 @@ EXPORT_SYMBOL(copy_page_to_iter);
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
                            struct iov_iter *i)
 {
-        if (i->type & ITER_BVEC) {
+        if (i->type & (ITER_BVEC|ITER_KVEC)) {
                 void *kaddr = kmap_atomic(page);
                 size_t wanted = copy_from_iter(kaddr + offset, bytes, i);
                 kunmap_atomic(kaddr);
@@ -391,7 +434,8 @@ size_t iov_iter_zero(size_t bytes, struct iov_iter *i)
 
         iterate_and_advance(i, bytes, v,
                 __clear_user(v.iov_base, v.iov_len),
-                memzero_page(v.bv_page, v.bv_offset, v.bv_len)
+                memzero_page(v.bv_page, v.bv_offset, v.bv_len),
+                memset(v.iov_base, 0, v.iov_len)
         )
 
         return bytes;
@@ -406,7 +450,8 @@ size_t iov_iter_copy_from_user_atomic(struct page *page,
                 __copy_from_user_inatomic((p += v.iov_len) - v.iov_len,
                                           v.iov_base, v.iov_len),
                 memcpy_from_page((p += v.bv_len) - v.bv_len, v.bv_page,
-                                 v.bv_offset, v.bv_len)
+                                 v.bv_offset, v.bv_len),
+                memcpy((p += v.iov_len) - v.iov_len, v.iov_base, v.iov_len)
         )
         kunmap_atomic(kaddr);
         return bytes;
@@ -415,7 +460,7 @@ EXPORT_SYMBOL(iov_iter_copy_from_user_atomic);
 
 void iov_iter_advance(struct iov_iter *i, size_t size)
 {
-        iterate_and_advance(i, size, v, 0, 0)
+        iterate_and_advance(i, size, v, 0, 0, 0)
 }
 EXPORT_SYMBOL(iov_iter_advance);
 
@@ -443,7 +488,8 @@ unsigned long iov_iter_alignment(const struct iov_iter *i)
 
         iterate_all_kinds(i, size, v,
                 (res |= (unsigned long)v.iov_base | v.iov_len, 0),
-                res |= v.bv_offset | v.bv_len
+                res |= v.bv_offset | v.bv_len,
+                res |= (unsigned long)v.iov_base | v.iov_len
         )
         return res;
 }
@@ -478,6 +524,8 @@ ssize_t iov_iter_get_pages(struct iov_iter *i,
                 *start = v.bv_offset;
                 get_page(*pages = v.bv_page);
                 return v.bv_len;
+        }),({
+                return -EFAULT;
         })
         )
         return 0;
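
Note that page pinning is simply not implemented for kvec-backed iterators: both iov_iter_get_pages() and iov_iter_get_pages_alloc() now return -EFAULT when handed one.
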
@@ -530,6 +578,8 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
                         return -ENOMEM;
                 get_page(*p = v.bv_page);
                 return v.bv_len;
+        }),({
+                return -EFAULT;
         })
         )
         return 0;
@@ -554,6 +604,12 @@ int iov_iter_npages(const struct iov_iter *i, int maxpages)
                 npages++;
                 if (npages >= maxpages)
                         return maxpages;
+        }),({
+                unsigned long p = (unsigned long)v.iov_base;
+                npages += DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE)
+                        - p / PAGE_SIZE;
+                if (npages >= maxpages)
+                        return maxpages;
         })
         )
         return npages;
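
For the kvec case the page count follows from the segment's kernel virtual address: DIV_ROUND_UP(p + v.iov_len, PAGE_SIZE) - p / PAGE_SIZE. As a worked example with 4 KiB pages, a segment starting at p = 0x1000f00 (0xf00 bytes into its page) with iov_len = 0x1200 gives DIV_ROUND_UP(0x1002100, 0x1000) - 0x1000 = 0x1003 - 0x1000 = 3 pages: the tail of the first page, one full page, and the head of a third.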
