 	i->iov_offset = skip;	\
 }
 
-static size_t copy_to_iter_iovec(void *from, size_t bytes, struct iov_iter *i)
-{
-	size_t skip, copy, left, wanted;
-	const struct iovec *iov;
-	char __user *buf;
-
-	if (unlikely(bytes > i->count))
-		bytes = i->count;
-
-	if (unlikely(!bytes))
-		return 0;
-
-	wanted = bytes;
-	iov = i->iov;
-	skip = i->iov_offset;
-	buf = iov->iov_base + skip;
-	copy = min(bytes, iov->iov_len - skip);
-
-	left = __copy_to_user(buf, from, copy);
-	copy -= left;
-	skip += copy;
-	from += copy;
-	bytes -= copy;
-	while (unlikely(!left && bytes)) {
-		iov++;
-		buf = iov->iov_base;
-		copy = min(bytes, iov->iov_len);
-		left = __copy_to_user(buf, from, copy);
-		copy -= left;
-		skip = copy;
-		from += copy;
-		bytes -= copy;
-	}
-
-	if (skip == iov->iov_len) {
-		iov++;
-		skip = 0;
-	}
-	i->count -= wanted - bytes;
-	i->nr_segs -= iov - i->iov;
-	i->iov = iov;
-	i->iov_offset = skip;
-	return wanted - bytes;
-}
-
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
@@ -360,51 +315,23 @@ static void memzero_page(struct page *page, size_t offset, size_t len)
 	kunmap_atomic(addr);
 }
 
-static size_t copy_to_iter_bvec(void *from, size_t bytes, struct iov_iter *i)
+size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
-	size_t skip, copy, wanted;
-	const struct bio_vec *bvec;
-
+	char *from = addr;
 	if (unlikely(bytes > i->count))
 		bytes = i->count;
 
 	if (unlikely(!bytes))
 		return 0;
 
-	wanted = bytes;
-	bvec = i->bvec;
-	skip = i->iov_offset;
-	copy = min_t(size_t, bytes, bvec->bv_len - skip);
-
-	memcpy_to_page(bvec->bv_page, skip + bvec->bv_offset, from, copy);
-	skip += copy;
-	from += copy;
-	bytes -= copy;
-	while (bytes) {
-		bvec++;
-		copy = min(bytes, (size_t)bvec->bv_len);
-		memcpy_to_page(bvec->bv_page, bvec->bv_offset, from, copy);
-		skip = copy;
-		from += copy;
-		bytes -= copy;
-	}
-	if (skip == bvec->bv_len) {
-		bvec++;
-		skip = 0;
-	}
-	i->count -= wanted - bytes;
-	i->nr_segs -= bvec - i->bvec;
-	i->bvec = bvec;
-	i->iov_offset = skip;
-	return wanted - bytes;
-}
+	iterate_and_advance(i, bytes, v,
+		__copy_to_user(v.iov_base, (from += v.iov_len) - v.iov_len,
+			       v.iov_len),
+		memcpy_to_page(v.bv_page, v.bv_offset,
+			       (from += v.bv_len) - v.bv_len, v.bv_len)
+	)
 
-size_t copy_to_iter(void *addr, size_t bytes, struct iov_iter *i)
-{
-	if (i->type & ITER_BVEC)
-		return copy_to_iter_bvec(addr, bytes, i);
-	else
-		return copy_to_iter_iovec(addr, bytes, i);
+	return bytes;
 }
 EXPORT_SYMBOL(copy_to_iter);
 
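The rewritten copy_to_iter() delegates the per-segment walk to the iterate_and_advance() macro defined earlier in lib/iov_iter.c: the macro visits each iovec or bio_vec segment as v, runs the matching copy step (__copy_to_user() for user iovecs, memcpy_to_page() for bvecs), and advances the iterator state (i->count, i->iov_offset and the segment pointer), which is the bookkeeping the two deleted helpers did by hand. As a rough userspace sketch of that walk-and-advance pattern, not the kernel macro itself, the fragment below copies a buffer across an array of struct iovec segments with the same (from += len) - len idiom; copy_to_segs() and the demo in main() are invented names for illustration only.

/*
 * Minimal userspace sketch: walk an array of iovec segments, copy into
 * each one, and advance a single source pointer per chunk.  Not kernel
 * code; copy_to_segs() is a made-up helper for illustration.
 */
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>	/* struct iovec */

static size_t copy_to_segs(const void *addr, size_t bytes,
			   struct iovec *iov, size_t nr_segs)
{
	const char *from = addr;
	size_t left = bytes;
	size_t n;

	for (n = 0; n < nr_segs && left; n++) {
		size_t chunk = iov[n].iov_len < left ? iov[n].iov_len : left;

		/* (from += chunk) - chunk evaluates to the chunk's start */
		memcpy(iov[n].iov_base, (from += chunk) - chunk, chunk);
		left -= chunk;
	}
	return bytes - left;	/* bytes actually copied */
}

int main(void)
{
	char a[4], b[8];
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = sizeof(a) },
		{ .iov_base = b, .iov_len = sizeof(b) },
	};
	size_t done = copy_to_segs("hello world", 11, iov, 2);

	printf("copied %zu bytes: %.4s%.7s\n", done, a, b);
	return 0;
}

The point of (from += chunk) - chunk is that it advances the running source pointer past the current chunk while still evaluating to that chunk's starting address, so the whole per-segment step fits in a single expression; that is what lets the macro form of copy_to_iter() pass each copy step as one argument to iterate_and_advance().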