@@ -3,6 +3,7 @@
 #include <linux/pagemap.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
+#include <net/checksum.h>
 
 #define iterate_iovec(i, n, __v, __p, skip, STEP) {	\
 	size_t left;					\
@@ -586,6 +587,94 @@ ssize_t iov_iter_get_pages_alloc(struct iov_iter *i,
 }
 EXPORT_SYMBOL(iov_iter_get_pages_alloc);
 
+size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum,
+			       struct iov_iter *i)
+{
+	char *to = addr;
+	__wsum sum, next;
+	size_t off = 0;
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	sum = *csum;
+	iterate_and_advance(i, bytes, v, ({
+		int err = 0;
+		next = csum_and_copy_from_user(v.iov_base,
+					       (to += v.iov_len) - v.iov_len,
+					       v.iov_len, 0, &err);
+		if (!err) {
+			sum = csum_block_add(sum, next, off);
+			off += v.iov_len;
+		}
+		err ? v.iov_len : 0;
+	}), ({
+		char *p = kmap_atomic(v.bv_page);
+		next = csum_partial_copy_nocheck(p + v.bv_offset,
+						 (to += v.bv_len) - v.bv_len,
+						 v.bv_len, 0);
+		kunmap_atomic(p);
+		sum = csum_block_add(sum, next, off);
+		off += v.bv_len;
+	}),({
+		next = csum_partial_copy_nocheck(v.iov_base,
+						 (to += v.iov_len) - v.iov_len,
+						 v.iov_len, 0);
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+	})
+	)
+	*csum = sum;
+	return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_from_iter);
+
+size_t csum_and_copy_to_iter(void *addr, size_t bytes, __wsum *csum,
+			     struct iov_iter *i)
+{
+	char *from = addr;
+	__wsum sum, next;
+	size_t off = 0;
+	if (unlikely(bytes > i->count))
+		bytes = i->count;
+
+	if (unlikely(!bytes))
+		return 0;
+
+	sum = *csum;
+	iterate_and_advance(i, bytes, v, ({
+		int err = 0;
+		next = csum_and_copy_to_user((from += v.iov_len) - v.iov_len,
+					     v.iov_base,
+					     v.iov_len, 0, &err);
+		if (!err) {
+			sum = csum_block_add(sum, next, off);
+			off += v.iov_len;
+		}
+		err ? v.iov_len : 0;
+	}), ({
+		char *p = kmap_atomic(v.bv_page);
+		next = csum_partial_copy_nocheck((from += v.bv_len) - v.bv_len,
+						 p + v.bv_offset,
+						 v.bv_len, 0);
+		kunmap_atomic(p);
+		sum = csum_block_add(sum, next, off);
+		off += v.bv_len;
+	}),({
+		next = csum_partial_copy_nocheck((from += v.iov_len) - v.iov_len,
+						 v.iov_base,
+						 v.iov_len, 0);
+		sum = csum_block_add(sum, next, off);
+		off += v.iov_len;
+	})
+	)
+	*csum = sum;
+	return bytes;
+}
+EXPORT_SYMBOL(csum_and_copy_to_iter);
+
 int iov_iter_npages(const struct iov_iter *i, int maxpages)
 {
 	size_t size = i->count;