
Commit 8780356

Authored by Dan Williams (djbw), committed by Ingo Molnar
x86/asm/memcpy_mcsafe: Define copy_to_iter_mcsafe()
Use the updated memcpy_mcsafe() implementation to define copy_to_user_mcsafe()
and copy_to_iter_mcsafe(). The most significant difference from typical
copy_to_iter() is that the ITER_KVEC and ITER_BVEC iterator types can fail to
complete a full transfer (a caller-side sketch of this behavior follows the
file summary below).

Signed-off-by: Dan Williams <dan.j.williams@intel.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: hch@lst.de
Cc: linux-fsdevel@vger.kernel.org
Cc: linux-nvdimm@lists.01.org
Link: http://lkml.kernel.org/r/152539239150.31796.9189779163576449784.stgit@dwillia2-desk3.amr.corp.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
1 parent 12c8913 commit 8780356

4 files changed, 88 insertions(+), 0 deletions(-)
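To make the short-transfer behavior noted in the commit message concrete, here is a minimal caller-side sketch. It is not part of this commit: the function name pmem_read_example() and the choice of -EIO as the error code are assumptions for illustration, and the only API it relies on is the copy_to_iter_mcsafe() helper added below.

#include <linux/errno.h>
#include <linux/uio.h>

/* Hypothetical caller (illustration only, not in this commit). */
static ssize_t pmem_read_example(void *src, size_t len, struct iov_iter *i)
{
	/*
	 * Unlike copy_to_iter(), the mcsafe variant can return fewer bytes
	 * than requested if memcpy_mcsafe() consumes poison mid-transfer.
	 */
	size_t copied = copy_to_iter_mcsafe(src, len, i);

	if (copied != len)
		return -EIO;	/* treat the short copy as a media error */
	return copied;
}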

arch/x86/Kconfig

Lines changed: 1 addition & 0 deletions
@@ -60,6 +60,7 @@ config X86
 	select ARCH_HAS_PMEM_API		if X86_64
 	select ARCH_HAS_REFCOUNT
 	select ARCH_HAS_UACCESS_FLUSHCACHE	if X86_64
+	select ARCH_HAS_UACCESS_MCSAFE		if X86_64
 	select ARCH_HAS_SET_MEMORY
 	select ARCH_HAS_SG_CHAIN
 	select ARCH_HAS_STRICT_KERNEL_RWX

arch/x86/include/asm/uaccess_64.h

Lines changed: 11 additions & 0 deletions
@@ -46,6 +46,17 @@ copy_user_generic(void *to, const void *from, unsigned len)
 	return ret;
 }
 
+static __always_inline __must_check unsigned long
+copy_to_user_mcsafe(void *to, const void *from, unsigned len)
+{
+	unsigned long ret;
+
+	__uaccess_begin();
+	ret = memcpy_mcsafe(to, from, len);
+	__uaccess_end();
+	return ret;
+}
+
 static __always_inline __must_check unsigned long
 raw_copy_from_user(void *dst, const void __user *src, unsigned long size)
 {
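A hedged note on the return convention, inferred from the code above: memcpy_mcsafe(), and therefore copy_to_user_mcsafe(), returns zero on success or the number of bytes left uncopied when poisoned memory is hit. A hypothetical caller (the name example_copyout_short() and the error policy are illustrative, not part of this commit) might use it like this:

#include <linux/errno.h>
#include <linux/uaccess.h>

/* Illustration only: translate a short mcsafe copy into an error code. */
static int example_copyout_short(void __user *uptr, const void *kbuf,
				 unsigned len)
{
	unsigned long uncopied;

	if (!access_ok(VERIFY_WRITE, uptr, len))
		return -EFAULT;

	/* zero on success, otherwise the count of bytes NOT copied */
	uncopied = copy_to_user_mcsafe((__force void *)uptr, kbuf, len);

	return uncopied ? -EIO : 0;
}

This anticipates the copyout_mcsafe() helper added in lib/iov_iter.c further down, which performs the same access_ok() check before calling copy_to_user_mcsafe().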

include/linux/uio.h

Lines changed: 15 additions & 0 deletions
@@ -154,6 +154,12 @@ size_t _copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i);
 #define _copy_from_iter_flushcache _copy_from_iter_nocache
 #endif
 
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+size_t _copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i);
+#else
+#define _copy_to_iter_mcsafe _copy_to_iter
+#endif
+
 static __always_inline __must_check
 size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 {
@@ -163,6 +169,15 @@ size_t copy_from_iter_flushcache(void *addr, size_t bytes, struct iov_iter *i)
 	return _copy_from_iter_flushcache(addr, bytes, i);
 }
 
+static __always_inline __must_check
+size_t copy_to_iter_mcsafe(void *addr, size_t bytes, struct iov_iter *i)
+{
+	if (unlikely(!check_copy_size(addr, bytes, false)))
+		return 0;
+	else
+		return _copy_to_iter_mcsafe(addr, bytes, i);
+}
+
 size_t iov_iter_zero(size_t bytes, struct iov_iter *);
 unsigned long iov_iter_alignment(const struct iov_iter *i);
 unsigned long iov_iter_gap_alignment(const struct iov_iter *i);

lib/iov_iter.c

Lines changed: 61 additions & 0 deletions
@@ -573,6 +573,67 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 }
 EXPORT_SYMBOL(_copy_to_iter);
 
+#ifdef CONFIG_ARCH_HAS_UACCESS_MCSAFE
+static int copyout_mcsafe(void __user *to, const void *from, size_t n)
+{
+	if (access_ok(VERIFY_WRITE, to, n)) {
+		kasan_check_read(from, n);
+		n = copy_to_user_mcsafe((__force void *) to, from, n);
+	}
+	return n;
+}
+
+static unsigned long memcpy_mcsafe_to_page(struct page *page, size_t offset,
+		const char *from, size_t len)
+{
+	unsigned long ret;
+	char *to;
+
+	to = kmap_atomic(page);
+	ret = memcpy_mcsafe(to + offset, from, len);
+	kunmap_atomic(to);
+
+	return ret;
+}
+
+size_t _copy_to_iter_mcsafe(const void *addr, size_t bytes, struct iov_iter *i)
+{
+	const char *from = addr;
+	unsigned long rem, curr_addr, s_addr = (unsigned long) addr;
+
+	if (unlikely(i->type & ITER_PIPE)) {
+		WARN_ON(1);
+		return 0;
+	}
+	if (iter_is_iovec(i))
+		might_fault();
+	iterate_and_advance(i, bytes, v,
+		copyout_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len, v.iov_len),
+		({
+		rem = memcpy_mcsafe_to_page(v.bv_page, v.bv_offset,
+				(from += v.bv_len) - v.bv_len, v.bv_len);
+		if (rem) {
+			curr_addr = (unsigned long) from;
+			bytes = curr_addr - s_addr - rem;
+			return bytes;
+		}
+		}),
+		({
+		rem = memcpy_mcsafe(v.iov_base, (from += v.iov_len) - v.iov_len,
+				v.iov_len);
+		if (rem) {
+			curr_addr = (unsigned long) from;
+			bytes = curr_addr - s_addr - rem;
+			return bytes;
+		}
+		})
+	)
+
+	return bytes;
+}
+EXPORT_SYMBOL_GPL(_copy_to_iter_mcsafe);
+#endif /* CONFIG_ARCH_HAS_UACCESS_MCSAFE */
+
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i)
 {
 	char *to = addr;
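A brief worked example of the partial-completion arithmetic above, with made-up numbers (an illustration, not part of the commit):

/*
 * Suppose the source buffer starts at s_addr = 0x1000 and the BVEC step
 * faults partway through a 0x200-byte segment:
 *
 *   from (already advanced past the faulting segment) = 0x1600
 *   curr_addr - s_addr                                = 0x600
 *   rem (bytes of that segment left uncopied)         = 0x80
 *
 *   bytes = curr_addr - s_addr - rem = 0x600 - 0x80 = 0x580
 *
 * The caller is told 0x580 bytes were transferred: everything before the
 * faulting segment plus the part of it that copied cleanly before the
 * machine check.
 */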
