Skip to content

Commit 5de490d

Browse files
Ross Zwisler authored and djbw committed
pmem: add copy_from_iter_pmem() and clear_pmem()
Add support for two new PMEM APIs, copy_from_iter_pmem() and clear_pmem(). copy_from_iter_pmem() is used to copy data from an iterator into a PMEM buffer. clear_pmem() zeros a PMEM memory range. Both of these new APIs must be explicitly ordered using a wmb_pmem() function call and are implemented in such a way that the wmb_pmem() will make the stores to PMEM durable. Because both APIs are unordered they can be called as needed without introducing any unwanted memory barriers. Signed-off-by: Ross Zwisler <ross.zwisler@linux.intel.com> Signed-off-by: Dan Williams <dan.j.williams@intel.com>
1 parent 4a370df commit 5de490d

File tree

2 files changed

+137
-2
lines changed

2 files changed

+137
-2
lines changed

arch/x86/include/asm/pmem.h

Lines changed: 75 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -66,6 +66,81 @@ static inline void arch_wmb_pmem(void)
6666
pcommit_sfence();
6767
}
6868

69+
/**
70+
* __arch_wb_cache_pmem - write back a cache range with CLWB
71+
* @vaddr: virtual start address
72+
* @size: number of bytes to write back
73+
*
74+
* Write back a cache range using the CLWB (cache line write back)
75+
* instruction. This function requires explicit ordering with an
76+
* arch_wmb_pmem() call. This API is internal to the x86 PMEM implementation.
77+
*/
78+
static inline void __arch_wb_cache_pmem(void *vaddr, size_t size)
79+
{
80+
u16 x86_clflush_size = boot_cpu_data.x86_clflush_size;
81+
unsigned long clflush_mask = x86_clflush_size - 1;
82+
void *vend = vaddr + size;
83+
void *p;
84+
85+
for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
86+
p < vend; p += x86_clflush_size)
87+
clwb(p);
88+
}
89+
90+
/*
91+
* copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
92+
* iterators, so for other types (bvec & kvec) we must do a cache write-back.
93+
*/
94+
static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
95+
{
96+
return iter_is_iovec(i) == false;
97+
}
98+
99+
/**
100+
* arch_copy_from_iter_pmem - copy data from an iterator to PMEM
101+
* @addr: PMEM destination address
102+
* @bytes: number of bytes to copy
103+
* @i: iterator with source data
104+
*
105+
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
106+
* This function requires explicit ordering with an arch_wmb_pmem() call.
107+
*/
108+
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
109+
struct iov_iter *i)
110+
{
111+
void *vaddr = (void __force *)addr;
112+
size_t len;
113+
114+
/* TODO: skip the write-back by always using non-temporal stores */
115+
len = copy_from_iter_nocache(vaddr, bytes, i);
116+
117+
if (__iter_needs_pmem_wb(i))
118+
__arch_wb_cache_pmem(vaddr, bytes);
119+
120+
return len;
121+
}
122+
123+
/**
124+
* arch_clear_pmem - zero a PMEM memory range
125+
* @addr: virtual start address
126+
* @size: number of bytes to zero
127+
*
128+
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
129+
* This function requires explicit ordering with an arch_wmb_pmem() call.
130+
*/
131+
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
132+
{
133+
void *vaddr = (void __force *)addr;
134+
135+
/* TODO: implement the zeroing via non-temporal writes */
136+
if (size == PAGE_SIZE && ((unsigned long)vaddr & ~PAGE_MASK) == 0)
137+
clear_page(vaddr);
138+
else
139+
memset(vaddr, 0, size);
140+
141+
__arch_wb_cache_pmem(vaddr, size);
142+
}
143+
69144
static inline bool arch_has_wmb_pmem(void)
70145
{
71146
#ifdef CONFIG_X86_64

include/linux/pmem.h

Lines changed: 62 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -14,6 +14,7 @@
1414
#define __PMEM_H__
1515

1616
#include <linux/io.h>
17+
#include <linux/uio.h>
1718

1819
#ifdef CONFIG_ARCH_HAS_PMEM_API
1920
#include <asm/pmem.h>
@@ -33,12 +34,24 @@ static inline void arch_memcpy_to_pmem(void __pmem *dst, const void *src,
3334
{
3435
BUG();
3536
}
37+
38+
static inline size_t arch_copy_from_iter_pmem(void __pmem *addr, size_t bytes,
39+
struct iov_iter *i)
40+
{
41+
BUG();
42+
return 0;
43+
}
44+
45+
static inline void arch_clear_pmem(void __pmem *addr, size_t size)
46+
{
47+
BUG();
48+
}
3649
#endif
3750

3851
/*
3952
* Architectures that define ARCH_HAS_PMEM_API must provide
40-
* implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(), and
41-
* arch_has_wmb_pmem().
53+
* implementations for arch_memcpy_to_pmem(), arch_wmb_pmem(),
54+
* arch_copy_from_iter_pmem(), arch_clear_pmem() and arch_has_wmb_pmem().
4255
*/
4356

4457
static inline void memcpy_from_pmem(void *dst, void __pmem const *src, size_t size)
@@ -78,6 +91,20 @@ static inline void default_memcpy_to_pmem(void __pmem *dst, const void *src,
7891
memcpy((void __force *) dst, src, size);
7992
}
8093

94+
static inline size_t default_copy_from_iter_pmem(void __pmem *addr,
95+
size_t bytes, struct iov_iter *i)
96+
{
97+
return copy_from_iter_nocache((void __force *)addr, bytes, i);
98+
}
99+
100+
static inline void default_clear_pmem(void __pmem *addr, size_t size)
101+
{
102+
if (size == PAGE_SIZE && ((unsigned long)addr & ~PAGE_MASK) == 0)
103+
clear_page((void __force *)addr);
104+
else
105+
memset((void __force *)addr, 0, size);
106+
}
107+
81108
/**
82109
* memremap_pmem - map physical persistent memory for pmem api
83110
* @offset: physical address of persistent memory
@@ -134,4 +161,37 @@ static inline void wmb_pmem(void)
134161
if (arch_has_pmem_api())
135162
arch_wmb_pmem();
136163
}
164+
165+
/**
166+
* copy_from_iter_pmem - copy data from an iterator to PMEM
167+
* @addr: PMEM destination address
168+
* @bytes: number of bytes to copy
169+
* @i: iterator with source data
170+
*
171+
* Copy data from the iterator 'i' to the PMEM buffer starting at 'addr'.
172+
* This function requires explicit ordering with a wmb_pmem() call.
173+
*/
174+
static inline size_t copy_from_iter_pmem(void __pmem *addr, size_t bytes,
175+
struct iov_iter *i)
176+
{
177+
if (arch_has_pmem_api())
178+
return arch_copy_from_iter_pmem(addr, bytes, i);
179+
return default_copy_from_iter_pmem(addr, bytes, i);
180+
}
181+
182+
/**
183+
* clear_pmem - zero a PMEM memory range
184+
* @addr: virtual start address
185+
* @size: number of bytes to zero
186+
*
187+
* Write zeros into the memory range starting at 'addr' for 'size' bytes.
188+
* This function requires explicit ordering with a wmb_pmem() call.
189+
*/
190+
static inline void clear_pmem(void __pmem *addr, size_t size)
191+
{
192+
if (arch_has_pmem_api())
193+
arch_clear_pmem(addr, size);
194+
else
195+
default_clear_pmem(addr, size);
196+
}
137197
#endif /* __PMEM_H__ */

0 commit comments

Comments
 (0)