
Commit d5ff081

Merge branch 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm
Pull nvdimm fixes from Dan Williams:
 "A small crop of lockdep, sleeping-while-atomic, and other fixes /
  band-aids in advance of the full-blown reworks targeting the next
  merge window.

  The largest change here is "libnvdimm: fix blk free space accounting",
  which deletes a pile of buggy code that better testing would have
  caught before merging. The next change that is borderline too big for
  a late rc is switching the device-dax locking from rcu to srcu; I
  couldn't think of a smaller way to make that fix.

  The __copy_user_nocache fix will have a full replacement in 4.12 to
  move those pmem special-case considerations into the pmem driver.

  The "libnvdimm: band aid btt vs clear poison locking" commit admits
  that our error-clearing support for btt went in broken, so we just
  disable it in 4.11 and -stable. A replacement / full fix is in the
  pipeline for 4.12.

  Some of these would have been caught earlier had DEBUG_ATOMIC_SLEEP
  been enabled on my development station. I wonder if we should have:

      config DEBUG_ATOMIC_SLEEP
          default PROVE_LOCKING

  ... since I mistakenly thought I got both with PROVE_LOCKING=y.

  These have received a build success notification from the 0day robot,
  and some have appeared in a -next release with no reported issues"

* 'libnvdimm-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/nvdimm/nvdimm:
  x86, pmem: fix broken __copy_user_nocache cache-bypass assumptions
  device-dax: switch to srcu, fix rcu_read_lock() vs pte allocation
  libnvdimm: band aid btt vs clear poison locking
  libnvdimm: fix reconfig_mutex, mmap_sem, and jbd2_handle lockdep splat
  libnvdimm: fix blk free space accounting
  acpi, nfit, libnvdimm: fix interleave set cookie calculation (64-bit comparison)
2 parents 403a39f + 11e63f6

7 files changed: +70 −85 lines

arch/x86/include/asm/pmem.h

Lines changed: 31 additions & 11 deletions

@@ -55,7 +55,8 @@ static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
  * @size: number of bytes to write back
  *
  * Write back a cache range using the CLWB (cache line write back)
- * instruction.
+ * instruction. Note that @size is internally rounded up to be cache
+ * line size aligned.
  */
 static inline void arch_wb_cache_pmem(void *addr, size_t size)
 {
@@ -69,15 +70,6 @@ static inline void arch_wb_cache_pmem(void *addr, size_t size)
 		clwb(p);
 }
 
-/*
- * copy_from_iter_nocache() on x86 only uses non-temporal stores for iovec
- * iterators, so for other types (bvec & kvec) we must do a cache write-back.
- */
-static inline bool __iter_needs_pmem_wb(struct iov_iter *i)
-{
-	return iter_is_iovec(i) == false;
-}
-
 /**
  * arch_copy_from_iter_pmem - copy data from an iterator to PMEM
  * @addr: PMEM destination address
@@ -94,7 +86,35 @@ static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
 	/* TODO: skip the write-back by always using non-temporal stores */
 	len = copy_from_iter_nocache(addr, bytes, i);
 
-	if (__iter_needs_pmem_wb(i))
+	/*
+	 * In the iovec case on x86_64 copy_from_iter_nocache() uses
+	 * non-temporal stores for the bulk of the transfer, but we need
+	 * to manually flush if the transfer is unaligned. A cached
+	 * memory copy is used when destination or size is not naturally
+	 * aligned. That is:
+	 *   - Require 8-byte alignment when size is 8 bytes or larger.
+	 *   - Require 4-byte alignment when size is 4 bytes.
+	 *
+	 * In the non-iovec case the entire destination needs to be
+	 * flushed.
+	 */
+	if (iter_is_iovec(i)) {
+		unsigned long flushed, dest = (unsigned long) addr;
+
+		if (bytes < 8) {
+			if (!IS_ALIGNED(dest, 4) || (bytes != 4))
+				arch_wb_cache_pmem(addr, 1);
+		} else {
+			if (!IS_ALIGNED(dest, 8)) {
+				dest = ALIGN(dest, boot_cpu_data.x86_clflush_size);
+				arch_wb_cache_pmem(addr, 1);
+			}
+
+			flushed = dest - (unsigned long) addr;
+			if (bytes > flushed && !IS_ALIGNED(bytes - flushed, 8))
+				arch_wb_cache_pmem(addr + bytes - 1, 1);
+		}
+	} else
 		arch_wb_cache_pmem(addr, bytes);
 
 	return len;
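Editor's note (not part of the commit): the new comment spells out when the iovec path still needs a manual write-back. Below is a standalone C sketch of those rules, assuming a 64-byte cache line and using hypothetical names (would_flush, CLFLUSH_SIZE) rather than the kernel's helpers, to show which transfers trigger a head or tail flush:

/* Userspace sketch of the flush rules in arch_copy_from_iter_pmem(). */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define CLFLUSH_SIZE	64	/* stand-in for boot_cpu_data.x86_clflush_size */
#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)
#define ALIGN(x, a)	(((x) + (a) - 1) & ~((uintptr_t)(a) - 1))

static void would_flush(uintptr_t dest, size_t bytes)
{
	bool head = false, tail = false;

	if (bytes < 8) {
		/* only an aligned 4-byte store avoids the cached-copy path */
		if (!IS_ALIGNED(dest, 4) || bytes != 4)
			head = true;
	} else {
		uintptr_t start = dest;

		if (!IS_ALIGNED(dest, 8)) {
			/* unaligned head was written with cached stores */
			start = ALIGN(dest, CLFLUSH_SIZE);
			head = true;
		}
		/* a trailing partial word also used cached stores */
		if (bytes > start - dest && !IS_ALIGNED(bytes - (start - dest), 8))
			tail = true;
	}
	printf("dest=%#lx bytes=%zu -> head flush: %d, tail flush: %d\n",
	       (unsigned long)dest, bytes, head, tail);
}

int main(void)
{
	would_flush(0x1000, 4);		/* aligned 4-byte write: no flush */
	would_flush(0x1000, 17);	/* aligned start, odd tail: tail flush */
	would_flush(0x1003, 8);		/* unaligned start: head flush */
	return 0;
}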

drivers/acpi/nfit/core.c

Lines changed: 5 additions & 1 deletion

@@ -1617,7 +1617,11 @@ static int cmp_map(const void *m0, const void *m1)
 	const struct nfit_set_info_map *map0 = m0;
 	const struct nfit_set_info_map *map1 = m1;
 
-	return map0->region_offset - map1->region_offset;
+	if (map0->region_offset < map1->region_offset)
+		return -1;
+	else if (map0->region_offset > map1->region_offset)
+		return 1;
+	return 0;
 }
 
 /* Retrieve the nth entry referencing this spa */
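Editor's note: cmp_map() needs the explicit three-way comparison because region_offset is a 64-bit value, and returning the subtraction truncated to int can report two different offsets as equal or in the wrong order, which in turn changes the sort used for the interleave-set cookie. A small userspace sketch (cmp_broken()/cmp_fixed() are illustrative names, not driver code) shows the truncation:

#include <stdint.h>
#include <stdio.h>

/* Broken: the u64 difference is implicitly converted to the int return
 * value, which in practice keeps only the low 32 bits. */
static int cmp_broken(uint64_t a, uint64_t b)
{
	return a - b;
}

/* Fixed: explicit three-way comparison, mirroring the new cmp_map(). */
static int cmp_fixed(uint64_t a, uint64_t b)
{
	if (a < b)
		return -1;
	if (a > b)
		return 1;
	return 0;
}

int main(void)
{
	uint64_t lo = 0, hi = 1ULL << 32;	/* differ only above bit 31 */

	/* broken comparator reports "equal" (0); fixed reports -1 */
	printf("broken=%d fixed=%d\n", cmp_broken(lo, hi), cmp_fixed(lo, hi));
	return 0;
}

With offsets 0 and 1ULL << 32 the broken comparator returns 0, so a sort over such offsets can come out in an arbitrary order.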

drivers/dax/Kconfig

Lines changed: 1 addition & 0 deletions

@@ -2,6 +2,7 @@ menuconfig DEV_DAX
 	tristate "DAX: direct access to differentiated memory"
 	default m if NVDIMM_DAX
 	depends on TRANSPARENT_HUGEPAGE
+	select SRCU
 	help
 	  Support raw access to differentiated (persistence, bandwidth,
 	  latency...) memory via an mmap(2) capable character

drivers/dax/dax.c

Lines changed: 7 additions & 6 deletions

@@ -25,6 +25,7 @@
 #include "dax.h"
 
 static dev_t dax_devt;
+DEFINE_STATIC_SRCU(dax_srcu);
 static struct class *dax_class;
 static DEFINE_IDA(dax_minor_ida);
 static int nr_dax = CONFIG_NR_DEV_DAX;
@@ -60,7 +61,7 @@ struct dax_region {
  * @region - parent region
  * @dev - device backing the character device
  * @cdev - core chardev data
- * @alive - !alive + rcu grace period == no new mappings can be established
+ * @alive - !alive + srcu grace period == no new mappings can be established
  * @id - child id in the region
  * @num_resources - number of physical address extents in this device
  * @res - array of physical address ranges
@@ -569,7 +570,7 @@ static int __dax_dev_pud_fault(struct dax_dev *dax_dev, struct vm_fault *vmf)
 static int dax_dev_huge_fault(struct vm_fault *vmf,
 		enum page_entry_size pe_size)
 {
-	int rc;
+	int rc, id;
 	struct file *filp = vmf->vma->vm_file;
 	struct dax_dev *dax_dev = filp->private_data;
 
@@ -578,7 +579,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
 			? "write" : "read",
 			vmf->vma->vm_start, vmf->vma->vm_end);
 
-	rcu_read_lock();
+	id = srcu_read_lock(&dax_srcu);
 	switch (pe_size) {
 	case PE_SIZE_PTE:
 		rc = __dax_dev_pte_fault(dax_dev, vmf);
@@ -592,7 +593,7 @@ static int dax_dev_huge_fault(struct vm_fault *vmf,
 	default:
 		return VM_FAULT_FALLBACK;
 	}
-	rcu_read_unlock();
+	srcu_read_unlock(&dax_srcu, id);
 
 	return rc;
 }
@@ -713,11 +714,11 @@ static void unregister_dax_dev(void *dev)
 	 * Note, rcu is not protecting the liveness of dax_dev, rcu is
 	 * ensuring that any fault handlers that might have seen
 	 * dax_dev->alive == true, have completed. Any fault handlers
-	 * that start after synchronize_rcu() has started will abort
+	 * that start after synchronize_srcu() has started will abort
	 * upon seeing dax_dev->alive == false.
	 */
	dax_dev->alive = false;
-	synchronize_rcu();
+	synchronize_srcu(&dax_srcu);
	unmap_mapping_range(dax_dev->inode->i_mapping, 0, 0, 1);
	cdev_del(cdev);
	device_unregister(dev);
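Editor's note: the motivation for the switch is that the fault path may sleep (for example, to allocate page-table pages), which is illegal inside rcu_read_lock() but allowed inside an SRCU read-side section. A minimal kernel-style sketch of the pattern, with illustrative names (my_srcu, my_dev_alive) that are not the dax driver's symbols:

#include <linux/srcu.h>
#include <linux/types.h>

DEFINE_STATIC_SRCU(my_srcu);		/* statically allocated srcu_struct */
static bool my_dev_alive = true;

static int my_fault_handler(void)
{
	int idx, rc = 0;

	idx = srcu_read_lock(&my_srcu);	/* read-side section may sleep, unlike rcu_read_lock() */
	if (my_dev_alive)
		rc = 1;			/* ... handle the fault, may allocate ptes ... */
	srcu_read_unlock(&my_srcu, idx);
	return rc;
}

static void my_teardown(void)
{
	my_dev_alive = false;
	synchronize_srcu(&my_srcu);	/* wait for in-flight readers to finish */
	/* ... now safe to unmap and unregister ... */
}

The index returned by srcu_read_lock() must be handed back to srcu_read_unlock(), which is why dax_dev_huge_fault() grows the extra 'id' local; and DEFINE_STATIC_SRCU() is only available with CONFIG_SRCU, which is why the Kconfig change above adds 'select SRCU'.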

drivers/nvdimm/bus.c

Lines changed: 6 additions & 0 deletions

@@ -934,8 +934,14 @@ static int __nd_ioctl(struct nvdimm_bus *nvdimm_bus, struct nvdimm *nvdimm,
 	rc = nd_desc->ndctl(nd_desc, nvdimm, cmd, buf, buf_len, NULL);
 	if (rc < 0)
 		goto out_unlock;
+	nvdimm_bus_unlock(&nvdimm_bus->dev);
+
 	if (copy_to_user(p, buf, buf_len))
 		rc = -EFAULT;
+
+	vfree(buf);
+	return rc;
+
  out_unlock:
 	nvdimm_bus_unlock(&nvdimm_bus->dev);
  out:
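Editor's note: copy_to_user() can fault and end up taking mmap_sem, so it should not run while the nvdimm bus lock is held; the change finishes the command under the lock, drops it, and only then copies the result out and frees the buffer. A hedged sketch of that ordering, with illustrative names (my_bus_lock, my_ioctl_finish) rather than the driver's actual symbols:

#include <linux/mutex.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>

static DEFINE_MUTEX(my_bus_lock);

static int my_ioctl_finish(void __user *p, void *buf, size_t len)
{
	int rc = 0;

	mutex_lock(&my_bus_lock);
	/* ... produce the command result into buf under the lock ... */
	mutex_unlock(&my_bus_lock);	/* drop before the user copy can fault */

	if (copy_to_user(p, buf, len))
		rc = -EFAULT;
	vfree(buf);
	return rc;
}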

drivers/nvdimm/claim.c

Lines changed: 9 additions & 1 deletion

@@ -243,7 +243,15 @@ static int nsio_rw_bytes(struct nd_namespace_common *ndns,
 	}
 
 	if (unlikely(is_bad_pmem(&nsio->bb, sector, sz_align))) {
-		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)) {
+		/*
+		 * FIXME: nsio_rw_bytes() may be called from atomic
+		 * context in the btt case and nvdimm_clear_poison()
+		 * takes a sleeping lock. Until the locking can be
+		 * reworked this capability requires that the namespace
+		 * is not claimed by btt.
+		 */
+		if (IS_ALIGNED(offset, 512) && IS_ALIGNED(size, 512)
+				&& (!ndns->claim || !is_nd_btt(ndns->claim))) {
 			long cleared;
 
 			cleared = nvdimm_clear_poison(&ndns->dev, offset, size);

drivers/nvdimm/dimm_devs.c

Lines changed: 11 additions & 66 deletions

@@ -395,7 +395,7 @@ EXPORT_SYMBOL_GPL(nvdimm_create);
 
 int alias_dpa_busy(struct device *dev, void *data)
 {
-	resource_size_t map_end, blk_start, new, busy;
+	resource_size_t map_end, blk_start, new;
 	struct blk_alloc_info *info = data;
 	struct nd_mapping *nd_mapping;
 	struct nd_region *nd_region;
@@ -436,29 +436,19 @@ int alias_dpa_busy(struct device *dev, void *data)
 retry:
 	/*
 	 * Find the free dpa from the end of the last pmem allocation to
-	 * the end of the interleave-set mapping that is not already
-	 * covered by a blk allocation.
+	 * the end of the interleave-set mapping.
 	 */
-	busy = 0;
 	for_each_dpa_resource(ndd, res) {
+		if (strncmp(res->name, "pmem", 4) != 0)
+			continue;
 		if ((res->start >= blk_start && res->start < map_end)
 				|| (res->end >= blk_start
 					&& res->end <= map_end)) {
-			if (strncmp(res->name, "pmem", 4) == 0) {
-				new = max(blk_start, min(map_end + 1,
-							res->end + 1));
-				if (new != blk_start) {
-					blk_start = new;
-					goto retry;
-				}
-			} else
-				busy += min(map_end, res->end)
-					- max(nd_mapping->start, res->start) + 1;
-		} else if (nd_mapping->start > res->start
-				&& map_end < res->end) {
-			/* total eclipse of the PMEM region mapping */
-			busy += nd_mapping->size;
-			break;
+			new = max(blk_start, min(map_end + 1, res->end + 1));
+			if (new != blk_start) {
+				blk_start = new;
+				goto retry;
+			}
 		}
 	}
 
@@ -470,52 +460,11 @@ int alias_dpa_busy(struct device *dev, void *data)
 		return 1;
 	}
 
-	info->available -= blk_start - nd_mapping->start + busy;
+	info->available -= blk_start - nd_mapping->start;
 
 	return 0;
 }
 
-static int blk_dpa_busy(struct device *dev, void *data)
-{
-	struct blk_alloc_info *info = data;
-	struct nd_mapping *nd_mapping;
-	struct nd_region *nd_region;
-	resource_size_t map_end;
-	int i;
-
-	if (!is_nd_pmem(dev))
-		return 0;
-
-	nd_region = to_nd_region(dev);
-	for (i = 0; i < nd_region->ndr_mappings; i++) {
-		nd_mapping = &nd_region->mapping[i];
-		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
-			break;
-	}
-
-	if (i >= nd_region->ndr_mappings)
-		return 0;
-
-	map_end = nd_mapping->start + nd_mapping->size - 1;
-	if (info->res->start >= nd_mapping->start
-			&& info->res->start < map_end) {
-		if (info->res->end <= map_end) {
-			info->busy = 0;
-			return 1;
-		} else {
-			info->busy -= info->res->end - map_end;
-			return 0;
-		}
-	} else if (info->res->end >= nd_mapping->start
-			&& info->res->end <= map_end) {
-		info->busy -= nd_mapping->start - info->res->start;
-		return 0;
-	} else {
-		info->busy -= nd_mapping->size;
-		return 0;
-	}
-}
-
 /**
  * nd_blk_available_dpa - account the unused dpa of BLK region
  * @nd_mapping: container of dpa-resource-root + labels
@@ -545,11 +494,7 @@ resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
 	for_each_dpa_resource(ndd, res) {
 		if (strncmp(res->name, "blk", 3) != 0)
 			continue;
-
-		info.res = res;
-		info.busy = resource_size(res);
-		device_for_each_child(&nvdimm_bus->dev, &info, blk_dpa_busy);
-		info.available -= info.busy;
+		info.available -= resource_size(res);
 	}
 
 	return info.available;
