Commit c336bf8

Merge tag 'vfio-v4.12-rc1' of git://github.com/awilliam/linux-vfio
Pull VFIO updates from Alex Williamson:

 - Updates for SPAPR IOMMU backend including compatibility test and memory allocation check (Alexey Kardashevskiy)

 - Updates for type1 IOMMU backend to remove asynchronous locked page accounting and remove redundancy (Alex Williamson)

* tag 'vfio-v4.12-rc1' of git://github.com/awilliam/linux-vfio:
  vfio/type1: Reduce repetitive calls in vfio_pin_pages_remote()
  vfio/type1: Prune vfio_pin_page_external()
  vfio/type1: Remove locked page accounting workqueue
  vfio/spapr_tce: Check kzalloc() return when preregistering memory
  vfio/powerpc/spapr_tce: Enforce IOMMU type compatibility check
2 parents: a964807 + 7cb671e
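
The type1 items in the message above replace the deferred locked-page accounting workqueue with a synchronous vfio_lock_acct() that performs the RLIMIT_MEMLOCK check and the locked_vm update together and reports an error the caller can unwind on. The standalone userspace sketch below models only that accounting rule (increments are bounded by the limit unless the task holds CAP_IPC_LOCK, decrements always succeed); the struct and function names are invented for illustration and are not the kernel's API.

/*
 * Toy model of synchronous locked-page accounting: the limit check and
 * the counter update happen together and the result is reported to the
 * caller.  acct_ctx and acct_locked_pages() are illustrative names only.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct acct_ctx {
	long locked_vm;     /* pages currently accounted as locked */
	long limit;         /* RLIMIT_MEMLOCK expressed in pages */
	bool cap_ipc_lock;  /* CAP_IPC_LOCK bypasses the limit */
};

/* Returns 0 on success, -ENOMEM if the increment would exceed the limit. */
static int acct_locked_pages(struct acct_ctx *ctx, long npage)
{
	if (!npage)
		return 0;

	/* Only increments are limited; decrements always succeed. */
	if (npage > 0 && !ctx->cap_ipc_lock &&
	    ctx->locked_vm + npage > ctx->limit)
		return -ENOMEM;

	ctx->locked_vm += npage;
	return 0;
}

int main(void)
{
	struct acct_ctx ctx = { .locked_vm = 0, .limit = 16, .cap_ipc_lock = false };

	printf("pin 10 pages: %d\n", acct_locked_pages(&ctx, 10));  /* 0 */
	printf("pin 10 more:  %d\n", acct_locked_pages(&ctx, 10));  /* -ENOMEM */
	printf("unpin 4:      %d\n", acct_locked_pages(&ctx, -4));  /* 0 */
	printf("locked_vm = %ld\n", ctx.locked_vm);                 /* 6 */
	return 0;
}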

File tree

drivers/vfio/vfio_iommu_spapr_tce.c
drivers/vfio/vfio_iommu_type1.c

2 files changed: 77 additions & 86 deletions


drivers/vfio/vfio_iommu_spapr_tce.c

Lines changed: 13 additions & 0 deletions
@@ -198,6 +198,11 @@ static long tce_iommu_register_pages(struct tce_container *container,
 		return ret;
 
 	tcemem = kzalloc(sizeof(*tcemem), GFP_KERNEL);
+	if (!tcemem) {
+		mm_iommu_put(container->mm, mem);
+		return -ENOMEM;
+	}
+
 	tcemem->mem = mem;
 	list_add(&tcemem->next, &container->prereg_list);
 
@@ -1335,8 +1340,16 @@ static int tce_iommu_attach_group(void *iommu_data,
 
 	if (!table_group->ops || !table_group->ops->take_ownership ||
 			!table_group->ops->release_ownership) {
+		if (container->v2) {
+			ret = -EPERM;
+			goto unlock_exit;
+		}
 		ret = tce_iommu_take_ownership(container, table_group);
 	} else {
+		if (!container->v2) {
+			ret = -EPERM;
+			goto unlock_exit;
+		}
 		ret = tce_iommu_take_ownership_ddw(container, table_group);
 		if (!tce_groups_attached(container) && !container->tables[0])
 			container->def_window_pending = true;
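
Two things change in this file: tce_iommu_register_pages() now handles kzalloc() failure by dropping the just-taken mm_iommu reference and returning -ENOMEM, and tce_iommu_attach_group() now refuses mismatched attachments: a group whose table_group lacks take_ownership/release_ownership ops may only attach to a non-v2 container, a group that has them may only attach to a v2 container, and a mismatch fails with -EPERM. A condensed userspace model of that compatibility gate follows; check_attach_compat() and its parameter names are invented for this sketch, not kernel symbols.

/*
 * Toy model of the attach-time compatibility gate added above: the
 * container mode (v2 or not) must match what the group's table_group
 * ops support.  check_attach_compat(), container_is_v2 and
 * group_has_ownership_ops are invented names for this sketch.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

static int check_attach_compat(bool container_is_v2, bool group_has_ownership_ops)
{
	if (!group_has_ownership_ops) {
		/* Legacy take-ownership path: only valid in a non-v2 container. */
		if (container_is_v2)
			return -EPERM;
		return 0;
	}

	/* Dynamic-window (ddw) path: requires a v2 container. */
	if (!container_is_v2)
		return -EPERM;
	return 0;
}

int main(void)
{
	printf("v1 container, group without ops: %d\n", check_attach_compat(false, false)); /* 0 */
	printf("v2 container, group without ops: %d\n", check_attach_compat(true, false));  /* -EPERM */
	printf("v1 container, group with ops:    %d\n", check_attach_compat(false, true));  /* -EPERM */
	printf("v2 container, group with ops:    %d\n", check_attach_compat(true, true));   /* 0 */
	return 0;
}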

drivers/vfio/vfio_iommu_type1.c

Lines changed: 64 additions & 86 deletions
@@ -246,69 +246,46 @@ static int vfio_iova_put_vfio_pfn(struct vfio_dma *dma, struct vfio_pfn *vpfn)
 	return ret;
 }
 
-struct vwork {
-	struct mm_struct	*mm;
-	long			npage;
-	struct work_struct	work;
-};
-
-/* delayed decrement/increment for locked_vm */
-static void vfio_lock_acct_bg(struct work_struct *work)
+static int vfio_lock_acct(struct task_struct *task, long npage, bool *lock_cap)
 {
-	struct vwork *vwork = container_of(work, struct vwork, work);
-	struct mm_struct *mm;
-
-	mm = vwork->mm;
-	down_write(&mm->mmap_sem);
-	mm->locked_vm += vwork->npage;
-	up_write(&mm->mmap_sem);
-	mmput(mm);
-	kfree(vwork);
-}
-
-static void vfio_lock_acct(struct task_struct *task, long npage)
-{
-	struct vwork *vwork;
 	struct mm_struct *mm;
 	bool is_current;
+	int ret;
 
 	if (!npage)
-		return;
+		return 0;
 
 	is_current = (task->mm == current->mm);
 
 	mm = is_current ? task->mm : get_task_mm(task);
 	if (!mm)
-		return; /* process exited */
+		return -ESRCH; /* process exited */
 
-	if (down_write_trylock(&mm->mmap_sem)) {
-		mm->locked_vm += npage;
-		up_write(&mm->mmap_sem);
-		if (!is_current)
-			mmput(mm);
-		return;
-	}
+	ret = down_write_killable(&mm->mmap_sem);
+	if (!ret) {
+		if (npage > 0) {
+			if (lock_cap ? !*lock_cap :
+			    !has_capability(task, CAP_IPC_LOCK)) {
+				unsigned long limit;
+
+				limit = task_rlimit(task,
+						RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+
+				if (mm->locked_vm + npage > limit)
+					ret = -ENOMEM;
+			}
+		}
+
+		if (!ret)
+			mm->locked_vm += npage;
 
-	if (is_current) {
-		mm = get_task_mm(task);
-		if (!mm)
-			return;
+		up_write(&mm->mmap_sem);
 	}
 
-	/*
-	 * Couldn't get mmap_sem lock, so must setup to update
-	 * mm->locked_vm later. If locked_vm were atomic, we
-	 * wouldn't need this silliness
-	 */
-	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
-	if (WARN_ON(!vwork)) {
+	if (!is_current)
 		mmput(mm);
-		return;
-	}
-	INIT_WORK(&vwork->work, vfio_lock_acct_bg);
-	vwork->mm = mm;
-	vwork->npage = npage;
-	schedule_work(&vwork->work);
+
+	return ret;
 }
 
 /*
@@ -403,10 +380,10 @@ static int vaddr_get_pfn(struct mm_struct *mm, unsigned long vaddr,
  * first page and all consecutive pages with the same locking.
  */
 static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
-				  long npage, unsigned long *pfn_base)
+				  long npage, unsigned long *pfn_base,
+				  bool lock_cap, unsigned long limit)
 {
-	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-	bool lock_cap = capable(CAP_IPC_LOCK);
+	unsigned long pfn = 0;
 	long ret, pinned = 0, lock_acct = 0;
 	bool rsvd;
 	dma_addr_t iova = vaddr - dma->vaddr + dma->iova;
@@ -442,8 +419,6 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 	/* Lock all the consecutive pages from pfn_base */
 	for (vaddr += PAGE_SIZE, iova += PAGE_SIZE; pinned < npage;
 	     pinned++, vaddr += PAGE_SIZE, iova += PAGE_SIZE) {
-		unsigned long pfn = 0;
-
 		ret = vaddr_get_pfn(current->mm, vaddr, dma->prot, &pfn);
 		if (ret)
 			break;
@@ -460,14 +435,25 @@ static long vfio_pin_pages_remote(struct vfio_dma *dma, unsigned long vaddr,
 				put_pfn(pfn, dma->prot);
 				pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
 					__func__, limit << PAGE_SHIFT);
-				break;
+				ret = -ENOMEM;
+				goto unpin_out;
 			}
 			lock_acct++;
 		}
 	}
 
 out:
-	vfio_lock_acct(current, lock_acct);
+	ret = vfio_lock_acct(current, lock_acct, &lock_cap);
+
+unpin_out:
+	if (ret) {
+		if (!rsvd) {
+			for (pfn = *pfn_base ; pinned ; pfn++, pinned--)
+				put_pfn(pfn, dma->prot);
+		}
+
+		return ret;
+	}
 
 	return pinned;
 }
@@ -488,45 +474,34 @@ static long vfio_unpin_pages_remote(struct vfio_dma *dma, dma_addr_t iova,
 	}
 
 	if (do_accounting)
-		vfio_lock_acct(dma->task, locked - unlocked);
+		vfio_lock_acct(dma->task, locked - unlocked, NULL);
 
 	return unlocked;
 }
 
 static int vfio_pin_page_external(struct vfio_dma *dma, unsigned long vaddr,
 				  unsigned long *pfn_base, bool do_accounting)
 {
-	unsigned long limit;
-	bool lock_cap = has_capability(dma->task, CAP_IPC_LOCK);
 	struct mm_struct *mm;
 	int ret;
-	bool rsvd;
 
 	mm = get_task_mm(dma->task);
 	if (!mm)
 		return -ENODEV;
 
 	ret = vaddr_get_pfn(mm, vaddr, dma->prot, pfn_base);
-	if (ret)
-		goto pin_page_exit;
-
-	rsvd = is_invalid_reserved_pfn(*pfn_base);
-	limit = task_rlimit(dma->task, RLIMIT_MEMLOCK) >> PAGE_SHIFT;
-
-	if (!rsvd && !lock_cap && mm->locked_vm + 1 > limit) {
-		put_pfn(*pfn_base, dma->prot);
-		pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK (%ld) exceeded\n",
-			__func__, dma->task->comm, task_pid_nr(dma->task),
-			limit << PAGE_SHIFT);
-		ret = -ENOMEM;
-		goto pin_page_exit;
+	if (!ret && do_accounting && !is_invalid_reserved_pfn(*pfn_base)) {
+		ret = vfio_lock_acct(dma->task, 1, NULL);
+		if (ret) {
+			put_pfn(*pfn_base, dma->prot);
+			if (ret == -ENOMEM)
+				pr_warn("%s: Task %s (%d) RLIMIT_MEMLOCK "
+					"(%ld) exceeded\n", __func__,
+					dma->task->comm, task_pid_nr(dma->task),
+					task_rlimit(dma->task, RLIMIT_MEMLOCK));
+		}
 	}
 
-	if (!rsvd && do_accounting)
-		vfio_lock_acct(dma->task, 1);
-	ret = 1;
-
-pin_page_exit:
 	mmput(mm);
 	return ret;
 }
@@ -543,7 +518,7 @@ static int vfio_unpin_page_external(struct vfio_dma *dma, dma_addr_t iova,
 	unlocked = vfio_iova_put_vfio_pfn(dma, vpfn);
 
 	if (do_accounting)
-		vfio_lock_acct(dma->task, -unlocked);
+		vfio_lock_acct(dma->task, -unlocked, NULL);
 
 	return unlocked;
 }
@@ -606,10 +581,8 @@ static int vfio_iommu_type1_pin_pages(void *iommu_data,
 		remote_vaddr = dma->vaddr + iova - dma->iova;
 		ret = vfio_pin_page_external(dma, remote_vaddr, &phys_pfn[i],
 					     do_accounting);
-		if (ret <= 0) {
-			WARN_ON(!ret);
+		if (ret)
 			goto pin_unwind;
-		}
 
 		ret = vfio_add_to_pfn_list(dma, iova, phys_pfn[i]);
 		if (ret) {
@@ -740,7 +713,7 @@ static long vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma,
 
 	dma->iommu_mapped = false;
 	if (do_accounting) {
-		vfio_lock_acct(dma->task, -unlocked);
+		vfio_lock_acct(dma->task, -unlocked, NULL);
 		return 0;
 	}
 	return unlocked;
@@ -951,13 +924,15 @@ static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
 	unsigned long vaddr = dma->vaddr;
 	size_t size = map_size;
 	long npage;
-	unsigned long pfn;
+	unsigned long pfn, limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	bool lock_cap = capable(CAP_IPC_LOCK);
 	int ret = 0;
 
 	while (size) {
 		/* Pin a contiguous chunk of memory */
 		npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
-					      size >> PAGE_SHIFT, &pfn);
+					      size >> PAGE_SHIFT, &pfn,
+					      lock_cap, limit);
 		if (npage <= 0) {
 			WARN_ON(!npage);
 			ret = (int)npage;
@@ -1067,6 +1042,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 {
 	struct vfio_domain *d;
 	struct rb_node *n;
+	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
+	bool lock_cap = capable(CAP_IPC_LOCK);
 	int ret;
 
 	/* Arbitrarily pick the first domain in the list for lookups */
@@ -1113,7 +1090,8 @@ static int vfio_iommu_replay(struct vfio_iommu *iommu,
 
 				npage = vfio_pin_pages_remote(dma, vaddr,
 							      n >> PAGE_SHIFT,
-							      &pfn);
+							      &pfn, lock_cap,
+							      limit);
 				if (npage <= 0) {
 					WARN_ON(!npage);
 					ret = (int)npage;
@@ -1382,7 +1360,7 @@ static void vfio_iommu_unmap_unpin_reaccount(struct vfio_iommu *iommu)
 			if (!is_invalid_reserved_pfn(vpfn->pfn))
 				locked++;
 		}
-		vfio_lock_acct(dma->task, locked - unlocked);
+		vfio_lock_acct(dma->task, locked - unlocked, NULL);
 	}
 }
 
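
Beyond the vfio_lock_acct() rework, vfio_pin_pages_remote() now receives the memlock limit and CAP_IPC_LOCK state from its callers (vfio_pin_map_dma() and vfio_iommu_replay() compute them once, outside their loops), and a failure mid-chunk unwinds every page pinned so far via the new unpin_out path instead of silently stopping short. The toy model below mirrors only that caller/unwind shape; pin_chunk(), unpin_range() and struct page_set are invented names for the sketch.

/*
 * Toy model of the reworked pin loop: the caller supplies the lock
 * limit and capability once, and each chunk either fully succeeds or
 * is fully unwound.  All identifiers here are invented for the sketch.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct page_set {
	long pinned;	/* pages successfully pinned so far */
};

static void unpin_range(struct page_set *ps, long npage)
{
	ps->pinned -= npage;
}

/* Pin npage pages, honouring a caller-supplied limit; unwind on failure. */
static long pin_chunk(struct page_set *ps, long npage,
		      bool lock_cap, long limit)
{
	long done;

	for (done = 0; done < npage; done++) {
		if (!lock_cap && ps->pinned + 1 > limit) {
			/* Roll back everything pinned in this chunk. */
			unpin_range(ps, done);
			return -ENOMEM;
		}
		ps->pinned++;
	}
	return done;
}

int main(void)
{
	struct page_set ps = { .pinned = 0 };
	/* Computed once by the caller, not per chunk (cf. vfio_pin_map_dma). */
	const long limit = 8;
	const bool lock_cap = false;
	long ret;

	ret = pin_chunk(&ps, 5, lock_cap, limit);
	printf("pin 5: %ld, pinned=%ld\n", ret, ps.pinned);   /* 5, pinned=5 */
	ret = pin_chunk(&ps, 6, lock_cap, limit);
	printf("pin 6: %ld, pinned=%ld\n", ret, ps.pinned);   /* -ENOMEM, pinned=5 */
	return 0;
}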
