@@ -58,7 +58,7 @@ static u64 node_start(struct umem_odp_node *n)
 	struct ib_umem_odp *umem_odp =
 			container_of(n, struct ib_umem_odp, interval_tree);
 
-	return ib_umem_start(umem_odp->umem);
+	return ib_umem_start(&umem_odp->umem);
 }
 
 /* Note that the representation of the intervals in the interval tree
@@ -71,7 +71,7 @@ static u64 node_last(struct umem_odp_node *n)
 	struct ib_umem_odp *umem_odp =
 			container_of(n, struct ib_umem_odp, interval_tree);
 
-	return ib_umem_end(umem_odp->umem) - 1;
+	return ib_umem_end(&umem_odp->umem) - 1;
 }
 
 INTERVAL_TREE_DEFINE(struct umem_odp_node, rb, u64, __subtree_last,
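Both interval-tree callbacks above rely on struct ib_umem now being embedded in struct ib_umem_odp rather than reached through a pointer, so &umem_odp->umem is always valid. A minimal sketch of the layout this assumes (field list abbreviated, not the full definition from include/rdma/ib_umem_odp.h):

/* Sketch only: the real struct has more members; the point is that the
 * ib_umem is embedded, so no separate allocation or NULL check is needed
 * and the callbacks can take its address directly.
 */
struct ib_umem_odp {
	struct ib_umem		umem;		/* embedded, formerly a pointer */
	struct umem_odp_node	interval_tree;	/* used by node_start()/node_last() */
	/* ... page_list, dma_list, umem_mutex, notifier_completion, ... */
};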
@@ -159,7 +159,7 @@ static void ib_ucontext_notifier_end_account(struct ib_ucontext *context)
 static int ib_umem_notifier_release_trampoline(struct ib_umem_odp *umem_odp,
 					       u64 start, u64 end, void *cookie)
 {
-	struct ib_umem *umem = umem_odp->umem;
+	struct ib_umem *umem = &umem_odp->umem;
 
 	/*
 	 * Increase the number of notifiers running, to
@@ -198,7 +198,7 @@ static int invalidate_page_trampoline(struct ib_umem_odp *item, u64 start,
 				      u64 end, void *cookie)
 {
 	ib_umem_notifier_start_account(item);
-	item->umem->context->invalidate_range(item, start, start + PAGE_SIZE);
+	item->umem.context->invalidate_range(item, start, start + PAGE_SIZE);
 	ib_umem_notifier_end_account(item);
 	return 0;
 }
@@ -207,7 +207,7 @@ static int invalidate_range_start_trampoline(struct ib_umem_odp *item,
 					     u64 start, u64 end, void *cookie)
 {
 	ib_umem_notifier_start_account(item);
-	item->umem->context->invalidate_range(item, start, end);
+	item->umem.context->invalidate_range(item, start, end);
 	return 0;
 }
 
@@ -277,28 +277,21 @@ static const struct mmu_notifier_ops ib_umem_notifiers = {
 struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext *context,
 				      unsigned long addr, size_t size)
 {
-	struct ib_umem *umem;
 	struct ib_umem_odp *odp_data;
+	struct ib_umem *umem;
 	int pages = size >> PAGE_SHIFT;
 	int ret;
 
-	umem = kzalloc(sizeof(*umem), GFP_KERNEL);
-	if (!umem)
+	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
+	if (!odp_data)
 		return ERR_PTR(-ENOMEM);
-
+	umem = &odp_data->umem;
 	umem->context    = context;
 	umem->length     = size;
 	umem->address    = addr;
 	umem->page_shift = PAGE_SHIFT;
 	umem->writable   = 1;
 
-	odp_data = kzalloc(sizeof(*odp_data), GFP_KERNEL);
-	if (!odp_data) {
-		ret = -ENOMEM;
-		goto out_umem;
-	}
-	odp_data->umem = umem;
-
 	mutex_init(&odp_data->umem_mutex);
 	init_completion(&odp_data->notifier_completion);
 
@@ -334,15 +327,14 @@ struct ib_umem_odp *ib_alloc_odp_umem(struct ib_ucontext *context,
 	vfree(odp_data->page_list);
 out_odp_data:
 	kfree(odp_data);
-out_umem:
-	kfree(umem);
 	return ERR_PTR(ret);
 }
 EXPORT_SYMBOL(ib_alloc_odp_umem);
 
-int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
-		    int access)
+int ib_umem_odp_get(struct ib_umem_odp *umem_odp, int access)
 {
+	struct ib_ucontext *context = umem_odp->umem.context;
+	struct ib_umem *umem = &umem_odp->umem;
 	int ret_val;
 	struct pid *our_pid;
 	struct mm_struct *mm = get_task_mm(current);
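ib_umem_odp_get() now takes the ODP structure directly and derives the context and the embedded umem from it, instead of receiving (context, umem, access). A hedged sketch of how a caller holding a plain struct ib_umem that it knows is ODP-backed can recover the container and use the new signature; the helper name mirrors the to_ib_umem_odp() accessor used by later kernels, but treat it here as illustrative rather than as part of this hunk:

/* Illustrative only: valid because struct ib_umem is embedded in
 * struct ib_umem_odp, so container_of() simply rewinds the pointer.
 */
static inline struct ib_umem_odp *to_ib_umem_odp(struct ib_umem *umem)
{
	return container_of(umem, struct ib_umem_odp, umem);
}

/* A caller that previously did ib_umem_odp_get(context, umem, access)
 * would now do ib_umem_odp_get(to_ib_umem_odp(umem), access).
 */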
@@ -378,30 +370,23 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
 		goto out_mm;
 	}
 
-	umem->odp_data = kzalloc(sizeof(*umem->odp_data), GFP_KERNEL);
-	if (!umem->odp_data) {
-		ret_val = -ENOMEM;
-		goto out_mm;
-	}
-	umem->odp_data->umem = umem;
-
-	mutex_init(&umem->odp_data->umem_mutex);
+	mutex_init(&umem_odp->umem_mutex);
 
-	init_completion(&umem->odp_data->notifier_completion);
+	init_completion(&umem_odp->notifier_completion);
 
 	if (ib_umem_num_pages(umem)) {
-		umem->odp_data->page_list =
-			vzalloc(array_size(sizeof(*umem->odp_data->page_list),
+		umem_odp->page_list =
+			vzalloc(array_size(sizeof(*umem_odp->page_list),
 					   ib_umem_num_pages(umem)));
-		if (!umem->odp_data->page_list) {
+		if (!umem_odp->page_list) {
 			ret_val = -ENOMEM;
-			goto out_odp_data;
+			goto out_mm;
 		}
 
-		umem->odp_data->dma_list =
-			vzalloc(array_size(sizeof(*umem->odp_data->dma_list),
+		umem_odp->dma_list =
+			vzalloc(array_size(sizeof(*umem_odp->dma_list),
 					   ib_umem_num_pages(umem)));
-		if (!umem->odp_data->dma_list) {
+		if (!umem_odp->dma_list) {
 			ret_val = -ENOMEM;
 			goto out_page_list;
 		}
@@ -415,13 +400,13 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
 	down_write(&context->umem_rwsem);
 	context->odp_mrs_count++;
 	if (likely(ib_umem_start(umem) != ib_umem_end(umem)))
-		rbt_ib_umem_insert(&umem->odp_data->interval_tree,
+		rbt_ib_umem_insert(&umem_odp->interval_tree,
 				   &context->umem_tree);
 	if (likely(!atomic_read(&context->notifier_count)) ||
 	    context->odp_mrs_count == 1)
-		umem->odp_data->mn_counters_active = true;
+		umem_odp->mn_counters_active = true;
 	else
-		list_add(&umem->odp_data->no_private_counters,
+		list_add(&umem_odp->no_private_counters,
 			 &context->no_private_counters);
 	downgrade_write(&context->umem_rwsem);
 
@@ -454,19 +439,17 @@ int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
 
 out_mutex:
 	up_read(&context->umem_rwsem);
-	vfree(umem->odp_data->dma_list);
+	vfree(umem_odp->dma_list);
 out_page_list:
-	vfree(umem->odp_data->page_list);
-out_odp_data:
-	kfree(umem->odp_data);
+	vfree(umem_odp->page_list);
 out_mm:
 	mmput(mm);
 	return ret_val;
 }
 
 void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
 {
-	struct ib_umem *umem = umem_odp->umem;
+	struct ib_umem *umem = &umem_odp->umem;
 	struct ib_ucontext *context = umem->context;
 
 	/*
@@ -528,8 +511,6 @@ void ib_umem_odp_release(struct ib_umem_odp *umem_odp)
 
 	vfree(umem_odp->dma_list);
 	vfree(umem_odp->page_list);
-	kfree(umem_odp);
-	kfree(umem);
 }
 
 /*
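With a single combined allocation, ib_umem_odp_release() no longer frees anything itself: kfree(umem_odp) must happen exactly once, by whoever owns the object, and kfree(umem) on the embedded member would be an invalid free of an interior pointer. A sketch of the ownership rule this relies on; the owning call site is outside this hunk, so the function below is hypothetical:

/* Hypothetical owner-side teardown, shown only to illustrate that the
 * embedded layout leaves exactly one allocation to free after release:
 */
static void example_put_odp_umem(struct ib_umem_odp *umem_odp)
{
	ib_umem_odp_release(umem_odp);	/* unmaps pages, frees page_list/dma_list */
	kfree(umem_odp);		/* one free also covers the embedded struct ib_umem */
}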
@@ -557,7 +538,7 @@ static int ib_umem_odp_map_dma_single_page(
 		u64 access_mask,
 		unsigned long current_seq)
 {
-	struct ib_umem *umem = umem_odp->umem;
+	struct ib_umem *umem = &umem_odp->umem;
 	struct ib_device *dev = umem->context->device;
 	dma_addr_t dma_addr;
 	int stored_page = 0;
@@ -643,7 +624,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem_odp *umem_odp, u64 user_virt,
 			      u64 bcnt, u64 access_mask,
 			      unsigned long current_seq)
 {
-	struct ib_umem *umem = umem_odp->umem;
+	struct ib_umem *umem = &umem_odp->umem;
 	struct task_struct *owning_process  = NULL;
 	struct mm_struct *owning_mm = NULL;
 	struct page       **local_page_list = NULL;
@@ -759,7 +740,7 @@ EXPORT_SYMBOL(ib_umem_odp_map_dma_pages);
 void ib_umem_odp_unmap_dma_pages(struct ib_umem_odp *umem_odp, u64 virt,
 				 u64 bound)
 {
-	struct ib_umem *umem = umem_odp->umem;
+	struct ib_umem *umem = &umem_odp->umem;
 	int idx;
 	u64 addr;
 	struct ib_device *dev = umem->context->device;