@@ -80,105 +80,22 @@ struct nouveau_dmem {
 	struct mutex mutex;
 };
 
-struct nouveau_migrate_hmem {
-	struct scatterlist *sg;
-	struct nouveau_mem mem;
-	unsigned long npages;
-	struct nvif_vma vma;
-};
-
 struct nouveau_dmem_fault {
 	struct nouveau_drm *drm;
 	struct nouveau_fence *fence;
-	struct nouveau_migrate_hmem hmem;
+	dma_addr_t *dma;
+	unsigned long npages;
 };
 
 struct nouveau_migrate {
 	struct vm_area_struct *vma;
 	struct nouveau_drm *drm;
 	struct nouveau_fence *fence;
 	unsigned long npages;
-	struct nouveau_migrate_hmem hmem;
+	dma_addr_t *dma;
+	unsigned long dma_nr;
 };
 
-static void
-nouveau_migrate_hmem_fini(struct nouveau_drm *drm,
-			  struct nouveau_migrate_hmem *hmem)
-{
-	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
-
-	nouveau_mem_fini(&hmem->mem);
-	nvif_vmm_put(vmm, &hmem->vma);
-
-	if (hmem->sg) {
-		dma_unmap_sg_attrs(drm->dev->dev, hmem->sg,
-				   hmem->npages, DMA_BIDIRECTIONAL,
-				   DMA_ATTR_SKIP_CPU_SYNC);
-		kfree(hmem->sg);
-		hmem->sg = NULL;
-	}
-}
-
-static int
-nouveau_migrate_hmem_init(struct nouveau_drm *drm,
-			  struct nouveau_migrate_hmem *hmem,
-			  unsigned long npages,
-			  const unsigned long *pfns)
-{
-	struct nvif_vmm *vmm = &drm->client.vmm.vmm;
-	unsigned long i;
-	int ret;
-
-	hmem->sg = kzalloc(npages * sizeof(*hmem->sg), GFP_KERNEL);
-	if (hmem->sg == NULL)
-		return -ENOMEM;
-
-	for (i = 0, hmem->npages = 0; hmem->npages < npages; ++i) {
-		struct page *page;
-
-		if (!pfns[i] || pfns[i] == MIGRATE_PFN_ERROR)
-			continue;
-
-		page = migrate_pfn_to_page(pfns[i]);
-		if (page == NULL) {
-			ret = -EINVAL;
-			goto error;
-		}
-
-		sg_set_page(&hmem->sg[hmem->npages], page, PAGE_SIZE, 0);
-		hmem->npages++;
-	}
-	sg_mark_end(&hmem->sg[hmem->npages - 1]);
-
-	i = dma_map_sg_attrs(drm->dev->dev, hmem->sg, hmem->npages,
-			     DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
-	if (i != hmem->npages) {
-		ret = -ENOMEM;
-		goto error;
-	}
-
-	ret = nouveau_mem_sgl(&hmem->mem, &drm->client,
-			      hmem->npages, hmem->sg);
-	if (ret)
-		goto error;
-
-	ret = nvif_vmm_get(vmm, LAZY, false, hmem->mem.mem.page,
-			   0, hmem->mem.mem.size, &hmem->vma);
-	if (ret)
-		goto error;
-
-	ret = nouveau_mem_map(&hmem->mem, vmm, &hmem->vma);
-	if (ret)
-		goto error;
-
-	return 0;
-
-error:
-	nouveau_migrate_hmem_fini(drm, hmem);
-	return ret;
-}
-
-
 static void
 nouveau_dmem_free(struct hmm_devmem *devmem, struct page *page)
 {
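The hunk above drops the scatterlist plus nvif VMM intermediate and instead keeps a bare array of DMA addresses, one per system-memory page handed to the copy engine. A minimal sketch of that pattern follows, assuming a kernel build environment; the demo_ names are hypothetical and not part of the driver, and DMA_BIDIRECTIONAL is the modern spelling of the PCI_DMA_BIDIRECTIONAL alias used elsewhere in the patch.

#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/slab.h>

/* Sketch only: a batch of per-page DMA mappings tracked for later unmap. */
struct demo_dma_batch {
	dma_addr_t *dma;	/* one bus address per successfully mapped page */
	unsigned long nr;	/* number of valid entries in @dma */
};

static int demo_map_page(struct device *dev, struct demo_dma_batch *batch,
			 struct page *page)
{
	dma_addr_t addr;

	/* Map a single page for bidirectional DMA, skipping CPU cache sync. */
	addr = dma_map_page_attrs(dev, page, 0, PAGE_SIZE,
				  DMA_BIDIRECTIONAL, DMA_ATTR_SKIP_CPU_SYNC);
	if (dma_mapping_error(dev, addr))
		return -ENOMEM;

	/* Record the bus address so dma_unmap_page() can be called later. */
	batch->dma[batch->nr++] = addr;
	return 0;
}

Recording each address is what makes the later teardown possible: dma_unmap_page() needs the exact bus address and size that were mapped.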
@@ -218,7 +135,8 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
 {
 	struct nouveau_dmem_fault *fault = private;
 	struct nouveau_drm *drm = fault->drm;
-	unsigned long addr, i, c, npages = 0;
+	struct device *dev = drm->dev->dev;
+	unsigned long addr, i, npages = 0;
 	nouveau_migrate_copy_t copy;
 	int ret;
 
@@ -243,14 +161,14 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
 		npages++;
 	}
 
-	/* Create scatter list FIXME: get rid of scatter list */
-	ret = nouveau_migrate_hmem_init(drm, &fault->hmem, npages, dst_pfns);
-	if (ret)
+	/* Allocate storage for DMA addresses, so we can unmap later. */
+	fault->dma = kmalloc(sizeof(*fault->dma) * npages, GFP_KERNEL);
+	if (!fault->dma)
 		goto error;
 
 	/* Copy things over */
 	copy = drm->dmem->migrate.copy_func;
-	for (addr = start, i = c = 0; addr < end; addr += PAGE_SIZE, i++) {
+	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
 		struct nouveau_dmem_chunk *chunk;
 		struct page *spage, *dpage;
 		u64 src_addr, dst_addr;
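As a hedged aside, the kmalloc() above sizes the array with an open-coded multiplication; kmalloc_array() performs the same allocation with an overflow check. The demo_ helper below is only an illustration of that alternative, not the driver's code.

#include <linux/slab.h>
#include <linux/types.h>

/* Sketch only: overflow-checked allocation of a dma_addr_t array. */
static dma_addr_t *demo_alloc_dma_array(unsigned long npages)
{
	return kmalloc_array(npages, sizeof(dma_addr_t), GFP_KERNEL);
}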
@@ -259,21 +177,30 @@ nouveau_dmem_fault_alloc_and_copy(struct vm_area_struct *vma,
 		if (!dpage || dst_pfns[i] == MIGRATE_PFN_ERROR)
 			continue;
 
-		dst_addr = fault->hmem.vma.addr + (c << PAGE_SHIFT);
-		c++;
-
 		spage = migrate_pfn_to_page(src_pfns[i]);
 		if (!spage || !(src_pfns[i] & MIGRATE_PFN_MIGRATE)) {
 			dst_pfns[i] = MIGRATE_PFN_ERROR;
 			__free_page(dpage);
 			continue;
 		}
 
+		fault->dma[fault->npages] =
+			dma_map_page_attrs(dev, dpage, 0, PAGE_SIZE,
+					   PCI_DMA_BIDIRECTIONAL,
+					   DMA_ATTR_SKIP_CPU_SYNC);
+		if (dma_mapping_error(dev, fault->dma[fault->npages])) {
+			dst_pfns[i] = MIGRATE_PFN_ERROR;
+			__free_page(dpage);
+			continue;
+		}
+
+		dst_addr = fault->dma[fault->npages++];
+
 		chunk = (void *)hmm_devmem_page_get_drvdata(spage);
 		src_addr = page_to_pfn(spage) - chunk->pfn_first;
 		src_addr = (src_addr << PAGE_SHIFT) + chunk->bo->bo.offset;
 
-		ret = copy(drm, 1, NOUVEAU_APER_VIRT, dst_addr,
+		ret = copy(drm, 1, NOUVEAU_APER_HOST, dst_addr,
 			   NOUVEAU_APER_VRAM, src_addr);
 		if (ret) {
 			dst_pfns[i] = MIGRATE_PFN_ERROR;
@@ -321,7 +248,12 @@ void nouveau_dmem_fault_finalize_and_map(struct vm_area_struct *vma,
 		 * the hmem object below (nouveau_migrate_hmem_fini()).
 		 */
 	}
-	nouveau_migrate_hmem_fini(drm, &fault->hmem);
+
+	while (fault->npages--) {
+		dma_unmap_page(drm->dev->dev, fault->dma[fault->npages],
+			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	}
+	kfree(fault->dma);
 }
 
 static const struct migrate_vma_ops nouveau_dmem_fault_migrate_ops = {
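The teardown added in this hunk walks the recorded addresses backwards, unmapping each page before freeing the array. A sketch of the same unwind is below, reusing the hypothetical demo_dma_batch from the earlier sketch; again this is an illustration, not the driver function.

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Sketch only: undo every mapping recorded by demo_map_page(), then
 * release the address array itself. */
static void demo_unmap_batch(struct device *dev, struct demo_dma_batch *batch)
{
	while (batch->nr--)
		dma_unmap_page(dev, batch->dma[batch->nr],
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
	kfree(batch->dma);
	batch->dma = NULL;
}

Unwinding with a decrementing counter has the nice property that a partially built batch (for example after a dma_mapping_error() bail-out mid-loop) is cleaned up with the same code path as a complete one.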
@@ -732,7 +664,8 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
 {
 	struct nouveau_migrate *migrate = private;
 	struct nouveau_drm *drm = migrate->drm;
-	unsigned long addr, i, c, npages = 0;
+	struct device *dev = drm->dev->dev;
+	unsigned long addr, i, npages = 0;
 	nouveau_migrate_copy_t copy;
 	int ret;
 
@@ -758,14 +691,14 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
 	if (!npages)
 		return;
 
-	/* Create scatter list FIXME: get rid of scatter list */
-	ret = nouveau_migrate_hmem_init(drm, &migrate->hmem, npages, src_pfns);
-	if (ret)
+	/* Allocate storage for DMA addresses, so we can unmap later. */
+	migrate->dma = kmalloc(sizeof(*migrate->dma) * npages, GFP_KERNEL);
+	if (!migrate->dma)
 		goto error;
 
 	/* Copy things over */
 	copy = drm->dmem->migrate.copy_func;
-	for (addr = start, i = c = 0; addr < end; addr += PAGE_SIZE, i++) {
+	for (addr = start, i = 0; addr < end; addr += PAGE_SIZE, i++) {
 		struct nouveau_dmem_chunk *chunk;
 		struct page *spage, *dpage;
 		u64 src_addr, dst_addr;
@@ -785,11 +718,20 @@ nouveau_dmem_migrate_alloc_and_copy(struct vm_area_struct *vma,
 			continue;
 		}
 
-		src_addr = migrate->hmem.vma.addr + (c << PAGE_SHIFT);
-		c++;
+		migrate->dma[migrate->dma_nr] =
+			dma_map_page_attrs(dev, spage, 0, PAGE_SIZE,
+					   PCI_DMA_BIDIRECTIONAL,
+					   DMA_ATTR_SKIP_CPU_SYNC);
+		if (dma_mapping_error(dev, migrate->dma[migrate->dma_nr])) {
+			nouveau_dmem_page_free_locked(drm, dpage);
+			dst_pfns[i] = 0;
+			continue;
+		}
+
+		src_addr = migrate->dma[migrate->dma_nr++];
 
 		ret = copy(drm, 1, NOUVEAU_APER_VRAM, dst_addr,
-			   NOUVEAU_APER_VIRT, src_addr);
+			   NOUVEAU_APER_HOST, src_addr);
 		if (ret) {
 			nouveau_dmem_page_free_locked(drm, dpage);
 			dst_pfns[i] = 0;
@@ -836,7 +778,12 @@ void nouveau_dmem_migrate_finalize_and_map(struct vm_area_struct *vma,
 		 * the hmem object below (nouveau_migrate_hmem_fini()) ?
 		 */
 	}
-	nouveau_migrate_hmem_fini(drm, &migrate->hmem);
+
+	while (migrate->dma_nr--) {
+		dma_unmap_page(drm->dev->dev, migrate->dma[migrate->dma_nr],
+			       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+	}
+	kfree(migrate->dma);
 
 	/*
 	 * FIXME optimization: update GPU page table to point to newly