Skip to content

Commit 0d00c48

Browse files
committed
drm/vmwgfx: Fix the driver for large dma addresses
With dma compliance / IOMMU support added to the driver in kernel 3.13, the dma addresses can exceed 44 bits, which is what we support in 32-bit mode and with GMR1. So in 32-bit mode and optionally in 64-bit mode, restrict the dma addresses to 44 bits, and strip the old GMR1 code. Signed-off-by: Thomas Hellstrom <thellstrom@vmware.com> Reviewed-by: Jakob Bornecrantz <jakob@vmware.com> Cc: stable@vger.kernel.org
1 parent c5416d6 commit 0d00c48

File tree

3 files changed

+39
-169
lines changed

3 files changed

+39
-169
lines changed

drivers/gpu/drm/vmwgfx/vmwgfx_drv.c

Lines changed: 36 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -189,6 +189,7 @@ static int enable_fbdev = IS_ENABLED(CONFIG_DRM_VMWGFX_FBCON);
189189
static int vmw_force_iommu;
190190
static int vmw_restrict_iommu;
191191
static int vmw_force_coherent;
192+
static int vmw_restrict_dma_mask;
192193

193194
static int vmw_probe(struct pci_dev *, const struct pci_device_id *);
194195
static void vmw_master_init(struct vmw_master *);
@@ -203,6 +204,8 @@ MODULE_PARM_DESC(restrict_iommu, "Try to limit IOMMU usage for TTM pages");
203204
module_param_named(restrict_iommu, vmw_restrict_iommu, int, 0600);
204205
MODULE_PARM_DESC(force_coherent, "Force coherent TTM pages");
205206
module_param_named(force_coherent, vmw_force_coherent, int, 0600);
207+
MODULE_PARM_DESC(restrict_dma_mask, "Restrict DMA mask to 44 bits with IOMMU");
208+
module_param_named(restrict_dma_mask, vmw_restrict_dma_mask, int, 0600);
206209

207210

208211
static void vmw_print_capabilities(uint32_t capabilities)
@@ -510,6 +513,33 @@ static int vmw_dma_select_mode(struct vmw_private *dev_priv)
510513
return 0;
511514
}
512515

516+
/**
517+
* vmw_dma_masks - set required page- and dma masks
518+
*
519+
* @dev_priv: Pointer to struct vmw_private
520+
*
521+
* With 32-bit we can only handle 32 bit PFNs. Optionally set that
522+
* restriction also for 64-bit systems.
523+
*/
524+
#ifdef CONFIG_INTEL_IOMMU
525+
static int vmw_dma_masks(struct vmw_private *dev_priv)
526+
{
527+
struct drm_device *dev = dev_priv->dev;
528+
529+
if (intel_iommu_enabled &&
530+
(sizeof(unsigned long) == 4 || vmw_restrict_dma_mask)) {
531+
DRM_INFO("Restricting DMA addresses to 44 bits.\n");
532+
return dma_set_mask(dev->dev, DMA_BIT_MASK(44));
533+
}
534+
return 0;
535+
}
536+
#else
537+
static int vmw_dma_masks(struct vmw_private *dev_priv)
538+
{
539+
return 0;
540+
}
541+
#endif
542+
513543
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
514544
{
515545
struct vmw_private *dev_priv;
@@ -578,14 +608,9 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
578608

579609
vmw_get_initial_size(dev_priv);
580610

581-
if (dev_priv->capabilities & SVGA_CAP_GMR) {
582-
dev_priv->max_gmr_descriptors =
583-
vmw_read(dev_priv,
584-
SVGA_REG_GMR_MAX_DESCRIPTOR_LENGTH);
611+
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
585612
dev_priv->max_gmr_ids =
586613
vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
587-
}
588-
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
589614
dev_priv->max_gmr_pages =
590615
vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
591616
dev_priv->memory_size =
@@ -599,17 +624,17 @@ static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
599624
dev_priv->memory_size = 512*1024*1024;
600625
}
601626

627+
ret = vmw_dma_masks(dev_priv);
628+
if (unlikely(ret != 0))
629+
goto out_err0;
630+
602631
mutex_unlock(&dev_priv->hw_mutex);
603632

604633
vmw_print_capabilities(dev_priv->capabilities);
605634

606-
if (dev_priv->capabilities & SVGA_CAP_GMR) {
635+
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
607636
DRM_INFO("Max GMR ids is %u\n",
608637
(unsigned)dev_priv->max_gmr_ids);
609-
DRM_INFO("Max GMR descriptors is %u\n",
610-
(unsigned)dev_priv->max_gmr_descriptors);
611-
}
612-
if (dev_priv->capabilities & SVGA_CAP_GMR2) {
613638
DRM_INFO("Max number of GMR pages is %u\n",
614639
(unsigned)dev_priv->max_gmr_pages);
615640
DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",

drivers/gpu/drm/vmwgfx/vmwgfx_drv.h

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -290,7 +290,6 @@ struct vmw_private {
290290
__le32 __iomem *mmio_virt;
291291
int mmio_mtrr;
292292
uint32_t capabilities;
293-
uint32_t max_gmr_descriptors;
294293
uint32_t max_gmr_ids;
295294
uint32_t max_gmr_pages;
296295
uint32_t memory_size;

drivers/gpu/drm/vmwgfx/vmwgfx_gmr.c

Lines changed: 3 additions & 157 deletions
Original file line numberDiff line numberDiff line change
@@ -125,181 +125,27 @@ static void vmw_gmr2_unbind(struct vmw_private *dev_priv,
125125
}
126126

127127

128-
static void vmw_gmr_free_descriptors(struct device *dev, dma_addr_t desc_dma,
129-
struct list_head *desc_pages)
130-
{
131-
struct page *page, *next;
132-
struct svga_guest_mem_descriptor *page_virtual;
133-
unsigned int desc_per_page = PAGE_SIZE /
134-
sizeof(struct svga_guest_mem_descriptor) - 1;
135-
136-
if (list_empty(desc_pages))
137-
return;
138-
139-
list_for_each_entry_safe(page, next, desc_pages, lru) {
140-
list_del_init(&page->lru);
141-
142-
if (likely(desc_dma != DMA_ADDR_INVALID)) {
143-
dma_unmap_page(dev, desc_dma, PAGE_SIZE,
144-
DMA_TO_DEVICE);
145-
}
146-
147-
page_virtual = kmap_atomic(page);
148-
desc_dma = (dma_addr_t)
149-
le32_to_cpu(page_virtual[desc_per_page].ppn) <<
150-
PAGE_SHIFT;
151-
kunmap_atomic(page_virtual);
152-
153-
__free_page(page);
154-
}
155-
}
156-
157-
/**
158-
* FIXME: Adjust to the ttm lowmem / highmem storage to minimize
159-
* the number of used descriptors.
160-
*
161-
*/
162-
163-
static int vmw_gmr_build_descriptors(struct device *dev,
164-
struct list_head *desc_pages,
165-
struct vmw_piter *iter,
166-
unsigned long num_pages,
167-
dma_addr_t *first_dma)
168-
{
169-
struct page *page;
170-
struct svga_guest_mem_descriptor *page_virtual = NULL;
171-
struct svga_guest_mem_descriptor *desc_virtual = NULL;
172-
unsigned int desc_per_page;
173-
unsigned long prev_pfn;
174-
unsigned long pfn;
175-
int ret;
176-
dma_addr_t desc_dma;
177-
178-
desc_per_page = PAGE_SIZE /
179-
sizeof(struct svga_guest_mem_descriptor) - 1;
180-
181-
while (likely(num_pages != 0)) {
182-
page = alloc_page(__GFP_HIGHMEM);
183-
if (unlikely(page == NULL)) {
184-
ret = -ENOMEM;
185-
goto out_err;
186-
}
187-
188-
list_add_tail(&page->lru, desc_pages);
189-
page_virtual = kmap_atomic(page);
190-
desc_virtual = page_virtual - 1;
191-
prev_pfn = ~(0UL);
192-
193-
while (likely(num_pages != 0)) {
194-
pfn = vmw_piter_dma_addr(iter) >> PAGE_SHIFT;
195-
196-
if (pfn != prev_pfn + 1) {
197-
198-
if (desc_virtual - page_virtual ==
199-
desc_per_page - 1)
200-
break;
201-
202-
(++desc_virtual)->ppn = cpu_to_le32(pfn);
203-
desc_virtual->num_pages = cpu_to_le32(1);
204-
} else {
205-
uint32_t tmp =
206-
le32_to_cpu(desc_virtual->num_pages);
207-
desc_virtual->num_pages = cpu_to_le32(tmp + 1);
208-
}
209-
prev_pfn = pfn;
210-
--num_pages;
211-
vmw_piter_next(iter);
212-
}
213-
214-
(++desc_virtual)->ppn = DMA_PAGE_INVALID;
215-
desc_virtual->num_pages = cpu_to_le32(0);
216-
kunmap_atomic(page_virtual);
217-
}
218-
219-
desc_dma = 0;
220-
list_for_each_entry_reverse(page, desc_pages, lru) {
221-
page_virtual = kmap_atomic(page);
222-
page_virtual[desc_per_page].ppn = cpu_to_le32
223-
(desc_dma >> PAGE_SHIFT);
224-
kunmap_atomic(page_virtual);
225-
desc_dma = dma_map_page(dev, page, 0, PAGE_SIZE,
226-
DMA_TO_DEVICE);
227-
228-
if (unlikely(dma_mapping_error(dev, desc_dma)))
229-
goto out_err;
230-
}
231-
*first_dma = desc_dma;
232-
233-
return 0;
234-
out_err:
235-
vmw_gmr_free_descriptors(dev, DMA_ADDR_INVALID, desc_pages);
236-
return ret;
237-
}
238-
239-
static void vmw_gmr_fire_descriptors(struct vmw_private *dev_priv,
240-
int gmr_id, dma_addr_t desc_dma)
241-
{
242-
mutex_lock(&dev_priv->hw_mutex);
243-
244-
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
245-
wmb();
246-
vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, desc_dma >> PAGE_SHIFT);
247-
mb();
248-
249-
mutex_unlock(&dev_priv->hw_mutex);
250-
251-
}
252-
253128
int vmw_gmr_bind(struct vmw_private *dev_priv,
254129
const struct vmw_sg_table *vsgt,
255130
unsigned long num_pages,
256131
int gmr_id)
257132
{
258-
struct list_head desc_pages;
259-
dma_addr_t desc_dma = 0;
260-
struct device *dev = dev_priv->dev->dev;
261133
struct vmw_piter data_iter;
262-
int ret;
263134

264135
vmw_piter_start(&data_iter, vsgt, 0);
265136

266137
if (unlikely(!vmw_piter_next(&data_iter)))
267138
return 0;
268139

269-
if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
270-
return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
271-
272-
if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR)))
273-
return -EINVAL;
274-
275-
if (vsgt->num_regions > dev_priv->max_gmr_descriptors)
140+
if (unlikely(!(dev_priv->capabilities & SVGA_CAP_GMR2)))
276141
return -EINVAL;
277142

278-
INIT_LIST_HEAD(&desc_pages);
279-
280-
ret = vmw_gmr_build_descriptors(dev, &desc_pages, &data_iter,
281-
num_pages, &desc_dma);
282-
if (unlikely(ret != 0))
283-
return ret;
284-
285-
vmw_gmr_fire_descriptors(dev_priv, gmr_id, desc_dma);
286-
vmw_gmr_free_descriptors(dev, desc_dma, &desc_pages);
287-
288-
return 0;
143+
return vmw_gmr2_bind(dev_priv, &data_iter, num_pages, gmr_id);
289144
}
290145

291146

292147
void vmw_gmr_unbind(struct vmw_private *dev_priv, int gmr_id)
293148
{
294-
if (likely(dev_priv->capabilities & SVGA_CAP_GMR2)) {
149+
if (likely(dev_priv->capabilities & SVGA_CAP_GMR2))
295150
vmw_gmr2_unbind(dev_priv, gmr_id);
296-
return;
297-
}
298-
299-
mutex_lock(&dev_priv->hw_mutex);
300-
vmw_write(dev_priv, SVGA_REG_GMR_ID, gmr_id);
301-
wmb();
302-
vmw_write(dev_priv, SVGA_REG_GMR_DESCRIPTOR, 0);
303-
mb();
304-
mutex_unlock(&dev_priv->hw_mutex);
305151
}

0 commit comments

Comments
 (0)