
Commit d7b417f

tlendacky authored and KAGA-KOKO committed
x86/mm: Add DMA support for SEV memory encryption
DMA access to encrypted memory cannot be performed when SEV is active.
In order for DMA to properly work when SEV is active, the SWIOTLB
bounce buffers must be used.

Signed-off-by: Tom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: Brijesh Singh <brijesh.singh@amd.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Borislav Petkov <bp@suse.de>
Cc: kvm@vger.kernel.org
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Link: https://lkml.kernel.org/r/20171020143059.3291-12-brijesh.singh@amd.com
1 parent 0e4c12b commit d7b417f
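At the driver level nothing changes: a coherent allocation made through the regular DMA API is simply routed through the new SEV-aware ops once this patch installs them. A minimal sketch of a consumer (the helper name, device pointer, and buffer size here are hypothetical illustrations, not part of this patch):

    #include <linux/dma-mapping.h>
    #include <linux/gfp.h>

    /*
     * Hypothetical driver helper. With SEV active, dma_alloc_coherent()
     * is dispatched through sev_dma_ops.alloc (sev_alloc in the diff
     * below), so the buffer is either remapped decrypted or taken from
     * the SWIOTLB bounce-buffer pool and the device sees plaintext data.
     */
    static void *example_alloc_dma_buffer(struct device *dev, dma_addr_t *bus_addr)
    {
            return dma_alloc_coherent(dev, PAGE_SIZE, bus_addr, GFP_KERNEL);
    }

The symmetric dma_free_coherent() call would likewise be dispatched to sev_free, which re-encrypts the pages before returning them to the allocator.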

File tree

arch/x86/mm/mem_encrypt.c
lib/swiotlb.c

2 files changed: +89 -2 lines changed

arch/x86/mm/mem_encrypt.c

Lines changed: 86 additions & 0 deletions
@@ -192,6 +192,70 @@ void __init sme_early_init(void)
 	/* Update the protection map with memory encryption mask */
 	for (i = 0; i < ARRAY_SIZE(protection_map); i++)
 		protection_map[i] = pgprot_encrypted(protection_map[i]);
+
+	if (sev_active())
+		swiotlb_force = SWIOTLB_FORCE;
+}
+
+static void *sev_alloc(struct device *dev, size_t size, dma_addr_t *dma_handle,
+		       gfp_t gfp, unsigned long attrs)
+{
+	unsigned long dma_mask;
+	unsigned int order;
+	struct page *page;
+	void *vaddr = NULL;
+
+	dma_mask = dma_alloc_coherent_mask(dev, gfp);
+	order = get_order(size);
+
+	/*
+	 * Memory will be memset to zero after marking decrypted, so don't
+	 * bother clearing it before.
+	 */
+	gfp &= ~__GFP_ZERO;
+
+	page = alloc_pages_node(dev_to_node(dev), gfp, order);
+	if (page) {
+		dma_addr_t addr;
+
+		/*
+		 * Since we will be clearing the encryption bit, check the
+		 * mask with it already cleared.
+		 */
+		addr = __sme_clr(phys_to_dma(dev, page_to_phys(page)));
+		if ((addr + size) > dma_mask) {
+			__free_pages(page, get_order(size));
+		} else {
+			vaddr = page_address(page);
+			*dma_handle = addr;
+		}
+	}
+
+	if (!vaddr)
+		vaddr = swiotlb_alloc_coherent(dev, size, dma_handle, gfp);
+
+	if (!vaddr)
+		return NULL;
+
+	/* Clear the SME encryption bit for DMA use if not swiotlb area */
+	if (!is_swiotlb_buffer(dma_to_phys(dev, *dma_handle))) {
+		set_memory_decrypted((unsigned long)vaddr, 1 << order);
+		memset(vaddr, 0, PAGE_SIZE << order);
+		*dma_handle = __sme_clr(*dma_handle);
+	}
+
+	return vaddr;
+}
+
+static void sev_free(struct device *dev, size_t size, void *vaddr,
+		     dma_addr_t dma_handle, unsigned long attrs)
+{
+	/* Set the SME encryption bit for re-use if not swiotlb area */
+	if (!is_swiotlb_buffer(dma_to_phys(dev, dma_handle)))
+		set_memory_encrypted((unsigned long)vaddr,
+				     1 << get_order(size));
+
+	swiotlb_free_coherent(dev, size, vaddr, dma_handle);
 }
 
 /*
@@ -218,6 +282,20 @@ bool sev_active(void)
 }
 EXPORT_SYMBOL_GPL(sev_active);
 
+static const struct dma_map_ops sev_dma_ops = {
+	.alloc                  = sev_alloc,
+	.free                   = sev_free,
+	.map_page               = swiotlb_map_page,
+	.unmap_page             = swiotlb_unmap_page,
+	.map_sg                 = swiotlb_map_sg_attrs,
+	.unmap_sg               = swiotlb_unmap_sg_attrs,
+	.sync_single_for_cpu    = swiotlb_sync_single_for_cpu,
+	.sync_single_for_device = swiotlb_sync_single_for_device,
+	.sync_sg_for_cpu        = swiotlb_sync_sg_for_cpu,
+	.sync_sg_for_device     = swiotlb_sync_sg_for_device,
+	.mapping_error          = swiotlb_dma_mapping_error,
+};
+
 /* Architecture __weak replacement functions */
 void __init mem_encrypt_init(void)
 {
@@ -227,6 +305,14 @@ void __init mem_encrypt_init(void)
 	/* Call into SWIOTLB to update the SWIOTLB DMA buffers */
 	swiotlb_update_mem_attributes();
 
+	/*
+	 * With SEV, DMA operations cannot use encryption. New DMA ops
+	 * are required in order to mark the DMA areas as decrypted or
+	 * to use bounce buffers.
+	 */
+	if (sev_active())
+		dma_ops = &sev_dma_ops;
+
 	pr_info("AMD Secure Memory Encryption (SME) active\n");
 }
 
lib/swiotlb.c

Lines changed: 3 additions & 2 deletions
@@ -507,8 +507,9 @@ phys_addr_t swiotlb_tbl_map_single(struct device *hwdev,
 	if (no_iotlb_memory)
 		panic("Can not allocate SWIOTLB buffer earlier and can't now provide you with the DMA bounce buffer");
 
-	if (sme_active())
-		pr_warn_once("SME is active and system is using DMA bounce buffers\n");
+	if (mem_encrypt_active())
+		pr_warn_once("%s is active and system is using DMA bounce buffers\n",
+			     sme_active() ? "SME" : "SEV");
 
 	mask = dma_get_seg_boundary(hwdev);
 