@@ -40,74 +40,66 @@
 
 #include "trace_hv.h"
 
-/* Power architecture requires HPT is at least 256kB */
-#define PPC_MIN_HPT_ORDER	18
-
 static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
 			long pte_index, unsigned long pteh,
 			unsigned long ptel, unsigned long *pte_idx_ret);
 static void kvmppc_rmap_reset(struct kvm *kvm);
 
-long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
+int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order)
 {
 	unsigned long hpt = 0;
-	struct revmap_entry *rev;
+	int cma = 0;
 	struct page *page = NULL;
-	long order = KVM_DEFAULT_HPT_ORDER;
+	struct revmap_entry *rev;
+	unsigned long npte;
 
-	if (htab_orderp) {
-		order = *htab_orderp;
-		if (order < PPC_MIN_HPT_ORDER)
-			order = PPC_MIN_HPT_ORDER;
-	}
+	if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER))
+		return -EINVAL;
 
-	kvm->arch.hpt.cma = 0;
 	page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT));
 	if (page) {
 		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
 		memset((void *)hpt, 0, (1ul << order));
-		kvm->arch.hpt.cma = 1;
+		cma = 1;
 	}
 
-	/* Lastly try successively smaller sizes from the page allocator */
-	/* Only do this if userspace didn't specify a size via ioctl */
-	while (!hpt && order > PPC_MIN_HPT_ORDER && !htab_orderp) {
-		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
-				       __GFP_NOWARN, order - PAGE_SHIFT);
-		if (!hpt)
-			--order;
-	}
+	if (!hpt)
+		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT
+				       |__GFP_NOWARN, order - PAGE_SHIFT);
 
 	if (!hpt)
 		return -ENOMEM;
 
-	kvm->arch.hpt.virt = hpt;
-	kvm->arch.hpt.order = order;
-
-	atomic64_set(&kvm->arch.mmio_update, 0);
+	/* HPTEs are 2**4 bytes long */
+	npte = 1ul << (order - 4);
 
 	/* Allocate reverse map array */
-	rev = vmalloc(sizeof(struct revmap_entry) * kvmppc_hpt_npte(&kvm->arch.hpt));
+	rev = vmalloc(sizeof(struct revmap_entry) * npte);
 	if (!rev) {
-		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
-		goto out_freehpt;
+		pr_err("kvmppc_allocate_hpt: Couldn't alloc reverse map array\n");
+		if (cma)
+			kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
+		else
+			free_pages(hpt, order - PAGE_SHIFT);
+		return -ENOMEM;
 	}
-	kvm->arch.hpt.rev = rev;
-	kvm->arch.sdr1 = __pa(hpt) | (order - 18);
 
-	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
-		hpt, order, kvm->arch.lpid);
+	info->order = order;
+	info->virt = hpt;
+	info->cma = cma;
+	info->rev = rev;
 
-	if (htab_orderp)
-		*htab_orderp = order;
 	return 0;
+}
 
- out_freehpt:
-	if (kvm->arch.hpt.cma)
-		kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT));
-	else
-		free_pages(hpt, order - PAGE_SHIFT);
-	return -ENOMEM;
+void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info)
+{
+	atomic64_set(&kvm->arch.mmio_update, 0);
+	kvm->arch.hpt = *info;
+	kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18);
+
+	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
+		info->virt, (long)info->order, kvm->arch.lpid);
 }
 
 long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
@@ -141,23 +133,28 @@ long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
 		*htab_orderp = order;
 		err = 0;
 	} else {
-		err = kvmppc_alloc_hpt(kvm, htab_orderp);
-		order = *htab_orderp;
+		struct kvm_hpt_info info;
+
+		err = kvmppc_allocate_hpt(&info, *htab_orderp);
+		if (err < 0)
+			goto out;
+		kvmppc_set_hpt(kvm, &info);
 	}
 out:
 	mutex_unlock(&kvm->lock);
 	return err;
 }
 
-void kvmppc_free_hpt(struct kvm *kvm)
+void kvmppc_free_hpt(struct kvm_hpt_info *info)
 {
-	vfree(kvm->arch.hpt.rev);
-	if (kvm->arch.hpt.cma)
-		kvm_free_hpt_cma(virt_to_page(kvm->arch.hpt.virt),
-				 1 << (kvm->arch.hpt.order - PAGE_SHIFT));
-	else if (kvm->arch.hpt.virt)
-		free_pages(kvm->arch.hpt.virt,
-			   kvm->arch.hpt.order - PAGE_SHIFT);
+	vfree(info->rev);
+	if (info->cma)
+		kvm_free_hpt_cma(virt_to_page(info->virt),
+				 1 << (info->order - PAGE_SHIFT));
+	else if (info->virt)
+		free_pages(info->virt, info->order - PAGE_SHIFT);
+	info->virt = 0;
+	info->order = 0;
 }
 
 /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
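
The net effect of this change is to split the old all-in-one kvmppc_alloc_hpt() into two steps: kvmppc_allocate_hpt() only allocates an HPT of 2**order bytes (trying CMA first, then the page allocator) plus its reverse-map array, filling in a caller-provided struct kvm_hpt_info, while kvmppc_set_hpt() activates that table for a particular guest by copying the info into kvm->arch and deriving SDR1 from its address and order. Since HPTEs are 16 bytes, the minimum order of 18 corresponds to a 256 kB table of 2**14 = 16384 entries. A caller now chains the two calls, as kvmppc_alloc_reset_hpt() does in the second hunk. The sketch below restates that calling pattern outside the diff; the wrapper name is hypothetical and it assumes the kernel context and the kvm_hpt_info layout shown above, not code from this commit.

/* Hypothetical wrapper, mirroring the call sequence that
 * kvmppc_alloc_reset_hpt() uses in the diff above.
 */
static int setup_guest_hpt(struct kvm *kvm, u32 order)
{
	struct kvm_hpt_info info;
	int err;

	/* Allocate 2**order bytes of HPT plus the revmap array;
	 * returns -EINVAL for orders outside
	 * [PPC_MIN_HPT_ORDER, PPC_MAX_HPT_ORDER]. */
	err = kvmppc_allocate_hpt(&info, order);
	if (err < 0)
		return err;

	/* Install the table for this guest: copies info into
	 * kvm->arch.hpt and programs kvm->arch.sdr1. */
	kvmppc_set_hpt(kvm, &info);
	return 0;
}

One design point worth noting: because allocation no longer touches struct kvm, an HPT can be prepared independently of the guest that will eventually use it, and kvmppc_free_hpt() correspondingly takes only the kvm_hpt_info, zeroing info->virt and info->order so a stale descriptor cannot be freed twice.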