
/arch/powerpc/kvm/book3s_64_mmu_host.c

https://bitbucket.org/abioy/linux
/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#define PTE_SIZE 12
#define VSID_ALL 0

/* #define DEBUG_MMU */
/* #define DEBUG_SLB */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SLB
#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_slb(a, ...) do { } while (0)
#endif

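/* Tear down a single shadow HPTE: remove it from the host hash table
 * via ppc_md.hpte_invalidate(), mark the cache slot unused by clearing
 * host_va and release the backing host page as dirty. */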
static void invalidate_pte(struct hpte_cache *pte)
{
	dprintk_mmu("KVM: Flushing SPT: 0x%llx (0x%llx) -> 0x%llx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
	pte->host_va = 0;
	kvm_release_pfn_dirty(pte->pfn);
}

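/* Flush all cached shadow PTEs whose effective address matches
 * guest_ea under ea_mask. An ea_mask of 0 matches every entry and
 * additionally resets the cache fill pointer, i.e. a full flush. */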
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, u64 guest_ea, u64 ea_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_ea &= ea_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.eaddr & ea_mask) == guest_ea)
			invalidate_pte(pte);
	}

	/* Doing a complete flush -> start from scratch */
	if (!ea_mask)
		vcpu->arch.hpte_cache_offset = 0;
}

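/* Like kvmppc_mmu_pte_flush(), but matches on the guest virtual page
 * number instead of the effective address. */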
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_vp &= vp_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(pte);
	}
}

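/* Flush all cached shadow PTEs whose guest real address falls into the
 * half-open range [pa_start, pa_end). */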
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, u64 pa_start, u64 pa_end)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%llx - 0x%llx\n",
		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.raddr >= pa_start) &&
		    (pte->pte.raddr < pa_end))
			invalidate_pte(pte);
	}
}

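/* Look up a cached shadow PTE by effective address: translate the EA
 * to a guest virtual page through the vcpu's MMU and scan the cache
 * for a matching vpage. Returns NULL on a miss. Note that the data
 * argument is currently unused; the translation always passes false. */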
struct kvmppc_pte *kvmppc_mmu_find_pte(struct kvm_vcpu *vcpu, u64 ea, bool data)
{
	int i;
	u64 guest_vp;

	guest_vp = vcpu->arch.mmu.ea_to_vp(vcpu, ea, false);
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if (pte->pte.vpage == guest_vp)
			return &pte->pte;
	}

	return NULL;
}

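/* Hand out the next free slot in the shadow PTE cache. Once the cache
 * is full, everything is flushed and filling restarts at slot 0. */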
static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return vcpu->arch.hpte_cache_offset++;
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
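
/* Worked example, assuming SID_MAP_BITS is 9 and SID_MAP_MASK is 0x1ff
 * (consistent with the 512-entry comment above): gvsid 0x10000 hashes
 * to ((0x10000 >> 9) & 0x1ff) ^ ((0x10000 >> 0) & 0x1ff)
 * = 0x80 ^ 0x0 = 0x80; all higher slices are zero. */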
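/* Look up the host VSID for a guest VSID. Usermode segments (MSR_PR
 * set) are tagged with VSID_PR so user and kernel translations of the
 * same guest VSID never alias. The entry may live either at its hash
 * slot or at the mirrored slot (SID_MAP_MASK - hash); see
 * create_sid_map() for why. */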
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	dprintk_slb("SLB: Searching 0x%llx -> not found\n", gvsid);
	return NULL;
}

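/* Install a shadow mapping for one guest page: resolve the guest real
 * address to a host pfn, translate the guest segment to a host VSID
 * (creating the segment mapping on demand), insert an HPTE into the
 * host hash table and remember it in the per-vcpu shadow PTE cache so
 * it can be invalidated later. */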
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	ulong hash, hpteg, va;
	u64 vsid;
	int ret;
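	/* 0x192 = HPTE_R_R | HPTE_R_C | HPTE_R_M | (pp = 0b10): reference
	 * and change bits preset, memory coherence required (bit decode
	 * against asm/mmu-hash64.h; pp is tightened to 0b11 below for
	 * read-only pages) */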
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;

	/* Get host physical address for gpa */
	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	/* gfn_to_pfn() hands back a pfn, so test it as one */
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for ea %llx!\n",
		       orig_pte->eaddr);
		return -EINVAL;
	}
	hpaddr <<= PAGE_SHIFT;
#if PAGE_SHIFT == 12
#elif PAGE_SHIFT == 16
	hpaddr |= orig_pte->raddr & 0xf000;
#else
#error Unknown page size
#endif

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		map = find_sid_vsid(vcpu, vsid);
	}
	BUG_ON(!map);

	vsid = map->host_vsid;
	va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

	if (!orig_pte->may_write)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;

	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);

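	/* Try the primary hash group first. If it is full, retry; from the
	 * second attempt on, evict an existing entry with
	 * ppc_md.hpte_remove() to make room. With USE_SECONDARY defined,
	 * retries instead alternate between the primary and secondary
	 * hash groups via HPTE_V_SECONDARY. */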
map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0)
			return -1;

	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags,
				 MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
#ifdef USE_SECONDARY
		hash = ~hash;
		attempt++;
		if (attempt % 2)
			vflags = HPTE_V_SECONDARY;
		else
			vflags = 0;
#else
		attempt = 2;
#endif
		goto map_again;
	} else {
		int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
		struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];

		dprintk_mmu("KVM: %c%c Map 0x%llx: [%lx] 0x%lx (0x%llx) -> %lx\n",
			    ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
			    (rflags & HPTE_R_N) ? '-' : 'x',
			    orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);

		pte->slot = hpteg + (ret & 7);
		pte->host_va = va;
		pte->pte = *orig_pte;
		pte->pfn = hpaddr >> PAGE_SHIFT;
	}

	return 0;
}

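/* Allocate a new guest->host VSID mapping. Host VSIDs are handed out
 * sequentially; once vsid_next reaches vsid_max, all mappings, shadow
 * PTEs and shadow segments are flushed and allocation restarts at
 * vsid_first. */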
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	/* Different guest VSIDs can collide on the same hash slot, so
	 * alternate between the forward and the mirrored slot on
	 * successive allocations */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	return map;
}

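/* Pick a shadow SLB slot for the given ESID: reuse an existing entry
 * for the same ESID if there is one, else take a previously
 * invalidated slot, else append a new one. Slot 0 is never handed out,
 * and when the shadow SLB is full all segments are purged first. */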
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	if (!get_paca()->kvm_slb_max)
		get_paca()->kvm_slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < get_paca()->kvm_slb_max; i++) {
		if (!(get_paca()->kvm_slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((get_paca()->kvm_slb[i].esid & ESID_MASK) == esid)
			return i;
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0)
		return found_inval;

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if (get_paca()->kvm_slb_max == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = get_paca()->kvm_slb_max;
	get_paca()->kvm_slb_max++;

	return r;
}

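/* Enter a shadow SLB entry for the segment containing eaddr: look up
 * (or create) the guest->host VSID mapping and build the esid/vsid
 * pair that is loaded into the hardware SLB on guest entry (the slbmte
 * itself happens in the entry code, not here). */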
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		get_paca()->kvm_slb[slb_index].esid = 0;
		return -ENOENT;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

	get_paca()->kvm_slb[slb_index].esid = slb_esid;
	get_paca()->kvm_slb[slb_index].vsid = slb_vsid;

	dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);

	return 0;
}

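/* Reset the shadow SLB to empty: the fill pointer goes back to 1 and
 * entry 0's esid is cleared. */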
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	get_paca()->kvm_slb_max = 1;
	get_paca()->kvm_slb[0].esid = 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}