author	2025-03-08 22:04:20 +0800
committer	2025-03-08 22:04:20 +0800
commit	a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a (patch)
tree	84f21bd0bf7071bc5fc7dd989e77d7ceb5476682 /arch/mips/kvm
Initial commit: OpenHarmony-v4.0-Release
Diffstat (limited to 'arch/mips/kvm')
-rw-r--r--	arch/mips/kvm/Kconfig	77
-rw-r--r--	arch/mips/kvm/Makefile	27
-rw-r--r--	arch/mips/kvm/callback.c	14
-rw-r--r--	arch/mips/kvm/commpage.c	32
-rw-r--r--	arch/mips/kvm/commpage.h	24
-rw-r--r--	arch/mips/kvm/dyntrans.c	143
-rw-r--r--	arch/mips/kvm/emulate.c	3292
-rw-r--r--	arch/mips/kvm/entry.c	955
-rw-r--r--	arch/mips/kvm/fpu.S	125
-rw-r--r--	arch/mips/kvm/hypcall.c	53
-rw-r--r--	arch/mips/kvm/interrupt.c	175
-rw-r--r--	arch/mips/kvm/interrupt.h	59
-rw-r--r--	arch/mips/kvm/loongson_ipi.c	214
-rw-r--r--	arch/mips/kvm/mips.c	1701
-rw-r--r--	arch/mips/kvm/mmu.c	1236
-rw-r--r--	arch/mips/kvm/msa.S	161
-rw-r--r--	arch/mips/kvm/stats.c	63
-rw-r--r--	arch/mips/kvm/tlb.c	700
-rw-r--r--	arch/mips/kvm/trace.h	346
-rw-r--r--	arch/mips/kvm/trap_emul.c	1306
-rw-r--r--	arch/mips/kvm/vz.c	3331
21 files changed, 14034 insertions, 0 deletions
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
new file mode 100644
index 000000000..032b3fca6
--- /dev/null
+++ b/arch/mips/kvm/Kconfig
@@ -0,0 +1,77 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# KVM configuration
+#
+source "virt/kvm/Kconfig"
+
+menuconfig VIRTUALIZATION
+	bool "Virtualization"
+	help
+	  Say Y here to get to see options for using your Linux host to run
+	  other operating systems inside virtual machines (guests).
+	  This option alone does not add any kernel code.
+
+	  If you say N, all options in this submenu will be skipped and disabled.
+
+if VIRTUALIZATION
+
+config KVM
+	tristate "Kernel-based Virtual Machine (KVM) support"
+	depends on HAVE_KVM
+	depends on MIPS_FP_SUPPORT
+	select EXPORT_UASM
+	select PREEMPT_NOTIFIERS
+	select KVM_GENERIC_DIRTYLOG_READ_PROTECT
+	select HAVE_KVM_EVENTFD
+	select HAVE_KVM_VCPU_ASYNC_IOCTL
+	select KVM_MMIO
+	select MMU_NOTIFIER
+	select SRCU
+	help
+	  Support for hosting Guest kernels.
+
+choice
+	prompt "Virtualization mode"
+	depends on KVM
+	default KVM_MIPS_TE
+
+config KVM_MIPS_TE
+	bool "Trap & Emulate"
+	depends on CPU_MIPS32_R2
+	help
+	  Use trap and emulate to virtualize 32-bit guests in user mode. This
+	  does not require any special hardware Virtualization support beyond
+	  standard MIPS32 r2 or later, but it does require the guest kernel
+	  to be configured with CONFIG_KVM_GUEST=y so that it resides in the
+	  user address segment.
+
+config KVM_MIPS_VZ
+	bool "MIPS Virtualization (VZ) ASE"
+	help
+	  Use the MIPS Virtualization (VZ) ASE to virtualize guests. This
+	  supports running unmodified guest kernels (with CONFIG_KVM_GUEST=n),
+	  but requires hardware support.
+
+endchoice
+
+config KVM_MIPS_DYN_TRANS
+	bool "KVM/MIPS: Dynamic binary translation to reduce traps"
+	depends on KVM_MIPS_TE
+	default y
+	help
+	  When running in Trap & Emulate mode patch privileged
+	  instructions to reduce the number of traps.
+
+	  If unsure, say Y.
+
+config KVM_MIPS_DEBUG_COP0_COUNTERS
+	bool "Maintain counters for COP0 accesses"
+	depends on KVM
+	help
+	  Maintain statistics for Guest COP0 accesses.
+	  A histogram of COP0 accesses is printed when the VM is
+	  shutdown.
+
+	  If unsure, say N.
+
+endif # VIRTUALIZATION
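
The KVM_MIPS_DYN_TRANS option above only makes sense on top of KVM_MIPS_TE. As a rough, hedged illustration of what "patch privileged instructions" means in practice, the emulation code later in this diff calls the dyntrans helpers after a privileged instruction has trapped and been emulated once; the surrounding trap context (inst, opc, vcpu) is assumed here and not part of this excerpt:

/*
 * Sketch only: after a privileged MFC0 has trapped and been emulated once,
 * the emulator can rewrite it in guest memory so the next execution reads
 * the commpage directly instead of trapping again.
 */
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
	kvm_mips_trans_mfc0(inst, opc, vcpu);	/* inst/opc/vcpu from the trap context */
#endif
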
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
new file mode 100644
index 000000000..506c4ac0b
--- /dev/null
+++ b/arch/mips/kvm/Makefile
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: GPL-2.0
+# Makefile for KVM support for MIPS
+#
+
+common-objs-y = $(addprefix ../../../virt/kvm/, kvm_main.o coalesced_mmio.o eventfd.o)
+
+EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
+
+common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
+
+kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
+	    interrupt.o stats.o commpage.o \
+	    fpu.o
+kvm-objs += hypcall.o
+kvm-objs += mmu.o
+ifdef CONFIG_CPU_LOONGSON64
+kvm-objs += loongson_ipi.o
+endif
+
+ifdef CONFIG_KVM_MIPS_VZ
+kvm-objs += vz.o
+else
+kvm-objs += dyntrans.o
+kvm-objs += trap_emul.o
+endif
+obj-$(CONFIG_KVM) += kvm.o
+obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/callback.c b/arch/mips/kvm/callback.c
new file mode 100644
index 000000000..d88aa2173
--- /dev/null
+++ b/arch/mips/kvm/callback.c
@@ -0,0 +1,14 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Yann Le Du <ledu@kymasys.com>
+ */
+
+#include <linux/export.h>
+#include <linux/kvm_host.h>
+
+struct kvm_mips_callbacks *kvm_mips_callbacks;
+EXPORT_SYMBOL_GPL(kvm_mips_callbacks);
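
callback.c only provides the shared ops pointer; each virtualization backend installs its own table at init time. A minimal sketch, assuming the registration pattern used by the two backends added in this commit (hook names illustrative, table contents elided; the real tables live in trap_emul.c and vz.c):

static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
	.queue_timer_int	= kvm_trap_emul_queue_timer_int,
	.dequeue_timer_int	= kvm_trap_emul_dequeue_timer_int,
	/* ... remaining hooks ... */
};

int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
{
	/* Publish the backend's ops through the pointer exported above */
	*install_callbacks = &kvm_trap_emul_callbacks;
	return 0;
}
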
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
new file mode 100644
index 000000000..5812e6145
--- /dev/null
+++ b/arch/mips/kvm/commpage.c
@@ -0,0 +1,32 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * commpage, currently used for Virtual COP0 registers.
+ * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/memblock.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+#include <linux/kvm_host.h>
+
+#include "commpage.h"
+
+void kvm_mips_commpage_init(struct kvm_vcpu *vcpu)
+{
+	struct kvm_mips_commpage *page = vcpu->arch.kseg0_commpage;
+
+	/* Specific init values for fields */
+	vcpu->arch.cop0 = &page->cop0;
+}
diff --git a/arch/mips/kvm/commpage.h b/arch/mips/kvm/commpage.h
new file mode 100644
index 000000000..08c5fa2bb
--- /dev/null
+++ b/arch/mips/kvm/commpage.h
@@ -0,0 +1,24 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: commpage: mapped into guest kernel space
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#ifndef __KVM_MIPS_COMMPAGE_H__
+#define __KVM_MIPS_COMMPAGE_H__
+
+struct kvm_mips_commpage {
+	/* COP0 state is mapped into Guest kernel via commpage */
+	struct mips_coproc cop0;
+};
+
+#define KVM_MIPS_COMM_EIDI_OFFSET	0x0
+
+extern void kvm_mips_commpage_init(struct kvm_vcpu *vcpu);
+
+#endif /* __KVM_MIPS_COMMPAGE_H__ */
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
new file mode 100644
index 000000000..d77b61b3d
--- /dev/null
+++ b/arch/mips/kvm/dyntrans.c
@@ -0,0 +1,143 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Binary Patching for privileged instructions, reduces traps.
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/highmem.h>
+#include <linux/kvm_host.h>
+#include <linux/uaccess.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/memblock.h>
+#include <asm/cacheflush.h>
+
+#include "commpage.h"
+
+/**
+ * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
+ * @vcpu:	Virtual CPU.
+ * @opc:	PC of instruction to replace.
+ * @replace:	Instruction to write.
+ */
+static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
+				  union mips_instruction replace)
+{
+	unsigned long vaddr = (unsigned long)opc;
+	int err;
+
+retry:
+	/* The GVA page table is still active so use the Linux TLB handlers */
+	kvm_trap_emul_gva_lockless_begin(vcpu);
+	err = put_user(replace.word, opc);
+	kvm_trap_emul_gva_lockless_end(vcpu);
+
+	if (unlikely(err)) {
+		/*
+		 * We write protect clean pages in GVA page table so normal
+		 * Linux TLB mod handler doesn't silently dirty the page.
+		 * It's also possible we raced with a GVA invalidation.
+		 * Try to force the page to become dirty.
+		 */
+		err = kvm_trap_emul_gva_fault(vcpu, vaddr, true);
+		if (unlikely(err)) {
+			kvm_info("%s: Address unwriteable: %p\n",
+				 __func__, opc);
+			return -EFAULT;
+		}
+
+		/*
+		 * Try again. This will likely trigger a TLB refill, which will
+		 * fetch the new dirty entry from the GVA page table, which
+		 * should then succeed.
+		 */
+		goto retry;
+	}
+	__local_flush_icache_user_range(vaddr, vaddr + 4);
+
+	return 0;
+}
+
+int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
+			       struct kvm_vcpu *vcpu)
+{
+	union mips_instruction nop_inst = { 0 };
+
+	/* Replace the CACHE instruction, with a NOP */
+	return kvm_mips_trans_replace(vcpu, opc, nop_inst);
+}
+
+/*
+ * Address based CACHE instructions are transformed into synci(s). A little
+ * heavy for just D-cache invalidates, but avoids an expensive trap
+ */
+int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
+			    struct kvm_vcpu *vcpu)
+{
+	union mips_instruction synci_inst = { 0 };
+
+	synci_inst.i_format.opcode = bcond_op;
+	synci_inst.i_format.rs = inst.i_format.rs;
+	synci_inst.i_format.rt = synci_op;
+	if (cpu_has_mips_r6)
+		synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
+	else
+		synci_inst.i_format.simmediate = inst.i_format.simmediate;
+
+	return kvm_mips_trans_replace(vcpu, opc, synci_inst);
+}
+
+int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
+			struct kvm_vcpu *vcpu)
+{
+	union mips_instruction mfc0_inst = { 0 };
+	u32 rd, sel;
+
+	rd = inst.c0r_format.rd;
+	sel = inst.c0r_format.sel;
+
+	if (rd == MIPS_CP0_ERRCTL && sel == 0) {
+		mfc0_inst.r_format.opcode = spec_op;
+		mfc0_inst.r_format.rd = inst.c0r_format.rt;
+		mfc0_inst.r_format.func = add_op;
+	} else {
+		mfc0_inst.i_format.opcode = lw_op;
+		mfc0_inst.i_format.rt = inst.c0r_format.rt;
+		mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+			offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+		if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+			mfc0_inst.i_format.simmediate |= 4;
+#endif
+	}
+
+	return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
+}
+
+int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
+			struct kvm_vcpu *vcpu)
+{
+	union mips_instruction mtc0_inst = { 0 };
+	u32 rd, sel;
+
+	rd = inst.c0r_format.rd;
+	sel = inst.c0r_format.sel;
+
+	mtc0_inst.i_format.opcode = sw_op;
+	mtc0_inst.i_format.rt = inst.c0r_format.rt;
+	mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+		offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+	if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+		mtc0_inst.i_format.simmediate |= 4;
+#endif
+
+	return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
+}
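
The immediate computed for the patched LW/SW above packs two things: the commpage base OR'd with the offset of the virtualized COP0 register, plus a +4 nudge so a 32-bit access hits the low word of an 8-byte register slot on big-endian hosts. Pulled out into a standalone helper for clarity (commpage_imm() is a hypothetical name, not part of this commit):

static u32 commpage_imm(u32 rd, u32 sel)
{
	/* Base of the commpage OR'd with the register's struct offset */
	u32 imm = KVM_GUEST_COMMPAGE_ADDR |
		  offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);

#ifdef CONFIG_CPU_BIG_ENDIAN
	/* Low 32 bits of an 8-byte slot live at byte offset +4 */
	if (sizeof(unsigned long) == 8)
		imm |= 4;
#endif
	return imm;
}
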
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
new file mode 100644
index 000000000..d70c4f8e1
--- /dev/null
+++ b/arch/mips/kvm/emulate.c
@@ -0,0 +1,3292 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS: Instruction/Exception emulation
+ *
+ * Copyright (C) 2012  MIPS Technologies, Inc.  All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/err.h>
+#include <linux/ktime.h>
+#include <linux/kvm_host.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/memblock.h>
+#include <linux/random.h>
+#include <asm/page.h>
+#include <asm/cacheflush.h>
+#include <asm/cacheops.h>
+#include <asm/cpu-info.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/inst.h>
+
+#undef CONFIG_MIPS_MT
+#include <asm/r4kcache.h>
+#define CONFIG_MIPS_MT
+
+#include "interrupt.h"
+#include "commpage.h"
+
+#include "trace.h"
+
+/*
+ * Compute the return address and do emulate branch simulation, if required.
+ * This function should be called only when the guest is in a branch delay
+ * slot.
+ */
+static int kvm_compute_return_epc(struct kvm_vcpu *vcpu, unsigned long instpc,
+				  unsigned long *out)
+{
+	unsigned int dspcontrol;
+	union mips_instruction insn;
+	struct kvm_vcpu_arch *arch = &vcpu->arch;
+	long epc = instpc;
+	long nextpc;
+	int err;
+
+	if (epc & 3) {
+		kvm_err("%s: unaligned epc\n", __func__);
+		return -EINVAL;
+	}
+
+	/* Read the instruction */
+	err = kvm_get_badinstrp((u32 *)epc, vcpu, &insn.word);
+	if (err)
+		return err;
+
+	switch (insn.i_format.opcode) {
+		/* jr and jalr are in r_format format. */
+	case spec_op:
+		switch (insn.r_format.func) {
+		case jalr_op:
+			arch->gprs[insn.r_format.rd] = epc + 8;
+			fallthrough;
+		case jr_op:
+			nextpc = arch->gprs[insn.r_format.rs];
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+
+		/*
+		 * This group contains:
+		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
+		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+		 */
+	case bcond_op:
+		switch (insn.i_format.rt) {
+		case bltz_op:
+		case bltzl_op:
+			if ((long)arch->gprs[insn.i_format.rs] < 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+
+		case bgez_op:
+		case bgezl_op:
+			if ((long)arch->gprs[insn.i_format.rs] >= 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+
+		case bltzal_op:
+		case bltzall_op:
+			arch->gprs[31] = epc + 8;
+			if ((long)arch->gprs[insn.i_format.rs] < 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+
+		case bgezal_op:
+		case bgezall_op:
+			arch->gprs[31] = epc + 8;
+			if ((long)arch->gprs[insn.i_format.rs] >= 0)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+		case bposge32_op:
+			if (!cpu_has_dsp) {
+				kvm_err("%s: DSP branch but not DSP ASE\n",
+					__func__);
+				return -EINVAL;
+			}
+
+			dspcontrol = rddsp(0x01);
+
+			if (dspcontrol >= 32)
+				epc = epc + 4 + (insn.i_format.simmediate << 2);
+			else
+				epc += 8;
+			nextpc = epc;
+			break;
+		default:
+			return -EINVAL;
+		}
+		break;
+
+		/* These are unconditional and in j_format. */
+	case jal_op:
+		arch->gprs[31] = instpc + 8;
+		fallthrough;
+	case j_op:
+		epc += 4;
+		epc >>= 28;
+		epc <<= 28;
+		epc |= (insn.j_format.target << 2);
+		nextpc = epc;
+		break;
+
+		/* These are conditional and in i_format. */
+	case beq_op:
+	case beql_op:
+		if (arch->gprs[insn.i_format.rs] ==
+		    arch->gprs[insn.i_format.rt])
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+	case bne_op:
+	case bnel_op:
+		if (arch->gprs[insn.i_format.rs] !=
+		    arch->gprs[insn.i_format.rt])
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+	case blez_op:	/* POP06 */
+#ifndef CONFIG_CPU_MIPSR6
+	case blezl_op:	/* removed in R6 */
+#endif
+		if (insn.i_format.rt != 0)
+			goto compact_branch;
+		if ((long)arch->gprs[insn.i_format.rs] <= 0)
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+	case bgtz_op:	/* POP07 */
+#ifndef CONFIG_CPU_MIPSR6
+	case bgtzl_op:	/* removed in R6 */
+#endif
+		if (insn.i_format.rt != 0)
+			goto compact_branch;
+		if ((long)arch->gprs[insn.i_format.rs] > 0)
+			epc = epc + 4 + (insn.i_format.simmediate << 2);
+		else
+			epc += 8;
+		nextpc = epc;
+		break;
+
+		/* And now the FPA/cp1 branch instructions. */
+	case cop1_op:
+		kvm_err("%s: unsupported cop1_op\n", __func__);
+		return -EINVAL;
+
+#ifdef CONFIG_CPU_MIPSR6
+	/* R6 added the following compact branches with forbidden slots */
+	case blezl_op:	/* POP26 */
+	case bgtzl_op:	/* POP27 */
+		/* only rt == 0 isn't compact branch */
+		if (insn.i_format.rt != 0)
+			goto compact_branch;
+		return -EINVAL;
+	case pop10_op:
+	case pop30_op:
+		/* only rs == rt == 0 is reserved, rest are compact branches */
+		if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
+			goto compact_branch;
+		return -EINVAL;
+	case pop66_op:
+	case pop76_op:
+		/* only rs == 0 isn't compact branch */
+		if (insn.i_format.rs != 0)
+			goto compact_branch;
+		return -EINVAL;
+compact_branch:
+		/*
+		 * If we've hit an exception on the forbidden slot, then
+		 * the branch must not have been taken.
+		 */
+		epc += 8;
+		nextpc = epc;
+		break;
+#else
+compact_branch:
+		/* Fall through - Compact branches not supported before R6 */
+#endif
+	default:
+		return -EINVAL;
+	}
+
+	*out = nextpc;
+	return 0;
+}
+
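
The j/jal arithmetic above (epc >>= 28; epc <<= 28; epc |= target << 2) splices the 26-bit target field into the 256MB segment of the delay-slot address. A standalone worked example with illustrative values only:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	int64_t epc = 0x80100ffc;	/* address of the jump instruction */
	uint32_t target = 0x0123456;	/* 26-bit j_format target field */

	epc += 4;			/* delay-slot address */
	epc >>= 28;			/* keep the top four address bits... */
	epc <<= 28;
	epc |= (int64_t)target << 2;	/* ...and splice in the target */

	assert(epc == 0x8048d158);
	return 0;
}
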
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
+{
+	int err;
+
+	if (cause & CAUSEF_BD) {
+		err = kvm_compute_return_epc(vcpu, vcpu->arch.pc,
+					     &vcpu->arch.pc);
+		if (err)
+			return EMULATE_FAIL;
+	} else {
+		vcpu->arch.pc += 4;
+	}
+
+	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
+
+	return EMULATE_DONE;
+}
+
+/**
+ * kvm_get_badinstr() - Get bad instruction encoding.
+ * @opc:	Guest pointer to faulting instruction.
+ * @vcpu:	KVM VCPU information.
+ *
+ * Gets the instruction encoding of the faulting instruction, using the saved
+ * BadInstr register value if it exists, otherwise falling back to reading guest
+ * memory at @opc.
+ *
+ * Returns:	The instruction encoding of the faulting instruction.
+ */
+int kvm_get_badinstr(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
+{
+	if (cpu_has_badinstr) {
+		*out = vcpu->arch.host_cp0_badinstr;
+		return 0;
+	} else {
+		return kvm_get_inst(opc, vcpu, out);
+	}
+}
+
+/**
+ * kvm_get_badinstrp() - Get bad prior instruction encoding.
+ * @opc:	Guest pointer to prior faulting instruction.
+ * @vcpu:	KVM VCPU information.
+ *
+ * Gets the instruction encoding of the prior faulting instruction (the branch
+ * containing the delay slot which faulted), using the saved BadInstrP register
+ * value if it exists, otherwise falling back to reading guest memory at @opc.
+ *
+ * Returns:	The instruction encoding of the prior faulting instruction.
+ */
+int kvm_get_badinstrp(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
+{
+	if (cpu_has_badinstrp) {
+		*out = vcpu->arch.host_cp0_badinstrp;
+		return 0;
+	} else {
+		return kvm_get_inst(opc, vcpu, out);
+	}
+}
+
+/**
+ * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
+ * @vcpu:	Virtual CPU.
+ *
+ * Returns:	1 if the CP0_Count timer is disabled by either the guest
+ *		CP0_Cause.DC bit or the count_ctl.DC bit.
+ *		0 otherwise (in which case CP0_Count timer is running).
+ */
+int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+	return	(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
+		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
+}
+
+/**
+ * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
+ *
+ * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
+{
+	s64 now_ns, periods;
+	u64 delta;
+
+	now_ns = ktime_to_ns(now);
+	delta = now_ns + vcpu->arch.count_dyn_bias;
+
+	if (delta >= vcpu->arch.count_period) {
+		/* If delta is out of safe range the bias needs adjusting */
+		periods = div64_s64(now_ns, vcpu->arch.count_period);
+		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
+		/* Recalculate delta with new bias */
+		delta = now_ns + vcpu->arch.count_dyn_bias;
+	}
+
+	/*
+	 * We've ensured that:
+	 *   delta < count_period
+	 *
+	 * Therefore the intermediate delta * count_hz will never overflow
+	 * since at the boundary condition:
+	 *   delta = count_period
+	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
+	 *   delta * count_hz = NSEC_PER_SEC * 2^32
+	 */
+	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
+}
+
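
To put numbers on the overflow argument in kvm_mips_ktime_to_count(), a standalone sketch assuming (illustratively) a 100 MHz CP0_Count frequency:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	const uint64_t NSEC_PER_SEC = 1000000000ULL;
	const uint64_t count_hz = 100000000ULL;	/* assumed 100 MHz */

	/* count_period as set up by kvm_mips_init_count() further below */
	uint64_t count_period = (NSEC_PER_SEC << 32) / count_hz;

	/* worst-case intermediate product, with delta just below the bound */
	uint64_t worst = (count_period - 1) * count_hz;

	printf("count_period = %llu ns (~%.1f s)\n",
	       (unsigned long long)count_period, count_period / 1e9);
	printf("delta * count_hz <= %llu, u64 max = %llu\n",
	       (unsigned long long)worst, (unsigned long long)UINT64_MAX);
	return 0;
}

With these values count_period is about 42.9 seconds, and the worst-case product stays below NSEC_PER_SEC * 2^32 (about 4.3e18), comfortably inside the u64 range.
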
+/**
+ * kvm_mips_count_time() - Get effective current time.
+ * @vcpu:	Virtual CPU.
+ *
+ * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
+ * except when the master disable bit is set in count_ctl, in which case it is
+ * count_resume, i.e. the time that the count was disabled.
+ *
+ * Returns:	Effective monotonic ktime for CP0_Count.
+ */
+static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
+{
+	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
+		return vcpu->arch.count_resume;
+
+	return ktime_get();
+}
+
+/**
+ * kvm_mips_read_count_running() - Read the current count value as if running.
+ * @vcpu:	Virtual CPU.
+ * @now:	Kernel time to read CP0_Count at.
+ *
+ * Returns the current guest CP0_Count register at time @now and handles if the
+ * timer interrupt is pending and hasn't been handled yet.
+ *
+ * Returns:	The current value of the guest CP0_Count register.
+ */
+static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	ktime_t expires, threshold;
+	u32 count, compare;
+	int running;
+
+	/* Calculate the biased and scaled guest CP0_Count */
+	count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+	compare = kvm_read_c0_guest_compare(cop0);
+
+	/*
+	 * Find whether CP0_Count has reached the closest timer interrupt. If
+	 * not, we shouldn't inject it.
+	 */
+	if ((s32)(count - compare) < 0)
+		return count;
+
+	/*
+	 * The CP0_Count we're going to return has already reached the closest
+	 * timer interrupt. Quickly check if it really is a new interrupt by
+	 * looking at whether the interval until the hrtimer expiry time is
+	 * less than 1/4 of the timer period.
+	 */
+	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
+	threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
+	if (ktime_before(expires, threshold)) {
+		/*
+		 * Cancel it while we handle it so there's no chance of
+		 * interference with the timeout handler.
+		 */
+		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
+
+		/* Nothing should be waiting on the timeout */
+		kvm_mips_callbacks->queue_timer_int(vcpu);
+
+		/*
+		 * Restart the timer if it was running based on the expiry time
+		 * we read, so that we don't push it back 2 periods.
+		 */
+		if (running) {
+			expires = ktime_add_ns(expires,
+					       vcpu->arch.count_period);
+			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
+				      HRTIMER_MODE_ABS);
+		}
+	}
+
+	return count;
+}
+
+/**
+ * kvm_mips_read_count() - Read the current count value.
+ * @vcpu:	Virtual CPU.
+ *
+ * Read the current guest CP0_Count value, taking into account whether the timer
+ * is stopped.
+ *
+ * Returns:	The current guest CP0_Count value.
+ */
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+
+	/* If count disabled just read static copy of count */
+	if (kvm_mips_count_disabled(vcpu))
+		return kvm_read_c0_guest_count(cop0);
+
+	return kvm_mips_read_count_running(vcpu, ktime_get());
+}
+
+/**
+ * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
+ * @vcpu:	Virtual CPU.
+ * @count:	Output pointer for CP0_Count value at point of freeze.
+ *
+ * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
+ * at the point it was frozen. It is guaranteed that any pending interrupts at
+ * the point it was frozen are handled, and none after that point.
+ *
+ * This is useful where the time/CP0_Count is needed in the calculation of the
+ * new parameters.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ *
+ * Returns:	The ktime at the point of freeze.
+ */
+ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
+{
+	ktime_t now;
+
+	/* stop hrtimer before finding time */
+	hrtimer_cancel(&vcpu->arch.comparecount_timer);
+	now = ktime_get();
+
+	/* find count at this point and handle pending hrtimer */
+	*count = kvm_mips_read_count_running(vcpu, now);
+
+	return now;
+}
+
+/**
+ * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
+ * @vcpu:	Virtual CPU.
+ * @now:	ktime at point of resume.
+ * @count:	CP0_Count at point of resume.
+ *
+ * Resumes the timer and updates the timer expiry based on @now and @count.
+ * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
+ * parameters need to be changed.
+ *
+ * It is guaranteed that a timer interrupt immediately after resume will be
+ * handled, but not if CP0_Compare is exactly at @count. That case is already
+ * handled by kvm_mips_freeze_hrtimer().
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
+ */
+static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
+				    ktime_t now, u32 count)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	u32 compare;
+	u64 delta;
+	ktime_t expire;
+
+	/* Calculate timeout (wrap 0 to 2^32) */
+	compare = kvm_read_c0_guest_compare(cop0);
+	delta = (u64)(u32)(compare - count - 1) + 1;
+	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
+	expire = ktime_add_ns(now, delta);
+
+	/* Update hrtimer to use new timeout */
+	hrtimer_cancel(&vcpu->arch.comparecount_timer);
+	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
+}
+
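
The "(wrap 0 to 2^32)" expression in kvm_mips_resume_hrtimer() computes the ticks remaining until Compare modulo 2^32, yielding a full period when Compare equals Count. A standalone check with illustrative values:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t compare, count;
	uint64_t delta;

	compare = 0; count = 0xffffffff;	/* one tick short of a match */
	delta = (uint64_t)(uint32_t)(compare - count - 1) + 1;
	assert(delta == 1);

	compare = 1000; count = 1000;		/* just matched: full 2^32 wrap */
	delta = (uint64_t)(uint32_t)(compare - count - 1) + 1;
	assert(delta == 0x100000000ULL);
	return 0;
}
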
+/**
+ * kvm_mips_restore_hrtimer() - Restore hrtimer after a gap, updating expiry.
+ * @vcpu:	Virtual CPU.
+ * @before:	Time before Count was saved, lower bound of drift calculation.
+ * @count:	CP0_Count at point of restore.
+ * @min_drift:	Minimum amount of drift permitted before correction.
+ *		Must be <= 0.
+ *
+ * Restores the timer from a particular @count, accounting for drift. This can
+ * be used in conjunction with kvm_mips_freeze_hrtimer() when a hardware timer
+ * is to be used for a period of time, but the exact ktime corresponding to the
+ * final Count that must be restored is not known.
+ *
+ * It is guaranteed that a timer interrupt immediately after restore will be
+ * handled, but not if CP0_Compare is exactly at @count. That case should
+ * already be handled when the hardware timer state is saved.
+ *
+ * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is not
+ * stopped).
+ *
+ * Returns:	Amount of correction to count_bias due to drift.
+ */
+int kvm_mips_restore_hrtimer(struct kvm_vcpu *vcpu, ktime_t before,
+			     u32 count, int min_drift)
+{
+	ktime_t now, count_time;
+	u32 now_count, before_count;
+	u64 delta;
+	int drift, ret = 0;
+
+	/* Calculate expected count at before */
+	before_count = vcpu->arch.count_bias +
+			kvm_mips_ktime_to_count(vcpu, before);
+
+	/*
+	 * Detect significantly negative drift, where count is lower than
+	 * expected. Some negative drift is expected when hardware counter is
+	 * set after kvm_mips_freeze_hrtimer(), and it is harmless to allow the
+	 * time to jump forwards a little, within reason. If the drift is too
+	 * significant, adjust the bias to avoid a big Guest.CP0_Count jump.
+	 */
+	drift = count - before_count;
+	if (drift < min_drift) {
+		count_time = before;
+		vcpu->arch.count_bias += drift;
+		ret = drift;
+		goto resume;
+	}
+
+	/* Calculate expected count right now */
+	now = ktime_get();
+	now_count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+
+	/*
+	 * Detect positive drift, where count is higher than expected, and
+	 * adjust the bias to avoid guest time going backwards.
+	 */
+	drift = count - now_count;
+	if (drift > 0) {
+		count_time = now;
+		vcpu->arch.count_bias += drift;
+		ret = drift;
+		goto resume;
+	}
+
+	/* Subtract nanosecond delta to find ktime when count was read */
+	delta = (u64)(u32)(now_count - count);
+	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
+	count_time = ktime_sub_ns(now, delta);
+
+resume:
+	/* Resume using the calculated ktime */
+	kvm_mips_resume_hrtimer(vcpu, count_time, count);
+	return ret;
+}
+
+/**
+ * kvm_mips_write_count() - Modify the count and update timer.
+ * @vcpu:	Virtual CPU.
+ * @count:	Guest CP0_Count value to set.
+ *
+ * Sets the CP0_Count value and updates the timer accordingly.
+ */
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	ktime_t now;
+
+	/* Calculate bias */
+	now = kvm_mips_count_time(vcpu);
+	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+	if (kvm_mips_count_disabled(vcpu))
+		/* The timer's disabled, adjust the static count */
+		kvm_write_c0_guest_count(cop0, count);
+	else
+		/* Update timeout */
+		kvm_mips_resume_hrtimer(vcpu, now, count);
+}
+
+/**
+ * kvm_mips_init_count() - Initialise timer.
+ * @vcpu:	Virtual CPU.
+ * @count_hz:	Frequency of timer.
+ *
+ * Initialise the timer to the specified frequency, zero it, and set it going if
+ * it's enabled.
+ */
+void kvm_mips_init_count(struct kvm_vcpu *vcpu, unsigned long count_hz)
+{
+	vcpu->arch.count_hz = count_hz;
+	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
+	vcpu->arch.count_dyn_bias = 0;
+
+	/* Starting at 0 */
+	kvm_mips_write_count(vcpu, 0);
+}
+
+/**
+ * kvm_mips_set_count_hz() - Update the frequency of the timer.
+ * @vcpu:	Virtual CPU.
+ * @count_hz:	Frequency of CP0_Count timer in Hz.
+ *
+ * Change the frequency of the CP0_Count timer. This is done atomically so that
+ * CP0_Count is continuous and no timer interrupt is lost.
+ *
+ * Returns:	-EINVAL if @count_hz is out of range.
+ *		0 on success.
+ */
+int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int dc;
+	ktime_t now;
+	u32 count;
+
+	/* ensure the frequency is in a sensible range... */
+	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
+		return -EINVAL;
+	/* ... and has actually changed */
+	if (vcpu->arch.count_hz == count_hz)
+		return 0;
+
+	/* Safely freeze timer so we can keep it continuous */
+	dc = kvm_mips_count_disabled(vcpu);
+	if (dc) {
+		now = kvm_mips_count_time(vcpu);
+		count = kvm_read_c0_guest_count(cop0);
+	} else {
+		now = kvm_mips_freeze_hrtimer(vcpu, &count);
+	}
+
+	/* Update the frequency */
+	vcpu->arch.count_hz = count_hz;
+	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
+	vcpu->arch.count_dyn_bias = 0;
+
+	/* Calculate adjusted bias so dynamic count is unchanged */
+	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
+
+	/* Update and resume hrtimer */
+	if (!dc)
+		kvm_mips_resume_hrtimer(vcpu, now, count);
+	return 0;
+}
+
+/**
+ * kvm_mips_write_compare() - Modify compare and update timer.
+ * @vcpu:	Virtual CPU.
+ * @compare:	New CP0_Compare value.
+ * @ack:	Whether to acknowledge timer interrupt.
+ *
+ * Update CP0_Compare to a new value and update the timeout.
+ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
+ * any pending timer interrupt is preserved.
+ */
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
+{
+	struct mips_coproc *cop0 = vcpu->arch.cop0;
+	int dc;
+	u32 old_compare = kvm_read_c0_guest_compare(cop0);
+	s32 delta = compare - old_compare;
+	u32 cause;
+	ktime_t now = ktime_set(0, 0); /* silence bogus GCC warning */
+	u32 count;
+
+	/* if unchanged, must just be an ack */
+	if (old_compare == compare) {
+		if (!ack)
+			return;
+		kvm_mips_callbacks->dequeue_timer_int(vcpu);
+		kvm_write_c0_guest_compare(cop0, compare);
+		return;
+	}
+
+	/*
+	 * If guest CP0_Compare moves forward, CP0_GTOffset should be adjusted
+	 * too to prevent guest CP0_Count hitting guest CP0_Compare.
+	 *
+	 * The new GTOffset corresponds to the new value of CP0_Compare, and is
+	 * set prior to it being written into the guest context. We disable
+	 * preemption until the new value is written to prevent restore of a
+	 * GTOffset corresponding to the old CP0_Compare value.
+	 */
+	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta > 0) {
+		preempt_disable();
+		write_c0_gtoffset(compare - read_c0_count());
+		back_to_back_c0_hazard();
+	}
+
+	/* freeze_hrtimer() takes care of timer interrupts <= count */
+	dc = kvm_mips_count_disabled(vcpu);
+	if (!dc)
+		now = kvm_mips_freeze_hrtimer(vcpu, &count);
+
+	if (ack)
+		kvm_mips_callbacks->dequeue_timer_int(vcpu);
+	else if (IS_ENABLED(CONFIG_KVM_MIPS_VZ))
+		/*
+		 * With VZ, writing CP0_Compare acks (clears) CP0_Cause.TI, so
+		 * preserve guest CP0_Cause.TI if we don't want to ack it.
+		 */
+		cause = kvm_read_c0_guest_cause(cop0);
+
+	kvm_write_c0_guest_compare(cop0, compare);
+
+	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
+		if (delta > 0)
+			preempt_enable();
+
+		back_to_back_c0_hazard();
+
+		if (!ack && cause & CAUSEF_TI)
+			kvm_write_c0_guest_cause(cop0, cause);
+	}
+
+	/* resume_hrtimer() takes care of timer interrupts > count */
+	if (!dc)
+		kvm_mips_resume_hrtimer(vcpu, now, count);
+
+	/*
+	 * If guest CP0_Compare is moving backward, we delay CP0_GTOffset change
+	 * until after the new CP0_Compare is written, otherwise new guest
+	 * CP0_Count could hit new guest CP0_Compare.
+	 */
+	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && delta <= 0)
+		write_c0_gtoffset(compare - read_c0_count());
+}
+
769 | /** | ||
770 | * kvm_mips_count_disable() - Disable count. | ||
771 | * @vcpu: Virtual CPU. | ||
772 | * | ||
773 | * Disable the CP0_Count timer. A timer interrupt on or before the final stop | ||
774 | * time will be handled but not after. | ||
775 | * | ||
776 | * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or | ||
777 | * count_ctl.DC has been set (count disabled). | ||
778 | * | ||
779 | * Returns: The time that the timer was stopped. | ||
780 | */ | ||
781 | static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu) | ||
782 | { | ||
783 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
784 | u32 count; | ||
785 | ktime_t now; | ||
786 | |||
787 | /* Stop hrtimer */ | ||
788 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | ||
789 | |||
790 | /* Set the static count from the dynamic count, handling pending TI */ | ||
791 | now = ktime_get(); | ||
792 | count = kvm_mips_read_count_running(vcpu, now); | ||
793 | kvm_write_c0_guest_count(cop0, count); | ||
794 | |||
795 | return now; | ||
796 | } | ||
797 | |||
798 | /** | ||
799 | * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC. | ||
800 | * @vcpu: Virtual CPU. | ||
801 | * | ||
802 | * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or | ||
803 | * before the final stop time will be handled if the timer isn't disabled by | ||
804 | * count_ctl.DC, but not after. | ||
805 | * | ||
806 | * Assumes CP0_Cause.DC is clear (count enabled). | ||
807 | */ | ||
808 | void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu) | ||
809 | { | ||
810 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
811 | |||
812 | kvm_set_c0_guest_cause(cop0, CAUSEF_DC); | ||
813 | if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)) | ||
814 | kvm_mips_count_disable(vcpu); | ||
815 | } | ||
816 | |||
817 | /** | ||
818 | * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC. | ||
819 | * @vcpu: Virtual CPU. | ||
820 | * | ||
821 | * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after | ||
822 | * the start time will be handled if the timer isn't disabled by count_ctl.DC, | ||
823 | * potentially before even returning, so the caller should be careful with | ||
824 | * ordering of CP0_Cause modifications so as not to lose it. | ||
825 | * | ||
826 | * Assumes CP0_Cause.DC is set (count disabled). | ||
827 | */ | ||
828 | void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu) | ||
829 | { | ||
830 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
831 | u32 count; | ||
832 | |||
833 | kvm_clear_c0_guest_cause(cop0, CAUSEF_DC); | ||
834 | |||
835 | /* | ||
836 | * Set the dynamic count to match the static count. | ||
837 | * This starts the hrtimer if count_ctl.DC allows it. | ||
838 | * Otherwise it conveniently updates the biases. | ||
839 | */ | ||
840 | count = kvm_read_c0_guest_count(cop0); | ||
841 | kvm_mips_write_count(vcpu, count); | ||
842 | } | ||
843 | |||
844 | /** | ||
845 | * kvm_mips_set_count_ctl() - Update the count control KVM register. | ||
846 | * @vcpu: Virtual CPU. | ||
847 | * @count_ctl: Count control register new value. | ||
848 | * | ||
849 | * Set the count control KVM register. The timer is updated accordingly. | ||
850 | * | ||
851 | * Returns: -EINVAL if reserved bits are set. | ||
852 | * 0 on success. | ||
853 | */ | ||
854 | int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl) | ||
855 | { | ||
856 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
857 | s64 changed = count_ctl ^ vcpu->arch.count_ctl; | ||
858 | s64 delta; | ||
859 | ktime_t expire, now; | ||
860 | u32 count, compare; | ||
861 | |||
862 | /* Only allow defined bits to be changed */ | ||
863 | if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC)) | ||
864 | return -EINVAL; | ||
865 | |||
866 | /* Apply new value */ | ||
867 | vcpu->arch.count_ctl = count_ctl; | ||
868 | |||
869 | /* Master CP0_Count disable */ | ||
870 | if (changed & KVM_REG_MIPS_COUNT_CTL_DC) { | ||
871 | /* Is CP0_Cause.DC already disabling CP0_Count? */ | ||
872 | if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) { | ||
873 | if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) | ||
874 | /* Just record the current time */ | ||
875 | vcpu->arch.count_resume = ktime_get(); | ||
876 | } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) { | ||
877 | /* disable timer and record current time */ | ||
878 | vcpu->arch.count_resume = kvm_mips_count_disable(vcpu); | ||
879 | } else { | ||
880 | /* | ||
881 | * Calculate timeout relative to static count at resume | ||
882 | * time (wrap 0 to 2^32). | ||
883 | */ | ||
884 | count = kvm_read_c0_guest_count(cop0); | ||
885 | compare = kvm_read_c0_guest_compare(cop0); | ||
886 | delta = (u64)(u32)(compare - count - 1) + 1; | ||
887 | delta = div_u64(delta * NSEC_PER_SEC, | ||
888 | vcpu->arch.count_hz); | ||
889 | expire = ktime_add_ns(vcpu->arch.count_resume, delta); | ||
890 | |||
891 | /* Handle pending interrupt */ | ||
892 | now = ktime_get(); | ||
893 | if (ktime_compare(now, expire) >= 0) | ||
894 | /* Nothing should be waiting on the timeout */ | ||
895 | kvm_mips_callbacks->queue_timer_int(vcpu); | ||
896 | |||
897 | /* Resume hrtimer without changing bias */ | ||
898 | count = kvm_mips_read_count_running(vcpu, now); | ||
899 | kvm_mips_resume_hrtimer(vcpu, now, count); | ||
900 | } | ||
901 | } | ||
902 | |||
903 | return 0; | ||
904 | } | ||
905 | |||
906 | /** | ||
907 | * kvm_mips_set_count_resume() - Update the count resume KVM register. | ||
908 | * @vcpu: Virtual CPU. | ||
909 | * @count_resume: Count resume register new value. | ||
910 | * | ||
911 | * Set the count resume KVM register. | ||
912 | * | ||
913 | * Returns: -EINVAL if out of valid range (0..now). | ||
914 | * 0 on success. | ||
915 | */ | ||
916 | int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume) | ||
917 | { | ||
918 | /* | ||
919 | * It doesn't make sense for the resume time to be in the future, as it | ||
920 | * would be possible for the next interrupt to be more than a full | ||
921 | * period in the future. | ||
922 | */ | ||
923 | if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get())) | ||
924 | return -EINVAL; | ||
925 | |||
926 | vcpu->arch.count_resume = ns_to_ktime(count_resume); | ||
927 | return 0; | ||
928 | } | ||
929 | |||
930 | /** | ||
931 | * kvm_mips_count_timeout() - Push timer forward on timeout. | ||
932 | * @vcpu: Virtual CPU. | ||
933 | * | ||
934 | * Handle an hrtimer event by push the hrtimer forward a period. | ||
935 | * | ||
936 | * Returns: The hrtimer_restart value to return to the hrtimer subsystem. | ||
937 | */ | ||
938 | enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu) | ||
939 | { | ||
940 | /* Add the Count period to the current expiry time */ | ||
941 | hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer, | ||
942 | vcpu->arch.count_period); | ||
943 | return HRTIMER_RESTART; | ||
944 | } | ||
945 | |||
946 | enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu) | ||
947 | { | ||
948 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
949 | enum emulation_result er = EMULATE_DONE; | ||
950 | |||
951 | if (kvm_read_c0_guest_status(cop0) & ST0_ERL) { | ||
952 | kvm_clear_c0_guest_status(cop0, ST0_ERL); | ||
953 | vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0); | ||
954 | } else if (kvm_read_c0_guest_status(cop0) & ST0_EXL) { | ||
955 | kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc, | ||
956 | kvm_read_c0_guest_epc(cop0)); | ||
957 | kvm_clear_c0_guest_status(cop0, ST0_EXL); | ||
958 | vcpu->arch.pc = kvm_read_c0_guest_epc(cop0); | ||
959 | |||
960 | } else { | ||
961 | kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n", | ||
962 | vcpu->arch.pc); | ||
963 | er = EMULATE_FAIL; | ||
964 | } | ||
965 | |||
966 | return er; | ||
967 | } | ||
968 | |||
969 | enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu) | ||
970 | { | ||
971 | kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc, | ||
972 | vcpu->arch.pending_exceptions); | ||
973 | |||
974 | ++vcpu->stat.wait_exits; | ||
975 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT); | ||
976 | if (!vcpu->arch.pending_exceptions) { | ||
977 | kvm_vz_lose_htimer(vcpu); | ||
978 | vcpu->arch.wait = 1; | ||
979 | kvm_vcpu_block(vcpu); | ||
980 | |||
981 | /* | ||
982 | * We we are runnable, then definitely go off to user space to | ||
983 | * check if any I/O interrupts are pending. | ||
984 | */ | ||
985 | if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) { | ||
986 | kvm_clear_request(KVM_REQ_UNHALT, vcpu); | ||
987 | vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; | ||
988 | } | ||
989 | } | ||
990 | |||
991 | return EMULATE_DONE; | ||
992 | } | ||
993 | |||
994 | static void kvm_mips_change_entryhi(struct kvm_vcpu *vcpu, | ||
995 | unsigned long entryhi) | ||
996 | { | ||
997 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
998 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
999 | int cpu, i; | ||
1000 | u32 nasid = entryhi & KVM_ENTRYHI_ASID; | ||
1001 | |||
1002 | if (((kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID) != nasid)) { | ||
1003 | trace_kvm_asid_change(vcpu, kvm_read_c0_guest_entryhi(cop0) & | ||
1004 | KVM_ENTRYHI_ASID, nasid); | ||
1005 | |||
1006 | /* | ||
1007 | * Flush entries from the GVA page tables. | ||
1008 | * Guest user page table will get flushed lazily on re-entry to | ||
1009 | * guest user if the guest ASID actually changes. | ||
1010 | */ | ||
1011 | kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_KERN); | ||
1012 | |||
1013 | /* | ||
1014 | * Regenerate/invalidate kernel MMU context. | ||
1015 | * The user MMU context will be regenerated lazily on re-entry | ||
1016 | * to guest user if the guest ASID actually changes. | ||
1017 | */ | ||
1018 | preempt_disable(); | ||
1019 | cpu = smp_processor_id(); | ||
1020 | get_new_mmu_context(kern_mm); | ||
1021 | for_each_possible_cpu(i) | ||
1022 | if (i != cpu) | ||
1023 | set_cpu_context(i, kern_mm, 0); | ||
1024 | preempt_enable(); | ||
1025 | } | ||
1026 | kvm_write_c0_guest_entryhi(cop0, entryhi); | ||
1027 | } | ||
1028 | |||
1029 | enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu) | ||
1030 | { | ||
1031 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1032 | struct kvm_mips_tlb *tlb; | ||
1033 | unsigned long pc = vcpu->arch.pc; | ||
1034 | int index; | ||
1035 | |||
1036 | index = kvm_read_c0_guest_index(cop0); | ||
1037 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { | ||
1038 | /* UNDEFINED */ | ||
1039 | kvm_debug("[%#lx] TLBR Index %#x out of range\n", pc, index); | ||
1040 | index &= KVM_MIPS_GUEST_TLB_SIZE - 1; | ||
1041 | } | ||
1042 | |||
1043 | tlb = &vcpu->arch.guest_tlb[index]; | ||
1044 | kvm_write_c0_guest_pagemask(cop0, tlb->tlb_mask); | ||
1045 | kvm_write_c0_guest_entrylo0(cop0, tlb->tlb_lo[0]); | ||
1046 | kvm_write_c0_guest_entrylo1(cop0, tlb->tlb_lo[1]); | ||
1047 | kvm_mips_change_entryhi(vcpu, tlb->tlb_hi); | ||
1048 | |||
1049 | return EMULATE_DONE; | ||
1050 | } | ||
1051 | |||
1052 | /** | ||
1053 | * kvm_mips_invalidate_guest_tlb() - Indicates a change in guest MMU map. | ||
1054 | * @vcpu: VCPU with changed mappings. | ||
1055 | * @tlb: TLB entry being removed. | ||
1056 | * | ||
1057 | * This is called to indicate a single change in guest MMU mappings, so that we | ||
1058 | * can arrange TLB flushes on this and other CPUs. | ||
1059 | */ | ||
1060 | static void kvm_mips_invalidate_guest_tlb(struct kvm_vcpu *vcpu, | ||
1061 | struct kvm_mips_tlb *tlb) | ||
1062 | { | ||
1063 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
1064 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
1065 | int cpu, i; | ||
1066 | bool user; | ||
1067 | |||
1068 | /* No need to flush for entries which are already invalid */ | ||
1069 | if (!((tlb->tlb_lo[0] | tlb->tlb_lo[1]) & ENTRYLO_V)) | ||
1070 | return; | ||
1071 | /* Don't touch host kernel page tables or TLB mappings */ | ||
1072 | if ((unsigned long)tlb->tlb_hi > 0x7fffffff) | ||
1073 | return; | ||
1074 | /* User address space doesn't need flushing for KSeg2/3 changes */ | ||
1075 | user = tlb->tlb_hi < KVM_GUEST_KSEG0; | ||
1076 | |||
1077 | preempt_disable(); | ||
1078 | |||
1079 | /* Invalidate page table entries */ | ||
1080 | kvm_trap_emul_invalidate_gva(vcpu, tlb->tlb_hi & VPN2_MASK, user); | ||
1081 | |||
1082 | /* | ||
1083 | * Probe the shadow host TLB for the entry being overwritten, if one | ||
1084 | * matches, invalidate it | ||
1085 | */ | ||
1086 | kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi, user, true); | ||
1087 | |||
1088 | /* Invalidate the whole ASID on other CPUs */ | ||
1089 | cpu = smp_processor_id(); | ||
1090 | for_each_possible_cpu(i) { | ||
1091 | if (i == cpu) | ||
1092 | continue; | ||
1093 | if (user) | ||
1094 | set_cpu_context(i, user_mm, 0); | ||
1095 | set_cpu_context(i, kern_mm, 0); | ||
1096 | } | ||
1097 | |||
1098 | preempt_enable(); | ||
1099 | } | ||
1100 | |||
1101 | /* Write Guest TLB Entry @ Index */ | ||
1102 | enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu) | ||
1103 | { | ||
1104 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1105 | int index = kvm_read_c0_guest_index(cop0); | ||
1106 | struct kvm_mips_tlb *tlb = NULL; | ||
1107 | unsigned long pc = vcpu->arch.pc; | ||
1108 | |||
1109 | if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) { | ||
1110 | kvm_debug("%s: illegal index: %d\n", __func__, index); | ||
1111 | kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | ||
1112 | pc, index, kvm_read_c0_guest_entryhi(cop0), | ||
1113 | kvm_read_c0_guest_entrylo0(cop0), | ||
1114 | kvm_read_c0_guest_entrylo1(cop0), | ||
1115 | kvm_read_c0_guest_pagemask(cop0)); | ||
1116 | index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE; | ||
1117 | } | ||
1118 | |||
1119 | tlb = &vcpu->arch.guest_tlb[index]; | ||
1120 | |||
1121 | kvm_mips_invalidate_guest_tlb(vcpu, tlb); | ||
1122 | |||
1123 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | ||
1124 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | ||
1125 | tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); | ||
1126 | tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); | ||
1127 | |||
1128 | kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n", | ||
1129 | pc, index, kvm_read_c0_guest_entryhi(cop0), | ||
1130 | kvm_read_c0_guest_entrylo0(cop0), | ||
1131 | kvm_read_c0_guest_entrylo1(cop0), | ||
1132 | kvm_read_c0_guest_pagemask(cop0)); | ||
1133 | |||
1134 | return EMULATE_DONE; | ||
1135 | } | ||
1136 | |||
1137 | /* Write Guest TLB Entry @ Random Index */ | ||
1138 | enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu) | ||
1139 | { | ||
1140 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1141 | struct kvm_mips_tlb *tlb = NULL; | ||
1142 | unsigned long pc = vcpu->arch.pc; | ||
1143 | int index; | ||
1144 | |||
1145 | index = prandom_u32_max(KVM_MIPS_GUEST_TLB_SIZE); | ||
1146 | tlb = &vcpu->arch.guest_tlb[index]; | ||
1147 | |||
1148 | kvm_mips_invalidate_guest_tlb(vcpu, tlb); | ||
1149 | |||
1150 | tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0); | ||
1151 | tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0); | ||
1152 | tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0); | ||
1153 | tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0); | ||
1154 | |||
1155 | kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n", | ||
1156 | pc, index, kvm_read_c0_guest_entryhi(cop0), | ||
1157 | kvm_read_c0_guest_entrylo0(cop0), | ||
1158 | kvm_read_c0_guest_entrylo1(cop0)); | ||
1159 | |||
1160 | return EMULATE_DONE; | ||
1161 | } | ||
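/*
 * A simplification worth noting: real hardware constrains TLBWR's random
 * index to the range [CP0_Wired, TLBSIZE), whereas the emulation above
 * draws a uniform index over the whole guest TLB, so entries the guest
 * considers wired may also be replaced.
 */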
1162 | |||
1163 | enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu) | ||
1164 | { | ||
1165 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1166 | long entryhi = kvm_read_c0_guest_entryhi(cop0); | ||
1167 | unsigned long pc = vcpu->arch.pc; | ||
1168 | int index = -1; | ||
1169 | |||
1170 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | ||
1171 | |||
1172 | kvm_write_c0_guest_index(cop0, index); | ||
1173 | |||
1174 | kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi, | ||
1175 | index); | ||
1176 | |||
1177 | return EMULATE_DONE; | ||
1178 | } | ||
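/*
 * kvm_mips_guest_tlb_lookup() returns -1 when nothing matches EntryHi,
 * so the Index write above then leaves bit 31 set, which is exactly the
 * P (probe failure) bit architected for TLBP. The tlbwi path above
 * strips that bit again before using Index.
 */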
1179 | |||
1180 | /** | ||
1181 | * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1 | ||
1182 | * @vcpu: Virtual CPU. | ||
1183 | * | ||
1184 | * Finds the mask of bits which are writable in the guest's Config1 CP0 | ||
1185 | * register, by userland (currently read-only to the guest). | ||
1186 | */ | ||
1187 | unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu) | ||
1188 | { | ||
1189 | unsigned int mask = 0; | ||
1190 | |||
1191 | /* Permit FPU to be present if FPU is supported */ | ||
1192 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) | ||
1193 | mask |= MIPS_CONF1_FP; | ||
1194 | |||
1195 | return mask; | ||
1196 | } | ||
1197 | |||
1198 | /** | ||
1199 | * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3 | ||
1200 | * @vcpu: Virtual CPU. | ||
1201 | * | ||
1202 | * Finds the mask of bits which are writable in the guest's Config3 CP0 | ||
1203 | * register, by userland (currently read-only to the guest). | ||
1204 | */ | ||
1205 | unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu) | ||
1206 | { | ||
1207 | /* Config4 and ULRI are optional */ | ||
1208 | unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI; | ||
1209 | |||
1210 | /* Permit MSA to be present if MSA is supported */ | ||
1211 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) | ||
1212 | mask |= MIPS_CONF3_MSA; | ||
1213 | |||
1214 | return mask; | ||
1215 | } | ||
1216 | |||
1217 | /** | ||
1218 | * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4 | ||
1219 | * @vcpu: Virtual CPU. | ||
1220 | * | ||
1221 | * Finds the mask of bits which are writable in the guest's Config4 CP0 | ||
1222 | * register, by userland (currently read-only to the guest). | ||
1223 | */ | ||
1224 | unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu) | ||
1225 | { | ||
1226 | /* Config5 is optional */ | ||
1227 | unsigned int mask = MIPS_CONF_M; | ||
1228 | |||
1229 | /* KScrExist */ | ||
1230 | mask |= 0xfc << MIPS_CONF4_KSCREXIST_SHIFT; | ||
1231 | |||
1232 | return mask; | ||
1233 | } | ||
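/*
 * KScrExist is one bit per select of CP0 register 31: 0xfc advertises
 * selects 2..7, i.e. the six architected KScratch registers, while
 * leaving bits 0..1 clear (select 0 being the EJTAG DESAVE register
 * rather than a scratch register). That reading is an inference from
 * the mask; the code itself only states the mask value.
 */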
1234 | |||
1235 | /** | ||
1236 | * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5 | ||
1237 | * @vcpu: Virtual CPU. | ||
1238 | * | ||
1239 | * Finds the mask of bits which are writable in the guest's Config5 CP0 | ||
1240 | * register, by the guest itself. | ||
1241 | */ | ||
1242 | unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu) | ||
1243 | { | ||
1244 | unsigned int mask = 0; | ||
1245 | |||
1246 | /* Permit MSAEn changes if MSA supported and enabled */ | ||
1247 | if (kvm_mips_guest_has_msa(&vcpu->arch)) | ||
1248 | mask |= MIPS_CONF5_MSAEN; | ||
1249 | |||
1250 | /* | ||
1251 | * Permit guest FPU mode changes if FPU is enabled and the relevant | ||
1252 | * feature exists according to FIR register. | ||
1253 | */ | ||
1254 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { | ||
1255 | if (cpu_has_fre) | ||
1256 | mask |= MIPS_CONF5_FRE; | ||
1257 | /* We don't support UFR or UFE */ | ||
1258 | } | ||
1259 | |||
1260 | return mask; | ||
1261 | } | ||
1262 | |||
1263 | enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst, | ||
1264 | u32 *opc, u32 cause, | ||
1265 | struct kvm_vcpu *vcpu) | ||
1266 | { | ||
1267 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1268 | enum emulation_result er = EMULATE_DONE; | ||
1269 | u32 rt, rd, sel; | ||
1270 | unsigned long curr_pc; | ||
1271 | |||
1272 | /* | ||
1273 | * Update PC and hold onto current PC in case there is | ||
1274 | * an error and we want to rollback the PC | ||
1275 | */ | ||
1276 | curr_pc = vcpu->arch.pc; | ||
1277 | er = update_pc(vcpu, cause); | ||
1278 | if (er == EMULATE_FAIL) | ||
1279 | return er; | ||
1280 | |||
1281 | if (inst.co_format.co) { | ||
1282 | switch (inst.co_format.func) { | ||
1283 | case tlbr_op: /* Read indexed TLB entry */ | ||
1284 | er = kvm_mips_emul_tlbr(vcpu); | ||
1285 | break; | ||
1286 | case tlbwi_op: /* Write indexed */ | ||
1287 | er = kvm_mips_emul_tlbwi(vcpu); | ||
1288 | break; | ||
1289 | case tlbwr_op: /* Write random */ | ||
1290 | er = kvm_mips_emul_tlbwr(vcpu); | ||
1291 | break; | ||
1292 | case tlbp_op: /* TLB Probe */ | ||
1293 | er = kvm_mips_emul_tlbp(vcpu); | ||
1294 | break; | ||
1295 | case rfe_op: | ||
1296 | kvm_err("!!!COP0_RFE!!!\n"); | ||
1297 | break; | ||
1298 | case eret_op: | ||
1299 | er = kvm_mips_emul_eret(vcpu); | ||
1300 | goto dont_update_pc; | ||
1301 | case wait_op: | ||
1302 | er = kvm_mips_emul_wait(vcpu); | ||
1303 | break; | ||
1304 | case hypcall_op: | ||
1305 | er = kvm_mips_emul_hypcall(vcpu, inst); | ||
1306 | break; | ||
1307 | } | ||
1308 | } else { | ||
1309 | rt = inst.c0r_format.rt; | ||
1310 | rd = inst.c0r_format.rd; | ||
1311 | sel = inst.c0r_format.sel; | ||
1312 | |||
1313 | switch (inst.c0r_format.rs) { | ||
1314 | case mfc_op: | ||
1315 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | ||
1316 | cop0->stat[rd][sel]++; | ||
1317 | #endif | ||
1318 | /* Get reg */ | ||
1319 | if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { | ||
1320 | vcpu->arch.gprs[rt] = | ||
1321 | (s32)kvm_mips_read_count(vcpu); | ||
1322 | } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) { | ||
1323 | vcpu->arch.gprs[rt] = 0x0; | ||
1324 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | ||
1325 | kvm_mips_trans_mfc0(inst, opc, vcpu); | ||
1326 | #endif | ||
1327 | } else { | ||
1328 | vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel]; | ||
1329 | |||
1330 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | ||
1331 | kvm_mips_trans_mfc0(inst, opc, vcpu); | ||
1332 | #endif | ||
1333 | } | ||
1334 | |||
1335 | trace_kvm_hwr(vcpu, KVM_TRACE_MFC0, | ||
1336 | KVM_TRACE_COP0(rd, sel), | ||
1337 | vcpu->arch.gprs[rt]); | ||
1338 | break; | ||
1339 | |||
1340 | case dmfc_op: | ||
1341 | vcpu->arch.gprs[rt] = cop0->reg[rd][sel]; | ||
1342 | |||
1343 | trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0, | ||
1344 | KVM_TRACE_COP0(rd, sel), | ||
1345 | vcpu->arch.gprs[rt]); | ||
1346 | break; | ||
1347 | |||
1348 | case mtc_op: | ||
1349 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | ||
1350 | cop0->stat[rd][sel]++; | ||
1351 | #endif | ||
1352 | trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, | ||
1353 | KVM_TRACE_COP0(rd, sel), | ||
1354 | vcpu->arch.gprs[rt]); | ||
1355 | |||
1356 | if ((rd == MIPS_CP0_TLB_INDEX) | ||
1357 | && (vcpu->arch.gprs[rt] >= | ||
1358 | KVM_MIPS_GUEST_TLB_SIZE)) { | ||
1359 | kvm_err("Invalid TLB Index: %ld", | ||
1360 | vcpu->arch.gprs[rt]); | ||
1361 | er = EMULATE_FAIL; | ||
1362 | break; | ||
1363 | } | ||
1364 | if ((rd == MIPS_CP0_PRID) && (sel == 1)) { | ||
1365 | /* | ||
1366 | * Preserve core number, and keep the exception | ||
1367 | * base in guest KSeg0. | ||
1368 | */ | ||
1369 | kvm_change_c0_guest_ebase(cop0, 0x1ffff000, | ||
1370 | vcpu->arch.gprs[rt]); | ||
1371 | } else if (rd == MIPS_CP0_TLB_HI && sel == 0) { | ||
1372 | kvm_mips_change_entryhi(vcpu, | ||
1373 | vcpu->arch.gprs[rt]); | ||
1374 | } | ||
1375 | /* Are we writing to COUNT */ | ||
1376 | else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) { | ||
1377 | kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); | ||
1378 | goto done; | ||
1379 | } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) { | ||
1380 | /* If we are writing to COMPARE */ | ||
1381 | /* Clear pending timer interrupt, if any */ | ||
1382 | kvm_mips_write_compare(vcpu, | ||
1383 | vcpu->arch.gprs[rt], | ||
1384 | true); | ||
1385 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { | ||
1386 | unsigned int old_val, val, change; | ||
1387 | |||
1388 | old_val = kvm_read_c0_guest_status(cop0); | ||
1389 | val = vcpu->arch.gprs[rt]; | ||
1390 | change = val ^ old_val; | ||
1391 | |||
1392 | /* Make sure that the NMI bit is never set */ | ||
1393 | val &= ~ST0_NMI; | ||
1394 | |||
1395 | /* | ||
1396 | * Don't allow CU1 or FR to be set unless FPU | ||
1397 | * capability enabled and exists in guest | ||
1398 | * configuration. | ||
1399 | */ | ||
1400 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
1401 | val &= ~(ST0_CU1 | ST0_FR); | ||
1402 | |||
1403 | /* | ||
1404 | * Also don't allow FR to be set if host doesn't | ||
1405 | * support it. | ||
1406 | */ | ||
1407 | if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64)) | ||
1408 | val &= ~ST0_FR; | ||
1409 | |||
1410 | |||
1411 | /* Handle changes in FPU mode */ | ||
1412 | preempt_disable(); | ||
1413 | |||
1414 | /* | ||
1415 | * FPU and Vector register state is made | ||
1416 | * UNPREDICTABLE by a change of FR, so don't | ||
1417 | * even bother saving it. | ||
1418 | */ | ||
1419 | if (change & ST0_FR) | ||
1420 | kvm_drop_fpu(vcpu); | ||
1421 | |||
1422 | /* | ||
1423 | * If MSA state is already live, it is undefined | ||
1424 | * how it interacts with FR=0 FPU state, and we | ||
1425 | * don't want to hit reserved instruction | ||
1426 | * exceptions trying to save the MSA state later | ||
1427 | * when CU=1 && FR=1, so play it safe and save | ||
1428 | * it first. | ||
1429 | */ | ||
1430 | if (change & ST0_CU1 && !(val & ST0_FR) && | ||
1431 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) | ||
1432 | kvm_lose_fpu(vcpu); | ||
1433 | |||
1434 | /* | ||
1435 | * Propagate CU1 (FPU enable) changes | ||
1436 | * immediately if the FPU context is already | ||
1437 | * loaded. When disabling we leave the context | ||
1438 | * loaded so it can be quickly enabled again in | ||
1439 | * the near future. | ||
1440 | */ | ||
1441 | if (change & ST0_CU1 && | ||
1442 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) | ||
1443 | change_c0_status(ST0_CU1, val); | ||
1444 | |||
1445 | preempt_enable(); | ||
1446 | |||
1447 | kvm_write_c0_guest_status(cop0, val); | ||
1448 | |||
1449 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | ||
1450 | /* | ||
1451 | * If FPU present, we need CU1/FR bits to take | ||
1452 | * effect fairly soon. | ||
1453 | */ | ||
1454 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
1455 | kvm_mips_trans_mtc0(inst, opc, vcpu); | ||
1456 | #endif | ||
1457 | } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { | ||
1458 | unsigned int old_val, val, change, wrmask; | ||
1459 | |||
1460 | old_val = kvm_read_c0_guest_config5(cop0); | ||
1461 | val = vcpu->arch.gprs[rt]; | ||
1462 | |||
1463 | /* Only a few bits are writable in Config5 */ | ||
1464 | wrmask = kvm_mips_config5_wrmask(vcpu); | ||
1465 | change = (val ^ old_val) & wrmask; | ||
1466 | val = old_val ^ change; | ||
1467 | |||
1468 | |||
1469 | /* Handle changes in FPU/MSA modes */ | ||
1470 | preempt_disable(); | ||
1471 | |||
1472 | /* | ||
1473 | * Propagate FRE changes immediately if the FPU | ||
1474 | * context is already loaded. | ||
1475 | */ | ||
1476 | if (change & MIPS_CONF5_FRE && | ||
1477 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) | ||
1478 | change_c0_config5(MIPS_CONF5_FRE, val); | ||
1479 | |||
1480 | /* | ||
1481 | * Propagate MSAEn changes immediately if the | ||
1482 | * MSA context is already loaded. When disabling | ||
1483 | * we leave the context loaded so it can be | ||
1484 | * quickly enabled again in the near future. | ||
1485 | */ | ||
1486 | if (change & MIPS_CONF5_MSAEN && | ||
1487 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) | ||
1488 | change_c0_config5(MIPS_CONF5_MSAEN, | ||
1489 | val); | ||
1490 | |||
1491 | preempt_enable(); | ||
1492 | |||
1493 | kvm_write_c0_guest_config5(cop0, val); | ||
1494 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { | ||
1495 | u32 old_cause, new_cause; | ||
1496 | |||
1497 | old_cause = kvm_read_c0_guest_cause(cop0); | ||
1498 | new_cause = vcpu->arch.gprs[rt]; | ||
1499 | /* Update R/W bits */ | ||
1500 | kvm_change_c0_guest_cause(cop0, 0x08800300, | ||
1501 | new_cause); | ||
1502 | /* DC bit enabling/disabling timer? */ | ||
1503 | if ((old_cause ^ new_cause) & CAUSEF_DC) { | ||
1504 | if (new_cause & CAUSEF_DC) | ||
1505 | kvm_mips_count_disable_cause(vcpu); | ||
1506 | else | ||
1507 | kvm_mips_count_enable_cause(vcpu); | ||
1508 | } | ||
1509 | } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) { | ||
1510 | u32 mask = MIPS_HWRENA_CPUNUM | | ||
1511 | MIPS_HWRENA_SYNCISTEP | | ||
1512 | MIPS_HWRENA_CC | | ||
1513 | MIPS_HWRENA_CCRES; | ||
1514 | |||
1515 | if (kvm_read_c0_guest_config3(cop0) & | ||
1516 | MIPS_CONF3_ULRI) | ||
1517 | mask |= MIPS_HWRENA_ULR; | ||
1518 | cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask; | ||
1519 | } else { | ||
1520 | cop0->reg[rd][sel] = vcpu->arch.gprs[rt]; | ||
1521 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | ||
1522 | kvm_mips_trans_mtc0(inst, opc, vcpu); | ||
1523 | #endif | ||
1524 | } | ||
1525 | break; | ||
1526 | |||
1527 | case dmtc_op: | ||
1528 | kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n", | ||
1529 | vcpu->arch.pc, rt, rd, sel); | ||
1530 | trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0, | ||
1531 | KVM_TRACE_COP0(rd, sel), | ||
1532 | vcpu->arch.gprs[rt]); | ||
1533 | er = EMULATE_FAIL; | ||
1534 | break; | ||
1535 | |||
1536 | case mfmc0_op: | ||
1537 | #ifdef KVM_MIPS_DEBUG_COP0_COUNTERS | ||
1538 | cop0->stat[MIPS_CP0_STATUS][0]++; | ||
1539 | #endif | ||
1540 | if (rt != 0) | ||
1541 | vcpu->arch.gprs[rt] = | ||
1542 | kvm_read_c0_guest_status(cop0); | ||
1543 | /* EI */ | ||
1544 | if (inst.mfmc0_format.sc) { | ||
1545 | kvm_debug("[%#lx] mfmc0_op: EI\n", | ||
1546 | vcpu->arch.pc); | ||
1547 | kvm_set_c0_guest_status(cop0, ST0_IE); | ||
1548 | } else { | ||
1549 | kvm_debug("[%#lx] mfmc0_op: DI\n", | ||
1550 | vcpu->arch.pc); | ||
1551 | kvm_clear_c0_guest_status(cop0, ST0_IE); | ||
1552 | } | ||
1553 | |||
1554 | break; | ||
1555 | |||
1556 | case wrpgpr_op: | ||
1557 | { | ||
1558 | u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf; | ||
1559 | u32 pss = | ||
1560 | (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf; | ||
1561 | /* | ||
1562 | * We don't support any shadow register sets, so | ||
1563 | * SRSCtl[PSS] == SRSCtl[CSS] == 0 | ||
1564 | */ | ||
1565 | if (css || pss) { | ||
1566 | er = EMULATE_FAIL; | ||
1567 | break; | ||
1568 | } | ||
1569 | kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd, | ||
1570 | vcpu->arch.gprs[rt]); | ||
1571 | vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt]; | ||
1572 | } | ||
1573 | break; | ||
1574 | default: | ||
1575 | kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n", | ||
1576 | vcpu->arch.pc, inst.c0r_format.rs); | ||
1577 | er = EMULATE_FAIL; | ||
1578 | break; | ||
1579 | } | ||
1580 | } | ||
1581 | |||
1582 | done: | ||
1583 | /* Rollback PC only if emulation was unsuccessful */ | ||
1584 | if (er == EMULATE_FAIL) | ||
1585 | vcpu->arch.pc = curr_pc; | ||
1586 | |||
1587 | dont_update_pc: | ||
1588 | /* | ||
1589 | * This is for special instructions whose emulation | ||
1590 | * updates the PC, so do not overwrite the PC under | ||
1591 | * any circumstances | ||
1592 | */ | ||
1593 | |||
1594 | return er; | ||
1595 | } | ||
1596 | |||
1597 | enum emulation_result kvm_mips_emulate_store(union mips_instruction inst, | ||
1598 | u32 cause, | ||
1599 | struct kvm_vcpu *vcpu) | ||
1600 | { | ||
1601 | int r; | ||
1602 | enum emulation_result er; | ||
1603 | u32 rt; | ||
1604 | struct kvm_run *run = vcpu->run; | ||
1605 | void *data = run->mmio.data; | ||
1606 | unsigned int imme; | ||
1607 | unsigned long curr_pc; | ||
1608 | |||
1609 | /* | ||
1610 | * Update PC and hold onto current PC in case there is | ||
1611 | * an error and we want to rollback the PC | ||
1612 | */ | ||
1613 | curr_pc = vcpu->arch.pc; | ||
1614 | er = update_pc(vcpu, cause); | ||
1615 | if (er == EMULATE_FAIL) | ||
1616 | return er; | ||
1617 | |||
1618 | rt = inst.i_format.rt; | ||
1619 | |||
1620 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
1621 | vcpu->arch.host_cp0_badvaddr); | ||
1622 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) | ||
1623 | goto out_fail; | ||
1624 | |||
1625 | switch (inst.i_format.opcode) { | ||
1626 | #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) | ||
1627 | case sd_op: | ||
1628 | run->mmio.len = 8; | ||
1629 | *(u64 *)data = vcpu->arch.gprs[rt]; | ||
1630 | |||
1631 | kvm_debug("[%#lx] OP_SD: eaddr: %#lx, gpr: %#lx, data: %#llx\n", | ||
1632 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1633 | vcpu->arch.gprs[rt], *(u64 *)data); | ||
1634 | break; | ||
1635 | #endif | ||
1636 | |||
1637 | case sw_op: | ||
1638 | run->mmio.len = 4; | ||
1639 | *(u32 *)data = vcpu->arch.gprs[rt]; | ||
1640 | |||
1641 | kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n", | ||
1642 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1643 | vcpu->arch.gprs[rt], *(u32 *)data); | ||
1644 | break; | ||
1645 | |||
1646 | case sh_op: | ||
1647 | run->mmio.len = 2; | ||
1648 | *(u16 *)data = vcpu->arch.gprs[rt]; | ||
1649 | |||
1650 | kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n", | ||
1651 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1652 | vcpu->arch.gprs[rt], *(u16 *)data); | ||
1653 | break; | ||
1654 | |||
1655 | case sb_op: | ||
1656 | run->mmio.len = 1; | ||
1657 | *(u8 *)data = vcpu->arch.gprs[rt]; | ||
1658 | |||
1659 | kvm_debug("[%#lx] OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n", | ||
1660 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1661 | vcpu->arch.gprs[rt], *(u8 *)data); | ||
1662 | break; | ||
1663 | |||
1664 | case swl_op: | ||
1665 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
1666 | vcpu->arch.host_cp0_badvaddr) & (~0x3); | ||
1667 | run->mmio.len = 4; | ||
1668 | imme = vcpu->arch.host_cp0_badvaddr & 0x3; | ||
1669 | switch (imme) { | ||
1670 | case 0: | ||
1671 | *(u32 *)data = ((*(u32 *)data) & 0xffffff00) | | ||
1672 | (vcpu->arch.gprs[rt] >> 24); | ||
1673 | break; | ||
1674 | case 1: | ||
1675 | *(u32 *)data = ((*(u32 *)data) & 0xffff0000) | | ||
1676 | (vcpu->arch.gprs[rt] >> 16); | ||
1677 | break; | ||
1678 | case 2: | ||
1679 | *(u32 *)data = ((*(u32 *)data) & 0xff000000) | | ||
1680 | (vcpu->arch.gprs[rt] >> 8); | ||
1681 | break; | ||
1682 | case 3: | ||
1683 | *(u32 *)data = vcpu->arch.gprs[rt]; | ||
1684 | break; | ||
1685 | default: | ||
1686 | break; | ||
1687 | } | ||
1688 | |||
1689 | kvm_debug("[%#lx] OP_SWL: eaddr: %#lx, gpr: %#lx, data: %#x\n", | ||
1690 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1691 | vcpu->arch.gprs[rt], *(u32 *)data); | ||
1692 | break; | ||
1693 | |||
1694 | case swr_op: | ||
1695 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
1696 | vcpu->arch.host_cp0_badvaddr) & (~0x3); | ||
1697 | run->mmio.len = 4; | ||
1698 | imme = vcpu->arch.host_cp0_badvaddr & 0x3; | ||
1699 | switch (imme) { | ||
1700 | case 0: | ||
1701 | *(u32 *)data = vcpu->arch.gprs[rt]; | ||
1702 | break; | ||
1703 | case 1: | ||
1704 | *(u32 *)data = ((*(u32 *)data) & 0xff) | | ||
1705 | (vcpu->arch.gprs[rt] << 8); | ||
1706 | break; | ||
1707 | case 2: | ||
1708 | *(u32 *)data = ((*(u32 *)data) & 0xffff) | | ||
1709 | (vcpu->arch.gprs[rt] << 16); | ||
1710 | break; | ||
1711 | case 3: | ||
1712 | *(u32 *)data = ((*(u32 *)data) & 0xffffff) | | ||
1713 | (vcpu->arch.gprs[rt] << 24); | ||
1714 | break; | ||
1715 | default: | ||
1716 | break; | ||
1717 | } | ||
1718 | |||
1719 | kvm_debug("[%#lx] OP_SWR: eaddr: %#lx, gpr: %#lx, data: %#x\n", | ||
1720 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1721 | vcpu->arch.gprs[rt], *(u32 *)data); | ||
1722 | break; | ||
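/*
 * The SWL/SWR merges above are easy to mis-read; a minimal standalone
 * sketch of the same arithmetic (a hypothetical helper, not part of this
 * file) for the SWL side, where off = badvaddr & 0x3:
 *
 *	static u32 swl_merge(u32 old, u32 rt, unsigned int off)
 *	{
 *		switch (off) {
 *		case 0: return (old & 0xffffff00) | (rt >> 24);
 *		case 1: return (old & 0xffff0000) | (rt >> 16);
 *		case 2: return (old & 0xff000000) | (rt >> 8);
 *		default: return rt;
 *		}
 *	}
 *
 * For example swl_merge(0xaabbccdd, 0x11223344, 2) == 0xaa112233: one
 * byte of the old word is kept and the three most significant bytes of
 * rt are shifted down into the 4-byte MMIO buffer.
 */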
1723 | |||
1724 | #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) | ||
1725 | case sdl_op: | ||
1726 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
1727 | vcpu->arch.host_cp0_badvaddr) & (~0x7); | ||
1728 | |||
1729 | run->mmio.len = 8; | ||
1730 | imme = vcpu->arch.host_cp0_badvaddr & 0x7; | ||
1731 | switch (imme) { | ||
1732 | case 0: | ||
1733 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff00) | | ||
1734 | ((vcpu->arch.gprs[rt] >> 56) & 0xff); | ||
1735 | break; | ||
1736 | case 1: | ||
1737 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff0000) | | ||
1738 | ((vcpu->arch.gprs[rt] >> 48) & 0xffff); | ||
1739 | break; | ||
1740 | case 2: | ||
1741 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffff000000) | | ||
1742 | ((vcpu->arch.gprs[rt] >> 40) & 0xffffff); | ||
1743 | break; | ||
1744 | case 3: | ||
1745 | *(u64 *)data = ((*(u64 *)data) & 0xffffffff00000000) | | ||
1746 | ((vcpu->arch.gprs[rt] >> 32) & 0xffffffff); | ||
1747 | break; | ||
1748 | case 4: | ||
1749 | *(u64 *)data = ((*(u64 *)data) & 0xffffff0000000000) | | ||
1750 | ((vcpu->arch.gprs[rt] >> 24) & 0xffffffffff); | ||
1751 | break; | ||
1752 | case 5: | ||
1753 | *(u64 *)data = ((*(u64 *)data) & 0xffff000000000000) | | ||
1754 | ((vcpu->arch.gprs[rt] >> 16) & 0xffffffffffff); | ||
1755 | break; | ||
1756 | case 6: | ||
1757 | *(u64 *)data = ((*(u64 *)data) & 0xff00000000000000) | | ||
1758 | ((vcpu->arch.gprs[rt] >> 8) & 0xffffffffffffff); | ||
1759 | break; | ||
1760 | case 7: | ||
1761 | *(u64 *)data = vcpu->arch.gprs[rt]; | ||
1762 | break; | ||
1763 | default: | ||
1764 | break; | ||
1765 | } | ||
1766 | |||
1767 | kvm_debug("[%#lx] OP_SDL: eaddr: %#lx, gpr: %#lx, data: %llx\n", | ||
1768 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1769 | vcpu->arch.gprs[rt], *(u64 *)data); | ||
1770 | break; | ||
1771 | |||
1772 | case sdr_op: | ||
1773 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
1774 | vcpu->arch.host_cp0_badvaddr) & (~0x7); | ||
1775 | |||
1776 | run->mmio.len = 8; | ||
1777 | imme = vcpu->arch.host_cp0_badvaddr & 0x7; | ||
1778 | switch (imme) { | ||
1779 | case 0: | ||
1780 | *(u64 *)data = vcpu->arch.gprs[rt]; | ||
1781 | break; | ||
1782 | case 1: | ||
1783 | *(u64 *)data = ((*(u64 *)data) & 0xff) | | ||
1784 | (vcpu->arch.gprs[rt] << 8); | ||
1785 | break; | ||
1786 | case 2: | ||
1787 | *(u64 *)data = ((*(u64 *)data) & 0xffff) | | ||
1788 | (vcpu->arch.gprs[rt] << 16); | ||
1789 | break; | ||
1790 | case 3: | ||
1791 | *(u64 *)data = ((*(u64 *)data) & 0xffffff) | | ||
1792 | (vcpu->arch.gprs[rt] << 24); | ||
1793 | break; | ||
1794 | case 4: | ||
1795 | *(u64 *)data = ((*(u64 *)data) & 0xffffffff) | | ||
1796 | (vcpu->arch.gprs[rt] << 32); | ||
1797 | break; | ||
1798 | case 5: | ||
1799 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffff) | | ||
1800 | (vcpu->arch.gprs[rt] << 40); | ||
1801 | break; | ||
1802 | case 6: | ||
1803 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffffff) | | ||
1804 | (vcpu->arch.gprs[rt] << 48); | ||
1805 | break; | ||
1806 | case 7: | ||
1807 | *(u64 *)data = ((*(u64 *)data) & 0xffffffffffffff) | | ||
1808 | (vcpu->arch.gprs[rt] << 56); | ||
1809 | break; | ||
1810 | default: | ||
1811 | break; | ||
1812 | } | ||
1813 | |||
1814 | kvm_debug("[%#lx] OP_SDR: eaddr: %#lx, gpr: %#lx, data: %llx\n", | ||
1815 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1816 | vcpu->arch.gprs[rt], *(u64 *)data); | ||
1817 | break; | ||
1818 | #endif | ||
1819 | |||
1820 | #ifdef CONFIG_CPU_LOONGSON64 | ||
1821 | case sdc2_op: | ||
1822 | rt = inst.loongson3_lsdc2_format.rt; | ||
1823 | switch (inst.loongson3_lsdc2_format.opcode1) { | ||
1824 | /* | ||
1825 | * Loongson-3 overridden sdc2 instructions. | ||
1826 | * opcode1 instruction | ||
1827 | * 0x0 gssbx: store 1 byte from GPR | ||
1828 | * 0x1 gsshx: store 2 bytes from GPR | ||
1829 | * 0x2 gsswx: store 4 bytes from GPR | ||
1830 | * 0x3 gssdx: store 8 bytes from GPR | ||
1831 | */ | ||
1832 | case 0x0: | ||
1833 | run->mmio.len = 1; | ||
1834 | *(u8 *)data = vcpu->arch.gprs[rt]; | ||
1835 | |||
1836 | kvm_debug("[%#lx] OP_GSSBX: eaddr: %#lx, gpr: %#lx, data: %#x\n", | ||
1837 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1838 | vcpu->arch.gprs[rt], *(u8 *)data); | ||
1839 | break; | ||
1840 | case 0x1: | ||
1841 | run->mmio.len = 2; | ||
1842 | *(u16 *)data = vcpu->arch.gprs[rt]; | ||
1843 | |||
1844 | kvm_debug("[%#lx] OP_GSSSHX: eaddr: %#lx, gpr: %#lx, data: %#x\n", | ||
1845 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1846 | vcpu->arch.gprs[rt], *(u16 *)data); | ||
1847 | break; | ||
1848 | case 0x2: | ||
1849 | run->mmio.len = 4; | ||
1850 | *(u32 *)data = vcpu->arch.gprs[rt]; | ||
1851 | |||
1852 | kvm_debug("[%#lx] OP_GSSWX: eaddr: %#lx, gpr: %#lx, data: %#x\n", | ||
1853 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1854 | vcpu->arch.gprs[rt], *(u32 *)data); | ||
1855 | break; | ||
1856 | case 0x3: | ||
1857 | run->mmio.len = 8; | ||
1858 | *(u64 *)data = vcpu->arch.gprs[rt]; | ||
1859 | |||
1860 | kvm_debug("[%#lx] OP_GSSDX: eaddr: %#lx, gpr: %#lx, data: %#llx\n", | ||
1861 | vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr, | ||
1862 | vcpu->arch.gprs[rt], *(u64 *)data); | ||
1863 | break; | ||
1864 | default: | ||
1865 | kvm_err("Godson Extended GS-Store not yet supported (inst=0x%08x)\n", | ||
1866 | inst.word); | ||
1867 | break; | ||
1868 | } | ||
1869 | break; | ||
1870 | #endif | ||
1871 | default: | ||
1872 | kvm_err("Store not yet supported (inst=0x%08x)\n", | ||
1873 | inst.word); | ||
1874 | goto out_fail; | ||
1875 | } | ||
1876 | |||
1877 | vcpu->mmio_needed = 1; | ||
1878 | run->mmio.is_write = 1; | ||
1879 | vcpu->mmio_is_write = 1; | ||
1880 | |||
1881 | r = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, | ||
1882 | run->mmio.phys_addr, run->mmio.len, data); | ||
1883 | |||
1884 | if (!r) { | ||
1885 | vcpu->mmio_needed = 0; | ||
1886 | return EMULATE_DONE; | ||
1887 | } | ||
1888 | |||
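/*
 * No in-kernel device claimed the address: hand the access to userspace,
 * which sees a KVM_EXIT_MMIO exit, performs the write and re-enters the
 * vcpu. vcpu->mmio_needed stays set so the exit path knows the MMIO is
 * still outstanding.
 */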
1889 | return EMULATE_DO_MMIO; | ||
1890 | |||
1891 | out_fail: | ||
1892 | /* Rollback PC if emulation was unsuccessful */ | ||
1893 | vcpu->arch.pc = curr_pc; | ||
1894 | return EMULATE_FAIL; | ||
1895 | } | ||
1896 | |||
1897 | enum emulation_result kvm_mips_emulate_load(union mips_instruction inst, | ||
1898 | u32 cause, struct kvm_vcpu *vcpu) | ||
1899 | { | ||
1900 | struct kvm_run *run = vcpu->run; | ||
1901 | int r; | ||
1902 | enum emulation_result er; | ||
1903 | unsigned long curr_pc; | ||
1904 | u32 op, rt; | ||
1905 | unsigned int imme; | ||
1906 | |||
1907 | rt = inst.i_format.rt; | ||
1908 | op = inst.i_format.opcode; | ||
1909 | |||
1910 | /* | ||
1911 | * Find the resume PC now while we have safe and easy access to the | ||
1912 | * prior branch instruction, and save it for | ||
1913 | * kvm_mips_complete_mmio_load() to restore later. | ||
1914 | */ | ||
1915 | curr_pc = vcpu->arch.pc; | ||
1916 | er = update_pc(vcpu, cause); | ||
1917 | if (er == EMULATE_FAIL) | ||
1918 | return er; | ||
1919 | vcpu->arch.io_pc = vcpu->arch.pc; | ||
1920 | vcpu->arch.pc = curr_pc; | ||
1921 | |||
1922 | vcpu->arch.io_gpr = rt; | ||
1923 | |||
1924 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
1925 | vcpu->arch.host_cp0_badvaddr); | ||
1926 | if (run->mmio.phys_addr == KVM_INVALID_ADDR) | ||
1927 | return EMULATE_FAIL; | ||
1928 | |||
1929 | vcpu->mmio_needed = 2; /* signed */ | ||
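/*
 * vcpu->mmio_needed doubles as a completion code for
 * kvm_mips_complete_mmio_load(): 1 = zero-extend, 2 = sign-extend,
 * 3..6 = LWL merging 1..4 bytes, 7..10 = LWR merging 4..1 bytes,
 * 11..18 = LDL, 19..26 = LDR, 27..30 = Loongson GS loads (see below).
 */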
1930 | switch (op) { | ||
1931 | #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) | ||
1932 | case ld_op: | ||
1933 | run->mmio.len = 8; | ||
1934 | break; | ||
1935 | |||
1936 | case lwu_op: | ||
1937 | vcpu->mmio_needed = 1; /* unsigned */ | ||
1938 | fallthrough; | ||
1939 | #endif | ||
1940 | case lw_op: | ||
1941 | run->mmio.len = 4; | ||
1942 | break; | ||
1943 | |||
1944 | case lhu_op: | ||
1945 | vcpu->mmio_needed = 1; /* unsigned */ | ||
1946 | fallthrough; | ||
1947 | case lh_op: | ||
1948 | run->mmio.len = 2; | ||
1949 | break; | ||
1950 | |||
1951 | case lbu_op: | ||
1952 | vcpu->mmio_needed = 1; /* unsigned */ | ||
1953 | fallthrough; | ||
1954 | case lb_op: | ||
1955 | run->mmio.len = 1; | ||
1956 | break; | ||
1957 | |||
1958 | case lwl_op: | ||
1959 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
1960 | vcpu->arch.host_cp0_badvaddr) & (~0x3); | ||
1961 | |||
1962 | run->mmio.len = 4; | ||
1963 | imme = vcpu->arch.host_cp0_badvaddr & 0x3; | ||
1964 | switch (imme) { | ||
1965 | case 0: | ||
1966 | vcpu->mmio_needed = 3; /* 1 byte */ | ||
1967 | break; | ||
1968 | case 1: | ||
1969 | vcpu->mmio_needed = 4; /* 2 bytes */ | ||
1970 | break; | ||
1971 | case 2: | ||
1972 | vcpu->mmio_needed = 5; /* 3 bytes */ | ||
1973 | break; | ||
1974 | case 3: | ||
1975 | vcpu->mmio_needed = 6; /* 4 bytes */ | ||
1976 | break; | ||
1977 | default: | ||
1978 | break; | ||
1979 | } | ||
1980 | break; | ||
1981 | |||
1982 | case lwr_op: | ||
1983 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
1984 | vcpu->arch.host_cp0_badvaddr) & (~0x3); | ||
1985 | |||
1986 | run->mmio.len = 4; | ||
1987 | imme = vcpu->arch.host_cp0_badvaddr & 0x3; | ||
1988 | switch (imme) { | ||
1989 | case 0: | ||
1990 | vcpu->mmio_needed = 7; /* 4 bytes */ | ||
1991 | break; | ||
1992 | case 1: | ||
1993 | vcpu->mmio_needed = 8; /* 3 bytes */ | ||
1994 | break; | ||
1995 | case 2: | ||
1996 | vcpu->mmio_needed = 9; /* 2 bytes */ | ||
1997 | break; | ||
1998 | case 3: | ||
1999 | vcpu->mmio_needed = 10; /* 1 byte */ | ||
2000 | break; | ||
2001 | default: | ||
2002 | break; | ||
2003 | } | ||
2004 | break; | ||
2005 | |||
2006 | #if defined(CONFIG_64BIT) && defined(CONFIG_KVM_MIPS_VZ) | ||
2007 | case ldl_op: | ||
2008 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
2009 | vcpu->arch.host_cp0_badvaddr) & (~0x7); | ||
2010 | |||
2011 | run->mmio.len = 8; | ||
2012 | imme = vcpu->arch.host_cp0_badvaddr & 0x7; | ||
2013 | switch (imme) { | ||
2014 | case 0: | ||
2015 | vcpu->mmio_needed = 11; /* 1 byte */ | ||
2016 | break; | ||
2017 | case 1: | ||
2018 | vcpu->mmio_needed = 12; /* 2 bytes */ | ||
2019 | break; | ||
2020 | case 2: | ||
2021 | vcpu->mmio_needed = 13; /* 3 bytes */ | ||
2022 | break; | ||
2023 | case 3: | ||
2024 | vcpu->mmio_needed = 14; /* 4 bytes */ | ||
2025 | break; | ||
2026 | case 4: | ||
2027 | vcpu->mmio_needed = 15; /* 5 bytes */ | ||
2028 | break; | ||
2029 | case 5: | ||
2030 | vcpu->mmio_needed = 16; /* 6 bytes */ | ||
2031 | break; | ||
2032 | case 6: | ||
2033 | vcpu->mmio_needed = 17; /* 7 bytes */ | ||
2034 | break; | ||
2035 | case 7: | ||
2036 | vcpu->mmio_needed = 18; /* 8 bytes */ | ||
2037 | break; | ||
2038 | default: | ||
2039 | break; | ||
2040 | } | ||
2041 | break; | ||
2042 | |||
2043 | case ldr_op: | ||
2044 | run->mmio.phys_addr = kvm_mips_callbacks->gva_to_gpa( | ||
2045 | vcpu->arch.host_cp0_badvaddr) & (~0x7); | ||
2046 | |||
2047 | run->mmio.len = 8; | ||
2048 | imme = vcpu->arch.host_cp0_badvaddr & 0x7; | ||
2049 | switch (imme) { | ||
2050 | case 0: | ||
2051 | vcpu->mmio_needed = 19; /* 8 bytes */ | ||
2052 | break; | ||
2053 | case 1: | ||
2054 | vcpu->mmio_needed = 20; /* 7 bytes */ | ||
2055 | break; | ||
2056 | case 2: | ||
2057 | vcpu->mmio_needed = 21; /* 6 bytes */ | ||
2058 | break; | ||
2059 | case 3: | ||
2060 | vcpu->mmio_needed = 22; /* 5 bytes */ | ||
2061 | break; | ||
2062 | case 4: | ||
2063 | vcpu->mmio_needed = 23; /* 4 bytes */ | ||
2064 | break; | ||
2065 | case 5: | ||
2066 | vcpu->mmio_needed = 24; /* 3 bytes */ | ||
2067 | break; | ||
2068 | case 6: | ||
2069 | vcpu->mmio_needed = 25; /* 2 bytes */ | ||
2070 | break; | ||
2071 | case 7: | ||
2072 | vcpu->mmio_needed = 26; /* 1 byte */ | ||
2073 | break; | ||
2074 | default: | ||
2075 | break; | ||
2076 | } | ||
2077 | break; | ||
2078 | #endif | ||
2079 | |||
2080 | #ifdef CONFIG_CPU_LOONGSON64 | ||
2081 | case ldc2_op: | ||
2082 | rt = inst.loongson3_lsdc2_format.rt; | ||
2083 | switch (inst.loongson3_lsdc2_format.opcode1) { | ||
2084 | /* | ||
2085 | * Loongson-3 overridden ldc2 instructions. | ||
2086 | * opcode1 instruction | ||
2087 | * 0x0 gslbx: load 1 byte into GPR | ||
2088 | * 0x1 gslhx: load 2 bytes into GPR | ||
2089 | * 0x2 gslwx: load 4 bytes into GPR | ||
2090 | * 0x3 gsldx: load 8 bytes into GPR | ||
2091 | */ | ||
2092 | case 0x0: | ||
2093 | run->mmio.len = 1; | ||
2094 | vcpu->mmio_needed = 27; /* signed */ | ||
2095 | break; | ||
2096 | case 0x1: | ||
2097 | run->mmio.len = 2; | ||
2098 | vcpu->mmio_needed = 28; /* signed */ | ||
2099 | break; | ||
2100 | case 0x2: | ||
2101 | run->mmio.len = 4; | ||
2102 | vcpu->mmio_needed = 29; /* signed */ | ||
2103 | break; | ||
2104 | case 0x3: | ||
2105 | run->mmio.len = 8; | ||
2106 | vcpu->mmio_needed = 30; /* signed */ | ||
2107 | break; | ||
2108 | default: | ||
2109 | kvm_err("Godson Extended GS-Load for float not yet supported (inst=0x%08x)\n", | ||
2110 | inst.word); | ||
2111 | break; | ||
2112 | } | ||
2113 | break; | ||
2114 | #endif | ||
2115 | |||
2116 | default: | ||
2117 | kvm_err("Load not yet supported (inst=0x%08x)\n", | ||
2118 | inst.word); | ||
2119 | vcpu->mmio_needed = 0; | ||
2120 | return EMULATE_FAIL; | ||
2121 | } | ||
2122 | |||
2123 | run->mmio.is_write = 0; | ||
2124 | vcpu->mmio_is_write = 0; | ||
2125 | |||
2126 | r = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, | ||
2127 | run->mmio.phys_addr, run->mmio.len, run->mmio.data); | ||
2128 | |||
2129 | if (!r) { | ||
2130 | kvm_mips_complete_mmio_load(vcpu); | ||
2131 | vcpu->mmio_needed = 0; | ||
2132 | return EMULATE_DONE; | ||
2133 | } | ||
2134 | |||
2135 | return EMULATE_DO_MMIO; | ||
2136 | } | ||
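/*
 * When EMULATE_DO_MMIO is returned the load completes asynchronously:
 * userspace fills run->mmio.data and re-enters the vcpu, after which
 * kvm_mips_complete_mmio_load() decodes vcpu->mmio_needed, writes the
 * (possibly merged or sign/zero-extended) value into the GPR recorded
 * in io_gpr, and finally advances the PC to the io_pc saved above.
 */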
2137 | |||
2138 | #ifndef CONFIG_KVM_MIPS_VZ | ||
2139 | static enum emulation_result kvm_mips_guest_cache_op(int (*fn)(unsigned long), | ||
2140 | unsigned long curr_pc, | ||
2141 | unsigned long addr, | ||
2142 | struct kvm_vcpu *vcpu, | ||
2143 | u32 cause) | ||
2144 | { | ||
2145 | int err; | ||
2146 | |||
2147 | for (;;) { | ||
2148 | /* Carefully attempt the cache operation */ | ||
2149 | kvm_trap_emul_gva_lockless_begin(vcpu); | ||
2150 | err = fn(addr); | ||
2151 | kvm_trap_emul_gva_lockless_end(vcpu); | ||
2152 | |||
2153 | if (likely(!err)) | ||
2154 | return EMULATE_DONE; | ||
2155 | |||
2156 | /* | ||
2157 | * Try to handle the fault and retry, maybe we just raced with a | ||
2158 | * GVA invalidation. | ||
2159 | */ | ||
2160 | switch (kvm_trap_emul_gva_fault(vcpu, addr, false)) { | ||
2161 | case KVM_MIPS_GVA: | ||
2162 | case KVM_MIPS_GPA: | ||
2163 | /* bad virtual or physical address */ | ||
2164 | return EMULATE_FAIL; | ||
2165 | case KVM_MIPS_TLB: | ||
2166 | /* no matching guest TLB */ | ||
2167 | vcpu->arch.host_cp0_badvaddr = addr; | ||
2168 | vcpu->arch.pc = curr_pc; | ||
2169 | kvm_mips_emulate_tlbmiss_ld(cause, NULL, vcpu); | ||
2170 | return EMULATE_EXCEPT; | ||
2171 | case KVM_MIPS_TLBINV: | ||
2172 | /* invalid matching guest TLB */ | ||
2173 | vcpu->arch.host_cp0_badvaddr = addr; | ||
2174 | vcpu->arch.pc = curr_pc; | ||
2175 | kvm_mips_emulate_tlbinv_ld(cause, NULL, vcpu); | ||
2176 | return EMULATE_EXCEPT; | ||
2177 | default: | ||
2178 | break; | ||
2179 | } | ||
2180 | } | ||
2181 | } | ||
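/*
 * The loop above is a try/fault-in/retry pattern: the cache op is
 * attempted with GVA invalidation locked out, using the kernel's
 * "protected" cache primitives whose exception fixups turn a faulting
 * access into an error return instead of an oops. On failure the GVA is
 * faulted in via kvm_trap_emul_gva_fault() and the op retried; only a
 * genuinely bad address or a deliverable guest TLB exception leaves the
 * loop.
 */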
2182 | |||
2183 | enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst, | ||
2184 | u32 *opc, u32 cause, | ||
2185 | struct kvm_vcpu *vcpu) | ||
2186 | { | ||
2187 | enum emulation_result er = EMULATE_DONE; | ||
2188 | u32 cache, op_inst, op, base; | ||
2189 | s16 offset; | ||
2190 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2191 | unsigned long va; | ||
2192 | unsigned long curr_pc; | ||
2193 | |||
2194 | /* | ||
2195 | * Update PC and hold onto current PC in case there is | ||
2196 | * an error and we want to rollback the PC | ||
2197 | */ | ||
2198 | curr_pc = vcpu->arch.pc; | ||
2199 | er = update_pc(vcpu, cause); | ||
2200 | if (er == EMULATE_FAIL) | ||
2201 | return er; | ||
2202 | |||
2203 | base = inst.i_format.rs; | ||
2204 | op_inst = inst.i_format.rt; | ||
2205 | if (cpu_has_mips_r6) | ||
2206 | offset = inst.spec3_format.simmediate; | ||
2207 | else | ||
2208 | offset = inst.i_format.simmediate; | ||
2209 | cache = op_inst & CacheOp_Cache; | ||
2210 | op = op_inst & CacheOp_Op; | ||
2211 | |||
2212 | va = arch->gprs[base] + offset; | ||
2213 | |||
2214 | kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", | ||
2215 | cache, op, base, arch->gprs[base], offset); | ||
2216 | |||
2217 | /* | ||
2218 | * Index-type writeback/invalidate ops are issued by Linux on startup | ||
2219 | * to invalidate the caches entirely by stepping through all the | ||
2220 | * ways/indexes, so emulate each one with a single full host cache flush. | ||
2221 | */ | ||
2222 | if (op == Index_Writeback_Inv) { | ||
2223 | kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", | ||
2224 | vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base, | ||
2225 | arch->gprs[base], offset); | ||
2226 | |||
2227 | if (cache == Cache_D) { | ||
2228 | #ifdef CONFIG_CPU_R4K_CACHE_TLB | ||
2229 | r4k_blast_dcache(); | ||
2230 | #else | ||
2231 | switch (boot_cpu_type()) { | ||
2232 | case CPU_CAVIUM_OCTEON3: | ||
2233 | /* locally flush icache */ | ||
2234 | local_flush_icache_range(0, 0); | ||
2235 | break; | ||
2236 | default: | ||
2237 | __flush_cache_all(); | ||
2238 | break; | ||
2239 | } | ||
2240 | #endif | ||
2241 | } else if (cache == Cache_I) { | ||
2242 | #ifdef CONFIG_CPU_R4K_CACHE_TLB | ||
2243 | r4k_blast_icache(); | ||
2244 | #else | ||
2245 | switch (boot_cpu_type()) { | ||
2246 | case CPU_CAVIUM_OCTEON3: | ||
2247 | /* locally flush icache */ | ||
2248 | local_flush_icache_range(0, 0); | ||
2249 | break; | ||
2250 | default: | ||
2251 | flush_icache_all(); | ||
2252 | break; | ||
2253 | } | ||
2254 | #endif | ||
2255 | } else { | ||
2256 | kvm_err("%s: unsupported CACHE INDEX operation\n", | ||
2257 | __func__); | ||
2258 | return EMULATE_FAIL; | ||
2259 | } | ||
2260 | |||
2261 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | ||
2262 | kvm_mips_trans_cache_index(inst, opc, vcpu); | ||
2263 | #endif | ||
2264 | goto done; | ||
2265 | } | ||
2266 | |||
2267 | /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */ | ||
2268 | if (op_inst == Hit_Writeback_Inv_D || op_inst == Hit_Invalidate_D) { | ||
2269 | /* | ||
2270 | * Perform the dcache part of icache synchronisation on the | ||
2271 | * guest's behalf. | ||
2272 | */ | ||
2273 | er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, | ||
2274 | curr_pc, va, vcpu, cause); | ||
2275 | if (er != EMULATE_DONE) | ||
2276 | goto done; | ||
2277 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | ||
2278 | /* | ||
2279 | * Replace the CACHE instruction with a SYNCI; not identical, | ||
2280 | * but it avoids a trap | ||
2281 | */ | ||
2282 | kvm_mips_trans_cache_va(inst, opc, vcpu); | ||
2283 | #endif | ||
2284 | } else if (op_inst == Hit_Invalidate_I) { | ||
2285 | /* Perform the icache synchronisation on the guest's behalf */ | ||
2286 | er = kvm_mips_guest_cache_op(protected_writeback_dcache_line, | ||
2287 | curr_pc, va, vcpu, cause); | ||
2288 | if (er != EMULATE_DONE) | ||
2289 | goto done; | ||
2290 | er = kvm_mips_guest_cache_op(protected_flush_icache_line, | ||
2291 | curr_pc, va, vcpu, cause); | ||
2292 | if (er != EMULATE_DONE) | ||
2293 | goto done; | ||
2294 | |||
2295 | #ifdef CONFIG_KVM_MIPS_DYN_TRANS | ||
2296 | /* Replace the CACHE instruction, with a SYNCI */ | ||
2297 | kvm_mips_trans_cache_va(inst, opc, vcpu); | ||
2298 | #endif | ||
2299 | } else { | ||
2300 | kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", | ||
2301 | cache, op, base, arch->gprs[base], offset); | ||
2302 | er = EMULATE_FAIL; | ||
2303 | } | ||
2304 | |||
2305 | done: | ||
2306 | /* Rollback PC only if emulation was unsuccessful */ | ||
2307 | if (er == EMULATE_FAIL) | ||
2308 | vcpu->arch.pc = curr_pc; | ||
2309 | /* Guest exception needs guest to resume */ | ||
2310 | if (er == EMULATE_EXCEPT) | ||
2311 | er = EMULATE_DONE; | ||
2312 | |||
2313 | return er; | ||
2314 | } | ||
2315 | |||
2316 | enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc, | ||
2317 | struct kvm_vcpu *vcpu) | ||
2318 | { | ||
2319 | union mips_instruction inst; | ||
2320 | enum emulation_result er = EMULATE_DONE; | ||
2321 | int err; | ||
2322 | |||
2323 | /* Fetch the instruction. */ | ||
2324 | if (cause & CAUSEF_BD) | ||
2325 | opc += 1; | ||
2326 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
2327 | if (err) | ||
2328 | return EMULATE_FAIL; | ||
2329 | |||
2330 | switch (inst.r_format.opcode) { | ||
2331 | case cop0_op: | ||
2332 | er = kvm_mips_emulate_CP0(inst, opc, cause, vcpu); | ||
2333 | break; | ||
2334 | |||
2335 | #ifndef CONFIG_CPU_MIPSR6 | ||
2336 | case cache_op: | ||
2337 | ++vcpu->stat.cache_exits; | ||
2338 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); | ||
2339 | er = kvm_mips_emulate_cache(inst, opc, cause, vcpu); | ||
2340 | break; | ||
2341 | #else | ||
2342 | case spec3_op: | ||
2343 | switch (inst.spec3_format.func) { | ||
2344 | case cache6_op: | ||
2345 | ++vcpu->stat.cache_exits; | ||
2346 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); | ||
2347 | er = kvm_mips_emulate_cache(inst, opc, cause, | ||
2348 | vcpu); | ||
2349 | break; | ||
2350 | default: | ||
2351 | goto unknown; | ||
2352 | } | ||
2353 | break; | ||
2354 | unknown: | ||
2355 | #endif | ||
2356 | |||
2357 | default: | ||
2358 | kvm_err("Instruction emulation not supported (%p/%#x)\n", opc, | ||
2359 | inst.word); | ||
2360 | kvm_arch_vcpu_dump_regs(vcpu); | ||
2361 | er = EMULATE_FAIL; | ||
2362 | break; | ||
2363 | } | ||
2364 | |||
2365 | return er; | ||
2366 | } | ||
2367 | #endif /* CONFIG_KVM_MIPS_VZ */ | ||
2368 | |||
2369 | /** | ||
2370 | * kvm_mips_guest_exception_base() - Find guest exception vector base address. | ||
2371 | * | ||
2372 | * Returns: The base address of the current guest exception vector, taking | ||
2373 | * both Guest.CP0_Status.BEV and Guest.CP0_EBase into account. | ||
2374 | */ | ||
2375 | long kvm_mips_guest_exception_base(struct kvm_vcpu *vcpu) | ||
2376 | { | ||
2377 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2378 | |||
2379 | if (kvm_read_c0_guest_status(cop0) & ST0_BEV) | ||
2380 | return KVM_GUEST_CKSEG1ADDR(0x1fc00200); | ||
2381 | else | ||
2382 | return kvm_read_c0_guest_ebase(cop0) & MIPS_EBASE_BASE; | ||
2383 | } | ||
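/*
 * With Guest.CP0_Status.BEV set, exceptions vector to the bootstrap
 * vector (physical 0x1fc00200, mapped through the guest's KSEG1
 * equivalent); otherwise the base comes from Guest.CP0_EBase with the
 * low CPU-number field masked off. Callers add 0x180 for the general
 * exception vector, or 0x0 for a TLB refill taken with EXL clear.
 */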
2384 | |||
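/*
 * The exception injection helpers below all follow the architected
 * delivery recipe: if Guest.Status.EXL is clear, save the faulting PC to
 * Guest.EPC, set EXL, and mirror the host's branch-delay (BD) bit into
 * Guest.Cause; then set Cause.ExcCode and point the PC at the guest
 * vector, base + 0x180 in general or base + 0x0 for a TLB refill taken
 * with EXL clear.
 */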
2385 | enum emulation_result kvm_mips_emulate_syscall(u32 cause, | ||
2386 | u32 *opc, | ||
2387 | struct kvm_vcpu *vcpu) | ||
2388 | { | ||
2389 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2390 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2391 | enum emulation_result er = EMULATE_DONE; | ||
2392 | |||
2393 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2394 | /* save old pc */ | ||
2395 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2396 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2397 | |||
2398 | if (cause & CAUSEF_BD) | ||
2399 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2400 | else | ||
2401 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2402 | |||
2403 | kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc); | ||
2404 | |||
2405 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2406 | (EXCCODE_SYS << CAUSEB_EXCCODE)); | ||
2407 | |||
2408 | /* Set PC to the exception entry point */ | ||
2409 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2410 | |||
2411 | } else { | ||
2412 | kvm_err("Trying to deliver SYSCALL when EXL is already set\n"); | ||
2413 | er = EMULATE_FAIL; | ||
2414 | } | ||
2415 | |||
2416 | return er; | ||
2417 | } | ||
2418 | |||
2419 | enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause, | ||
2420 | u32 *opc, | ||
2421 | struct kvm_vcpu *vcpu) | ||
2422 | { | ||
2423 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2424 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2425 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | ||
2426 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); | ||
2427 | |||
2428 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2429 | /* save old pc */ | ||
2430 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2431 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2432 | |||
2433 | if (cause & CAUSEF_BD) | ||
2434 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2435 | else | ||
2436 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2437 | |||
2438 | kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n", | ||
2439 | arch->pc); | ||
2440 | |||
2441 | /* set pc to the exception entry point */ | ||
2442 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; | ||
2443 | |||
2444 | } else { | ||
2445 | kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n", | ||
2446 | arch->pc); | ||
2447 | |||
2448 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2449 | } | ||
2450 | |||
2451 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2452 | (EXCCODE_TLBL << CAUSEB_EXCCODE)); | ||
2453 | |||
2454 | /* setup badvaddr, context and entryhi registers for the guest */ | ||
2455 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | ||
2456 | /* XXXKYMA: is the context register used by linux??? */ | ||
2457 | kvm_write_c0_guest_entryhi(cop0, entryhi); | ||
2458 | |||
2459 | return EMULATE_DONE; | ||
2460 | } | ||
2461 | |||
2462 | enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause, | ||
2463 | u32 *opc, | ||
2464 | struct kvm_vcpu *vcpu) | ||
2465 | { | ||
2466 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2467 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2468 | unsigned long entryhi = | ||
2469 | (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | ||
2470 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); | ||
2471 | |||
2472 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2473 | /* save old pc */ | ||
2474 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2475 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2476 | |||
2477 | if (cause & CAUSEF_BD) | ||
2478 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2479 | else | ||
2480 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2481 | |||
2482 | kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n", | ||
2483 | arch->pc); | ||
2484 | } else { | ||
2485 | kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n", | ||
2486 | arch->pc); | ||
2487 | } | ||
2488 | |||
2489 | /* set pc to the exception entry point */ | ||
2490 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2491 | |||
2492 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2493 | (EXCCODE_TLBL << CAUSEB_EXCCODE)); | ||
2494 | |||
2495 | /* setup badvaddr, context and entryhi registers for the guest */ | ||
2496 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | ||
2497 | /* XXXKYMA: is the context register used by linux??? */ | ||
2498 | kvm_write_c0_guest_entryhi(cop0, entryhi); | ||
2499 | |||
2500 | return EMULATE_DONE; | ||
2501 | } | ||
2502 | |||
2503 | enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause, | ||
2504 | u32 *opc, | ||
2505 | struct kvm_vcpu *vcpu) | ||
2506 | { | ||
2507 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2508 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2509 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | ||
2510 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); | ||
2511 | |||
2512 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2513 | /* save old pc */ | ||
2514 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2515 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2516 | |||
2517 | if (cause & CAUSEF_BD) | ||
2518 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2519 | else | ||
2520 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2521 | |||
2522 | kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n", | ||
2523 | arch->pc); | ||
2524 | |||
2525 | /* Set PC to the exception entry point */ | ||
2526 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x0; | ||
2527 | } else { | ||
2528 | kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n", | ||
2529 | arch->pc); | ||
2530 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2531 | } | ||
2532 | |||
2533 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2534 | (EXCCODE_TLBS << CAUSEB_EXCCODE)); | ||
2535 | |||
2536 | /* setup badvaddr, context and entryhi registers for the guest */ | ||
2537 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | ||
2538 | /* XXXKYMA: is the context register used by linux??? */ | ||
2539 | kvm_write_c0_guest_entryhi(cop0, entryhi); | ||
2540 | |||
2541 | return EMULATE_DONE; | ||
2542 | } | ||
2543 | |||
2544 | enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause, | ||
2545 | u32 *opc, | ||
2546 | struct kvm_vcpu *vcpu) | ||
2547 | { | ||
2548 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2549 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2550 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | ||
2551 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); | ||
2552 | |||
2553 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2554 | /* save old pc */ | ||
2555 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2556 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2557 | |||
2558 | if (cause & CAUSEF_BD) | ||
2559 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2560 | else | ||
2561 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2562 | |||
2563 | kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n", | ||
2564 | arch->pc); | ||
2565 | } else { | ||
2566 | kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n", | ||
2567 | arch->pc); | ||
2568 | } | ||
2569 | |||
2570 | /* Set PC to the exception entry point */ | ||
2571 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2572 | |||
2573 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2574 | (EXCCODE_TLBS << CAUSEB_EXCCODE)); | ||
2575 | |||
2576 | /* setup badvaddr, context and entryhi registers for the guest */ | ||
2577 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | ||
2578 | /* XXXKYMA: is the context register used by linux??? */ | ||
2579 | kvm_write_c0_guest_entryhi(cop0, entryhi); | ||
2580 | |||
2581 | return EMULATE_DONE; | ||
2582 | } | ||
2583 | |||
2584 | enum emulation_result kvm_mips_emulate_tlbmod(u32 cause, | ||
2585 | u32 *opc, | ||
2586 | struct kvm_vcpu *vcpu) | ||
2587 | { | ||
2588 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2589 | unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) | | ||
2590 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); | ||
2591 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2592 | |||
2593 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2594 | /* save old pc */ | ||
2595 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2596 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2597 | |||
2598 | if (cause & CAUSEF_BD) | ||
2599 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2600 | else | ||
2601 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2602 | |||
2603 | kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n", | ||
2604 | arch->pc); | ||
2605 | } else { | ||
2606 | kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n", | ||
2607 | arch->pc); | ||
2608 | } | ||
2609 | |||
2610 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2611 | |||
2612 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2613 | (EXCCODE_MOD << CAUSEB_EXCCODE)); | ||
2614 | |||
2615 | /* setup badvaddr, context and entryhi registers for the guest */ | ||
2616 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | ||
2617 | /* XXXKYMA: is the context register used by linux??? */ | ||
2618 | kvm_write_c0_guest_entryhi(cop0, entryhi); | ||
2619 | |||
2620 | return EMULATE_DONE; | ||
2621 | } | ||
2622 | |||
2623 | enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause, | ||
2624 | u32 *opc, | ||
2625 | struct kvm_vcpu *vcpu) | ||
2626 | { | ||
2627 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2628 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2629 | |||
2630 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2631 | /* save old pc */ | ||
2632 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2633 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2634 | |||
2635 | if (cause & CAUSEF_BD) | ||
2636 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2637 | else | ||
2638 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2639 | |||
2640 | } | ||
2641 | |||
2642 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2643 | |||
2644 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2645 | (EXCCODE_CPU << CAUSEB_EXCCODE)); | ||
2646 | kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE)); | ||
2647 | |||
2648 | return EMULATE_DONE; | ||
2649 | } | ||
2650 | |||
2651 | enum emulation_result kvm_mips_emulate_ri_exc(u32 cause, | ||
2652 | u32 *opc, | ||
2653 | struct kvm_vcpu *vcpu) | ||
2654 | { | ||
2655 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2656 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2657 | enum emulation_result er = EMULATE_DONE; | ||
2658 | |||
2659 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2660 | /* save old pc */ | ||
2661 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2662 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2663 | |||
2664 | if (cause & CAUSEF_BD) | ||
2665 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2666 | else | ||
2667 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2668 | |||
2669 | kvm_debug("Delivering RI @ pc %#lx\n", arch->pc); | ||
2670 | |||
2671 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2672 | (EXCCODE_RI << CAUSEB_EXCCODE)); | ||
2673 | |||
2674 | /* Set PC to the exception entry point */ | ||
2675 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2676 | |||
2677 | } else { | ||
2678 | kvm_err("Trying to deliver RI when EXL is already set\n"); | ||
2679 | er = EMULATE_FAIL; | ||
2680 | } | ||
2681 | |||
2682 | return er; | ||
2683 | } | ||
2684 | |||
2685 | enum emulation_result kvm_mips_emulate_bp_exc(u32 cause, | ||
2686 | u32 *opc, | ||
2687 | struct kvm_vcpu *vcpu) | ||
2688 | { | ||
2689 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2690 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2691 | enum emulation_result er = EMULATE_DONE; | ||
2692 | |||
2693 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2694 | /* save old pc */ | ||
2695 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2696 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2697 | |||
2698 | if (cause & CAUSEF_BD) | ||
2699 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2700 | else | ||
2701 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2702 | |||
2703 | kvm_debug("Delivering BP @ pc %#lx\n", arch->pc); | ||
2704 | |||
2705 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2706 | (EXCCODE_BP << CAUSEB_EXCCODE)); | ||
2707 | |||
2708 | /* Set PC to the exception entry point */ | ||
2709 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2710 | |||
2711 | } else { | ||
2712 | kvm_err("Trying to deliver BP when EXL is already set\n"); | ||
2713 | er = EMULATE_FAIL; | ||
2714 | } | ||
2715 | |||
2716 | return er; | ||
2717 | } | ||
2718 | |||
2719 | enum emulation_result kvm_mips_emulate_trap_exc(u32 cause, | ||
2720 | u32 *opc, | ||
2721 | struct kvm_vcpu *vcpu) | ||
2722 | { | ||
2723 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2724 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2725 | enum emulation_result er = EMULATE_DONE; | ||
2726 | |||
2727 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2728 | /* save old pc */ | ||
2729 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2730 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2731 | |||
2732 | if (cause & CAUSEF_BD) | ||
2733 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2734 | else | ||
2735 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2736 | |||
2737 | kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc); | ||
2738 | |||
2739 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2740 | (EXCCODE_TR << CAUSEB_EXCCODE)); | ||
2741 | |||
2742 | /* Set PC to the exception entry point */ | ||
2743 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2744 | |||
2745 | } else { | ||
2746 | kvm_err("Trying to deliver TRAP when EXL is already set\n"); | ||
2747 | er = EMULATE_FAIL; | ||
2748 | } | ||
2749 | |||
2750 | return er; | ||
2751 | } | ||
2752 | |||
2753 | enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause, | ||
2754 | u32 *opc, | ||
2755 | struct kvm_vcpu *vcpu) | ||
2756 | { | ||
2757 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2758 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2759 | enum emulation_result er = EMULATE_DONE; | ||
2760 | |||
2761 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2762 | /* save old pc */ | ||
2763 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2764 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2765 | |||
2766 | if (cause & CAUSEF_BD) | ||
2767 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2768 | else | ||
2769 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2770 | |||
2771 | kvm_debug("Delivering MSAFPE @ pc %#lx\n", arch->pc); | ||
2772 | |||
2773 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2774 | (EXCCODE_MSAFPE << CAUSEB_EXCCODE)); | ||
2775 | |||
2776 | /* Set PC to the exception entry point */ | ||
2777 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2778 | |||
2779 | } else { | ||
2780 | kvm_err("Trying to deliver MSAFPE when EXL is already set\n"); | ||
2781 | er = EMULATE_FAIL; | ||
2782 | } | ||
2783 | |||
2784 | return er; | ||
2785 | } | ||
2786 | |||
2787 | enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause, | ||
2788 | u32 *opc, | ||
2789 | struct kvm_vcpu *vcpu) | ||
2790 | { | ||
2791 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2792 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2793 | enum emulation_result er = EMULATE_DONE; | ||
2794 | |||
2795 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2796 | /* save old pc */ | ||
2797 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2798 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2799 | |||
2800 | if (cause & CAUSEF_BD) | ||
2801 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2802 | else | ||
2803 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2804 | |||
2805 | kvm_debug("Delivering FPE @ pc %#lx\n", arch->pc); | ||
2806 | |||
2807 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2808 | (EXCCODE_FPE << CAUSEB_EXCCODE)); | ||
2809 | |||
2810 | /* Set PC to the exception entry point */ | ||
2811 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2812 | |||
2813 | } else { | ||
2814 | kvm_err("Trying to deliver FPE when EXL is already set\n"); | ||
2815 | er = EMULATE_FAIL; | ||
2816 | } | ||
2817 | |||
2818 | return er; | ||
2819 | } | ||
2820 | |||
2821 | enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause, | ||
2822 | u32 *opc, | ||
2823 | struct kvm_vcpu *vcpu) | ||
2824 | { | ||
2825 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2826 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2827 | enum emulation_result er = EMULATE_DONE; | ||
2828 | |||
2829 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
2830 | /* save old pc */ | ||
2831 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
2832 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
2833 | |||
2834 | if (cause & CAUSEF_BD) | ||
2835 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
2836 | else | ||
2837 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
2838 | |||
2839 | kvm_debug("Delivering MSADIS @ pc %#lx\n", arch->pc); | ||
2840 | |||
2841 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
2842 | (EXCCODE_MSADIS << CAUSEB_EXCCODE)); | ||
2843 | |||
2844 | /* Set PC to the exception entry point */ | ||
2845 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
2846 | |||
2847 | } else { | ||
2848 | kvm_err("Trying to deliver MSADIS when EXL is already set\n"); | ||
2849 | er = EMULATE_FAIL; | ||
2850 | } | ||
2851 | |||
2852 | return er; | ||
2853 | } | ||
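
The delivery helpers above (RI, BP, TRAP, MSAFPE, FPE, MSADIS) share one pattern: save the old PC to guest EPC, set Status.EXL, mirror Cause.BD, rewrite Cause.ExcCode, and vector to the guest exception base + 0x180. Below is a minimal userspace sketch of the Cause read-modify-write they all perform; the constants follow the MIPS32 PRA and the sample values are illustrative only.

	#include <stdint.h>
	#include <stdio.h>

	#define CAUSEB_EXCCODE	2
	#define EXCCODE_RI	10

	/* Clear 'clear' bits of Cause, then OR in 'set', as
	 * kvm_change_c0_guest_cause(cop0, clear, set) does. */
	static uint32_t change_cause(uint32_t cause, uint32_t clear, uint32_t set)
	{
		return (cause & ~clear) | set;
	}

	int main(void)
	{
		uint32_t cause = 0x80000020;	/* BD set, ExcCode = 8 (SYS) */

		cause = change_cause(cause, 0xff, EXCCODE_RI << CAUSEB_EXCCODE);
		printf("cause = 0x%08x\n", cause);	/* 0x80000028: ExcCode = RI */
		return 0;
	}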
2854 | |||
2855 | enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc, | ||
2856 | struct kvm_vcpu *vcpu) | ||
2857 | { | ||
2858 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2859 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
2860 | enum emulation_result er = EMULATE_DONE; | ||
2861 | unsigned long curr_pc; | ||
2862 | union mips_instruction inst; | ||
2863 | int err; | ||
2864 | |||
2865 | /* | ||
2866 | * Update PC and hold onto current PC in case there is | ||
2867 | * an error and we want to roll back the PC | ||
2868 | */ | ||
2869 | curr_pc = vcpu->arch.pc; | ||
2870 | er = update_pc(vcpu, cause); | ||
2871 | if (er == EMULATE_FAIL) | ||
2872 | return er; | ||
2873 | |||
2874 | /* Fetch the instruction. */ | ||
2875 | if (cause & CAUSEF_BD) | ||
2876 | opc += 1; | ||
2877 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
2878 | if (err) { | ||
2879 | kvm_err("%s: Cannot get inst @ %p (%d)\n", __func__, opc, err); | ||
2880 | return EMULATE_FAIL; | ||
2881 | } | ||
2882 | |||
2883 | if (inst.r_format.opcode == spec3_op && | ||
2884 | inst.r_format.func == rdhwr_op && | ||
2885 | inst.r_format.rs == 0 && | ||
2886 | (inst.r_format.re >> 3) == 0) { | ||
2887 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); | ||
2888 | int rd = inst.r_format.rd; | ||
2889 | int rt = inst.r_format.rt; | ||
2890 | int sel = inst.r_format.re & 0x7; | ||
2891 | |||
2892 | /* If usermode, check RDHWR rd is allowed by guest HWREna */ | ||
2893 | if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) { | ||
2894 | kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n", | ||
2895 | rd, opc); | ||
2896 | goto emulate_ri; | ||
2897 | } | ||
2898 | switch (rd) { | ||
2899 | case MIPS_HWR_CPUNUM: /* CPU number */ | ||
2900 | arch->gprs[rt] = vcpu->vcpu_id; | ||
2901 | break; | ||
2902 | case MIPS_HWR_SYNCISTEP: /* SYNCI length */ | ||
2903 | arch->gprs[rt] = min(current_cpu_data.dcache.linesz, | ||
2904 | current_cpu_data.icache.linesz); | ||
2905 | break; | ||
2906 | case MIPS_HWR_CC: /* Read count register */ | ||
2907 | arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu); | ||
2908 | break; | ||
2909 | case MIPS_HWR_CCRES: /* Count register resolution */ | ||
2910 | switch (current_cpu_data.cputype) { | ||
2911 | case CPU_20KC: | ||
2912 | case CPU_25KF: | ||
2913 | arch->gprs[rt] = 1; | ||
2914 | break; | ||
2915 | default: | ||
2916 | arch->gprs[rt] = 2; | ||
2917 | } | ||
2918 | break; | ||
2919 | case MIPS_HWR_ULR: /* Read UserLocal register */ | ||
2920 | arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0); | ||
2921 | break; | ||
2922 | |||
2923 | default: | ||
2924 | kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc); | ||
2925 | goto emulate_ri; | ||
2926 | } | ||
2927 | |||
2928 | trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel), | ||
2929 | vcpu->arch.gprs[rt]); | ||
2930 | } else { | ||
2931 | kvm_debug("Emulate RI not supported @ %p: %#x\n", | ||
2932 | opc, inst.word); | ||
2933 | goto emulate_ri; | ||
2934 | } | ||
2935 | |||
2936 | return EMULATE_DONE; | ||
2937 | |||
2938 | emulate_ri: | ||
2939 | /* | ||
2940 | * Roll back the PC (if in a branch delay slot then the PC already points | ||
2941 | * to the branch target), and pass the RI exception to the guest OS. | ||
2942 | */ | ||
2943 | vcpu->arch.pc = curr_pc; | ||
2944 | return kvm_mips_emulate_ri_exc(cause, opc, vcpu); | ||
2945 | } | ||
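
kvm_mips_handle_ri() only emulates RDHWR; anything else is reflected back to the guest as an RI exception. A userspace sketch of the field decode it performs on the fetched word, using the standard MIPS32 R-format bit positions (SPEC3 opcode 0x1f, RDHWR function 0x3b; the sample word is the common "rdhwr v1, $29" TLS sequence):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t inst = 0x7c03e83b;	/* rdhwr v1, $29 (UserLocal) */
		uint32_t opcode = (inst >> 26) & 0x3f;	/* spec3_op == 0x1f */
		uint32_t rt  = (inst >> 16) & 0x1f;	/* destination GPR */
		uint32_t rd  = (inst >> 11) & 0x1f;	/* hardware register number */
		uint32_t sel = (inst >> 6) & 0x7;	/* low 3 bits of the re field */
		uint32_t func = inst & 0x3f;		/* rdhwr_op == 0x3b */

		if (opcode == 0x1f && func == 0x3b)
			printf("rdhwr: rt=%u rd=%u sel=%u\n", rt, rd, sel);
		return 0;
	}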
2946 | |||
2947 | enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu) | ||
2948 | { | ||
2949 | struct kvm_run *run = vcpu->run; | ||
2950 | unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr]; | ||
2951 | enum emulation_result er = EMULATE_DONE; | ||
2952 | |||
2953 | if (run->mmio.len > sizeof(*gpr)) { | ||
2954 | kvm_err("Bad MMIO length: %d\n", run->mmio.len); | ||
2955 | er = EMULATE_FAIL; | ||
2956 | goto done; | ||
2957 | } | ||
2958 | |||
2959 | /* Restore saved resume PC */ | ||
2960 | vcpu->arch.pc = vcpu->arch.io_pc; | ||
2961 | |||
2962 | switch (run->mmio.len) { | ||
2963 | case 8: | ||
2964 | switch (vcpu->mmio_needed) { | ||
2965 | case 11: | ||
2966 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff) | | ||
2967 | (((*(s64 *)run->mmio.data) & 0xff) << 56); | ||
2968 | break; | ||
2969 | case 12: | ||
2970 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff) | | ||
2971 | (((*(s64 *)run->mmio.data) & 0xffff) << 48); | ||
2972 | break; | ||
2973 | case 13: | ||
2974 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff) | | ||
2975 | (((*(s64 *)run->mmio.data) & 0xffffff) << 40); | ||
2976 | break; | ||
2977 | case 14: | ||
2978 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff) | | ||
2979 | (((*(s64 *)run->mmio.data) & 0xffffffff) << 32); | ||
2980 | break; | ||
2981 | case 15: | ||
2982 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | | ||
2983 | (((*(s64 *)run->mmio.data) & 0xffffffffff) << 24); | ||
2984 | break; | ||
2985 | case 16: | ||
2986 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | | ||
2987 | (((*(s64 *)run->mmio.data) & 0xffffffffffff) << 16); | ||
2988 | break; | ||
2989 | case 17: | ||
2990 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | | ||
2991 | (((*(s64 *)run->mmio.data) & 0xffffffffffffff) << 8); | ||
2992 | break; | ||
2993 | case 18: | ||
2994 | case 19: | ||
2995 | *gpr = *(s64 *)run->mmio.data; | ||
2996 | break; | ||
2997 | case 20: | ||
2998 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff00000000000000) | | ||
2999 | ((((*(s64 *)run->mmio.data)) >> 8) & 0xffffffffffffff); | ||
3000 | break; | ||
3001 | case 21: | ||
3002 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff000000000000) | | ||
3003 | ((((*(s64 *)run->mmio.data)) >> 16) & 0xffffffffffff); | ||
3004 | break; | ||
3005 | case 22: | ||
3006 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff0000000000) | | ||
3007 | ((((*(s64 *)run->mmio.data)) >> 24) & 0xffffffffff); | ||
3008 | break; | ||
3009 | case 23: | ||
3010 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffff00000000) | | ||
3011 | ((((*(s64 *)run->mmio.data)) >> 32) & 0xffffffff); | ||
3012 | break; | ||
3013 | case 24: | ||
3014 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffff000000) | | ||
3015 | ((((*(s64 *)run->mmio.data)) >> 40) & 0xffffff); | ||
3016 | break; | ||
3017 | case 25: | ||
3018 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffff0000) | | ||
3019 | ((((*(s64 *)run->mmio.data)) >> 48) & 0xffff); | ||
3020 | break; | ||
3021 | case 26: | ||
3022 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffffffffffff00) | | ||
3023 | ((((*(s64 *)run->mmio.data)) >> 56) & 0xff); | ||
3024 | break; | ||
3025 | default: | ||
3026 | *gpr = *(s64 *)run->mmio.data; | ||
3027 | } | ||
3028 | break; | ||
3029 | |||
3030 | case 4: | ||
3031 | switch (vcpu->mmio_needed) { | ||
3032 | case 1: | ||
3033 | *gpr = *(u32 *)run->mmio.data; | ||
3034 | break; | ||
3035 | case 2: | ||
3036 | *gpr = *(s32 *)run->mmio.data; | ||
3037 | break; | ||
3038 | case 3: | ||
3039 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff) | | ||
3040 | (((*(s32 *)run->mmio.data) & 0xff) << 24); | ||
3041 | break; | ||
3042 | case 4: | ||
3043 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff) | | ||
3044 | (((*(s32 *)run->mmio.data) & 0xffff) << 16); | ||
3045 | break; | ||
3046 | case 5: | ||
3047 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff) | | ||
3048 | (((*(s32 *)run->mmio.data) & 0xffffff) << 8); | ||
3049 | break; | ||
3050 | case 6: | ||
3051 | case 7: | ||
3052 | *gpr = *(s32 *)run->mmio.data; | ||
3053 | break; | ||
3054 | case 8: | ||
3055 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xff000000) | | ||
3056 | ((((*(s32 *)run->mmio.data)) >> 8) & 0xffffff); | ||
3057 | break; | ||
3058 | case 9: | ||
3059 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffff0000) | | ||
3060 | ((((*(s32 *)run->mmio.data)) >> 16) & 0xffff); | ||
3061 | break; | ||
3062 | case 10: | ||
3063 | *gpr = (vcpu->arch.gprs[vcpu->arch.io_gpr] & 0xffffff00) | | ||
3064 | ((((*(s32 *)run->mmio.data)) >> 24) & 0xff); | ||
3065 | break; | ||
3066 | default: | ||
3067 | *gpr = *(s32 *)run->mmio.data; | ||
3068 | } | ||
3069 | break; | ||
3070 | |||
3071 | case 2: | ||
3072 | if (vcpu->mmio_needed == 1) | ||
3073 | *gpr = *(u16 *)run->mmio.data; | ||
3074 | else | ||
3075 | *gpr = *(s16 *)run->mmio.data; | ||
3076 | |||
3077 | break; | ||
3078 | case 1: | ||
3079 | if (vcpu->mmio_needed == 1) | ||
3080 | *gpr = *(u8 *)run->mmio.data; | ||
3081 | else | ||
3082 | *gpr = *(s8 *)run->mmio.data; | ||
3083 | break; | ||
3084 | } | ||
3085 | |||
3086 | done: | ||
3087 | return er; | ||
3088 | } | ||
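
The mmio_needed codes above select how a completed MMIO read is merged into the destination GPR, which is how the unaligned LWL/LWR/LDL/LDR loads preserve part of the old register value. A sketch of the 32-bit "case 3" merge, with made-up values (keep the low 24 bits, insert the one loaded byte at bits 31:24):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t old_gpr = 0x11223344;	/* value already in the GPR */
		int32_t mmio_data = 0xAA;	/* single byte read from MMIO */

		/* mmio_needed == 3: keep bits 23:0, insert loaded byte at 31:24 */
		uint32_t merged = (old_gpr & 0xffffff) |
				  (((uint32_t)mmio_data & 0xff) << 24);

		printf("merged = 0x%08x\n", merged);	/* 0xaa223344 */
		return 0;
	}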
3089 | |||
3090 | static enum emulation_result kvm_mips_emulate_exc(u32 cause, | ||
3091 | u32 *opc, | ||
3092 | struct kvm_vcpu *vcpu) | ||
3093 | { | ||
3094 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | ||
3095 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
3096 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
3097 | enum emulation_result er = EMULATE_DONE; | ||
3098 | |||
3099 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
3100 | /* save old pc */ | ||
3101 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
3102 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
3103 | |||
3104 | if (cause & CAUSEF_BD) | ||
3105 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
3106 | else | ||
3107 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
3108 | |||
3109 | kvm_change_c0_guest_cause(cop0, (0xff), | ||
3110 | (exccode << CAUSEB_EXCCODE)); | ||
3111 | |||
3112 | /* Set PC to the exception entry point */ | ||
3113 | arch->pc = kvm_mips_guest_exception_base(vcpu) + 0x180; | ||
3114 | kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr); | ||
3115 | |||
3116 | kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n", | ||
3117 | exccode, kvm_read_c0_guest_epc(cop0), | ||
3118 | kvm_read_c0_guest_badvaddr(cop0)); | ||
3119 | } else { | ||
3120 | kvm_err("Trying to deliver EXC when EXL is already set\n"); | ||
3121 | er = EMULATE_FAIL; | ||
3122 | } | ||
3123 | |||
3124 | return er; | ||
3125 | } | ||
3126 | |||
3127 | enum emulation_result kvm_mips_check_privilege(u32 cause, | ||
3128 | u32 *opc, | ||
3129 | struct kvm_vcpu *vcpu) | ||
3130 | { | ||
3131 | enum emulation_result er = EMULATE_DONE; | ||
3132 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | ||
3133 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
3134 | |||
3135 | int usermode = !KVM_GUEST_KERNEL_MODE(vcpu); | ||
3136 | |||
3137 | if (usermode) { | ||
3138 | switch (exccode) { | ||
3139 | case EXCCODE_INT: | ||
3140 | case EXCCODE_SYS: | ||
3141 | case EXCCODE_BP: | ||
3142 | case EXCCODE_RI: | ||
3143 | case EXCCODE_TR: | ||
3144 | case EXCCODE_MSAFPE: | ||
3145 | case EXCCODE_FPE: | ||
3146 | case EXCCODE_MSADIS: | ||
3147 | break; | ||
3148 | |||
3149 | case EXCCODE_CPU: | ||
3150 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0) | ||
3151 | er = EMULATE_PRIV_FAIL; | ||
3152 | break; | ||
3153 | |||
3154 | case EXCCODE_MOD: | ||
3155 | break; | ||
3156 | |||
3157 | case EXCCODE_TLBL: | ||
3158 | /* | ||
3159 | * If we are accessing Guest kernel space, then send an | ||
3160 | * address error exception to the guest | ||
3161 | */ | ||
3162 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | ||
3163 | kvm_debug("%s: LD MISS @ %#lx\n", __func__, | ||
3164 | badvaddr); | ||
3165 | cause &= ~0xff; | ||
3166 | cause |= (EXCCODE_ADEL << CAUSEB_EXCCODE); | ||
3167 | er = EMULATE_PRIV_FAIL; | ||
3168 | } | ||
3169 | break; | ||
3170 | |||
3171 | case EXCCODE_TLBS: | ||
3172 | /* | ||
3173 | * If we are accessing Guest kernel space, then send an | ||
3174 | * address error exception to the guest | ||
3175 | */ | ||
3176 | if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) { | ||
3177 | kvm_debug("%s: ST MISS @ %#lx\n", __func__, | ||
3178 | badvaddr); | ||
3179 | cause &= ~0xff; | ||
3180 | cause |= (EXCCODE_ADES << CAUSEB_EXCCODE); | ||
3181 | er = EMULATE_PRIV_FAIL; | ||
3182 | } | ||
3183 | break; | ||
3184 | |||
3185 | case EXCCODE_ADES: | ||
3186 | kvm_debug("%s: address error ST @ %#lx\n", __func__, | ||
3187 | badvaddr); | ||
3188 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | ||
3189 | cause &= ~0xff; | ||
3190 | cause |= (EXCCODE_TLBS << CAUSEB_EXCCODE); | ||
3191 | } | ||
3192 | er = EMULATE_PRIV_FAIL; | ||
3193 | break; | ||
3194 | case EXCCODE_ADEL: | ||
3195 | kvm_debug("%s: address error LD @ %#lx\n", __func__, | ||
3196 | badvaddr); | ||
3197 | if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) { | ||
3198 | cause &= ~0xff; | ||
3199 | cause |= (EXCCODE_TLBL << CAUSEB_EXCCODE); | ||
3200 | } | ||
3201 | er = EMULATE_PRIV_FAIL; | ||
3202 | break; | ||
3203 | default: | ||
3204 | er = EMULATE_PRIV_FAIL; | ||
3205 | break; | ||
3206 | } | ||
3207 | } | ||
3208 | |||
3209 | if (er == EMULATE_PRIV_FAIL) | ||
3210 | kvm_mips_emulate_exc(cause, opc, vcpu); | ||
3211 | |||
3212 | return er; | ||
3213 | } | ||
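
When a user-mode guest touches guest-kernel addresses, the code above rewrites Cause.ExcCode to an address error before reflecting the fault. A sketch of that rewrite (ExcCode occupies bits 6:2, so clearing the low byte is sufficient; EXCCODE_ADEL is 4 per the MIPS32 PRA):

	#include <stdint.h>
	#include <stdio.h>

	#define CAUSEB_EXCCODE	2
	#define EXCCODE_ADEL	4

	int main(void)
	{
		uint32_t cause = 2 << CAUSEB_EXCCODE;	/* ExcCode = TLBL */

		cause &= ~0xffu;			 /* clear ExcCode */
		cause |= EXCCODE_ADEL << CAUSEB_EXCCODE; /* substitute ADEL */
		printf("cause = 0x%08x\n", cause);	 /* 0x00000010 */
		return 0;
	}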
3214 | |||
3215 | /* | ||
3216 | * User Address (UA) fault. This can happen if: | ||
3217 | * (1) the TLB entry is not present/valid in both the Guest and shadow host | ||
3218 | * TLBs; in this case we pass the fault on to the guest kernel and let it handle it. | ||
3219 | * (2) the TLB entry is present in the Guest TLB but not in the shadow host | ||
3220 | * TLB; in this case we inject the entry from the Guest TLB into the shadow host TLB. | ||
3221 | */ | ||
3222 | enum emulation_result kvm_mips_handle_tlbmiss(u32 cause, | ||
3223 | u32 *opc, | ||
3224 | struct kvm_vcpu *vcpu, | ||
3225 | bool write_fault) | ||
3226 | { | ||
3227 | enum emulation_result er = EMULATE_DONE; | ||
3228 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | ||
3229 | unsigned long va = vcpu->arch.host_cp0_badvaddr; | ||
3230 | int index; | ||
3231 | |||
3232 | kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n", | ||
3233 | vcpu->arch.host_cp0_badvaddr); | ||
3234 | |||
3235 | /* | ||
3236 | * KVM would not have got the exception if this entry was valid in the | ||
3237 | * shadow host TLB. Check the Guest TLB, if the entry is not there then | ||
3238 | * send the guest an exception. The guest exc handler should then inject | ||
3239 | * an entry into the guest TLB. | ||
3240 | */ | ||
3241 | index = kvm_mips_guest_tlb_lookup(vcpu, | ||
3242 | (va & VPN2_MASK) | | ||
3243 | (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & | ||
3244 | KVM_ENTRYHI_ASID)); | ||
3245 | if (index < 0) { | ||
3246 | if (exccode == EXCCODE_TLBL) { | ||
3247 | er = kvm_mips_emulate_tlbmiss_ld(cause, opc, vcpu); | ||
3248 | } else if (exccode == EXCCODE_TLBS) { | ||
3249 | er = kvm_mips_emulate_tlbmiss_st(cause, opc, vcpu); | ||
3250 | } else { | ||
3251 | kvm_err("%s: invalid exc code: %d\n", __func__, | ||
3252 | exccode); | ||
3253 | er = EMULATE_FAIL; | ||
3254 | } | ||
3255 | } else { | ||
3256 | struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index]; | ||
3257 | |||
3258 | /* | ||
3259 | * Check if the entry is valid, if not then setup a TLB invalid | ||
3260 | * exception to the guest | ||
3261 | */ | ||
3262 | if (!TLB_IS_VALID(*tlb, va)) { | ||
3263 | if (exccode == EXCCODE_TLBL) { | ||
3264 | er = kvm_mips_emulate_tlbinv_ld(cause, opc, | ||
3265 | vcpu); | ||
3266 | } else if (exccode == EXCCODE_TLBS) { | ||
3267 | er = kvm_mips_emulate_tlbinv_st(cause, opc, | ||
3268 | vcpu); | ||
3269 | } else { | ||
3270 | kvm_err("%s: invalid exc code: %d\n", __func__, | ||
3271 | exccode); | ||
3272 | er = EMULATE_FAIL; | ||
3273 | } | ||
3274 | } else { | ||
3275 | kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n", | ||
3276 | tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]); | ||
3277 | /* | ||
3278 | * OK we have a Guest TLB entry, now inject it into the | ||
3279 | * shadow host TLB | ||
3280 | */ | ||
3281 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, va, | ||
3282 | write_fault)) { | ||
3283 | kvm_err("%s: handling mapped seg tlb fault for %lx, index: %u, vcpu: %p, ASID: %#lx\n", | ||
3284 | __func__, va, index, vcpu, | ||
3285 | read_c0_entryhi()); | ||
3286 | er = EMULATE_FAIL; | ||
3287 | } | ||
3288 | } | ||
3289 | } | ||
3290 | |||
3291 | return er; | ||
3292 | } | ||
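
The guest TLB lookup above keys on an EntryHi-style value: the VPN2 of the faulting address combined with the current guest ASID. A sketch assuming 4 KiB pages (VPN2 masks off the low 13 bits, covering the even/odd page pair) and an 8-bit ASID:

	#include <stdio.h>

	int main(void)
	{
		unsigned long va = 0x0040a123;		/* faulting address */
		unsigned long vpn2_mask = ~0x1fffUL;	/* 4 KiB pages: drop 13 bits */
		unsigned long asid = 0x5a;		/* current guest ASID */
		unsigned long key = (va & vpn2_mask) | asid;

		printf("lookup key = 0x%08lx\n", key);	/* 0x0040a05a */
		return 0;
	}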
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c new file mode 100644 index 000000000..832475bf2 --- /dev/null +++ b/arch/mips/kvm/entry.c | |||
@@ -0,0 +1,955 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Generation of main entry point for the guest, exception handling. | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | * | ||
11 | * Copyright (C) 2016 Imagination Technologies Ltd. | ||
12 | */ | ||
13 | |||
14 | #include <linux/kvm_host.h> | ||
15 | #include <linux/log2.h> | ||
16 | #include <asm/mmu_context.h> | ||
17 | #include <asm/msa.h> | ||
18 | #include <asm/setup.h> | ||
19 | #include <asm/tlbex.h> | ||
20 | #include <asm/uasm.h> | ||
21 | |||
22 | /* Register names */ | ||
23 | #define ZERO 0 | ||
24 | #define AT 1 | ||
25 | #define V0 2 | ||
26 | #define V1 3 | ||
27 | #define A0 4 | ||
28 | #define A1 5 | ||
29 | |||
30 | #if _MIPS_SIM == _MIPS_SIM_ABI32 | ||
31 | #define T0 8 | ||
32 | #define T1 9 | ||
33 | #define T2 10 | ||
34 | #define T3 11 | ||
35 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */ | ||
36 | |||
37 | #if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 | ||
38 | #define T0 12 | ||
39 | #define T1 13 | ||
40 | #define T2 14 | ||
41 | #define T3 15 | ||
42 | #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */ | ||
43 | |||
44 | #define S0 16 | ||
45 | #define S1 17 | ||
46 | #define T9 25 | ||
47 | #define K0 26 | ||
48 | #define K1 27 | ||
49 | #define GP 28 | ||
50 | #define SP 29 | ||
51 | #define RA 31 | ||
52 | |||
53 | /* Some CP0 registers */ | ||
54 | #define C0_PWBASE 5, 5 | ||
55 | #define C0_HWRENA 7, 0 | ||
56 | #define C0_BADVADDR 8, 0 | ||
57 | #define C0_BADINSTR 8, 1 | ||
58 | #define C0_BADINSTRP 8, 2 | ||
59 | #define C0_PGD 9, 7 | ||
60 | #define C0_ENTRYHI 10, 0 | ||
61 | #define C0_GUESTCTL1 10, 4 | ||
62 | #define C0_STATUS 12, 0 | ||
63 | #define C0_GUESTCTL0 12, 6 | ||
64 | #define C0_CAUSE 13, 0 | ||
65 | #define C0_EPC 14, 0 | ||
66 | #define C0_EBASE 15, 1 | ||
67 | #define C0_CONFIG5 16, 5 | ||
68 | #define C0_DDATA_LO 28, 3 | ||
69 | #define C0_ERROREPC 30, 0 | ||
70 | |||
71 | #define CALLFRAME_SIZ 32 | ||
72 | |||
73 | #ifdef CONFIG_64BIT | ||
74 | #define ST0_KX_IF_64 ST0_KX | ||
75 | #else | ||
76 | #define ST0_KX_IF_64 0 | ||
77 | #endif | ||
78 | |||
79 | static unsigned int scratch_vcpu[2] = { C0_DDATA_LO }; | ||
80 | static unsigned int scratch_tmp[2] = { C0_ERROREPC }; | ||
81 | |||
82 | enum label_id { | ||
83 | label_fpu_1 = 1, | ||
84 | label_msa_1, | ||
85 | label_return_to_host, | ||
86 | label_kernel_asid, | ||
87 | label_exit_common, | ||
88 | }; | ||
89 | |||
90 | UASM_L_LA(_fpu_1) | ||
91 | UASM_L_LA(_msa_1) | ||
92 | UASM_L_LA(_return_to_host) | ||
93 | UASM_L_LA(_kernel_asid) | ||
94 | UASM_L_LA(_exit_common) | ||
95 | |||
96 | static void *kvm_mips_build_enter_guest(void *addr); | ||
97 | static void *kvm_mips_build_ret_from_exit(void *addr); | ||
98 | static void *kvm_mips_build_ret_to_guest(void *addr); | ||
99 | static void *kvm_mips_build_ret_to_host(void *addr); | ||
100 | |||
101 | /* | ||
102 | * The version of this function in tlbex.c uses current_cpu_type(), but for KVM | ||
103 | * we assume symmetry. | ||
104 | */ | ||
105 | static int c0_kscratch(void) | ||
106 | { | ||
107 | switch (boot_cpu_type()) { | ||
108 | case CPU_XLP: | ||
109 | case CPU_XLR: | ||
110 | return 22; | ||
111 | default: | ||
112 | return 31; | ||
113 | } | ||
114 | } | ||
115 | |||
116 | /** | ||
117 | * kvm_mips_entry_setup() - Perform global setup for entry code. | ||
118 | * | ||
119 | * Perform global setup for entry code, such as choosing a scratch register. | ||
120 | * | ||
121 | * Returns: 0 on success. | ||
122 | * -errno on failure. | ||
123 | */ | ||
124 | int kvm_mips_entry_setup(void) | ||
125 | { | ||
126 | /* | ||
127 | * We prefer to use KScratchN registers if they are available over the | ||
128 | * defaults above, which may not work on all cores. | ||
129 | */ | ||
130 | unsigned int kscratch_mask = cpu_data[0].kscratch_mask; | ||
131 | |||
132 | if (pgd_reg != -1) | ||
133 | kscratch_mask &= ~BIT(pgd_reg); | ||
134 | |||
135 | /* Pick a scratch register for storing VCPU */ | ||
136 | if (kscratch_mask) { | ||
137 | scratch_vcpu[0] = c0_kscratch(); | ||
138 | scratch_vcpu[1] = ffs(kscratch_mask) - 1; | ||
139 | kscratch_mask &= ~BIT(scratch_vcpu[1]); | ||
140 | } | ||
141 | |||
142 | /* Pick a scratch register to use as a temp for saving state */ | ||
143 | if (kscratch_mask) { | ||
144 | scratch_tmp[0] = c0_kscratch(); | ||
145 | scratch_tmp[1] = ffs(kscratch_mask) - 1; | ||
146 | kscratch_mask &= ~BIT(scratch_tmp[1]); | ||
147 | } | ||
148 | |||
149 | return 0; | ||
150 | } | ||
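
The selection logic above takes the lowest available KScratch register for the VCPU pointer, clears that bit, then takes the next one for the temporary. A sketch of the ffs()-based picking (the mask value is illustrative):

	#include <strings.h>	/* ffs() */
	#include <stdio.h>

	int main(void)
	{
		unsigned int mask = 0x0c;	/* say KScratch2 and KScratch3 exist */
		int vcpu_sel = ffs(mask) - 1;	/* 2: lowest available register */

		mask &= ~(1u << vcpu_sel);	/* don't pick it twice */
		int tmp_sel = ffs(mask) - 1;	/* 3: next one for the temporary */

		printf("vcpu sel %d, tmp sel %d\n", vcpu_sel, tmp_sel);
		return 0;
	}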
151 | |||
152 | static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp, | ||
153 | unsigned int frame) | ||
154 | { | ||
155 | /* Save the VCPU scratch register value in cp0_epc of the stack frame */ | ||
156 | UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]); | ||
157 | UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame); | ||
158 | |||
159 | /* Save the temp scratch register value in cp0_cause of stack frame */ | ||
160 | if (scratch_tmp[0] == c0_kscratch()) { | ||
161 | UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]); | ||
162 | UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame); | ||
163 | } | ||
164 | } | ||
165 | |||
166 | static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp, | ||
167 | unsigned int frame) | ||
168 | { | ||
169 | /* | ||
170 | * Restore host scratch register values saved by | ||
171 | * kvm_mips_build_save_scratch(). | ||
172 | */ | ||
173 | UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame); | ||
174 | UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]); | ||
175 | |||
176 | if (scratch_tmp[0] == c0_kscratch()) { | ||
177 | UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame); | ||
178 | UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]); | ||
179 | } | ||
180 | } | ||
181 | |||
182 | /** | ||
183 | * build_set_exc_base() - Assemble code to write exception base address. | ||
184 | * @p: Code buffer pointer. | ||
185 | * @reg: Source register (generated code may set WG bit in @reg). | ||
186 | * | ||
187 | * Assemble code to modify the exception base address in the EBase register, | ||
188 | * using the appropriately sized access and setting the WG bit if necessary. | ||
189 | */ | ||
190 | static inline void build_set_exc_base(u32 **p, unsigned int reg) | ||
191 | { | ||
192 | if (cpu_has_ebase_wg) { | ||
193 | /* Set WG so that all the bits get written */ | ||
194 | uasm_i_ori(p, reg, reg, MIPS_EBASE_WG); | ||
195 | UASM_i_MTC0(p, reg, C0_EBASE); | ||
196 | } else { | ||
197 | uasm_i_mtc0(p, reg, C0_EBASE); | ||
198 | } | ||
199 | } | ||
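
A sketch of the WG handling above: when EBase.WG is implemented, the write-gate bit (bit 11 per the MIPS32 PRA) is OR'd into the value so that the following mtc0 can also update bits 31:30 of the exception base. The base address used here is illustrative.

	#include <stdint.h>
	#include <stdio.h>

	#define MIPS_EBASE_WG	(1u << 11)	/* EBase write gate */

	int main(void)
	{
		uint32_t ebase = 0x80100000;	/* desired exception base */
		uint32_t reg = ebase | MIPS_EBASE_WG;

		printf("value written to EBase: 0x%08x\n", reg);
		return 0;
	}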
200 | |||
201 | /** | ||
202 | * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU. | ||
203 | * @addr: Address to start writing code. | ||
204 | * | ||
205 | * Assemble the start of the vcpu_run function to run a guest VCPU. The function | ||
206 | * conforms to the following prototype: | ||
207 | * | ||
208 | * int vcpu_run(struct kvm_vcpu *vcpu); | ||
209 | * | ||
210 | * The exit from the guest and return to the caller is handled by the code | ||
211 | * generated by kvm_mips_build_ret_to_host(). | ||
212 | * | ||
213 | * Returns: Next address after end of written function. | ||
214 | */ | ||
215 | void *kvm_mips_build_vcpu_run(void *addr) | ||
216 | { | ||
217 | u32 *p = addr; | ||
218 | unsigned int i; | ||
219 | |||
220 | /* | ||
221 | * A0: vcpu | ||
222 | */ | ||
223 | |||
224 | /* k0/k1 not being used in host kernel context */ | ||
225 | UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs)); | ||
226 | for (i = 16; i < 32; ++i) { | ||
227 | if (i == 24) | ||
228 | i = 28; | ||
229 | UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1); | ||
230 | } | ||
231 | |||
232 | /* Save host status */ | ||
233 | uasm_i_mfc0(&p, V0, C0_STATUS); | ||
234 | UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1); | ||
235 | |||
236 | /* Save scratch registers, will be used to store pointer to vcpu etc */ | ||
237 | kvm_mips_build_save_scratch(&p, V1, K1); | ||
238 | |||
239 | /* VCPU scratch register has pointer to vcpu */ | ||
240 | UASM_i_MTC0(&p, A0, scratch_vcpu[0], scratch_vcpu[1]); | ||
241 | |||
242 | /* Offset into vcpu->arch */ | ||
243 | UASM_i_ADDIU(&p, K1, A0, offsetof(struct kvm_vcpu, arch)); | ||
244 | |||
245 | /* | ||
246 | * Save the host stack to VCPU, used for exception processing | ||
247 | * when we exit from the Guest | ||
248 | */ | ||
249 | UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1); | ||
250 | |||
251 | /* Save the kernel gp as well */ | ||
252 | UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1); | ||
253 | |||
254 | /* | ||
255 | * Setup status register for running the guest in UM, interrupts | ||
256 | * are disabled | ||
257 | */ | ||
258 | UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64); | ||
259 | uasm_i_mtc0(&p, K0, C0_STATUS); | ||
260 | uasm_i_ehb(&p); | ||
261 | |||
262 | /* load up the new EBASE */ | ||
263 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1); | ||
264 | build_set_exc_base(&p, K0); | ||
265 | |||
266 | /* | ||
267 | * Now that the new EBASE has been loaded, unset BEV, set | ||
268 | * interrupt mask as it was but make sure that timer interrupts | ||
269 | * are enabled | ||
270 | */ | ||
271 | uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64); | ||
272 | uasm_i_andi(&p, V0, V0, ST0_IM); | ||
273 | uasm_i_or(&p, K0, K0, V0); | ||
274 | uasm_i_mtc0(&p, K0, C0_STATUS); | ||
275 | uasm_i_ehb(&p); | ||
276 | |||
277 | p = kvm_mips_build_enter_guest(p); | ||
278 | |||
279 | return p; | ||
280 | } | ||
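
The Status value written for guest entry above is EXL plus user mode plus IE, merged with the host's interrupt-mask bits so the same interrupts stay enabled. A sketch with the standard MIPS32 Status field constants (the host value is illustrative):

	#include <stdint.h>
	#include <stdio.h>

	#define ST0_IE		0x00000001u
	#define ST0_EXL		0x00000002u
	#define KSU_USER	0x00000010u
	#define ST0_IM		0x0000ff00u

	int main(void)
	{
		uint32_t host_status = 0x1000ff01;	/* illustrative host value */
		uint32_t guest_status = (ST0_EXL | KSU_USER | ST0_IE) |
					(host_status & ST0_IM);

		printf("guest Status = 0x%08x\n", guest_status);	/* 0x0000ff13 */
		return 0;
	}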
281 | |||
282 | /** | ||
283 | * kvm_mips_build_enter_guest() - Assemble code to resume guest execution. | ||
284 | * @addr: Address to start writing code. | ||
285 | * | ||
286 | * Assemble the code to resume guest execution. This code is common between the | ||
287 | * initial entry into the guest from the host, and returning from the exit | ||
288 | * handler back to the guest. | ||
289 | * | ||
290 | * Returns: Next address after end of written function. | ||
291 | */ | ||
292 | static void *kvm_mips_build_enter_guest(void *addr) | ||
293 | { | ||
294 | u32 *p = addr; | ||
295 | unsigned int i; | ||
296 | struct uasm_label labels[2]; | ||
297 | struct uasm_reloc relocs[2]; | ||
298 | struct uasm_label __maybe_unused *l = labels; | ||
299 | struct uasm_reloc __maybe_unused *r = relocs; | ||
300 | |||
301 | memset(labels, 0, sizeof(labels)); | ||
302 | memset(relocs, 0, sizeof(relocs)); | ||
303 | |||
304 | /* Set Guest EPC */ | ||
305 | UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1); | ||
306 | UASM_i_MTC0(&p, T0, C0_EPC); | ||
307 | |||
308 | #ifdef CONFIG_KVM_MIPS_VZ | ||
309 | /* Save normal linux process pgd (VZ guarantees pgd_reg is set) */ | ||
310 | if (cpu_has_ldpte) | ||
311 | UASM_i_MFC0(&p, K0, C0_PWBASE); | ||
312 | else | ||
313 | UASM_i_MFC0(&p, K0, c0_kscratch(), pgd_reg); | ||
314 | UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_pgd), K1); | ||
315 | |||
316 | /* | ||
317 | * Set up KVM GPA pgd. | ||
318 | * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD(): | ||
319 | * - call tlbmiss_handler_setup_pgd(mm->pgd) | ||
320 | * - write mm->pgd into CP0_PWBase | ||
321 | * | ||
322 | * We keep S0 pointing at struct kvm so we can load the ASID below. | ||
323 | */ | ||
324 | UASM_i_LW(&p, S0, (int)offsetof(struct kvm_vcpu, kvm) - | ||
325 | (int)offsetof(struct kvm_vcpu, arch), K1); | ||
326 | UASM_i_LW(&p, A0, offsetof(struct kvm, arch.gpa_mm.pgd), S0); | ||
327 | UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); | ||
328 | uasm_i_jalr(&p, RA, T9); | ||
329 | /* delay slot */ | ||
330 | if (cpu_has_htw) | ||
331 | UASM_i_MTC0(&p, A0, C0_PWBASE); | ||
332 | else | ||
333 | uasm_i_nop(&p); | ||
334 | |||
335 | /* Set GM bit to setup eret to VZ guest context */ | ||
336 | uasm_i_addiu(&p, V1, ZERO, 1); | ||
337 | uasm_i_mfc0(&p, K0, C0_GUESTCTL0); | ||
338 | uasm_i_ins(&p, K0, V1, MIPS_GCTL0_GM_SHIFT, 1); | ||
339 | uasm_i_mtc0(&p, K0, C0_GUESTCTL0); | ||
340 | |||
341 | if (cpu_has_guestid) { | ||
342 | /* | ||
343 | * Set root mode GuestID, so that root TLB refill handler can | ||
344 | * use the correct GuestID in the root TLB. | ||
345 | */ | ||
346 | |||
347 | /* Get current GuestID */ | ||
348 | uasm_i_mfc0(&p, T0, C0_GUESTCTL1); | ||
349 | /* Set GuestCtl1.RID = GuestCtl1.ID */ | ||
350 | uasm_i_ext(&p, T1, T0, MIPS_GCTL1_ID_SHIFT, | ||
351 | MIPS_GCTL1_ID_WIDTH); | ||
352 | uasm_i_ins(&p, T0, T1, MIPS_GCTL1_RID_SHIFT, | ||
353 | MIPS_GCTL1_RID_WIDTH); | ||
354 | uasm_i_mtc0(&p, T0, C0_GUESTCTL1); | ||
355 | |||
356 | /* GuestID handles dealiasing so we don't need to touch ASID */ | ||
357 | goto skip_asid_restore; | ||
358 | } | ||
359 | |||
360 | /* Root ASID Dealias (RAD) */ | ||
361 | |||
362 | /* Save host ASID */ | ||
363 | UASM_i_MFC0(&p, K0, C0_ENTRYHI); | ||
364 | UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi), | ||
365 | K1); | ||
366 | |||
367 | /* Set the root ASID for the Guest */ | ||
368 | UASM_i_ADDIU(&p, T1, S0, | ||
369 | offsetof(struct kvm, arch.gpa_mm.context.asid)); | ||
370 | #else | ||
371 | /* Set the ASID for the Guest Kernel or User */ | ||
372 | UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1); | ||
373 | UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]), | ||
374 | T0); | ||
375 | uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL); | ||
376 | uasm_i_xori(&p, T0, T0, KSU_USER); | ||
377 | uasm_il_bnez(&p, &r, T0, label_kernel_asid); | ||
378 | UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, | ||
379 | guest_kernel_mm.context.asid)); | ||
380 | /* else user */ | ||
381 | UASM_i_ADDIU(&p, T1, K1, offsetof(struct kvm_vcpu_arch, | ||
382 | guest_user_mm.context.asid)); | ||
383 | uasm_l_kernel_asid(&l, p); | ||
384 | #endif | ||
385 | |||
386 | /* t1: contains the base of the ASID array, need to get the cpu id */ | ||
387 | /* smp_processor_id */ | ||
388 | uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP); | ||
389 | /* index the ASID array */ | ||
390 | uasm_i_sll(&p, T2, T2, ilog2(sizeof(long))); | ||
391 | UASM_i_ADDU(&p, T3, T1, T2); | ||
392 | UASM_i_LW(&p, K0, 0, T3); | ||
393 | #ifdef CONFIG_MIPS_ASID_BITS_VARIABLE | ||
394 | /* | ||
395 | * reuse ASID array offset | ||
396 | * cpuinfo_mips is a multiple of sizeof(long) | ||
397 | */ | ||
398 | uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/sizeof(long)); | ||
399 | uasm_i_mul(&p, T2, T2, T3); | ||
400 | |||
401 | UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask); | ||
402 | UASM_i_ADDU(&p, AT, AT, T2); | ||
403 | UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT); | ||
404 | uasm_i_and(&p, K0, K0, T2); | ||
405 | #else | ||
406 | uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID); | ||
407 | #endif | ||
408 | |||
409 | #ifndef CONFIG_KVM_MIPS_VZ | ||
410 | /* | ||
411 | * Set up KVM T&E GVA pgd. | ||
412 | * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD(): | ||
413 | * - call tlbmiss_handler_setup_pgd(mm->pgd) | ||
414 | * - but skips write into CP0_PWBase for now | ||
415 | */ | ||
416 | UASM_i_LW(&p, A0, (int)offsetof(struct mm_struct, pgd) - | ||
417 | (int)offsetof(struct mm_struct, context.asid), T1); | ||
418 | |||
419 | UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); | ||
420 | uasm_i_jalr(&p, RA, T9); | ||
421 | uasm_i_mtc0(&p, K0, C0_ENTRYHI); | ||
422 | #else | ||
423 | /* Set up KVM VZ root ASID (!guestid) */ | ||
424 | uasm_i_mtc0(&p, K0, C0_ENTRYHI); | ||
425 | skip_asid_restore: | ||
426 | #endif | ||
427 | uasm_i_ehb(&p); | ||
428 | |||
429 | /* Disable RDHWR access */ | ||
430 | uasm_i_mtc0(&p, ZERO, C0_HWRENA); | ||
431 | |||
432 | /* load the guest context from VCPU and return */ | ||
433 | for (i = 1; i < 32; ++i) { | ||
434 | /* Guest k0/k1 loaded later */ | ||
435 | if (i == K0 || i == K1) | ||
436 | continue; | ||
437 | UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1); | ||
438 | } | ||
439 | |||
440 | #ifndef CONFIG_CPU_MIPSR6 | ||
441 | /* Restore hi/lo */ | ||
442 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1); | ||
443 | uasm_i_mthi(&p, K0); | ||
444 | |||
445 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1); | ||
446 | uasm_i_mtlo(&p, K0); | ||
447 | #endif | ||
448 | |||
449 | /* Restore the guest's k0/k1 registers */ | ||
450 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1); | ||
451 | UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1); | ||
452 | |||
453 | /* Jump to guest */ | ||
454 | uasm_i_eret(&p); | ||
455 | |||
456 | uasm_resolve_relocs(relocs, labels); | ||
457 | |||
458 | return p; | ||
459 | } | ||
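
The ASID indexing above shifts the CPU number by log2(sizeof(long)) to index an array of unsigned longs, then masks down to the ASID field. A sketch assuming a fixed 8-bit ASID (the CONFIG_MIPS_ASID_BITS_VARIABLE path loads a per-CPU mask instead):

	#include <stdio.h>

	int main(void)
	{
		/* one entry per CPU, as in mm_context_t's asid[] */
		unsigned long asid_array[4] = { 0x100, 0x1a5, 0x203, 0x2ff };
		int cpu = 1;

		/* same arithmetic as the sll/addu/lw sequence above */
		unsigned long byte_off = (unsigned long)cpu * sizeof(long);
		unsigned long asid = *(unsigned long *)
				     ((char *)asid_array + byte_off) & 0xff;

		printf("cpu %d ASID = 0x%02lx\n", cpu, asid);	/* 0xa5 */
		return 0;
	}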
460 | |||
461 | /** | ||
462 | * kvm_mips_build_tlb_refill_exception() - Assemble TLB refill handler. | ||
463 | * @addr: Address to start writing code. | ||
464 | * @handler: Address of common handler (within range of @addr). | ||
465 | * | ||
466 | * Assemble TLB refill exception fast path handler for guest execution. | ||
467 | * | ||
468 | * Returns: Next address after end of written function. | ||
469 | */ | ||
470 | void *kvm_mips_build_tlb_refill_exception(void *addr, void *handler) | ||
471 | { | ||
472 | u32 *p = addr; | ||
473 | struct uasm_label labels[2]; | ||
474 | struct uasm_reloc relocs[2]; | ||
475 | #ifndef CONFIG_CPU_LOONGSON64 | ||
476 | struct uasm_label *l = labels; | ||
477 | struct uasm_reloc *r = relocs; | ||
478 | #endif | ||
479 | |||
480 | memset(labels, 0, sizeof(labels)); | ||
481 | memset(relocs, 0, sizeof(relocs)); | ||
482 | |||
483 | /* Save guest k1 into scratch register */ | ||
484 | UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]); | ||
485 | |||
486 | /* Get the VCPU pointer from the VCPU scratch register */ | ||
487 | UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]); | ||
488 | |||
489 | /* Save guest k0 into VCPU structure */ | ||
490 | UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1); | ||
491 | |||
492 | /* | ||
493 | * Some of the common tlbex code uses current_cpu_type(). For KVM we | ||
494 | * assume symmetry and just disable preemption to silence the warning. | ||
495 | */ | ||
496 | preempt_disable(); | ||
497 | |||
498 | #ifdef CONFIG_CPU_LOONGSON64 | ||
499 | UASM_i_MFC0(&p, K1, C0_PGD); | ||
500 | uasm_i_lddir(&p, K0, K1, 3); /* global page dir */ | ||
501 | #ifndef __PAGETABLE_PMD_FOLDED | ||
502 | uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */ | ||
503 | #endif | ||
504 | uasm_i_ldpte(&p, K1, 0); /* even */ | ||
505 | uasm_i_ldpte(&p, K1, 1); /* odd */ | ||
506 | uasm_i_tlbwr(&p); | ||
507 | #else | ||
508 | /* | ||
509 | * Now for the actual refill bit. A lot of this can be common with the | ||
510 | * Linux TLB refill handler, however we don't need to handle so many | ||
511 | * cases. We only need to handle user mode refills, and user mode runs | ||
512 | * with 32-bit addressing. | ||
513 | * | ||
514 | * Therefore the branch to label_vmalloc generated by build_get_pmde64() | ||
515 | * that isn't resolved should never actually get taken and is harmless | ||
516 | * to leave in place for now. | ||
517 | */ | ||
518 | |||
519 | #ifdef CONFIG_64BIT | ||
520 | build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */ | ||
521 | #else | ||
522 | build_get_pgde32(&p, K0, K1); /* get pgd in K1 */ | ||
523 | #endif | ||
524 | |||
525 | /* we don't support huge pages yet */ | ||
526 | |||
527 | build_get_ptep(&p, K0, K1); | ||
528 | build_update_entries(&p, K0, K1); | ||
529 | build_tlb_write_entry(&p, &l, &r, tlb_random); | ||
530 | #endif | ||
531 | |||
532 | preempt_enable(); | ||
533 | |||
534 | /* Get the VCPU pointer from the VCPU scratch register again */ | ||
535 | UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]); | ||
536 | |||
537 | /* Restore the guest's k0/k1 registers */ | ||
538 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu, arch.gprs[K0]), K1); | ||
539 | uasm_i_ehb(&p); | ||
540 | UASM_i_MFC0(&p, K1, scratch_tmp[0], scratch_tmp[1]); | ||
541 | |||
542 | /* Jump to guest */ | ||
543 | uasm_i_eret(&p); | ||
544 | |||
545 | return p; | ||
546 | } | ||
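
On the non-Loongson path the refill handler walks the page tables much as the generic Linux refill does. A userspace sketch of the 32-bit two-level walk it encodes, assuming 4 KiB pages with 10-bit PGD and PTE indices (the refill writes the even/odd PTE pair):

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t va = 0x0040a000;		/* faulting user address */
		uint32_t pgd_idx = va >> 22;		/* top 10 bits: PGD entry */
		uint32_t pte_idx = (va >> 12) & 0x3ff;	/* next 10 bits: PTE entry */

		/* the refill loads both halves of the even/odd page pair */
		printf("pgd[%u], pte pair %u/%u\n",
		       pgd_idx, pte_idx & ~1u, (pte_idx & ~1u) + 1);
		return 0;
	}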
547 | |||
548 | /** | ||
549 | * kvm_mips_build_exception() - Assemble first level guest exception handler. | ||
550 | * @addr: Address to start writing code. | ||
551 | * @handler: Address of common handler (within range of @addr). | ||
552 | * | ||
553 | * Assemble exception vector code for guest execution. The generated vector will | ||
554 | * branch to the common exception handler generated by kvm_mips_build_exit(). | ||
555 | * | ||
556 | * Returns: Next address after end of written function. | ||
557 | */ | ||
558 | void *kvm_mips_build_exception(void *addr, void *handler) | ||
559 | { | ||
560 | u32 *p = addr; | ||
561 | struct uasm_label labels[2]; | ||
562 | struct uasm_reloc relocs[2]; | ||
563 | struct uasm_label *l = labels; | ||
564 | struct uasm_reloc *r = relocs; | ||
565 | |||
566 | memset(labels, 0, sizeof(labels)); | ||
567 | memset(relocs, 0, sizeof(relocs)); | ||
568 | |||
569 | /* Save guest k1 into scratch register */ | ||
570 | UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]); | ||
571 | |||
572 | /* Get the VCPU pointer from the VCPU scratch register */ | ||
573 | UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]); | ||
574 | UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch)); | ||
575 | |||
576 | /* Save guest k0 into VCPU structure */ | ||
577 | UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1); | ||
578 | |||
579 | /* Branch to the common handler */ | ||
580 | uasm_il_b(&p, &r, label_exit_common); | ||
581 | uasm_i_nop(&p); | ||
582 | |||
583 | uasm_l_exit_common(&l, handler); | ||
584 | uasm_resolve_relocs(relocs, labels); | ||
585 | |||
586 | return p; | ||
587 | } | ||
588 | |||
589 | /** | ||
590 | * kvm_mips_build_exit() - Assemble common guest exit handler. | ||
591 | * @addr: Address to start writing code. | ||
592 | * | ||
593 | * Assemble the generic guest exit handling code. This is called by the | ||
594 | * exception vectors (generated by kvm_mips_build_exception()), and calls | ||
595 | * kvm_mips_handle_exit(), then either resumes the guest or returns to the host | ||
596 | * depending on the return value. | ||
597 | * | ||
598 | * Returns: Next address after end of written function. | ||
599 | */ | ||
600 | void *kvm_mips_build_exit(void *addr) | ||
601 | { | ||
602 | u32 *p = addr; | ||
603 | unsigned int i; | ||
604 | struct uasm_label labels[3]; | ||
605 | struct uasm_reloc relocs[3]; | ||
606 | struct uasm_label *l = labels; | ||
607 | struct uasm_reloc *r = relocs; | ||
608 | |||
609 | memset(labels, 0, sizeof(labels)); | ||
610 | memset(relocs, 0, sizeof(relocs)); | ||
611 | |||
612 | /* | ||
613 | * Generic Guest exception handler. We end up here when the guest | ||
614 | * does something that causes a trap to kernel mode. | ||
615 | * | ||
616 | * Both k0/k1 registers will have already been saved (k0 into the vcpu | ||
617 | * structure, and k1 into the scratch_tmp register). | ||
618 | * | ||
619 | * The k1 register will already contain the kvm_vcpu_arch pointer. | ||
620 | */ | ||
621 | |||
622 | /* Start saving Guest context to VCPU */ | ||
623 | for (i = 0; i < 32; ++i) { | ||
624 | /* Guest k0/k1 saved later */ | ||
625 | if (i == K0 || i == K1) | ||
626 | continue; | ||
627 | UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1); | ||
628 | } | ||
629 | |||
630 | #ifndef CONFIG_CPU_MIPSR6 | ||
631 | /* We need to save hi/lo and restore them on the way out */ | ||
632 | uasm_i_mfhi(&p, T0); | ||
633 | UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1); | ||
634 | |||
635 | uasm_i_mflo(&p, T0); | ||
636 | UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1); | ||
637 | #endif | ||
638 | |||
639 | /* Finally save guest k1 to VCPU */ | ||
640 | uasm_i_ehb(&p); | ||
641 | UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]); | ||
642 | UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1); | ||
643 | |||
644 | /* Now that context has been saved, we can use other registers */ | ||
645 | |||
646 | /* Restore vcpu */ | ||
647 | UASM_i_MFC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]); | ||
648 | |||
649 | /* | ||
650 | * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process | ||
651 | * the exception | ||
652 | */ | ||
653 | UASM_i_MFC0(&p, K0, C0_EPC); | ||
654 | UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1); | ||
655 | |||
656 | UASM_i_MFC0(&p, K0, C0_BADVADDR); | ||
657 | UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr), | ||
658 | K1); | ||
659 | |||
660 | uasm_i_mfc0(&p, K0, C0_CAUSE); | ||
661 | uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1); | ||
662 | |||
663 | if (cpu_has_badinstr) { | ||
664 | uasm_i_mfc0(&p, K0, C0_BADINSTR); | ||
665 | uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, | ||
666 | host_cp0_badinstr), K1); | ||
667 | } | ||
668 | |||
669 | if (cpu_has_badinstrp) { | ||
670 | uasm_i_mfc0(&p, K0, C0_BADINSTRP); | ||
671 | uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, | ||
672 | host_cp0_badinstrp), K1); | ||
673 | } | ||
674 | |||
675 | /* Now restore the host state just enough to run the handlers */ | ||
676 | |||
677 | /* Switch EBASE to the one used by Linux */ | ||
678 | /* load up the host EBASE */ | ||
679 | uasm_i_mfc0(&p, V0, C0_STATUS); | ||
680 | |||
681 | uasm_i_lui(&p, AT, ST0_BEV >> 16); | ||
682 | uasm_i_or(&p, K0, V0, AT); | ||
683 | |||
684 | uasm_i_mtc0(&p, K0, C0_STATUS); | ||
685 | uasm_i_ehb(&p); | ||
686 | |||
687 | UASM_i_LA_mostly(&p, K0, (long)&ebase); | ||
688 | UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0); | ||
689 | build_set_exc_base(&p, K0); | ||
690 | |||
691 | if (raw_cpu_has_fpu) { | ||
692 | /* | ||
693 | * If FPU is enabled, save FCR31 and clear it so that later | ||
694 | * ctc1's don't trigger FPE for pending exceptions. | ||
695 | */ | ||
696 | uasm_i_lui(&p, AT, ST0_CU1 >> 16); | ||
697 | uasm_i_and(&p, V1, V0, AT); | ||
698 | uasm_il_beqz(&p, &r, V1, label_fpu_1); | ||
699 | uasm_i_nop(&p); | ||
700 | uasm_i_cfc1(&p, T0, 31); | ||
701 | uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31), | ||
702 | K1); | ||
703 | uasm_i_ctc1(&p, ZERO, 31); | ||
704 | uasm_l_fpu_1(&l, p); | ||
705 | } | ||
706 | |||
707 | if (cpu_has_msa) { | ||
708 | /* | ||
709 | * If MSA is enabled, save MSACSR and clear it so that later | ||
710 | * instructions don't trigger MSAFPE for pending exceptions. | ||
711 | */ | ||
712 | uasm_i_mfc0(&p, T0, C0_CONFIG5); | ||
713 | uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */ | ||
714 | uasm_il_beqz(&p, &r, T0, label_msa_1); | ||
715 | uasm_i_nop(&p); | ||
716 | uasm_i_cfcmsa(&p, T0, MSA_CSR); | ||
717 | uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr), | ||
718 | K1); | ||
719 | uasm_i_ctcmsa(&p, MSA_CSR, ZERO); | ||
720 | uasm_l_msa_1(&l, p); | ||
721 | } | ||
722 | |||
723 | #ifdef CONFIG_KVM_MIPS_VZ | ||
724 | /* Restore host ASID */ | ||
725 | if (!cpu_has_guestid) { | ||
726 | UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, host_entryhi), | ||
727 | K1); | ||
728 | UASM_i_MTC0(&p, K0, C0_ENTRYHI); | ||
729 | } | ||
730 | |||
731 | /* | ||
732 | * Set up normal Linux process pgd. | ||
733 | * This does roughly the same as TLBMISS_HANDLER_SETUP_PGD(): | ||
734 | * - call tlbmiss_handler_setup_pgd(mm->pgd) | ||
735 | * - write mm->pgd into CP0_PWBase | ||
736 | */ | ||
737 | UASM_i_LW(&p, A0, | ||
738 | offsetof(struct kvm_vcpu_arch, host_pgd), K1); | ||
739 | UASM_i_LA(&p, T9, (unsigned long)tlbmiss_handler_setup_pgd); | ||
740 | uasm_i_jalr(&p, RA, T9); | ||
741 | /* delay slot */ | ||
742 | if (cpu_has_htw) | ||
743 | UASM_i_MTC0(&p, A0, C0_PWBASE); | ||
744 | else | ||
745 | uasm_i_nop(&p); | ||
746 | |||
747 | /* Clear GM bit so we don't enter guest mode when EXL is cleared */ | ||
748 | uasm_i_mfc0(&p, K0, C0_GUESTCTL0); | ||
749 | uasm_i_ins(&p, K0, ZERO, MIPS_GCTL0_GM_SHIFT, 1); | ||
750 | uasm_i_mtc0(&p, K0, C0_GUESTCTL0); | ||
751 | |||
752 | /* Save GuestCtl0 so we can access GExcCode after CPU migration */ | ||
753 | uasm_i_sw(&p, K0, | ||
754 | offsetof(struct kvm_vcpu_arch, host_cp0_guestctl0), K1); | ||
755 | |||
756 | if (cpu_has_guestid) { | ||
757 | /* | ||
758 | * Clear root mode GuestID, so that root TLB operations use the | ||
759 | * root GuestID in the root TLB. | ||
760 | */ | ||
761 | uasm_i_mfc0(&p, T0, C0_GUESTCTL1); | ||
762 | /* Set GuestCtl1.RID = MIPS_GCTL1_ROOT_GUESTID (i.e. 0) */ | ||
763 | uasm_i_ins(&p, T0, ZERO, MIPS_GCTL1_RID_SHIFT, | ||
764 | MIPS_GCTL1_RID_WIDTH); | ||
765 | uasm_i_mtc0(&p, T0, C0_GUESTCTL1); | ||
766 | } | ||
767 | #endif | ||
768 | |||
769 | /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */ | ||
770 | uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE)); | ||
771 | uasm_i_and(&p, V0, V0, AT); | ||
772 | uasm_i_lui(&p, AT, ST0_CU0 >> 16); | ||
773 | uasm_i_or(&p, V0, V0, AT); | ||
774 | #ifdef CONFIG_64BIT | ||
775 | uasm_i_ori(&p, V0, V0, ST0_SX | ST0_UX); | ||
776 | #endif | ||
777 | uasm_i_mtc0(&p, V0, C0_STATUS); | ||
778 | uasm_i_ehb(&p); | ||
779 | |||
780 | /* Load up host GP */ | ||
781 | UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1); | ||
782 | |||
783 | /* Need a stack before we can jump to "C" */ | ||
784 | UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1); | ||
785 | |||
786 | /* Saved host state */ | ||
787 | UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs)); | ||
788 | |||
789 | /* | ||
790 | * XXXKYMA do we need to load the host ASID, maybe not because the | ||
791 | * kernel entries are marked GLOBAL, need to verify | ||
792 | */ | ||
793 | |||
794 | /* Restore host scratch registers, as we'll have clobbered them */ | ||
795 | kvm_mips_build_restore_scratch(&p, K0, SP); | ||
796 | |||
797 | /* Restore RDHWR access */ | ||
798 | UASM_i_LA_mostly(&p, K0, (long)&hwrena); | ||
799 | uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0); | ||
800 | uasm_i_mtc0(&p, K0, C0_HWRENA); | ||
801 | |||
802 | /* Jump to handler */ | ||
803 | /* | ||
804 | * XXXKYMA: not sure if this is safe, how large is the stack?? | ||
805 | * Now jump to the kvm_mips_handle_exit() to see if we can deal | ||
806 | * with this in the kernel | ||
807 | */ | ||
808 | uasm_i_move(&p, A0, S0); | ||
809 | UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit); | ||
810 | uasm_i_jalr(&p, RA, T9); | ||
811 | UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ); | ||
812 | |||
813 | uasm_resolve_relocs(relocs, labels); | ||
814 | |||
815 | p = kvm_mips_build_ret_from_exit(p); | ||
816 | |||
817 | return p; | ||
818 | } | ||
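
Before calling into C, the exit path above rewrites Status: clear EXL, user mode and IE, and set CU0 so kernel coprocessor 0 access works (64-bit kernels also set SX/UX). A sketch with standard MIPS32 Status constants; the starting value is the illustrative guest-mode one from earlier.

	#include <stdint.h>
	#include <stdio.h>

	#define ST0_IE		0x00000001u
	#define ST0_EXL		0x00000002u
	#define KSU_USER	0x00000010u
	#define ST0_CU0		0x10000000u

	int main(void)
	{
		uint32_t status = 0x0000ff13;	/* Status as set up for the guest */

		status &= ~(ST0_EXL | KSU_USER | ST0_IE);
		status |= ST0_CU0;
		printf("handler Status = 0x%08x\n", status);	/* 0x1000ff00 */
		return 0;
	}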
819 | |||
820 | /** | ||
821 | * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler. | ||
822 | * @addr: Address to start writing code. | ||
823 | * | ||
824 | * Assemble the code to handle the return from kvm_mips_handle_exit(), either | ||
825 | * resuming the guest or returning to the host depending on the return value. | ||
826 | * | ||
827 | * Returns: Next address after end of written function. | ||
828 | */ | ||
829 | static void *kvm_mips_build_ret_from_exit(void *addr) | ||
830 | { | ||
831 | u32 *p = addr; | ||
832 | struct uasm_label labels[2]; | ||
833 | struct uasm_reloc relocs[2]; | ||
834 | struct uasm_label *l = labels; | ||
835 | struct uasm_reloc *r = relocs; | ||
836 | |||
837 | memset(labels, 0, sizeof(labels)); | ||
838 | memset(relocs, 0, sizeof(relocs)); | ||
839 | |||
840 | /* Return from handler; make sure interrupts are disabled */ | ||
841 | uasm_i_di(&p, ZERO); | ||
842 | uasm_i_ehb(&p); | ||
843 | |||
844 | /* | ||
845 | * XXXKYMA: k0/k1 could have been blown away if we processed | ||
846 | * an exception while we were handling the exception from the | ||
847 | * guest; reload k1. | ||
848 | */ | ||
849 | |||
850 | uasm_i_move(&p, K1, S0); | ||
851 | UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch)); | ||
852 | |||
853 | /* | ||
854 | * Check return value, should tell us if we are returning to the | ||
855 | * host (handle I/O etc.) or resuming the guest | ||
856 | */ | ||
857 | uasm_i_andi(&p, T0, V0, RESUME_HOST); | ||
858 | uasm_il_bnez(&p, &r, T0, label_return_to_host); | ||
859 | uasm_i_nop(&p); | ||
860 | |||
861 | p = kvm_mips_build_ret_to_guest(p); | ||
862 | |||
863 | uasm_l_return_to_host(&l, p); | ||
864 | p = kvm_mips_build_ret_to_host(p); | ||
865 | |||
866 | uasm_resolve_relocs(relocs, labels); | ||
867 | |||
868 | return p; | ||
869 | } | ||
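
The dispatch above tests one bit of the exit handler's return value. A sketch assuming RESUME_FLAG_HOST is bit 1, matching the andi/bnez pair in the generated code (RESUME_GUEST leaves it clear):

	#include <stdio.h>

	#define RESUME_FLAG_HOST	(1 << 1)	/* assumed bit position */

	static const char *dispatch(int ret)
	{
		return (ret & RESUME_FLAG_HOST) ? "return to host" : "resume guest";
	}

	int main(void)
	{
		printf("%s\n", dispatch(0));			/* resume guest */
		printf("%s\n", dispatch(RESUME_FLAG_HOST));	/* return to host */
		return 0;
	}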
870 | |||
871 | /** | ||
872 | * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest. | ||
873 | * @addr: Address to start writing code. | ||
874 | * | ||
875 | * Assemble the code to handle return from the guest exit handler | ||
876 | * (kvm_mips_handle_exit()) back to the guest. | ||
877 | * | ||
878 | * Returns: Next address after end of written function. | ||
879 | */ | ||
880 | static void *kvm_mips_build_ret_to_guest(void *addr) | ||
881 | { | ||
882 | u32 *p = addr; | ||
883 | |||
884 | /* Put the saved pointer to vcpu (s0) back into the scratch register */ | ||
885 | UASM_i_MTC0(&p, S0, scratch_vcpu[0], scratch_vcpu[1]); | ||
886 | |||
887 | /* Load up the Guest EBASE to minimize the window where BEV is set */ | ||
888 | UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1); | ||
889 | |||
890 | /* Switch EBASE back to the one used by KVM */ | ||
891 | uasm_i_mfc0(&p, V1, C0_STATUS); | ||
892 | uasm_i_lui(&p, AT, ST0_BEV >> 16); | ||
893 | uasm_i_or(&p, K0, V1, AT); | ||
894 | uasm_i_mtc0(&p, K0, C0_STATUS); | ||
895 | uasm_i_ehb(&p); | ||
896 | build_set_exc_base(&p, T0); | ||
897 | |||
898 | /* Setup status register for running guest in UM */ | ||
899 | uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE); | ||
900 | UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX | ST0_SX | ST0_UX)); | ||
901 | uasm_i_and(&p, V1, V1, AT); | ||
902 | uasm_i_mtc0(&p, V1, C0_STATUS); | ||
903 | uasm_i_ehb(&p); | ||
904 | |||
905 | p = kvm_mips_build_enter_guest(p); | ||
906 | |||
907 | return p; | ||
908 | } | ||
909 | |||
910 | /** | ||
911 | * kvm_mips_build_ret_to_host() - Assemble code to return to the host. | ||
912 | * @addr: Address to start writing code. | ||
913 | * | ||
914 | * Assemble the code to handle return from the guest exit handler | ||
915 | * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run | ||
916 | * function generated by kvm_mips_build_vcpu_run(). | ||
917 | * | ||
918 | * Returns: Next address after end of written function. | ||
919 | */ | ||
920 | static void *kvm_mips_build_ret_to_host(void *addr) | ||
921 | { | ||
922 | u32 *p = addr; | ||
923 | unsigned int i; | ||
924 | |||
925 | /* EBASE is already pointing to Linux */ | ||
926 | UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1); | ||
927 | UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs)); | ||
928 | |||
929 | /* | ||
930 | * r2/v0 is the return code, shift it down by 2 (arithmetic) | ||
931 | * to recover the err code | ||
932 | */ | ||
933 | uasm_i_sra(&p, K0, V0, 2); | ||
934 | uasm_i_move(&p, V0, K0); | ||
935 | |||
936 | /* Load context saved on the host stack */ | ||
937 | for (i = 16; i < 31; ++i) { | ||
938 | if (i == 24) | ||
939 | i = 28; | ||
940 | UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1); | ||
941 | } | ||
942 | |||
943 | /* Restore RDHWR access */ | ||
944 | UASM_i_LA_mostly(&p, K0, (long)&hwrena); | ||
945 | uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0); | ||
946 | uasm_i_mtc0(&p, K0, C0_HWRENA); | ||
947 | |||
948 | /* Restore RA, which is the address we will return to */ | ||
949 | UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1); | ||
950 | uasm_i_jr(&p, RA); | ||
951 | uasm_i_nop(&p); | ||
952 | |||
953 | return p; | ||
954 | } | ||
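
The sra above recovers the error code that exit handlers pack into the upper bits of the return value, two flag bits below it. A sketch of the recovery; like the generated sra, it relies on arithmetic right shift of a negative value (implementation-defined in C, but universal in practice):

	#include <stdio.h>

	int main(void)
	{
		/* err = -14 (-EFAULT) packed above two flag bits */
		int packed = (-14 * 4) | 0x2;
		int err = packed >> 2;	/* arithmetic shift, like the sra */

		printf("recovered err = %d\n", err);	/* -14 */
		return 0;
	}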
955 | |||
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S new file mode 100644 index 000000000..16f17c639 --- /dev/null +++ b/arch/mips/kvm/fpu.S | |||
@@ -0,0 +1,125 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * FPU context handling code for KVM. | ||
7 | * | ||
8 | * Copyright (C) 2015 Imagination Technologies Ltd. | ||
9 | */ | ||
10 | |||
11 | #include <asm/asm.h> | ||
12 | #include <asm/asm-offsets.h> | ||
13 | #include <asm/fpregdef.h> | ||
14 | #include <asm/mipsregs.h> | ||
15 | #include <asm/regdef.h> | ||
16 | |||
17 | /* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ | ||
18 | #undef fp | ||
19 | |||
20 | .set noreorder | ||
21 | .set noat | ||
22 | |||
23 | LEAF(__kvm_save_fpu) | ||
24 | .set push | ||
25 | SET_HARDFLOAT | ||
26 | .set fp=64 | ||
27 | mfc0 t0, CP0_STATUS | ||
28 | sll t0, t0, 5 # is Status.FR set? | ||
29 | bgez t0, 1f # no: skip odd doubles | ||
30 | nop | ||
31 | sdc1 $f1, VCPU_FPR1(a0) | ||
32 | sdc1 $f3, VCPU_FPR3(a0) | ||
33 | sdc1 $f5, VCPU_FPR5(a0) | ||
34 | sdc1 $f7, VCPU_FPR7(a0) | ||
35 | sdc1 $f9, VCPU_FPR9(a0) | ||
36 | sdc1 $f11, VCPU_FPR11(a0) | ||
37 | sdc1 $f13, VCPU_FPR13(a0) | ||
38 | sdc1 $f15, VCPU_FPR15(a0) | ||
39 | sdc1 $f17, VCPU_FPR17(a0) | ||
40 | sdc1 $f19, VCPU_FPR19(a0) | ||
41 | sdc1 $f21, VCPU_FPR21(a0) | ||
42 | sdc1 $f23, VCPU_FPR23(a0) | ||
43 | sdc1 $f25, VCPU_FPR25(a0) | ||
44 | sdc1 $f27, VCPU_FPR27(a0) | ||
45 | sdc1 $f29, VCPU_FPR29(a0) | ||
46 | sdc1 $f31, VCPU_FPR31(a0) | ||
47 | 1: sdc1 $f0, VCPU_FPR0(a0) | ||
48 | sdc1 $f2, VCPU_FPR2(a0) | ||
49 | sdc1 $f4, VCPU_FPR4(a0) | ||
50 | sdc1 $f6, VCPU_FPR6(a0) | ||
51 | sdc1 $f8, VCPU_FPR8(a0) | ||
52 | sdc1 $f10, VCPU_FPR10(a0) | ||
53 | sdc1 $f12, VCPU_FPR12(a0) | ||
54 | sdc1 $f14, VCPU_FPR14(a0) | ||
55 | sdc1 $f16, VCPU_FPR16(a0) | ||
56 | sdc1 $f18, VCPU_FPR18(a0) | ||
57 | sdc1 $f20, VCPU_FPR20(a0) | ||
58 | sdc1 $f22, VCPU_FPR22(a0) | ||
59 | sdc1 $f24, VCPU_FPR24(a0) | ||
60 | sdc1 $f26, VCPU_FPR26(a0) | ||
61 | sdc1 $f28, VCPU_FPR28(a0) | ||
62 | jr ra | ||
63 | sdc1 $f30, VCPU_FPR30(a0) | ||
64 | .set pop | ||
65 | END(__kvm_save_fpu) | ||
66 | |||
67 | LEAF(__kvm_restore_fpu) | ||
68 | .set push | ||
69 | SET_HARDFLOAT | ||
70 | .set fp=64 | ||
71 | mfc0 t0, CP0_STATUS | ||
72 | sll t0, t0, 5 # is Status.FR set? | ||
73 | bgez t0, 1f # no: skip odd doubles | ||
74 | nop | ||
75 | ldc1 $f1, VCPU_FPR1(a0) | ||
76 | ldc1 $f3, VCPU_FPR3(a0) | ||
77 | ldc1 $f5, VCPU_FPR5(a0) | ||
78 | ldc1 $f7, VCPU_FPR7(a0) | ||
79 | ldc1 $f9, VCPU_FPR9(a0) | ||
80 | ldc1 $f11, VCPU_FPR11(a0) | ||
81 | ldc1 $f13, VCPU_FPR13(a0) | ||
82 | ldc1 $f15, VCPU_FPR15(a0) | ||
83 | ldc1 $f17, VCPU_FPR17(a0) | ||
84 | ldc1 $f19, VCPU_FPR19(a0) | ||
85 | ldc1 $f21, VCPU_FPR21(a0) | ||
86 | ldc1 $f23, VCPU_FPR23(a0) | ||
87 | ldc1 $f25, VCPU_FPR25(a0) | ||
88 | ldc1 $f27, VCPU_FPR27(a0) | ||
89 | ldc1 $f29, VCPU_FPR29(a0) | ||
90 | ldc1 $f31, VCPU_FPR31(a0) | ||
91 | 1: ldc1 $f0, VCPU_FPR0(a0) | ||
92 | ldc1 $f2, VCPU_FPR2(a0) | ||
93 | ldc1 $f4, VCPU_FPR4(a0) | ||
94 | ldc1 $f6, VCPU_FPR6(a0) | ||
95 | ldc1 $f8, VCPU_FPR8(a0) | ||
96 | ldc1 $f10, VCPU_FPR10(a0) | ||
97 | ldc1 $f12, VCPU_FPR12(a0) | ||
98 | ldc1 $f14, VCPU_FPR14(a0) | ||
99 | ldc1 $f16, VCPU_FPR16(a0) | ||
100 | ldc1 $f18, VCPU_FPR18(a0) | ||
101 | ldc1 $f20, VCPU_FPR20(a0) | ||
102 | ldc1 $f22, VCPU_FPR22(a0) | ||
103 | ldc1 $f24, VCPU_FPR24(a0) | ||
104 | ldc1 $f26, VCPU_FPR26(a0) | ||
105 | ldc1 $f28, VCPU_FPR28(a0) | ||
106 | jr ra | ||
107 | ldc1 $f30, VCPU_FPR30(a0) | ||
108 | .set pop | ||
109 | END(__kvm_restore_fpu) | ||
110 | |||
111 | LEAF(__kvm_restore_fcsr) | ||
112 | .set push | ||
113 | SET_HARDFLOAT | ||
114 | lw t0, VCPU_FCR31(a0) | ||
115 | /* | ||
116 | * The ctc1 must stay at this offset in __kvm_restore_fcsr. | ||
117 | * See kvm_mips_csr_die_notify() which handles t0 containing a value | ||
118 | * which triggers an FP Exception, which must be stepped over and | ||
119 | * ignored since the set cause bits must remain there for the guest. | ||
120 | */ | ||
121 | ctc1 t0, fcr31 | ||
122 | jr ra | ||
123 | nop | ||
124 | .set pop | ||
125 | END(__kvm_restore_fcsr) | ||
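The "sll t0, t0, 5" test in both save and restore routines above moves Status.FR (bit 26) into the sign bit so bgez can branch on it: odd-numbered doubles only exist as independent 64-bit registers when FR=1. A hedged C restatement of that predicate:

#include <stdbool.h>
#include <stdint.h>

#define ST0_FR	(1u << 26)	/* Status.FR: 64-bit FPU register model */

/*
 * Sketch only: shifting Status left by 31 - 26 = 5 puts FR in bit 31,
 * so a "bgez" (sign bit clear) branch means FR=0 and the odd-numbered
 * doubles must be skipped.
 */
static inline bool fpu_has_odd_doubles(uint32_t c0_status)
{
	return (int32_t)(c0_status << 5) < 0;
}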
diff --git a/arch/mips/kvm/hypcall.c b/arch/mips/kvm/hypcall.c new file mode 100644 index 000000000..830634351 --- /dev/null +++ b/arch/mips/kvm/hypcall.c | |||
@@ -0,0 +1,53 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: Hypercall handling. | ||
7 | * | ||
8 | * Copyright (C) 2015 Imagination Technologies Ltd. | ||
9 | */ | ||
10 | |||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/kvm_host.h> | ||
13 | #include <linux/kvm_para.h> | ||
14 | |||
15 | #define MAX_HYPCALL_ARGS 4 | ||
16 | |||
17 | enum emulation_result kvm_mips_emul_hypcall(struct kvm_vcpu *vcpu, | ||
18 | union mips_instruction inst) | ||
19 | { | ||
20 | unsigned int code = (inst.co_format.code >> 5) & 0x3ff; | ||
21 | |||
22 | kvm_debug("[%#lx] HYPCALL %#03x\n", vcpu->arch.pc, code); | ||
23 | |||
24 | switch (code) { | ||
25 | case 0: | ||
26 | return EMULATE_HYPERCALL; | ||
27 | default: | ||
28 | return EMULATE_FAIL; | ||
29 | } | ||
30 | } | ||
31 | |||
32 | static int kvm_mips_hypercall(struct kvm_vcpu *vcpu, unsigned long num, | ||
33 | const unsigned long *args, unsigned long *hret) | ||
34 | { | ||
35 | /* Report unimplemented hypercall to guest */ | ||
36 | *hret = -KVM_ENOSYS; | ||
37 | return RESUME_GUEST; | ||
38 | } | ||
39 | |||
40 | int kvm_mips_handle_hypcall(struct kvm_vcpu *vcpu) | ||
41 | { | ||
42 | unsigned long num, args[MAX_HYPCALL_ARGS]; | ||
43 | |||
44 | /* read hypcall number and arguments */ | ||
45 | num = vcpu->arch.gprs[2]; /* v0 */ | ||
46 | args[0] = vcpu->arch.gprs[4]; /* a0 */ | ||
47 | args[1] = vcpu->arch.gprs[5]; /* a1 */ | ||
48 | args[2] = vcpu->arch.gprs[6]; /* a2 */ | ||
49 | args[3] = vcpu->arch.gprs[7]; /* a3 */ | ||
50 | |||
51 | return kvm_mips_hypercall(vcpu, num, | ||
52 | args, &vcpu->arch.gprs[2] /* v0 */); | ||
53 | } | ||
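For context, a guest reaches kvm_mips_handle_hypcall() by executing the HYPCALL instruction with the call number in v0 and up to four arguments in a0-a3; the result is written back into v0. A guest-side sketch, assuming the standard HYPCALL encoding (.word 0x42000028, code field 0), since only code 0 is accepted above:

/*
 * Hedged sketch of the guest half of the ABI read by
 * kvm_mips_handle_hypcall(): number in v0, args in a0-a3, result in v0.
 */
static inline unsigned long guest_hypcall1(unsigned long num,
					   unsigned long arg0)
{
	register unsigned long v0 asm("$2") = num;	/* v0 */
	register unsigned long a0 asm("$4") = arg0;	/* a0 */

	asm volatile(".word 0x42000028"	/* hypcall, code 0 (assumed encoding) */
		     : "+r"(v0)
		     : "r"(a0)
		     : "memory");

	return v0;	/* -KVM_ENOSYS while hypercalls are unimplemented */
}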
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c new file mode 100644 index 000000000..d28c2c9c3 --- /dev/null +++ b/arch/mips/kvm/interrupt.c | |||
@@ -0,0 +1,175 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: Interrupt delivery | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/errno.h> | ||
13 | #include <linux/err.h> | ||
14 | #include <linux/vmalloc.h> | ||
15 | #include <linux/fs.h> | ||
16 | #include <linux/memblock.h> | ||
17 | #include <asm/page.h> | ||
18 | #include <asm/cacheflush.h> | ||
19 | |||
20 | #include <linux/kvm_host.h> | ||
21 | |||
22 | #include "interrupt.h" | ||
23 | |||
24 | void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority) | ||
25 | { | ||
26 | set_bit(priority, &vcpu->arch.pending_exceptions); | ||
27 | } | ||
28 | |||
29 | void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority) | ||
30 | { | ||
31 | clear_bit(priority, &vcpu->arch.pending_exceptions); | ||
32 | } | ||
33 | |||
34 | void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu) | ||
35 | { | ||
36 | /* | ||
37 | * Cause bits to reflect the pending timer interrupt, | ||
38 | * the EXC code will be set when we are actually | ||
39 | * delivering the interrupt: | ||
40 | */ | ||
41 | kvm_set_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); | ||
42 | |||
43 | /* Queue up an INT exception for the core */ | ||
44 | kvm_mips_queue_irq(vcpu, MIPS_EXC_INT_TIMER); | ||
45 | |||
46 | } | ||
47 | |||
48 | void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) | ||
49 | { | ||
50 | kvm_clear_c0_guest_cause(vcpu->arch.cop0, (C_IRQ5 | C_TI)); | ||
51 | kvm_mips_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); | ||
52 | } | ||
53 | |||
54 | void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, | ||
55 | struct kvm_mips_interrupt *irq) | ||
56 | { | ||
57 | int intr = (int)irq->irq; | ||
58 | |||
59 | /* | ||
60 | * Cause bits to reflect the pending IO interrupt, | ||
61 | * the EXC code will be set when we are actually | ||
62 | * delivering the interrupt: | ||
63 | */ | ||
64 | kvm_set_c0_guest_cause(vcpu->arch.cop0, 1 << (intr + 8)); | ||
65 | kvm_mips_queue_irq(vcpu, kvm_irq_to_priority(intr)); | ||
66 | } | ||
67 | |||
68 | void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, | ||
69 | struct kvm_mips_interrupt *irq) | ||
70 | { | ||
71 | int intr = (int)irq->irq; | ||
72 | |||
73 | kvm_clear_c0_guest_cause(vcpu->arch.cop0, 1 << (-intr + 8)); | ||
74 | kvm_mips_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); | ||
75 | } | ||
76 | |||
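The 1 << (intr + 8) expression in the two callbacks above maps an interrupt line onto its Cause.IP bit: IP0 sits at bit 8, so hardware lines 2-7 land on IP2-IP7 (bits 10-15). A one-line restatement of that mapping:

#define CAUSEB_IP	8	/* Cause.IP0 is bit 8 */

/* Sketch: e.g. intr = 2 -> bit 10 (IP2), intr = 6 -> bit 14 (IP6). */
static inline unsigned int irq_to_causef(unsigned int intr)
{
	return 1u << (intr + CAUSEB_IP);
}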
77 | /* Deliver the interrupt of the corresponding priority, if possible. */ | ||
78 | int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | ||
79 | u32 cause) | ||
80 | { | ||
81 | int allowed = 0; | ||
82 | u32 exccode, ie; | ||
83 | |||
84 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
85 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
86 | |||
87 | if (priority == MIPS_EXC_MAX) | ||
88 | return 0; | ||
89 | |||
90 | ie = 1 << (kvm_priority_to_irq[priority] + 8); | ||
91 | if ((kvm_read_c0_guest_status(cop0) & ST0_IE) | ||
92 | && (!(kvm_read_c0_guest_status(cop0) & (ST0_EXL | ST0_ERL))) | ||
93 | && (kvm_read_c0_guest_status(cop0) & ie)) { | ||
94 | allowed = 1; | ||
95 | exccode = EXCCODE_INT; | ||
96 | } | ||
97 | |||
98 | /* Are we allowed to deliver the interrupt? */ | ||
99 | if (allowed) { | ||
100 | if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) { | ||
101 | /* save old pc */ | ||
102 | kvm_write_c0_guest_epc(cop0, arch->pc); | ||
103 | kvm_set_c0_guest_status(cop0, ST0_EXL); | ||
104 | |||
105 | if (cause & CAUSEF_BD) | ||
106 | kvm_set_c0_guest_cause(cop0, CAUSEF_BD); | ||
107 | else | ||
108 | kvm_clear_c0_guest_cause(cop0, CAUSEF_BD); | ||
109 | |||
110 | kvm_debug("Delivering INT @ pc %#lx\n", arch->pc); | ||
111 | |||
112 | } else | ||
113 | kvm_err("Trying to deliver interrupt when EXL is already set\n"); | ||
114 | |||
115 | kvm_change_c0_guest_cause(cop0, CAUSEF_EXCCODE, | ||
116 | (exccode << CAUSEB_EXCCODE)); | ||
117 | |||
118 | /* XXXSL Set PC to the interrupt exception entry point */ | ||
119 | arch->pc = kvm_mips_guest_exception_base(vcpu); | ||
120 | if (kvm_read_c0_guest_cause(cop0) & CAUSEF_IV) | ||
121 | arch->pc += 0x200; | ||
122 | else | ||
123 | arch->pc += 0x180; | ||
124 | |||
125 | clear_bit(priority, &vcpu->arch.pending_exceptions); | ||
126 | } | ||
127 | |||
128 | return allowed; | ||
129 | } | ||
130 | |||
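The 0x180/0x200 offsets above follow the architectural vectors: interrupts use the special vector at EBASE + 0x200 when Cause.IV is set, and the general vector at EBASE + 0x180 otherwise. For example, with a guest exception base of 0x80000000 and Cause.IV = 1, delivery resumes at 0x80000200. A short restatement:

#define CAUSEF_IV	(1u << 23)	/* Cause.IV: special interrupt vector */

/* Sketch: choose the guest entry point, mirroring the delivery code above. */
static unsigned long guest_int_vector(unsigned long ebase, unsigned int cause)
{
	return ebase + ((cause & CAUSEF_IV) ? 0x200 : 0x180);
}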
131 | int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, | ||
132 | u32 cause) | ||
133 | { | ||
134 | return 1; | ||
135 | } | ||
136 | |||
137 | void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause) | ||
138 | { | ||
139 | unsigned long *pending = &vcpu->arch.pending_exceptions; | ||
140 | unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr; | ||
141 | unsigned int priority; | ||
142 | |||
143 | if (!(*pending) && !(*pending_clr)) | ||
144 | return; | ||
145 | |||
146 | priority = __ffs(*pending_clr); | ||
147 | while (priority <= MIPS_EXC_MAX) { | ||
148 | if (kvm_mips_callbacks->irq_clear(vcpu, priority, cause)) { | ||
149 | if (!KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE) | ||
150 | break; | ||
151 | } | ||
152 | |||
153 | priority = find_next_bit(pending_clr, | ||
154 | BITS_PER_BYTE * sizeof(*pending_clr), | ||
155 | priority + 1); | ||
156 | } | ||
157 | |||
158 | priority = __ffs(*pending); | ||
159 | while (priority <= MIPS_EXC_MAX) { | ||
160 | if (kvm_mips_callbacks->irq_deliver(vcpu, priority, cause)) { | ||
161 | if (!KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE) | ||
162 | break; | ||
163 | } | ||
164 | |||
165 | priority = find_next_bit(pending, | ||
166 | BITS_PER_BYTE * sizeof(*pending), | ||
167 | priority + 1); | ||
168 | } | ||
169 | |||
170 | } | ||
171 | |||
172 | int kvm_mips_pending_timer(struct kvm_vcpu *vcpu) | ||
173 | { | ||
174 | return test_bit(MIPS_EXC_INT_TIMER, &vcpu->arch.pending_exceptions); | ||
175 | } | ||
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h new file mode 100644 index 000000000..c3e878ca3 --- /dev/null +++ b/arch/mips/kvm/interrupt.h | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: Interrupts | ||
7 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
8 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
9 | */ | ||
10 | |||
11 | /* | ||
12 | * MIPS exception priorities: exceptions (including interrupts) are queued up | ||
13 | * for the guest in the order specified by their priorities. | ||
14 | */ | ||
15 | |||
16 | #define MIPS_EXC_RESET 0 | ||
17 | #define MIPS_EXC_SRESET 1 | ||
18 | #define MIPS_EXC_DEBUG_ST 2 | ||
19 | #define MIPS_EXC_DEBUG 3 | ||
20 | #define MIPS_EXC_DDB 4 | ||
21 | #define MIPS_EXC_NMI 5 | ||
22 | #define MIPS_EXC_MCHK 6 | ||
23 | #define MIPS_EXC_INT_TIMER 7 | ||
24 | #define MIPS_EXC_INT_IO_1 8 | ||
25 | #define MIPS_EXC_INT_IO_2 9 | ||
26 | #define MIPS_EXC_EXECUTE 10 | ||
27 | #define MIPS_EXC_INT_IPI_1 11 | ||
28 | #define MIPS_EXC_INT_IPI_2 12 | ||
29 | #define MIPS_EXC_MAX 13 | ||
30 | /* XXXSL More to follow */ | ||
31 | |||
32 | #define C_TI (_ULCAST_(1) << 30) | ||
33 | |||
34 | #ifdef CONFIG_KVM_MIPS_VZ | ||
35 | #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (1) | ||
36 | #define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (1) | ||
37 | #else | ||
38 | #define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0) | ||
39 | #define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0) | ||
40 | #endif | ||
41 | |||
42 | extern u32 *kvm_priority_to_irq; | ||
43 | u32 kvm_irq_to_priority(u32 irq); | ||
44 | |||
45 | void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority); | ||
46 | void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority); | ||
47 | int kvm_mips_pending_timer(struct kvm_vcpu *vcpu); | ||
48 | |||
49 | void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu); | ||
50 | void kvm_mips_dequeue_timer_int_cb(struct kvm_vcpu *vcpu); | ||
51 | void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu, | ||
52 | struct kvm_mips_interrupt *irq); | ||
53 | void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu, | ||
54 | struct kvm_mips_interrupt *irq); | ||
55 | int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | ||
56 | u32 cause); | ||
57 | int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, | ||
58 | u32 cause); | ||
59 | void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause); | ||
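kvm_irq_to_priority() is the inverse lookup of the kvm_priority_to_irq table declared above. A plausible implementation, sketched here on the assumption that it simply scans the interrupt-priority range:

/* Sketch: invert the priority -> IRQ table by linear scan. */
u32 kvm_irq_to_priority(u32 irq)
{
	int i;

	for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) {
		if (kvm_priority_to_irq[i] == irq)
			return i;
	}

	return MIPS_EXC_MAX;	/* no priority uses this IRQ line */
}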
diff --git a/arch/mips/kvm/loongson_ipi.c b/arch/mips/kvm/loongson_ipi.c new file mode 100644 index 000000000..3681fc8fb --- /dev/null +++ b/arch/mips/kvm/loongson_ipi.c | |||
@@ -0,0 +1,214 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Loongson-3 Virtual IPI interrupt support. | ||
4 | * | ||
5 | * Copyright (C) 2019 Loongson Technologies, Inc. All rights reserved. | ||
6 | * | ||
7 | * Authors: Chen Zhu <zhuchen@loongson.cn> | ||
8 | * Authors: Huacai Chen <chenhc@lemote.com> | ||
9 | */ | ||
10 | |||
11 | #include <linux/kvm_host.h> | ||
12 | |||
13 | #define IPI_BASE 0x3ff01000ULL | ||
14 | |||
15 | #define CORE0_STATUS_OFF 0x000 | ||
16 | #define CORE0_EN_OFF 0x004 | ||
17 | #define CORE0_SET_OFF 0x008 | ||
18 | #define CORE0_CLEAR_OFF 0x00c | ||
19 | #define CORE0_BUF_20 0x020 | ||
20 | #define CORE0_BUF_28 0x028 | ||
21 | #define CORE0_BUF_30 0x030 | ||
22 | #define CORE0_BUF_38 0x038 | ||
23 | |||
24 | #define CORE1_STATUS_OFF 0x100 | ||
25 | #define CORE1_EN_OFF 0x104 | ||
26 | #define CORE1_SET_OFF 0x108 | ||
27 | #define CORE1_CLEAR_OFF 0x10c | ||
28 | #define CORE1_BUF_20 0x120 | ||
29 | #define CORE1_BUF_28 0x128 | ||
30 | #define CORE1_BUF_30 0x130 | ||
31 | #define CORE1_BUF_38 0x138 | ||
32 | |||
33 | #define CORE2_STATUS_OFF 0x200 | ||
34 | #define CORE2_EN_OFF 0x204 | ||
35 | #define CORE2_SET_OFF 0x208 | ||
36 | #define CORE2_CLEAR_OFF 0x20c | ||
37 | #define CORE2_BUF_20 0x220 | ||
38 | #define CORE2_BUF_28 0x228 | ||
39 | #define CORE2_BUF_30 0x230 | ||
40 | #define CORE2_BUF_38 0x238 | ||
41 | |||
42 | #define CORE3_STATUS_OFF 0x300 | ||
43 | #define CORE3_EN_OFF 0x304 | ||
44 | #define CORE3_SET_OFF 0x308 | ||
45 | #define CORE3_CLEAR_OFF 0x30c | ||
46 | #define CORE3_BUF_20 0x320 | ||
47 | #define CORE3_BUF_28 0x328 | ||
48 | #define CORE3_BUF_30 0x330 | ||
49 | #define CORE3_BUF_38 0x338 | ||
50 | |||
51 | static int loongson_vipi_read(struct loongson_kvm_ipi *ipi, | ||
52 | gpa_t addr, int len, void *val) | ||
53 | { | ||
54 | uint32_t core = (addr >> 8) & 3; | ||
55 | uint32_t node = (addr >> 44) & 3; | ||
56 | uint32_t id = core + node * 4; | ||
57 | uint64_t offset = addr & 0xff; | ||
58 | void *pbuf; | ||
59 | struct ipi_state *s = &(ipi->ipistate[id]); | ||
60 | |||
61 | BUG_ON(offset & (len - 1)); | ||
62 | |||
63 | switch (offset) { | ||
64 | case CORE0_STATUS_OFF: | ||
65 | *(uint64_t *)val = s->status; | ||
66 | break; | ||
67 | |||
68 | case CORE0_EN_OFF: | ||
69 | *(uint64_t *)val = s->en; | ||
70 | break; | ||
71 | |||
72 | case CORE0_SET_OFF: | ||
73 | *(uint64_t *)val = 0; | ||
74 | break; | ||
75 | |||
76 | case CORE0_CLEAR_OFF: | ||
77 | *(uint64_t *)val = 0; | ||
78 | break; | ||
79 | |||
80 | case CORE0_BUF_20 ... CORE0_BUF_38: | ||
81 | pbuf = (void *)s->buf + (offset - 0x20); | ||
82 | if (len == 8) | ||
83 | *(uint64_t *)val = *(uint64_t *)pbuf; | ||
84 | else /* Assume len == 4 */ | ||
85 | *(uint32_t *)val = *(uint32_t *)pbuf; | ||
86 | break; | ||
87 | |||
88 | default: | ||
89 | pr_notice("%s with unknown addr %llx\n", __func__, addr); | ||
90 | break; | ||
91 | } | ||
92 | |||
93 | return 0; | ||
94 | } | ||
95 | |||
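The decode at the top of loongson_vipi_read() splits the guest physical address into node (bits 44-45), core (bits 8-9) and a per-core register offset (the low byte), so every core's registers fold onto the CORE0_* case labels while the core index selects the right ipi_state. For example, GPA 0x10003ff01208 decodes to node 1, core 2, offset CORE0_SET_OFF, and id = 2 + 1 * 4 = 6. A restatement of the decode:

/* Sketch: decode a vIPI GPA as loongson_vipi_read()/write() do above. */
static void decode_vipi_addr(u64 addr, u32 *id, u64 *offset)
{
	u32 core = (addr >> 8) & 3;
	u32 node = (addr >> 44) & 3;

	*id = core + node * 4;		/* index into ipi->ipistate[] */
	*offset = addr & 0xff;		/* CORE0_STATUS_OFF ... CORE0_BUF_38 */
}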
96 | static int loongson_vipi_write(struct loongson_kvm_ipi *ipi, | ||
97 | gpa_t addr, int len, const void *val) | ||
98 | { | ||
99 | uint32_t core = (addr >> 8) & 3; | ||
100 | uint32_t node = (addr >> 44) & 3; | ||
101 | uint32_t id = core + node * 4; | ||
102 | uint64_t data, offset = addr & 0xff; | ||
103 | void *pbuf; | ||
104 | struct kvm *kvm = ipi->kvm; | ||
105 | struct kvm_mips_interrupt irq; | ||
106 | struct ipi_state *s = &(ipi->ipistate[id]); | ||
107 | |||
108 | data = *(uint64_t *)val; | ||
109 | BUG_ON(offset & (len - 1)); | ||
110 | |||
111 | switch (offset) { | ||
112 | case CORE0_STATUS_OFF: | ||
113 | break; | ||
114 | |||
115 | case CORE0_EN_OFF: | ||
116 | s->en = data; | ||
117 | break; | ||
118 | |||
119 | case CORE0_SET_OFF: | ||
120 | s->status |= data; | ||
121 | irq.cpu = id; | ||
122 | irq.irq = 6; | ||
123 | kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq); | ||
124 | break; | ||
125 | |||
126 | case CORE0_CLEAR_OFF: | ||
127 | s->status &= ~data; | ||
128 | if (!s->status) { | ||
129 | irq.cpu = id; | ||
130 | irq.irq = -6; | ||
131 | kvm_vcpu_ioctl_interrupt(kvm->vcpus[id], &irq); | ||
132 | } | ||
133 | break; | ||
134 | |||
135 | case CORE0_BUF_20 ... CORE0_BUF_38: | ||
136 | pbuf = (void *)s->buf + (offset - 0x20); | ||
137 | if (len == 8) | ||
138 | *(uint64_t *)pbuf = (uint64_t)data; | ||
139 | else /* Assume len == 4 */ | ||
140 | *(uint32_t *)pbuf = (uint32_t)data; | ||
141 | break; | ||
142 | |||
143 | default: | ||
144 | pr_notice("%s with unknown addr %llx\n", __func__, addr); | ||
145 | break; | ||
146 | } | ||
147 | |||
148 | return 0; | ||
149 | } | ||
150 | |||
151 | static int kvm_ipi_read(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, | ||
152 | gpa_t addr, int len, void *val) | ||
153 | { | ||
154 | unsigned long flags; | ||
155 | struct loongson_kvm_ipi *ipi; | ||
156 | struct ipi_io_device *ipi_device; | ||
157 | |||
158 | ipi_device = container_of(dev, struct ipi_io_device, device); | ||
159 | ipi = ipi_device->ipi; | ||
160 | |||
161 | spin_lock_irqsave(&ipi->lock, flags); | ||
162 | loongson_vipi_read(ipi, addr, len, val); | ||
163 | spin_unlock_irqrestore(&ipi->lock, flags); | ||
164 | |||
165 | return 0; | ||
166 | } | ||
167 | |||
168 | static int kvm_ipi_write(struct kvm_vcpu *vcpu, struct kvm_io_device *dev, | ||
169 | gpa_t addr, int len, const void *val) | ||
170 | { | ||
171 | unsigned long flags; | ||
172 | struct loongson_kvm_ipi *ipi; | ||
173 | struct ipi_io_device *ipi_device; | ||
174 | |||
175 | ipi_device = container_of(dev, struct ipi_io_device, device); | ||
176 | ipi = ipi_device->ipi; | ||
177 | |||
178 | spin_lock_irqsave(&ipi->lock, flags); | ||
179 | loongson_vipi_write(ipi, addr, len, val); | ||
180 | spin_unlock_irqrestore(&ipi->lock, flags); | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | static const struct kvm_io_device_ops kvm_ipi_ops = { | ||
186 | .read = kvm_ipi_read, | ||
187 | .write = kvm_ipi_write, | ||
188 | }; | ||
189 | |||
190 | void kvm_init_loongson_ipi(struct kvm *kvm) | ||
191 | { | ||
192 | int i; | ||
193 | unsigned long addr; | ||
194 | struct loongson_kvm_ipi *s; | ||
195 | struct kvm_io_device *device; | ||
196 | |||
197 | s = &kvm->arch.ipi; | ||
198 | s->kvm = kvm; | ||
199 | spin_lock_init(&s->lock); | ||
200 | |||
201 | /* | ||
202 | * Initialize IPI device | ||
203 | */ | ||
204 | for (i = 0; i < 4; i++) { | ||
205 | device = &s->dev_ipi[i].device; | ||
206 | kvm_iodevice_init(device, &kvm_ipi_ops); | ||
207 | addr = (((unsigned long)i) << 44) + IPI_BASE; | ||
208 | mutex_lock(&kvm->slots_lock); | ||
209 | kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, addr, 0x400, device); | ||
210 | mutex_unlock(&kvm->slots_lock); | ||
211 | s->dev_ipi[i].ipi = s; | ||
212 | s->dev_ipi[i].node_id = i; | ||
213 | } | ||
214 | } | ||
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c new file mode 100644 index 000000000..3d6a7f582 --- /dev/null +++ b/arch/mips/kvm/mips.c | |||
@@ -0,0 +1,1701 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: MIPS specific KVM APIs | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/bitops.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/kdebug.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | #include <linux/vmalloc.h> | ||
19 | #include <linux/sched/signal.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/memblock.h> | ||
22 | #include <linux/pgtable.h> | ||
23 | |||
24 | #include <asm/fpu.h> | ||
25 | #include <asm/page.h> | ||
26 | #include <asm/cacheflush.h> | ||
27 | #include <asm/mmu_context.h> | ||
28 | #include <asm/pgalloc.h> | ||
29 | |||
30 | #include <linux/kvm_host.h> | ||
31 | |||
32 | #include "interrupt.h" | ||
33 | #include "commpage.h" | ||
34 | |||
35 | #define CREATE_TRACE_POINTS | ||
36 | #include "trace.h" | ||
37 | |||
38 | #ifndef VECTORSPACING | ||
39 | #define VECTORSPACING 0x100 /* for EI/VI mode */ | ||
40 | #endif | ||
41 | |||
42 | struct kvm_stats_debugfs_item debugfs_entries[] = { | ||
43 | VCPU_STAT("wait", wait_exits), | ||
44 | VCPU_STAT("cache", cache_exits), | ||
45 | VCPU_STAT("signal", signal_exits), | ||
46 | VCPU_STAT("interrupt", int_exits), | ||
47 | VCPU_STAT("cop_unusable", cop_unusable_exits), | ||
48 | VCPU_STAT("tlbmod", tlbmod_exits), | ||
49 | VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits), | ||
50 | VCPU_STAT("tlbmiss_st", tlbmiss_st_exits), | ||
51 | VCPU_STAT("addrerr_st", addrerr_st_exits), | ||
52 | VCPU_STAT("addrerr_ld", addrerr_ld_exits), | ||
53 | VCPU_STAT("syscall", syscall_exits), | ||
54 | VCPU_STAT("resvd_inst", resvd_inst_exits), | ||
55 | VCPU_STAT("break_inst", break_inst_exits), | ||
56 | VCPU_STAT("trap_inst", trap_inst_exits), | ||
57 | VCPU_STAT("msa_fpe", msa_fpe_exits), | ||
58 | VCPU_STAT("fpe", fpe_exits), | ||
59 | VCPU_STAT("msa_disabled", msa_disabled_exits), | ||
60 | VCPU_STAT("flush_dcache", flush_dcache_exits), | ||
61 | #ifdef CONFIG_KVM_MIPS_VZ | ||
62 | VCPU_STAT("vz_gpsi", vz_gpsi_exits), | ||
63 | VCPU_STAT("vz_gsfc", vz_gsfc_exits), | ||
64 | VCPU_STAT("vz_hc", vz_hc_exits), | ||
65 | VCPU_STAT("vz_grr", vz_grr_exits), | ||
66 | VCPU_STAT("vz_gva", vz_gva_exits), | ||
67 | VCPU_STAT("vz_ghfc", vz_ghfc_exits), | ||
68 | VCPU_STAT("vz_gpa", vz_gpa_exits), | ||
69 | VCPU_STAT("vz_resvd", vz_resvd_exits), | ||
70 | #ifdef CONFIG_CPU_LOONGSON64 | ||
71 | VCPU_STAT("vz_cpucfg", vz_cpucfg_exits), | ||
72 | #endif | ||
73 | #endif | ||
74 | VCPU_STAT("halt_successful_poll", halt_successful_poll), | ||
75 | VCPU_STAT("halt_attempted_poll", halt_attempted_poll), | ||
76 | VCPU_STAT("halt_poll_invalid", halt_poll_invalid), | ||
77 | VCPU_STAT("halt_wakeup", halt_wakeup), | ||
78 | VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns), | ||
79 | VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns), | ||
80 | {NULL} | ||
81 | }; | ||
82 | |||
83 | bool kvm_trace_guest_mode_change; | ||
84 | |||
85 | int kvm_guest_mode_change_trace_reg(void) | ||
86 | { | ||
87 | kvm_trace_guest_mode_change = true; | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | void kvm_guest_mode_change_trace_unreg(void) | ||
92 | { | ||
93 | kvm_trace_guest_mode_change = false; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * XXXKYMA: We are simulating a processor that has the WII bit set in | ||
98 | * Config7, so we are "runnable" if interrupts are pending | ||
99 | */ | ||
100 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | ||
101 | { | ||
102 | return !!(vcpu->arch.pending_exceptions); | ||
103 | } | ||
104 | |||
105 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) | ||
106 | { | ||
107 | return false; | ||
108 | } | ||
109 | |||
110 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | ||
111 | { | ||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | int kvm_arch_hardware_enable(void) | ||
116 | { | ||
117 | return kvm_mips_callbacks->hardware_enable(); | ||
118 | } | ||
119 | |||
120 | void kvm_arch_hardware_disable(void) | ||
121 | { | ||
122 | kvm_mips_callbacks->hardware_disable(); | ||
123 | } | ||
124 | |||
125 | int kvm_arch_hardware_setup(void *opaque) | ||
126 | { | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | int kvm_arch_check_processor_compat(void *opaque) | ||
131 | { | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | extern void kvm_init_loongson_ipi(struct kvm *kvm); | ||
136 | |||
137 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | ||
138 | { | ||
139 | switch (type) { | ||
140 | case KVM_VM_MIPS_AUTO: | ||
141 | break; | ||
142 | #ifdef CONFIG_KVM_MIPS_VZ | ||
143 | case KVM_VM_MIPS_VZ: | ||
144 | #else | ||
145 | case KVM_VM_MIPS_TE: | ||
146 | #endif | ||
147 | break; | ||
148 | default: | ||
149 | /* Unsupported KVM type */ | ||
150 | return -EINVAL; | ||
151 | } | ||
152 | |||
153 | /* Allocate page table to map GPA -> RPA */ | ||
154 | kvm->arch.gpa_mm.pgd = kvm_pgd_alloc(); | ||
155 | if (!kvm->arch.gpa_mm.pgd) | ||
156 | return -ENOMEM; | ||
157 | |||
158 | #ifdef CONFIG_CPU_LOONGSON64 | ||
159 | kvm_init_loongson_ipi(kvm); | ||
160 | #endif | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | void kvm_mips_free_vcpus(struct kvm *kvm) | ||
166 | { | ||
167 | unsigned int i; | ||
168 | struct kvm_vcpu *vcpu; | ||
169 | |||
170 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
171 | kvm_vcpu_destroy(vcpu); | ||
172 | } | ||
173 | |||
174 | mutex_lock(&kvm->lock); | ||
175 | |||
176 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) | ||
177 | kvm->vcpus[i] = NULL; | ||
178 | |||
179 | atomic_set(&kvm->online_vcpus, 0); | ||
180 | |||
181 | mutex_unlock(&kvm->lock); | ||
182 | } | ||
183 | |||
184 | static void kvm_mips_free_gpa_pt(struct kvm *kvm) | ||
185 | { | ||
186 | /* It should always be safe to remove after flushing the whole range */ | ||
187 | WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0)); | ||
188 | pgd_free(NULL, kvm->arch.gpa_mm.pgd); | ||
189 | } | ||
190 | |||
191 | void kvm_arch_destroy_vm(struct kvm *kvm) | ||
192 | { | ||
193 | kvm_mips_free_vcpus(kvm); | ||
194 | kvm_mips_free_gpa_pt(kvm); | ||
195 | } | ||
196 | |||
197 | long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, | ||
198 | unsigned long arg) | ||
199 | { | ||
200 | return -ENOIOCTLCMD; | ||
201 | } | ||
202 | |||
203 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | ||
204 | { | ||
205 | /* Flush whole GPA */ | ||
206 | kvm_mips_flush_gpa_pt(kvm, 0, ~0); | ||
207 | |||
208 | /* Let implementation do the rest */ | ||
209 | kvm_mips_callbacks->flush_shadow_all(kvm); | ||
210 | } | ||
211 | |||
212 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | ||
213 | struct kvm_memory_slot *slot) | ||
214 | { | ||
215 | /* | ||
216 | * The slot has been made invalid (ready for moving or deletion), so we | ||
217 | * need to ensure that it can no longer be accessed by any guest VCPUs. | ||
218 | */ | ||
219 | |||
220 | spin_lock(&kvm->mmu_lock); | ||
221 | /* Flush slot from GPA */ | ||
222 | kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, | ||
223 | slot->base_gfn + slot->npages - 1); | ||
224 | /* Let implementation do the rest */ | ||
225 | kvm_mips_callbacks->flush_shadow_memslot(kvm, slot); | ||
226 | spin_unlock(&kvm->mmu_lock); | ||
227 | } | ||
228 | |||
229 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | ||
230 | struct kvm_memory_slot *memslot, | ||
231 | const struct kvm_userspace_memory_region *mem, | ||
232 | enum kvm_mr_change change) | ||
233 | { | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | void kvm_arch_commit_memory_region(struct kvm *kvm, | ||
238 | const struct kvm_userspace_memory_region *mem, | ||
239 | struct kvm_memory_slot *old, | ||
240 | const struct kvm_memory_slot *new, | ||
241 | enum kvm_mr_change change) | ||
242 | { | ||
243 | int needs_flush; | ||
244 | |||
245 | kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, UVA: %llx\n", | ||
246 | __func__, kvm, mem->slot, mem->guest_phys_addr, | ||
247 | mem->memory_size, mem->userspace_addr); | ||
248 | |||
249 | /* | ||
250 | * If dirty page logging is enabled, write protect all pages in the slot | ||
251 | * ready for dirty logging. | ||
252 | * | ||
253 | * There is no need to do this in any of the following cases: | ||
254 | * CREATE: No dirty mappings will already exist. | ||
255 | * MOVE/DELETE: The old mappings will already have been cleaned up by | ||
256 | * kvm_arch_flush_shadow_memslot() | ||
257 | */ | ||
258 | if (change == KVM_MR_FLAGS_ONLY && | ||
259 | (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && | ||
260 | new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { | ||
261 | spin_lock(&kvm->mmu_lock); | ||
262 | /* Write protect GPA page table entries */ | ||
263 | needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, | ||
264 | new->base_gfn + new->npages - 1); | ||
265 | /* Let implementation do the rest */ | ||
266 | if (needs_flush) | ||
267 | kvm_mips_callbacks->flush_shadow_memslot(kvm, new); | ||
268 | spin_unlock(&kvm->mmu_lock); | ||
269 | } | ||
270 | } | ||
271 | |||
272 | static inline void dump_handler(const char *symbol, void *start, void *end) | ||
273 | { | ||
274 | u32 *p; | ||
275 | |||
276 | pr_debug("LEAF(%s)\n", symbol); | ||
277 | |||
278 | pr_debug("\t.set push\n"); | ||
279 | pr_debug("\t.set noreorder\n"); | ||
280 | |||
281 | for (p = start; p < (u32 *)end; ++p) | ||
282 | pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p); | ||
283 | |||
284 | pr_debug("\t.set\tpop\n"); | ||
285 | |||
286 | pr_debug("\tEND(%s)\n", symbol); | ||
287 | } | ||
288 | |||
289 | /* low level hrtimer wake routine */ | ||
290 | static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) | ||
291 | { | ||
292 | struct kvm_vcpu *vcpu; | ||
293 | |||
294 | vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); | ||
295 | |||
296 | kvm_mips_callbacks->queue_timer_int(vcpu); | ||
297 | |||
298 | vcpu->arch.wait = 0; | ||
299 | rcuwait_wake_up(&vcpu->wait); | ||
300 | |||
301 | return kvm_mips_count_timeout(vcpu); | ||
302 | } | ||
303 | |||
304 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) | ||
305 | { | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) | ||
310 | { | ||
311 | int err, size; | ||
312 | void *gebase, *p, *handler, *refill_start, *refill_end; | ||
313 | int i; | ||
314 | |||
315 | kvm_debug("kvm @ %p: create cpu %d at %p\n", | ||
316 | vcpu->kvm, vcpu->vcpu_id, vcpu); | ||
317 | |||
318 | err = kvm_mips_callbacks->vcpu_init(vcpu); | ||
319 | if (err) | ||
320 | return err; | ||
321 | |||
322 | hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, | ||
323 | HRTIMER_MODE_REL); | ||
324 | vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; | ||
325 | |||
326 | /* | ||
327 | * Allocate space for host mode exception handlers that handle | ||
328 | * guest mode exits | ||
329 | */ | ||
330 | if (cpu_has_veic || cpu_has_vint) | ||
331 | size = 0x200 + VECTORSPACING * 64; | ||
332 | else | ||
333 | size = 0x4000; | ||
334 | |||
335 | gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); | ||
336 | |||
337 | if (!gebase) { | ||
338 | err = -ENOMEM; | ||
339 | goto out_uninit_vcpu; | ||
340 | } | ||
341 | kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", | ||
342 | ALIGN(size, PAGE_SIZE), gebase); | ||
343 | |||
344 | /* | ||
345 | * Check new ebase actually fits in CP0_EBase. The lack of a write gate | ||
346 | * limits us to the low 512MB of physical address space. If the memory | ||
347 | * we allocate is out of range, just give up now. | ||
348 | */ | ||
349 | if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) { | ||
350 | kvm_err("CP0_EBase.WG required for guest exception base %pK\n", | ||
351 | gebase); | ||
352 | err = -ENOMEM; | ||
353 | goto out_free_gebase; | ||
354 | } | ||
355 | |||
356 | /* Save new ebase */ | ||
357 | vcpu->arch.guest_ebase = gebase; | ||
358 | |||
359 | /* Build guest exception vectors dynamically in unmapped memory */ | ||
360 | handler = gebase + 0x2000; | ||
361 | |||
362 | /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */ | ||
363 | refill_start = gebase; | ||
364 | if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT)) | ||
365 | refill_start += 0x080; | ||
366 | refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler); | ||
367 | |||
368 | /* General Exception Entry point */ | ||
369 | kvm_mips_build_exception(gebase + 0x180, handler); | ||
370 | |||
371 | /* For vectored interrupts poke the exception code @ all offsets 0-7 */ | ||
372 | for (i = 0; i < 8; i++) { | ||
373 | kvm_debug("L1 Vectored handler @ %p\n", | ||
374 | gebase + 0x200 + (i * VECTORSPACING)); | ||
375 | kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING, | ||
376 | handler); | ||
377 | } | ||
378 | |||
379 | /* General exit handler */ | ||
380 | p = handler; | ||
381 | p = kvm_mips_build_exit(p); | ||
382 | |||
383 | /* Guest entry routine */ | ||
384 | vcpu->arch.vcpu_run = p; | ||
385 | p = kvm_mips_build_vcpu_run(p); | ||
386 | |||
387 | /* Dump the generated code */ | ||
388 | pr_debug("#include <asm/asm.h>\n"); | ||
389 | pr_debug("#include <asm/regdef.h>\n"); | ||
390 | pr_debug("\n"); | ||
391 | dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); | ||
392 | dump_handler("kvm_tlb_refill", refill_start, refill_end); | ||
393 | dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200); | ||
394 | dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); | ||
395 | |||
396 | /* Invalidate the icache for these ranges */ | ||
397 | flush_icache_range((unsigned long)gebase, | ||
398 | (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); | ||
399 | |||
400 | /* | ||
401 | * Allocate comm page for the guest kernel; a TLB entry will be reserved for | ||
402 | * mapping GVA @ 0xFFFF8000 to this page | ||
403 | */ | ||
404 | vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); | ||
405 | |||
406 | if (!vcpu->arch.kseg0_commpage) { | ||
407 | err = -ENOMEM; | ||
408 | goto out_free_gebase; | ||
409 | } | ||
410 | |||
411 | kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); | ||
412 | kvm_mips_commpage_init(vcpu); | ||
413 | |||
414 | /* Init */ | ||
415 | vcpu->arch.last_sched_cpu = -1; | ||
416 | vcpu->arch.last_exec_cpu = -1; | ||
417 | |||
418 | /* Initial guest state */ | ||
419 | err = kvm_mips_callbacks->vcpu_setup(vcpu); | ||
420 | if (err) | ||
421 | goto out_free_commpage; | ||
422 | |||
423 | return 0; | ||
424 | |||
425 | out_free_commpage: | ||
426 | kfree(vcpu->arch.kseg0_commpage); | ||
427 | out_free_gebase: | ||
428 | kfree(gebase); | ||
429 | out_uninit_vcpu: | ||
430 | kvm_mips_callbacks->vcpu_uninit(vcpu); | ||
431 | return err; | ||
432 | } | ||
433 | |||
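For reference, the handler region assembled in kvm_arch_vcpu_create() ends up laid out as follows (offsets from gebase):

/*
 * Sketch of the gebase layout built above:
 *
 *   +0x000 (+0x080 on 64-bit VZ)   TLB/XTLB refill handler
 *   +0x180                         general exception entry
 *   +0x200 + i * VECTORSPACING     vectored interrupt entries, i = 0..7
 *   +0x2000                        common exit handler, then vcpu_run
 */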
434 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
435 | { | ||
436 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | ||
437 | |||
438 | kvm_mips_dump_stats(vcpu); | ||
439 | |||
440 | kvm_mmu_free_memory_caches(vcpu); | ||
441 | kfree(vcpu->arch.guest_ebase); | ||
442 | kfree(vcpu->arch.kseg0_commpage); | ||
443 | |||
444 | kvm_mips_callbacks->vcpu_uninit(vcpu); | ||
445 | } | ||
446 | |||
447 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | ||
448 | struct kvm_guest_debug *dbg) | ||
449 | { | ||
450 | return -ENOIOCTLCMD; | ||
451 | } | ||
452 | |||
453 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) | ||
454 | { | ||
455 | int r = -EINTR; | ||
456 | |||
457 | vcpu_load(vcpu); | ||
458 | |||
459 | kvm_sigset_activate(vcpu); | ||
460 | |||
461 | if (vcpu->mmio_needed) { | ||
462 | if (!vcpu->mmio_is_write) | ||
463 | kvm_mips_complete_mmio_load(vcpu); | ||
464 | vcpu->mmio_needed = 0; | ||
465 | } | ||
466 | |||
467 | if (vcpu->run->immediate_exit) | ||
468 | goto out; | ||
469 | |||
470 | lose_fpu(1); | ||
471 | |||
472 | local_irq_disable(); | ||
473 | guest_enter_irqoff(); | ||
474 | trace_kvm_enter(vcpu); | ||
475 | |||
476 | /* | ||
477 | * Make sure the read of VCPU requests in vcpu_run() callback is not | ||
478 | * reordered ahead of the write to vcpu->mode, or we could miss a TLB | ||
479 | * flush request while the requester sees the VCPU as outside of guest | ||
480 | * mode and not needing an IPI. | ||
481 | */ | ||
482 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); | ||
483 | |||
484 | r = kvm_mips_callbacks->vcpu_run(vcpu); | ||
485 | |||
486 | trace_kvm_out(vcpu); | ||
487 | guest_exit_irqoff(); | ||
488 | local_irq_enable(); | ||
489 | |||
490 | out: | ||
491 | kvm_sigset_deactivate(vcpu); | ||
492 | |||
493 | vcpu_put(vcpu); | ||
494 | return r; | ||
495 | } | ||
496 | |||
497 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, | ||
498 | struct kvm_mips_interrupt *irq) | ||
499 | { | ||
500 | int intr = (int)irq->irq; | ||
501 | struct kvm_vcpu *dvcpu = NULL; | ||
502 | |||
503 | if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] || | ||
504 | intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] || | ||
505 | intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) || | ||
506 | intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2])) | ||
507 | kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, | ||
508 | (int)intr); | ||
509 | |||
510 | if (irq->cpu == -1) | ||
511 | dvcpu = vcpu; | ||
512 | else | ||
513 | dvcpu = vcpu->kvm->vcpus[irq->cpu]; | ||
514 | |||
515 | if (intr == 2 || intr == 3 || intr == 4 || intr == 6) { | ||
516 | kvm_mips_callbacks->queue_io_int(dvcpu, irq); | ||
517 | |||
518 | } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) { | ||
519 | kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); | ||
520 | } else { | ||
521 | kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__, | ||
522 | irq->cpu, irq->irq); | ||
523 | return -EINVAL; | ||
524 | } | ||
525 | |||
526 | dvcpu->arch.wait = 0; | ||
527 | |||
528 | rcuwait_wake_up(&dvcpu->wait); | ||
529 | |||
530 | return 0; | ||
531 | } | ||
532 | |||
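Userspace drives kvm_vcpu_ioctl_interrupt() through the KVM_INTERRUPT vcpu ioctl: a positive irq of 2, 3, 4 or 6 queues the line and its negation dequeues it, with cpu = -1 targeting the vcpu the ioctl was issued on. A minimal sketch, assuming vcpu_fd is an open vcpu descriptor:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Sketch: raise, then clear, guest interrupt line 4 on this vcpu. */
static int pulse_irq4(int vcpu_fd)
{
	struct kvm_mips_interrupt irq = { .cpu = -1, .irq = 4 };

	if (ioctl(vcpu_fd, KVM_INTERRUPT, &irq) < 0)
		return -1;

	irq.irq = -4;	/* negative irq dequeues the same line */
	return ioctl(vcpu_fd, KVM_INTERRUPT, &irq);
}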
533 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | ||
534 | struct kvm_mp_state *mp_state) | ||
535 | { | ||
536 | return -ENOIOCTLCMD; | ||
537 | } | ||
538 | |||
539 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | ||
540 | struct kvm_mp_state *mp_state) | ||
541 | { | ||
542 | return -ENOIOCTLCMD; | ||
543 | } | ||
544 | |||
545 | static u64 kvm_mips_get_one_regs[] = { | ||
546 | KVM_REG_MIPS_R0, | ||
547 | KVM_REG_MIPS_R1, | ||
548 | KVM_REG_MIPS_R2, | ||
549 | KVM_REG_MIPS_R3, | ||
550 | KVM_REG_MIPS_R4, | ||
551 | KVM_REG_MIPS_R5, | ||
552 | KVM_REG_MIPS_R6, | ||
553 | KVM_REG_MIPS_R7, | ||
554 | KVM_REG_MIPS_R8, | ||
555 | KVM_REG_MIPS_R9, | ||
556 | KVM_REG_MIPS_R10, | ||
557 | KVM_REG_MIPS_R11, | ||
558 | KVM_REG_MIPS_R12, | ||
559 | KVM_REG_MIPS_R13, | ||
560 | KVM_REG_MIPS_R14, | ||
561 | KVM_REG_MIPS_R15, | ||
562 | KVM_REG_MIPS_R16, | ||
563 | KVM_REG_MIPS_R17, | ||
564 | KVM_REG_MIPS_R18, | ||
565 | KVM_REG_MIPS_R19, | ||
566 | KVM_REG_MIPS_R20, | ||
567 | KVM_REG_MIPS_R21, | ||
568 | KVM_REG_MIPS_R22, | ||
569 | KVM_REG_MIPS_R23, | ||
570 | KVM_REG_MIPS_R24, | ||
571 | KVM_REG_MIPS_R25, | ||
572 | KVM_REG_MIPS_R26, | ||
573 | KVM_REG_MIPS_R27, | ||
574 | KVM_REG_MIPS_R28, | ||
575 | KVM_REG_MIPS_R29, | ||
576 | KVM_REG_MIPS_R30, | ||
577 | KVM_REG_MIPS_R31, | ||
578 | |||
579 | #ifndef CONFIG_CPU_MIPSR6 | ||
580 | KVM_REG_MIPS_HI, | ||
581 | KVM_REG_MIPS_LO, | ||
582 | #endif | ||
583 | KVM_REG_MIPS_PC, | ||
584 | }; | ||
585 | |||
586 | static u64 kvm_mips_get_one_regs_fpu[] = { | ||
587 | KVM_REG_MIPS_FCR_IR, | ||
588 | KVM_REG_MIPS_FCR_CSR, | ||
589 | }; | ||
590 | |||
591 | static u64 kvm_mips_get_one_regs_msa[] = { | ||
592 | KVM_REG_MIPS_MSA_IR, | ||
593 | KVM_REG_MIPS_MSA_CSR, | ||
594 | }; | ||
595 | |||
596 | static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) | ||
597 | { | ||
598 | unsigned long ret; | ||
599 | |||
600 | ret = ARRAY_SIZE(kvm_mips_get_one_regs); | ||
601 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { | ||
602 | ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48; | ||
603 | /* odd doubles */ | ||
604 | if (boot_cpu_data.fpu_id & MIPS_FPIR_F64) | ||
605 | ret += 16; | ||
606 | } | ||
607 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) | ||
608 | ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32; | ||
609 | ret += kvm_mips_callbacks->num_regs(vcpu); | ||
610 | |||
611 | return ret; | ||
612 | } | ||
613 | |||
614 | static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) | ||
615 | { | ||
616 | u64 index; | ||
617 | unsigned int i; | ||
618 | |||
619 | if (copy_to_user(indices, kvm_mips_get_one_regs, | ||
620 | sizeof(kvm_mips_get_one_regs))) | ||
621 | return -EFAULT; | ||
622 | indices += ARRAY_SIZE(kvm_mips_get_one_regs); | ||
623 | |||
624 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { | ||
625 | if (copy_to_user(indices, kvm_mips_get_one_regs_fpu, | ||
626 | sizeof(kvm_mips_get_one_regs_fpu))) | ||
627 | return -EFAULT; | ||
628 | indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu); | ||
629 | |||
630 | for (i = 0; i < 32; ++i) { | ||
631 | index = KVM_REG_MIPS_FPR_32(i); | ||
632 | if (copy_to_user(indices, &index, sizeof(index))) | ||
633 | return -EFAULT; | ||
634 | ++indices; | ||
635 | |||
636 | /* skip odd doubles if no F64 */ | ||
637 | if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64)) | ||
638 | continue; | ||
639 | |||
640 | index = KVM_REG_MIPS_FPR_64(i); | ||
641 | if (copy_to_user(indices, &index, sizeof(index))) | ||
642 | return -EFAULT; | ||
643 | ++indices; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) { | ||
648 | if (copy_to_user(indices, kvm_mips_get_one_regs_msa, | ||
649 | sizeof(kvm_mips_get_one_regs_msa))) | ||
650 | return -EFAULT; | ||
651 | indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa); | ||
652 | |||
653 | for (i = 0; i < 32; ++i) { | ||
654 | index = KVM_REG_MIPS_VEC_128(i); | ||
655 | if (copy_to_user(indices, &index, sizeof(index))) | ||
656 | return -EFAULT; | ||
657 | ++indices; | ||
658 | } | ||
659 | } | ||
660 | |||
661 | return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); | ||
662 | } | ||
663 | |||
664 | static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | ||
665 | const struct kvm_one_reg *reg) | ||
666 | { | ||
667 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
668 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; | ||
669 | int ret; | ||
670 | s64 v; | ||
671 | s64 vs[2]; | ||
672 | unsigned int idx; | ||
673 | |||
674 | switch (reg->id) { | ||
675 | /* General purpose registers */ | ||
676 | case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: | ||
677 | v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; | ||
678 | break; | ||
679 | #ifndef CONFIG_CPU_MIPSR6 | ||
680 | case KVM_REG_MIPS_HI: | ||
681 | v = (long)vcpu->arch.hi; | ||
682 | break; | ||
683 | case KVM_REG_MIPS_LO: | ||
684 | v = (long)vcpu->arch.lo; | ||
685 | break; | ||
686 | #endif | ||
687 | case KVM_REG_MIPS_PC: | ||
688 | v = (long)vcpu->arch.pc; | ||
689 | break; | ||
690 | |||
691 | /* Floating point registers */ | ||
692 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): | ||
693 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
694 | return -EINVAL; | ||
695 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); | ||
696 | /* Odd singles in top of even double when FR=0 */ | ||
697 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) | ||
698 | v = get_fpr32(&fpu->fpr[idx], 0); | ||
699 | else | ||
700 | v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); | ||
701 | break; | ||
702 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): | ||
703 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
704 | return -EINVAL; | ||
705 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); | ||
706 | /* Can't access odd doubles in FR=0 mode */ | ||
707 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) | ||
708 | return -EINVAL; | ||
709 | v = get_fpr64(&fpu->fpr[idx], 0); | ||
710 | break; | ||
711 | case KVM_REG_MIPS_FCR_IR: | ||
712 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
713 | return -EINVAL; | ||
714 | v = boot_cpu_data.fpu_id; | ||
715 | break; | ||
716 | case KVM_REG_MIPS_FCR_CSR: | ||
717 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
718 | return -EINVAL; | ||
719 | v = fpu->fcr31; | ||
720 | break; | ||
721 | |||
722 | /* MIPS SIMD Architecture (MSA) registers */ | ||
723 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): | ||
724 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
725 | return -EINVAL; | ||
726 | /* Can't access MSA registers in FR=0 mode */ | ||
727 | if (!(kvm_read_c0_guest_status(cop0) & ST0_FR)) | ||
728 | return -EINVAL; | ||
729 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); | ||
730 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
731 | /* least significant byte first */ | ||
732 | vs[0] = get_fpr64(&fpu->fpr[idx], 0); | ||
733 | vs[1] = get_fpr64(&fpu->fpr[idx], 1); | ||
734 | #else | ||
735 | /* most significant byte first */ | ||
736 | vs[0] = get_fpr64(&fpu->fpr[idx], 1); | ||
737 | vs[1] = get_fpr64(&fpu->fpr[idx], 0); | ||
738 | #endif | ||
739 | break; | ||
740 | case KVM_REG_MIPS_MSA_IR: | ||
741 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
742 | return -EINVAL; | ||
743 | v = boot_cpu_data.msa_id; | ||
744 | break; | ||
745 | case KVM_REG_MIPS_MSA_CSR: | ||
746 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
747 | return -EINVAL; | ||
748 | v = fpu->msacsr; | ||
749 | break; | ||
750 | |||
751 | /* registers to be handled specially */ | ||
752 | default: | ||
753 | ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); | ||
754 | if (ret) | ||
755 | return ret; | ||
756 | break; | ||
757 | } | ||
758 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { | ||
759 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | ||
760 | |||
761 | return put_user(v, uaddr64); | ||
762 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | ||
763 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | ||
764 | u32 v32 = (u32)v; | ||
765 | |||
766 | return put_user(v32, uaddr32); | ||
767 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { | ||
768 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
769 | |||
770 | return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0; | ||
771 | } else { | ||
772 | return -EINVAL; | ||
773 | } | ||
774 | } | ||
775 | |||
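Userspace reaches kvm_mips_get_reg() through the KVM_GET_ONE_REG vcpu ioctl; the register id encodes both the register and its access size, which is how the U64/U32/U128 branches above are selected. A minimal sketch reading the guest PC:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Sketch: fetch the guest PC (a 64-bit register id) via GET_ONE_REG. */
static int read_guest_pc(int vcpu_fd, uint64_t *pc)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_MIPS_PC,
		.addr = (uintptr_t)pc,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}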
776 | static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | ||
777 | const struct kvm_one_reg *reg) | ||
778 | { | ||
779 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
780 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; | ||
781 | s64 v; | ||
782 | s64 vs[2]; | ||
783 | unsigned int idx; | ||
784 | |||
785 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { | ||
786 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | ||
787 | |||
788 | if (get_user(v, uaddr64) != 0) | ||
789 | return -EFAULT; | ||
790 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | ||
791 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | ||
792 | s32 v32; | ||
793 | |||
794 | if (get_user(v32, uaddr32) != 0) | ||
795 | return -EFAULT; | ||
796 | v = (s64)v32; | ||
797 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { | ||
798 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
799 | |||
800 | if (copy_from_user(vs, uaddr, 16)) return -EFAULT; | ||
801 | } else { | ||
802 | return -EINVAL; | ||
803 | } | ||
804 | |||
805 | switch (reg->id) { | ||
806 | /* General purpose registers */ | ||
807 | case KVM_REG_MIPS_R0: | ||
808 | /* Silently ignore requests to set $0 */ | ||
809 | break; | ||
810 | case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: | ||
811 | vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; | ||
812 | break; | ||
813 | #ifndef CONFIG_CPU_MIPSR6 | ||
814 | case KVM_REG_MIPS_HI: | ||
815 | vcpu->arch.hi = v; | ||
816 | break; | ||
817 | case KVM_REG_MIPS_LO: | ||
818 | vcpu->arch.lo = v; | ||
819 | break; | ||
820 | #endif | ||
821 | case KVM_REG_MIPS_PC: | ||
822 | vcpu->arch.pc = v; | ||
823 | break; | ||
824 | |||
825 | /* Floating point registers */ | ||
826 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): | ||
827 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
828 | return -EINVAL; | ||
829 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); | ||
830 | /* Odd singles in top of even double when FR=0 */ | ||
831 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) | ||
832 | set_fpr32(&fpu->fpr[idx], 0, v); | ||
833 | else | ||
834 | set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); | ||
835 | break; | ||
836 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): | ||
837 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
838 | return -EINVAL; | ||
839 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); | ||
840 | /* Can't access odd doubles in FR=0 mode */ | ||
841 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) | ||
842 | return -EINVAL; | ||
843 | set_fpr64(&fpu->fpr[idx], 0, v); | ||
844 | break; | ||
845 | case KVM_REG_MIPS_FCR_IR: | ||
846 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
847 | return -EINVAL; | ||
848 | /* Read-only */ | ||
849 | break; | ||
850 | case KVM_REG_MIPS_FCR_CSR: | ||
851 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
852 | return -EINVAL; | ||
853 | fpu->fcr31 = v; | ||
854 | break; | ||
855 | |||
856 | /* MIPS SIMD Architecture (MSA) registers */ | ||
857 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): | ||
858 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
859 | return -EINVAL; | ||
860 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); | ||
861 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
862 | /* least significant byte first */ | ||
863 | set_fpr64(&fpu->fpr[idx], 0, vs[0]); | ||
864 | set_fpr64(&fpu->fpr[idx], 1, vs[1]); | ||
865 | #else | ||
866 | /* most significant byte first */ | ||
867 | set_fpr64(&fpu->fpr[idx], 1, vs[0]); | ||
868 | set_fpr64(&fpu->fpr[idx], 0, vs[1]); | ||
869 | #endif | ||
870 | break; | ||
871 | case KVM_REG_MIPS_MSA_IR: | ||
872 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
873 | return -EINVAL; | ||
874 | /* Read-only */ | ||
875 | break; | ||
876 | case KVM_REG_MIPS_MSA_CSR: | ||
877 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
878 | return -EINVAL; | ||
879 | fpu->msacsr = v; | ||
880 | break; | ||
881 | |||
882 | /* registers to be handled specially */ | ||
883 | default: | ||
884 | return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); | ||
885 | } | ||
886 | return 0; | ||
887 | } | ||
888 | |||
889 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | ||
890 | struct kvm_enable_cap *cap) | ||
891 | { | ||
892 | int r = 0; | ||
893 | |||
894 | if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) | ||
895 | return -EINVAL; | ||
896 | if (cap->flags) | ||
897 | return -EINVAL; | ||
898 | if (cap->args[0]) | ||
899 | return -EINVAL; | ||
900 | |||
901 | switch (cap->cap) { | ||
902 | case KVM_CAP_MIPS_FPU: | ||
903 | vcpu->arch.fpu_enabled = true; | ||
904 | break; | ||
905 | case KVM_CAP_MIPS_MSA: | ||
906 | vcpu->arch.msa_enabled = true; | ||
907 | break; | ||
908 | default: | ||
909 | r = -EINVAL; | ||
910 | break; | ||
911 | } | ||
912 | |||
913 | return r; | ||
914 | } | ||
915 | |||
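kvm_vcpu_ioctl_enable_cap() is reached via the per-vcpu KVM_ENABLE_CAP ioctl; flags and args must be zero, and only the FPU and MSA capabilities are accepted. A sketch enabling guest FPU, assuming the host advertised KVM_CAP_MIPS_FPU:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Sketch: opt this vcpu in to guest FPU state. */
static int enable_guest_fpu(int vcpu_fd)
{
	struct kvm_enable_cap cap;

	memset(&cap, 0, sizeof(cap));	/* flags and args[] must be zero */
	cap.cap = KVM_CAP_MIPS_FPU;

	return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}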
916 | long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, | ||
917 | unsigned long arg) | ||
918 | { | ||
919 | struct kvm_vcpu *vcpu = filp->private_data; | ||
920 | void __user *argp = (void __user *)arg; | ||
921 | |||
922 | if (ioctl == KVM_INTERRUPT) { | ||
923 | struct kvm_mips_interrupt irq; | ||
924 | |||
925 | if (copy_from_user(&irq, argp, sizeof(irq))) | ||
926 | return -EFAULT; | ||
927 | kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, | ||
928 | irq.irq); | ||
929 | |||
930 | return kvm_vcpu_ioctl_interrupt(vcpu, &irq); | ||
931 | } | ||
932 | |||
933 | return -ENOIOCTLCMD; | ||
934 | } | ||
935 | |||
936 | long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, | ||
937 | unsigned long arg) | ||
938 | { | ||
939 | struct kvm_vcpu *vcpu = filp->private_data; | ||
940 | void __user *argp = (void __user *)arg; | ||
941 | long r; | ||
942 | |||
943 | vcpu_load(vcpu); | ||
944 | |||
945 | switch (ioctl) { | ||
946 | case KVM_SET_ONE_REG: | ||
947 | case KVM_GET_ONE_REG: { | ||
948 | struct kvm_one_reg reg; | ||
949 | |||
950 | r = -EFAULT; | ||
951 | if (copy_from_user(®, argp, sizeof(reg))) | ||
952 | break; | ||
953 | if (ioctl == KVM_SET_ONE_REG) | ||
954 | r = kvm_mips_set_reg(vcpu, ®); | ||
955 | else | ||
956 | r = kvm_mips_get_reg(vcpu, ®); | ||
957 | break; | ||
958 | } | ||
959 | case KVM_GET_REG_LIST: { | ||
960 | struct kvm_reg_list __user *user_list = argp; | ||
961 | struct kvm_reg_list reg_list; | ||
962 | unsigned n; | ||
963 | |||
964 | r = -EFAULT; | ||
965 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) | ||
966 | break; | ||
967 | n = reg_list.n; | ||
968 | reg_list.n = kvm_mips_num_regs(vcpu); | ||
969 | if (copy_to_user(user_list, ®_list, sizeof(reg_list))) | ||
970 | break; | ||
971 | r = -E2BIG; | ||
972 | if (n < reg_list.n) | ||
973 | break; | ||
974 | r = kvm_mips_copy_reg_indices(vcpu, user_list->reg); | ||
975 | break; | ||
976 | } | ||
977 | case KVM_ENABLE_CAP: { | ||
978 | struct kvm_enable_cap cap; | ||
979 | |||
980 | r = -EFAULT; | ||
981 | if (copy_from_user(&cap, argp, sizeof(cap))) | ||
982 | break; | ||
983 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | ||
984 | break; | ||
985 | } | ||
986 | default: | ||
987 | r = -ENOIOCTLCMD; | ||
988 | } | ||
989 | |||
990 | vcpu_put(vcpu); | ||
991 | return r; | ||
992 | } | ||
993 | |||
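The KVM_GET_REG_LIST branch above implements the usual two-call contract: a first call with n = 0 fails with E2BIG but writes the required count back into the list header, after which userspace retries with a large enough buffer. A hedged sketch:

#include <errno.h>
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Sketch: query the register count, then fetch the full index list. */
static struct kvm_reg_list *fetch_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 }, *list;

	/* First call is expected to fail with E2BIG, filling probe.n. */
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) == 0 || errno != E2BIG)
		return NULL;

	list = malloc(sizeof(*list) + probe.n * sizeof(__u64));
	if (!list)
		return NULL;

	list->n = probe.n;
	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) {
		free(list);
		return NULL;
	}

	return list;
}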
994 | void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) | ||
995 | { | ||
996 | |||
997 | } | ||
998 | |||
999 | void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, | ||
1000 | struct kvm_memory_slot *memslot) | ||
1001 | { | ||
1002 | /* Let implementation handle TLB/GVA invalidation */ | ||
1003 | kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot); | ||
1004 | } | ||
1005 | |||
1006 | long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | ||
1007 | { | ||
1008 | long r; | ||
1009 | |||
1010 | switch (ioctl) { | ||
1011 | default: | ||
1012 | r = -ENOIOCTLCMD; | ||
1013 | } | ||
1014 | |||
1015 | return r; | ||
1016 | } | ||
1017 | |||
1018 | int kvm_arch_init(void *opaque) | ||
1019 | { | ||
1020 | if (kvm_mips_callbacks) { | ||
1021 | kvm_err("kvm: module already exists\n"); | ||
1022 | return -EEXIST; | ||
1023 | } | ||
1024 | |||
1025 | return kvm_mips_emulation_init(&kvm_mips_callbacks); | ||
1026 | } | ||
1027 | |||
1028 | void kvm_arch_exit(void) | ||
1029 | { | ||
1030 | kvm_mips_callbacks = NULL; | ||
1031 | } | ||
1032 | |||
1033 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | ||
1034 | struct kvm_sregs *sregs) | ||
1035 | { | ||
1036 | return -ENOIOCTLCMD; | ||
1037 | } | ||
1038 | |||
1039 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | ||
1040 | struct kvm_sregs *sregs) | ||
1041 | { | ||
1042 | return -ENOIOCTLCMD; | ||
1043 | } | ||
1044 | |||
1045 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | ||
1046 | { | ||
1047 | } | ||
1048 | |||
1049 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
1050 | { | ||
1051 | return -ENOIOCTLCMD; | ||
1052 | } | ||
1053 | |||
1054 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
1055 | { | ||
1056 | return -ENOIOCTLCMD; | ||
1057 | } | ||
1058 | |||
1059 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) | ||
1060 | { | ||
1061 | return VM_FAULT_SIGBUS; | ||
1062 | } | ||
1063 | |||
1064 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | ||
1065 | { | ||
1066 | int r; | ||
1067 | |||
1068 | switch (ext) { | ||
1069 | case KVM_CAP_ONE_REG: | ||
1070 | case KVM_CAP_ENABLE_CAP: | ||
1071 | case KVM_CAP_READONLY_MEM: | ||
1072 | case KVM_CAP_SYNC_MMU: | ||
1073 | case KVM_CAP_IMMEDIATE_EXIT: | ||
1074 | r = 1; | ||
1075 | break; | ||
1076 | case KVM_CAP_NR_VCPUS: | ||
1077 | r = num_online_cpus(); | ||
1078 | break; | ||
1079 | case KVM_CAP_MAX_VCPUS: | ||
1080 | r = KVM_MAX_VCPUS; | ||
1081 | break; | ||
1082 | case KVM_CAP_MAX_VCPU_ID: | ||
1083 | r = KVM_MAX_VCPU_ID; | ||
1084 | break; | ||
1085 | case KVM_CAP_MIPS_FPU: | ||
1086 | /* We don't handle systems with inconsistent cpu_has_fpu */ | ||
1087 | r = !!raw_cpu_has_fpu; | ||
1088 | break; | ||
1089 | case KVM_CAP_MIPS_MSA: | ||
1090 | /* | ||
1091 | * We don't support MSA vector partitioning yet: | ||
1092 | * 1) It would require explicit support which can't be tested | ||
1093 | * yet due to lack of support in current hardware. | ||
1094 | * 2) It extends the state that would need to be saved/restored | ||
1095 | * by e.g. QEMU for migration. | ||
1096 | * | ||
1097 | * When vector partitioning hardware becomes available, support | ||
1098 | * could be added by requiring a flag when enabling | ||
1099 | * KVM_CAP_MIPS_MSA capability to indicate that userland knows | ||
1100 | * to save/restore the appropriate extra state. | ||
1101 | */ | ||
1102 | r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF); | ||
1103 | break; | ||
1104 | default: | ||
1105 | r = kvm_mips_callbacks->check_extension(kvm, ext); | ||
1106 | break; | ||
1107 | } | ||
1108 | return r; | ||
1109 | } | ||
1110 | |||
1111 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | ||
1112 | { | ||
1113 | return kvm_mips_pending_timer(vcpu) || | ||
1114 | kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI; | ||
1115 | } | ||
1116 | |||
1117 | int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) | ||
1118 | { | ||
1119 | int i; | ||
1120 | struct mips_coproc *cop0; | ||
1121 | |||
1122 | if (!vcpu) | ||
1123 | return -1; | ||
1124 | |||
1125 | kvm_debug("VCPU Register Dump:\n"); | ||
1126 | kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); | ||
1127 | kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); | ||
1128 | |||
1129 | for (i = 0; i < 32; i += 4) { | ||
1130 | kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, | ||
1131 | vcpu->arch.gprs[i], | ||
1132 | vcpu->arch.gprs[i + 1], | ||
1133 | vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); | ||
1134 | } | ||
1135 | kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); | ||
1136 | kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); | ||
1137 | |||
1138 | cop0 = vcpu->arch.cop0; | ||
1139 | kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n", | ||
1140 | kvm_read_c0_guest_status(cop0), | ||
1141 | kvm_read_c0_guest_cause(cop0)); | ||
1142 | |||
1143 | kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); | ||
1144 | |||
1145 | return 0; | ||
1146 | } | ||
1147 | |||
1148 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
1149 | { | ||
1150 | int i; | ||
1151 | |||
1152 | vcpu_load(vcpu); | ||
1153 | |||
1154 | for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) | ||
1155 | vcpu->arch.gprs[i] = regs->gpr[i]; | ||
1156 | vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ | ||
1157 | vcpu->arch.hi = regs->hi; | ||
1158 | vcpu->arch.lo = regs->lo; | ||
1159 | vcpu->arch.pc = regs->pc; | ||
1160 | |||
1161 | vcpu_put(vcpu); | ||
1162 | return 0; | ||
1163 | } | ||
1164 | |||
1165 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
1166 | { | ||
1167 | int i; | ||
1168 | |||
1169 | vcpu_load(vcpu); | ||
1170 | |||
1171 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) | ||
1172 | regs->gpr[i] = vcpu->arch.gprs[i]; | ||
1173 | |||
1174 | regs->hi = vcpu->arch.hi; | ||
1175 | regs->lo = vcpu->arch.lo; | ||
1176 | regs->pc = vcpu->arch.pc; | ||
1177 | |||
1178 | vcpu_put(vcpu); | ||
1179 | return 0; | ||
1180 | } | ||
1181 | |||
1182 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | ||
1183 | struct kvm_translation *tr) | ||
1184 | { | ||
1185 | return 0; | ||
1186 | } | ||
1187 | |||
1188 | static void kvm_mips_set_c0_status(void) | ||
1189 | { | ||
1190 | u32 status = read_c0_status(); | ||
1191 | |||
1192 | if (cpu_has_dsp) | ||
1193 | status |= (ST0_MX); | ||
1194 | |||
1195 | write_c0_status(status); | ||
1196 | ehb(); | ||
1197 | } | ||
1198 | |||
1199 | /* | ||
1200 | * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) | ||
1201 | */ | ||
1202 | int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) | ||
1203 | { | ||
1204 | struct kvm_run *run = vcpu->run; | ||
1205 | u32 cause = vcpu->arch.host_cp0_cause; | ||
1206 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | ||
1207 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
1208 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
1209 | enum emulation_result er = EMULATE_DONE; | ||
1210 | u32 inst; | ||
1211 | int ret = RESUME_GUEST; | ||
1212 | |||
1213 | vcpu->mode = OUTSIDE_GUEST_MODE; | ||
1214 | |||
1215 | /* re-enable HTW before enabling interrupts */ | ||
1216 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) | ||
1217 | htw_start(); | ||
1218 | |||
1219 | /* Set a default exit reason */ | ||
1220 | run->exit_reason = KVM_EXIT_UNKNOWN; | ||
1221 | run->ready_for_interrupt_injection = 1; | ||
1222 | |||
1223 | /* | ||
1224 | * Set the appropriate status bits based on host CPU features, | ||
1225 | * before we hit the scheduler | ||
1226 | */ | ||
1227 | kvm_mips_set_c0_status(); | ||
1228 | |||
1229 | local_irq_enable(); | ||
1230 | |||
1231 | kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", | ||
1232 | cause, opc, run, vcpu); | ||
1233 | trace_kvm_exit(vcpu, exccode); | ||
1234 | |||
1235 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { | ||
1236 | /* | ||
1237 | * Do a privilege check; if the guest is in user mode, most of these | ||
1238 | * exit conditions end up causing an exception to be delivered to | ||
1239 | * the guest kernel | ||
1240 | */ | ||
1241 | er = kvm_mips_check_privilege(cause, opc, vcpu); | ||
1242 | if (er == EMULATE_PRIV_FAIL) { | ||
1243 | goto skip_emul; | ||
1244 | } else if (er == EMULATE_FAIL) { | ||
1245 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1246 | ret = RESUME_HOST; | ||
1247 | goto skip_emul; | ||
1248 | } | ||
1249 | } | ||
1250 | |||
1251 | switch (exccode) { | ||
1252 | case EXCCODE_INT: | ||
1253 | kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); | ||
1254 | |||
1255 | ++vcpu->stat.int_exits; | ||
1256 | |||
1257 | if (need_resched()) | ||
1258 | cond_resched(); | ||
1259 | |||
1260 | ret = RESUME_GUEST; | ||
1261 | break; | ||
1262 | |||
1263 | case EXCCODE_CPU: | ||
1264 | kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc); | ||
1265 | |||
1266 | ++vcpu->stat.cop_unusable_exits; | ||
1267 | ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); | ||
1268 | /* XXXKYMA: Might need to return to user space */ | ||
1269 | if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) | ||
1270 | ret = RESUME_HOST; | ||
1271 | break; | ||
1272 | |||
1273 | case EXCCODE_MOD: | ||
1274 | ++vcpu->stat.tlbmod_exits; | ||
1275 | ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); | ||
1276 | break; | ||
1277 | |||
1278 | case EXCCODE_TLBS: | ||
1279 | kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n", | ||
1280 | cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, | ||
1281 | badvaddr); | ||
1282 | |||
1283 | ++vcpu->stat.tlbmiss_st_exits; | ||
1284 | ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); | ||
1285 | break; | ||
1286 | |||
1287 | case EXCCODE_TLBL: | ||
1288 | kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", | ||
1289 | cause, opc, badvaddr); | ||
1290 | |||
1291 | ++vcpu->stat.tlbmiss_ld_exits; | ||
1292 | ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); | ||
1293 | break; | ||
1294 | |||
1295 | case EXCCODE_ADES: | ||
1296 | ++vcpu->stat.addrerr_st_exits; | ||
1297 | ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); | ||
1298 | break; | ||
1299 | |||
1300 | case EXCCODE_ADEL: | ||
1301 | ++vcpu->stat.addrerr_ld_exits; | ||
1302 | ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); | ||
1303 | break; | ||
1304 | |||
1305 | case EXCCODE_SYS: | ||
1306 | ++vcpu->stat.syscall_exits; | ||
1307 | ret = kvm_mips_callbacks->handle_syscall(vcpu); | ||
1308 | break; | ||
1309 | |||
1310 | case EXCCODE_RI: | ||
1311 | ++vcpu->stat.resvd_inst_exits; | ||
1312 | ret = kvm_mips_callbacks->handle_res_inst(vcpu); | ||
1313 | break; | ||
1314 | |||
1315 | case EXCCODE_BP: | ||
1316 | ++vcpu->stat.break_inst_exits; | ||
1317 | ret = kvm_mips_callbacks->handle_break(vcpu); | ||
1318 | break; | ||
1319 | |||
1320 | case EXCCODE_TR: | ||
1321 | ++vcpu->stat.trap_inst_exits; | ||
1322 | ret = kvm_mips_callbacks->handle_trap(vcpu); | ||
1323 | break; | ||
1324 | |||
1325 | case EXCCODE_MSAFPE: | ||
1326 | ++vcpu->stat.msa_fpe_exits; | ||
1327 | ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); | ||
1328 | break; | ||
1329 | |||
1330 | case EXCCODE_FPE: | ||
1331 | ++vcpu->stat.fpe_exits; | ||
1332 | ret = kvm_mips_callbacks->handle_fpe(vcpu); | ||
1333 | break; | ||
1334 | |||
1335 | case EXCCODE_MSADIS: | ||
1336 | ++vcpu->stat.msa_disabled_exits; | ||
1337 | ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); | ||
1338 | break; | ||
1339 | |||
1340 | case EXCCODE_GE: | ||
1341 | /* defer exit accounting to handler */ | ||
1342 | ret = kvm_mips_callbacks->handle_guest_exit(vcpu); | ||
1343 | break; | ||
1344 | |||
1345 | default: | ||
1346 | if (cause & CAUSEF_BD) | ||
1347 | opc += 1; | ||
1348 | inst = 0; | ||
1349 | kvm_get_badinstr(opc, vcpu, &inst); | ||
1350 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", | ||
1351 | exccode, opc, inst, badvaddr, | ||
1352 | kvm_read_c0_guest_status(vcpu->arch.cop0)); | ||
1353 | kvm_arch_vcpu_dump_regs(vcpu); | ||
1354 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1355 | ret = RESUME_HOST; | ||
1356 | break; | ||
1357 | |||
1358 | } | ||
1359 | |||
1360 | skip_emul: | ||
1361 | local_irq_disable(); | ||
1362 | |||
1363 | if (ret == RESUME_GUEST) | ||
1364 | kvm_vz_acquire_htimer(vcpu); | ||
1365 | |||
1366 | if (er == EMULATE_DONE && !(ret & RESUME_HOST)) | ||
1367 | kvm_mips_deliver_interrupts(vcpu, cause); | ||
1368 | |||
1369 | if (!(ret & RESUME_HOST)) { | ||
1370 | /* Only check for signals if not already exiting to userspace */ | ||
1371 | if (signal_pending(current)) { | ||
1372 | run->exit_reason = KVM_EXIT_INTR; | ||
1373 | ret = (-EINTR << 2) | RESUME_HOST; | ||
1374 | ++vcpu->stat.signal_exits; | ||
1375 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL); | ||
1376 | } | ||
1377 | } | ||
1378 | |||
1379 | if (ret == RESUME_GUEST) { | ||
1380 | trace_kvm_reenter(vcpu); | ||
1381 | |||
1382 | /* | ||
1383 | * Make sure the read of VCPU requests in vcpu_reenter() | ||
1384 | * callback is not reordered ahead of the write to vcpu->mode, | ||
1385 | * or we could miss a TLB flush request while the requester sees | ||
1386 | * the VCPU as outside of guest mode and not needing an IPI. | ||
1387 | */ | ||
1388 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); | ||
1389 | |||
1390 | kvm_mips_callbacks->vcpu_reenter(vcpu); | ||
1391 | |||
1392 | /* | ||
1393 | * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context | ||
1394 | * is live), restore FCR31 / MSACSR. | ||
1395 | * | ||
1396 | * This should be before returning to the guest exception | ||
1397 | * vector, as it may well cause an [MSA] FP exception if there | ||
1398 | * are pending exception bits unmasked. (see | ||
1399 | * kvm_mips_csr_die_notifier() for how that is handled). | ||
1400 | */ | ||
1401 | if (kvm_mips_guest_has_fpu(&vcpu->arch) && | ||
1402 | read_c0_status() & ST0_CU1) | ||
1403 | __kvm_restore_fcsr(&vcpu->arch); | ||
1404 | |||
1405 | if (kvm_mips_guest_has_msa(&vcpu->arch) && | ||
1406 | read_c0_config5() & MIPS_CONF5_MSAEN) | ||
1407 | __kvm_restore_msacsr(&vcpu->arch); | ||
1408 | } | ||
1409 | |||
1410 | /* Disable HTW before returning to guest or host */ | ||
1411 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) | ||
1412 | htw_stop(); | ||
1413 | |||
1414 | return ret; | ||
1415 | } | ||
1416 | |||
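/*
 * Worked example of the return encoding used by kvm_mips_handle_exit():
 * on a pending signal it returns (-EINTR << 2) | RESUME_HOST, so the low
 * bits tell the resume path to leave the guest while an arithmetic shift
 * (ret >> 2) recovers -EINTR for userspace, assuming RESUME_HOST stays
 * within the low two bits per the encoding comment above the function.
 * RESUME_GUEST leaves the errcode bits clear and simply re-enters the
 * guest.
 */
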
1417 | /* Enable FPU for guest and restore context */ | ||
1418 | void kvm_own_fpu(struct kvm_vcpu *vcpu) | ||
1419 | { | ||
1420 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1421 | unsigned int sr, cfg5; | ||
1422 | |||
1423 | preempt_disable(); | ||
1424 | |||
1425 | sr = kvm_read_c0_guest_status(cop0); | ||
1426 | |||
1427 | /* | ||
1428 | * If MSA state is already live, it is undefined how it interacts with | ||
1429 | * FR=0 FPU state, and we don't want to hit reserved instruction | ||
1430 | * exceptions trying to save the MSA state later when CU=1 && FR=1, so | ||
1431 | * play it safe and save it first. | ||
1432 | * | ||
1433 | * In theory we shouldn't ever hit this case since kvm_lose_fpu() should | ||
1434 | * get called when guest CU1 is set; however, we can't trust the guest | ||
1435 | * not to clobber the status register directly via the commpage. | ||
1436 | */ | ||
1437 | if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && | ||
1438 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) | ||
1439 | kvm_lose_fpu(vcpu); | ||
1440 | |||
1441 | /* | ||
1442 | * Enable FPU for guest | ||
1443 | * We set FR and FRE according to guest context | ||
1444 | */ | ||
1445 | change_c0_status(ST0_CU1 | ST0_FR, sr); | ||
1446 | if (cpu_has_fre) { | ||
1447 | cfg5 = kvm_read_c0_guest_config5(cop0); | ||
1448 | change_c0_config5(MIPS_CONF5_FRE, cfg5); | ||
1449 | } | ||
1450 | enable_fpu_hazard(); | ||
1451 | |||
1452 | /* If guest FPU state not active, restore it now */ | ||
1453 | if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { | ||
1454 | __kvm_restore_fpu(&vcpu->arch); | ||
1455 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; | ||
1456 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); | ||
1457 | } else { | ||
1458 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU); | ||
1459 | } | ||
1460 | |||
1461 | preempt_enable(); | ||
1462 | } | ||
1463 | |||
1464 | #ifdef CONFIG_CPU_HAS_MSA | ||
1465 | /* Enable MSA for guest and restore context */ | ||
1466 | void kvm_own_msa(struct kvm_vcpu *vcpu) | ||
1467 | { | ||
1468 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1469 | unsigned int sr, cfg5; | ||
1470 | |||
1471 | preempt_disable(); | ||
1472 | |||
1473 | /* | ||
1474 | * Enable FPU if enabled in guest, since we're restoring FPU context | ||
1475 | * anyway. We set FR and FRE according to guest context. | ||
1476 | */ | ||
1477 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { | ||
1478 | sr = kvm_read_c0_guest_status(cop0); | ||
1479 | |||
1480 | /* | ||
1481 | * If FR=0 FPU state is already live, it is undefined how it | ||
1482 | * interacts with MSA state, so play it safe and save it first. | ||
1483 | */ | ||
1484 | if (!(sr & ST0_FR) && | ||
1485 | (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | | ||
1486 | KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU) | ||
1487 | kvm_lose_fpu(vcpu); | ||
1488 | |||
1489 | change_c0_status(ST0_CU1 | ST0_FR, sr); | ||
1490 | if (sr & ST0_CU1 && cpu_has_fre) { | ||
1491 | cfg5 = kvm_read_c0_guest_config5(cop0); | ||
1492 | change_c0_config5(MIPS_CONF5_FRE, cfg5); | ||
1493 | } | ||
1494 | } | ||
1495 | |||
1496 | /* Enable MSA for guest */ | ||
1497 | set_c0_config5(MIPS_CONF5_MSAEN); | ||
1498 | enable_fpu_hazard(); | ||
1499 | |||
1500 | switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { | ||
1501 | case KVM_MIPS_AUX_FPU: | ||
1502 | /* | ||
1503 | * Guest FPU state already loaded, only restore upper MSA state | ||
1504 | */ | ||
1505 | __kvm_restore_msa_upper(&vcpu->arch); | ||
1506 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; | ||
1507 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA); | ||
1508 | break; | ||
1509 | case 0: | ||
1510 | /* Neither FPU nor MSA active yet, restore full MSA state */ | ||
1511 | __kvm_restore_msa(&vcpu->arch); | ||
1512 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; | ||
1513 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
1514 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; | ||
1515 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, | ||
1516 | KVM_TRACE_AUX_FPU_MSA); | ||
1517 | break; | ||
1518 | default: | ||
1519 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA); | ||
1520 | break; | ||
1521 | } | ||
1522 | |||
1523 | preempt_enable(); | ||
1524 | } | ||
1525 | #endif | ||
1526 | |||
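/*
 * Summary of the aux_inuse cases in kvm_own_msa() above:
 *
 *	FPU live, MSA not:	restore only the upper MSA halves
 *	neither live:		restore the full MSA (and FPU) state
 *	MSA already live:	nothing to restore, just re-enable
 *
 * The split works because the scalar FPU registers alias the low 64 bits
 * of the MSA vector registers, so a live FPU context only leaves the
 * upper halves to be filled in.
 */
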
1527 | /* Drop FPU & MSA without saving it */ | ||
1528 | void kvm_drop_fpu(struct kvm_vcpu *vcpu) | ||
1529 | { | ||
1530 | preempt_disable(); | ||
1531 | if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { | ||
1532 | disable_msa(); | ||
1533 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA); | ||
1534 | vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; | ||
1535 | } | ||
1536 | if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { | ||
1537 | clear_c0_status(ST0_CU1 | ST0_FR); | ||
1538 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU); | ||
1539 | vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; | ||
1540 | } | ||
1541 | preempt_enable(); | ||
1542 | } | ||
1543 | |||
1544 | /* Save and disable FPU & MSA */ | ||
1545 | void kvm_lose_fpu(struct kvm_vcpu *vcpu) | ||
1546 | { | ||
1547 | /* | ||
1548 | * With T&E, FPU & MSA get disabled in root context (hardware) when it | ||
1549 | * is disabled in guest context (software), but the register state in | ||
1550 | * the hardware may still be in use. | ||
1551 | * This is why we explicitly re-enable the hardware before saving. | ||
1552 | */ | ||
1553 | |||
1554 | preempt_disable(); | ||
1555 | if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { | ||
1556 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { | ||
1557 | set_c0_config5(MIPS_CONF5_MSAEN); | ||
1558 | enable_fpu_hazard(); | ||
1559 | } | ||
1560 | |||
1561 | __kvm_save_msa(&vcpu->arch); | ||
1562 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); | ||
1563 | |||
1564 | /* Disable MSA & FPU */ | ||
1565 | disable_msa(); | ||
1566 | if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { | ||
1567 | clear_c0_status(ST0_CU1 | ST0_FR); | ||
1568 | disable_fpu_hazard(); | ||
1569 | } | ||
1570 | vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); | ||
1571 | } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { | ||
1572 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { | ||
1573 | set_c0_status(ST0_CU1); | ||
1574 | enable_fpu_hazard(); | ||
1575 | } | ||
1576 | |||
1577 | __kvm_save_fpu(&vcpu->arch); | ||
1578 | vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; | ||
1579 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); | ||
1580 | |||
1581 | /* Disable FPU */ | ||
1582 | clear_c0_status(ST0_CU1 | ST0_FR); | ||
1583 | disable_fpu_hazard(); | ||
1584 | } | ||
1585 | preempt_enable(); | ||
1586 | } | ||
1587 | |||
1588 | /* | ||
1589 | * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR, which are | ||
1590 | * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP | ||
1591 | * exception if cause bits are set in the value being written. | ||
1592 | */ | ||
1593 | static int kvm_mips_csr_die_notify(struct notifier_block *self, | ||
1594 | unsigned long cmd, void *ptr) | ||
1595 | { | ||
1596 | struct die_args *args = (struct die_args *)ptr; | ||
1597 | struct pt_regs *regs = args->regs; | ||
1598 | unsigned long pc; | ||
1599 | |||
1600 | /* Only interested in FPE and MSAFPE */ | ||
1601 | if (cmd != DIE_FP && cmd != DIE_MSAFP) | ||
1602 | return NOTIFY_DONE; | ||
1603 | |||
1604 | /* Return immediately if guest context isn't active */ | ||
1605 | if (!(current->flags & PF_VCPU)) | ||
1606 | return NOTIFY_DONE; | ||
1607 | |||
1608 | /* Should never get here from user mode */ | ||
1609 | BUG_ON(user_mode(regs)); | ||
1610 | |||
1611 | pc = instruction_pointer(regs); | ||
1612 | switch (cmd) { | ||
1613 | case DIE_FP: | ||
1614 | /* match 2nd instruction in __kvm_restore_fcsr */ | ||
1615 | if (pc != (unsigned long)&__kvm_restore_fcsr + 4) | ||
1616 | return NOTIFY_DONE; | ||
1617 | break; | ||
1618 | case DIE_MSAFP: | ||
1619 | /* match 2nd/3rd instruction in __kvm_restore_msacsr */ | ||
1620 | if (!cpu_has_msa || | ||
1621 | pc < (unsigned long)&__kvm_restore_msacsr + 4 || | ||
1622 | pc > (unsigned long)&__kvm_restore_msacsr + 8) | ||
1623 | return NOTIFY_DONE; | ||
1624 | break; | ||
1625 | } | ||
1626 | |||
1627 | /* Move PC forward a little and continue executing */ | ||
1628 | instruction_pointer(regs) += 4; | ||
1629 | |||
1630 | return NOTIFY_STOP; | ||
1631 | } | ||
1632 | |||
1633 | static struct notifier_block kvm_mips_csr_die_notifier = { | ||
1634 | .notifier_call = kvm_mips_csr_die_notify, | ||
1635 | }; | ||
1636 | |||
1637 | static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = { | ||
1638 | [MIPS_EXC_INT_TIMER] = C_IRQ5, | ||
1639 | [MIPS_EXC_INT_IO_1] = C_IRQ0, | ||
1640 | [MIPS_EXC_INT_IPI_1] = C_IRQ1, | ||
1641 | [MIPS_EXC_INT_IPI_2] = C_IRQ2, | ||
1642 | }; | ||
1643 | |||
1644 | static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = { | ||
1645 | [MIPS_EXC_INT_TIMER] = C_IRQ5, | ||
1646 | [MIPS_EXC_INT_IO_1] = C_IRQ0, | ||
1647 | [MIPS_EXC_INT_IO_2] = C_IRQ1, | ||
1648 | [MIPS_EXC_INT_IPI_1] = C_IRQ4, | ||
1649 | }; | ||
1650 | |||
1651 | u32 *kvm_priority_to_irq = kvm_default_priority_to_irq; | ||
1652 | |||
1653 | u32 kvm_irq_to_priority(u32 irq) | ||
1654 | { | ||
1655 | int i; | ||
1656 | |||
1657 | for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) { | ||
1658 | if (kvm_priority_to_irq[i] == (1 << (irq + 8))) | ||
1659 | return i; | ||
1660 | } | ||
1661 | |||
1662 | return MIPS_EXC_MAX; | ||
1663 | } | ||
1664 | |||
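/*
 * The tables above map internal interrupt priorities to Cause register IP
 * bits, and kvm_irq_to_priority() is the reverse lookup: irq is the IP
 * bit number, so (1 << (irq + 8)) rebuilds the Cause mask. With the
 * default table, for instance, kvm_irq_to_priority(2) returns
 * MIPS_EXC_INT_IO_1 because C_IRQ0 is IP2 (bit 10) and 1 << (2 + 8)
 * matches it; an unmapped irq falls through to MIPS_EXC_MAX.
 */
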
1665 | static int __init kvm_mips_init(void) | ||
1666 | { | ||
1667 | int ret; | ||
1668 | |||
1669 | if (cpu_has_mmid) { | ||
1670 | pr_warn("KVM does not yet support MMIDs. KVM Disabled\n"); | ||
1671 | return -EOPNOTSUPP; | ||
1672 | } | ||
1673 | |||
1674 | ret = kvm_mips_entry_setup(); | ||
1675 | if (ret) | ||
1676 | return ret; | ||
1677 | |||
1678 | ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); | ||
1679 | |||
1680 | if (ret) | ||
1681 | return ret; | ||
1682 | |||
1683 | if (boot_cpu_type() == CPU_LOONGSON64) | ||
1684 | kvm_priority_to_irq = kvm_loongson3_priority_to_irq; | ||
1685 | |||
1686 | register_die_notifier(&kvm_mips_csr_die_notifier); | ||
1687 | |||
1688 | return 0; | ||
1689 | } | ||
1690 | |||
1691 | static void __exit kvm_mips_exit(void) | ||
1692 | { | ||
1693 | kvm_exit(); | ||
1694 | |||
1695 | unregister_die_notifier(&kvm_mips_csr_die_notifier); | ||
1696 | } | ||
1697 | |||
1698 | module_init(kvm_mips_init); | ||
1699 | module_exit(kvm_mips_exit); | ||
1700 | |||
1701 | EXPORT_TRACEPOINT_SYMBOL(kvm_exit); | ||
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c new file mode 100644 index 000000000..28c366d30 --- /dev/null +++ b/arch/mips/kvm/mmu.c | |||
@@ -0,0 +1,1236 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS MMU handling in the KVM module. | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/highmem.h> | ||
13 | #include <linux/kvm_host.h> | ||
14 | #include <linux/uaccess.h> | ||
15 | #include <asm/mmu_context.h> | ||
16 | #include <asm/pgalloc.h> | ||
17 | |||
18 | /* | ||
19 | * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels | ||
20 | * for which pages need to be cached. | ||
21 | */ | ||
22 | #if defined(__PAGETABLE_PMD_FOLDED) | ||
23 | #define KVM_MMU_CACHE_MIN_PAGES 1 | ||
24 | #else | ||
25 | #define KVM_MMU_CACHE_MIN_PAGES 2 | ||
26 | #endif | ||
27 | |||
28 | void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu) | ||
29 | { | ||
30 | kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache); | ||
31 | } | ||
32 | |||
33 | /** | ||
34 | * kvm_pgd_init() - Initialise KVM GPA page directory. | ||
35 | * @page: Pointer to page directory (PGD) for KVM GPA. | ||
36 | * | ||
37 | * Initialise a KVM GPA page directory with pointers to the invalid table, i.e. | ||
38 | * representing no mappings. This is similar to pgd_init(), however it | ||
39 | * initialises all the page directory pointers, not just the ones corresponding | ||
40 | * to the userland address space (since it is for the guest physical address | ||
41 | * space rather than a virtual address space). | ||
42 | */ | ||
43 | static void kvm_pgd_init(void *page) | ||
44 | { | ||
45 | unsigned long *p, *end; | ||
46 | unsigned long entry; | ||
47 | |||
48 | #ifdef __PAGETABLE_PMD_FOLDED | ||
49 | entry = (unsigned long)invalid_pte_table; | ||
50 | #else | ||
51 | entry = (unsigned long)invalid_pmd_table; | ||
52 | #endif | ||
53 | |||
54 | p = (unsigned long *)page; | ||
55 | end = p + PTRS_PER_PGD; | ||
56 | |||
57 | do { | ||
58 | p[0] = entry; | ||
59 | p[1] = entry; | ||
60 | p[2] = entry; | ||
61 | p[3] = entry; | ||
62 | p[4] = entry; | ||
63 | p += 8; | ||
64 | p[-3] = entry; | ||
65 | p[-2] = entry; | ||
66 | p[-1] = entry; | ||
67 | } while (p != end); | ||
68 | } | ||
69 | |||
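/*
 * Note on the initialisation loop above: each iteration stores eight
 * consecutive entries (p[0]..p[4] before the pointer bump, p[-3]..p[-1]
 * after it), so it relies on PTRS_PER_PGD being a multiple of 8, which
 * the MIPS page table configurations are expected to guarantee.
 */
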
70 | /** | ||
71 | * kvm_pgd_alloc() - Allocate and initialise a KVM GPA page directory. | ||
72 | * | ||
73 | * Allocate a blank KVM GPA page directory (PGD) for representing guest physical | ||
74 | * to host physical page mappings. | ||
75 | * | ||
76 | * Returns: Pointer to new KVM GPA page directory. | ||
77 | * NULL on allocation failure. | ||
78 | */ | ||
79 | pgd_t *kvm_pgd_alloc(void) | ||
80 | { | ||
81 | pgd_t *ret; | ||
82 | |||
83 | ret = (pgd_t *)__get_free_pages(GFP_KERNEL, PGD_ORDER); | ||
84 | if (ret) | ||
85 | kvm_pgd_init(ret); | ||
86 | |||
87 | return ret; | ||
88 | } | ||
89 | |||
90 | /** | ||
91 | * kvm_mips_walk_pgd() - Walk page table with optional allocation. | ||
92 | * @pgd: Page directory pointer. | ||
93 | * @addr: Address to index page table using. | ||
94 | * @cache: MMU page cache to allocate new page tables from, or NULL. | ||
95 | * | ||
96 | * Walk the page tables pointed to by @pgd to find the PTE corresponding to the | ||
97 | * address @addr. If page tables don't exist for @addr, they will be created | ||
98 | * from the MMU cache if @cache is not NULL. | ||
99 | * | ||
100 | * Returns: Pointer to pte_t corresponding to @addr. | ||
101 | * NULL if a page table doesn't exist for @addr and !@cache. | ||
102 | * NULL if a page table allocation failed. | ||
103 | */ | ||
104 | static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache, | ||
105 | unsigned long addr) | ||
106 | { | ||
107 | p4d_t *p4d; | ||
108 | pud_t *pud; | ||
109 | pmd_t *pmd; | ||
110 | |||
111 | pgd += pgd_index(addr); | ||
112 | if (pgd_none(*pgd)) { | ||
113 | /* Not used on MIPS yet */ | ||
114 | BUG(); | ||
115 | return NULL; | ||
116 | } | ||
117 | p4d = p4d_offset(pgd, addr); | ||
118 | pud = pud_offset(p4d, addr); | ||
119 | if (pud_none(*pud)) { | ||
120 | pmd_t *new_pmd; | ||
121 | |||
122 | if (!cache) | ||
123 | return NULL; | ||
124 | new_pmd = kvm_mmu_memory_cache_alloc(cache); | ||
125 | pmd_init((unsigned long)new_pmd, | ||
126 | (unsigned long)invalid_pte_table); | ||
127 | pud_populate(NULL, pud, new_pmd); | ||
128 | } | ||
129 | pmd = pmd_offset(pud, addr); | ||
130 | if (pmd_none(*pmd)) { | ||
131 | pte_t *new_pte; | ||
132 | |||
133 | if (!cache) | ||
134 | return NULL; | ||
135 | new_pte = kvm_mmu_memory_cache_alloc(cache); | ||
136 | clear_page(new_pte); | ||
137 | pmd_populate_kernel(NULL, pmd, new_pte); | ||
138 | } | ||
139 | return pte_offset_kernel(pmd, addr); | ||
140 | } | ||
141 | |||
142 | /* Caller must hold kvm->mm_lock */ | ||
143 | static pte_t *kvm_mips_pte_for_gpa(struct kvm *kvm, | ||
144 | struct kvm_mmu_memory_cache *cache, | ||
145 | unsigned long addr) | ||
146 | { | ||
147 | return kvm_mips_walk_pgd(kvm->arch.gpa_mm.pgd, cache, addr); | ||
148 | } | ||
149 | |||
150 | /* | ||
151 | * kvm_mips_flush_gpa_{pte,pmd,pud,pgd,pt}. | ||
152 | * Flush a range of guest physical address space from the VM's GPA page tables. | ||
153 | */ | ||
154 | |||
155 | static bool kvm_mips_flush_gpa_pte(pte_t *pte, unsigned long start_gpa, | ||
156 | unsigned long end_gpa) | ||
157 | { | ||
158 | int i_min = pte_index(start_gpa); | ||
159 | int i_max = pte_index(end_gpa); | ||
160 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); | ||
161 | int i; | ||
162 | |||
163 | for (i = i_min; i <= i_max; ++i) { | ||
164 | if (!pte_present(pte[i])) | ||
165 | continue; | ||
166 | |||
167 | set_pte(pte + i, __pte(0)); | ||
168 | } | ||
169 | return safe_to_remove; | ||
170 | } | ||
171 | |||
172 | static bool kvm_mips_flush_gpa_pmd(pmd_t *pmd, unsigned long start_gpa, | ||
173 | unsigned long end_gpa) | ||
174 | { | ||
175 | pte_t *pte; | ||
176 | unsigned long end = ~0ul; | ||
177 | int i_min = pmd_index(start_gpa); | ||
178 | int i_max = pmd_index(end_gpa); | ||
179 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); | ||
180 | int i; | ||
181 | |||
182 | for (i = i_min; i <= i_max; ++i, start_gpa = 0) { | ||
183 | if (!pmd_present(pmd[i])) | ||
184 | continue; | ||
185 | |||
186 | pte = pte_offset_kernel(pmd + i, 0); | ||
187 | if (i == i_max) | ||
188 | end = end_gpa; | ||
189 | |||
190 | if (kvm_mips_flush_gpa_pte(pte, start_gpa, end)) { | ||
191 | pmd_clear(pmd + i); | ||
192 | pte_free_kernel(NULL, pte); | ||
193 | } else { | ||
194 | safe_to_remove = false; | ||
195 | } | ||
196 | } | ||
197 | return safe_to_remove; | ||
198 | } | ||
199 | |||
200 | static bool kvm_mips_flush_gpa_pud(pud_t *pud, unsigned long start_gpa, | ||
201 | unsigned long end_gpa) | ||
202 | { | ||
203 | pmd_t *pmd; | ||
204 | unsigned long end = ~0ul; | ||
205 | int i_min = pud_index(start_gpa); | ||
206 | int i_max = pud_index(end_gpa); | ||
207 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); | ||
208 | int i; | ||
209 | |||
210 | for (i = i_min; i <= i_max; ++i, start_gpa = 0) { | ||
211 | if (!pud_present(pud[i])) | ||
212 | continue; | ||
213 | |||
214 | pmd = pmd_offset(pud + i, 0); | ||
215 | if (i == i_max) | ||
216 | end = end_gpa; | ||
217 | |||
218 | if (kvm_mips_flush_gpa_pmd(pmd, start_gpa, end)) { | ||
219 | pud_clear(pud + i); | ||
220 | pmd_free(NULL, pmd); | ||
221 | } else { | ||
222 | safe_to_remove = false; | ||
223 | } | ||
224 | } | ||
225 | return safe_to_remove; | ||
226 | } | ||
227 | |||
228 | static bool kvm_mips_flush_gpa_pgd(pgd_t *pgd, unsigned long start_gpa, | ||
229 | unsigned long end_gpa) | ||
230 | { | ||
231 | p4d_t *p4d; | ||
232 | pud_t *pud; | ||
233 | unsigned long end = ~0ul; | ||
234 | int i_min = pgd_index(start_gpa); | ||
235 | int i_max = pgd_index(end_gpa); | ||
236 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); | ||
237 | int i; | ||
238 | |||
239 | for (i = i_min; i <= i_max; ++i, start_gpa = 0) { | ||
240 | if (!pgd_present(pgd[i])) | ||
241 | continue; | ||
242 | |||
243 | p4d = p4d_offset(pgd, 0); | ||
244 | pud = pud_offset(p4d + i, 0); | ||
245 | if (i == i_max) | ||
246 | end = end_gpa; | ||
247 | |||
248 | if (kvm_mips_flush_gpa_pud(pud, start_gpa, end)) { | ||
249 | pgd_clear(pgd + i); | ||
250 | pud_free(NULL, pud); | ||
251 | } else { | ||
252 | safe_to_remove = false; | ||
253 | } | ||
254 | } | ||
255 | return safe_to_remove; | ||
256 | } | ||
257 | |||
258 | /** | ||
259 | * kvm_mips_flush_gpa_pt() - Flush a range of guest physical addresses. | ||
260 | * @kvm: KVM pointer. | ||
261 | * @start_gfn: Guest frame number of first page in GPA range to flush. | ||
262 | * @end_gfn: Guest frame number of last page in GPA range to flush. | ||
263 | * | ||
264 | * Flushes a range of GPA mappings from the GPA page tables. | ||
265 | * | ||
266 | * The caller must hold the @kvm->mmu_lock spinlock. | ||
267 | * | ||
268 | * Returns: Whether it's safe to remove the top level page directory because | ||
269 | * all lower levels have been removed. | ||
270 | */ | ||
271 | bool kvm_mips_flush_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) | ||
272 | { | ||
273 | return kvm_mips_flush_gpa_pgd(kvm->arch.gpa_mm.pgd, | ||
274 | start_gfn << PAGE_SHIFT, | ||
275 | end_gfn << PAGE_SHIFT); | ||
276 | } | ||
277 | |||
278 | #define BUILD_PTE_RANGE_OP(name, op) \ | ||
279 | static int kvm_mips_##name##_pte(pte_t *pte, unsigned long start, \ | ||
280 | unsigned long end) \ | ||
281 | { \ | ||
282 | int ret = 0; \ | ||
283 | int i_min = pte_index(start); \ | ||
284 | int i_max = pte_index(end); \ | ||
285 | int i; \ | ||
286 | pte_t old, new; \ | ||
287 | \ | ||
288 | for (i = i_min; i <= i_max; ++i) { \ | ||
289 | if (!pte_present(pte[i])) \ | ||
290 | continue; \ | ||
291 | \ | ||
292 | old = pte[i]; \ | ||
293 | new = op(old); \ | ||
294 | if (pte_val(new) == pte_val(old)) \ | ||
295 | continue; \ | ||
296 | set_pte(pte + i, new); \ | ||
297 | ret = 1; \ | ||
298 | } \ | ||
299 | return ret; \ | ||
300 | } \ | ||
301 | \ | ||
302 | /* returns true if anything was done */ \ | ||
303 | static int kvm_mips_##name##_pmd(pmd_t *pmd, unsigned long start, \ | ||
304 | unsigned long end) \ | ||
305 | { \ | ||
306 | int ret = 0; \ | ||
307 | pte_t *pte; \ | ||
308 | unsigned long cur_end = ~0ul; \ | ||
309 | int i_min = pmd_index(start); \ | ||
310 | int i_max = pmd_index(end); \ | ||
311 | int i; \ | ||
312 | \ | ||
313 | for (i = i_min; i <= i_max; ++i, start = 0) { \ | ||
314 | if (!pmd_present(pmd[i])) \ | ||
315 | continue; \ | ||
316 | \ | ||
317 | pte = pte_offset_kernel(pmd + i, 0); \ | ||
318 | if (i == i_max) \ | ||
319 | cur_end = end; \ | ||
320 | \ | ||
321 | ret |= kvm_mips_##name##_pte(pte, start, cur_end); \ | ||
322 | } \ | ||
323 | return ret; \ | ||
324 | } \ | ||
325 | \ | ||
326 | static int kvm_mips_##name##_pud(pud_t *pud, unsigned long start, \ | ||
327 | unsigned long end) \ | ||
328 | { \ | ||
329 | int ret = 0; \ | ||
330 | pmd_t *pmd; \ | ||
331 | unsigned long cur_end = ~0ul; \ | ||
332 | int i_min = pud_index(start); \ | ||
333 | int i_max = pud_index(end); \ | ||
334 | int i; \ | ||
335 | \ | ||
336 | for (i = i_min; i <= i_max; ++i, start = 0) { \ | ||
337 | if (!pud_present(pud[i])) \ | ||
338 | continue; \ | ||
339 | \ | ||
340 | pmd = pmd_offset(pud + i, 0); \ | ||
341 | if (i == i_max) \ | ||
342 | cur_end = end; \ | ||
343 | \ | ||
344 | ret |= kvm_mips_##name##_pmd(pmd, start, cur_end); \ | ||
345 | } \ | ||
346 | return ret; \ | ||
347 | } \ | ||
348 | \ | ||
349 | static int kvm_mips_##name##_pgd(pgd_t *pgd, unsigned long start, \ | ||
350 | unsigned long end) \ | ||
351 | { \ | ||
352 | int ret = 0; \ | ||
353 | p4d_t *p4d; \ | ||
354 | pud_t *pud; \ | ||
355 | unsigned long cur_end = ~0ul; \ | ||
356 | int i_min = pgd_index(start); \ | ||
357 | int i_max = pgd_index(end); \ | ||
358 | int i; \ | ||
359 | \ | ||
360 | for (i = i_min; i <= i_max; ++i, start = 0) { \ | ||
361 | if (!pgd_present(pgd[i])) \ | ||
362 | continue; \ | ||
363 | \ | ||
364 | p4d = p4d_offset(pgd, 0); \ | ||
365 | pud = pud_offset(p4d + i, 0); \ | ||
366 | if (i == i_max) \ | ||
367 | cur_end = end; \ | ||
368 | \ | ||
369 | ret |= kvm_mips_##name##_pud(pud, start, cur_end); \ | ||
370 | } \ | ||
371 | return ret; \ | ||
372 | } | ||
373 | |||
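/*
 * BUILD_PTE_RANGE_OP(name, op) expands into a four-level walker,
 * kvm_mips_<name>_{pte,pmd,pud,pgd}(). Each level ORs in the results of
 * the level below, so the top-level function returns non-zero iff op
 * actually changed at least one PTE in [start, end]. The instantiations
 * below (mkclean, mkold) reuse the walker for dirty page logging and
 * idle page tracking respectively.
 */
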
374 | /* | ||
375 | * kvm_mips_mkclean_gpa_pt. | ||
376 | * Mark a range of guest physical address space clean (writes fault) in the VM's | ||
377 | * GPA page table to allow dirty page tracking. | ||
378 | */ | ||
379 | |||
380 | BUILD_PTE_RANGE_OP(mkclean, pte_mkclean) | ||
381 | |||
382 | /** | ||
383 | * kvm_mips_mkclean_gpa_pt() - Make a range of guest physical addresses clean. | ||
384 | * @kvm: KVM pointer. | ||
385 | * @start_gfn: Guest frame number of first page in GPA range to flush. | ||
386 | * @end_gfn: Guest frame number of last page in GPA range to flush. | ||
387 | * | ||
388 | * Make a range of GPA mappings clean so that guest writes will fault and | ||
389 | * trigger dirty page logging. | ||
390 | * | ||
391 | * The caller must hold the @kvm->mmu_lock spinlock. | ||
392 | * | ||
393 | * Returns: Whether any GPA mappings were modified, which would require | ||
394 | * derived mappings (GVA page tables & TLB entries) to be | ||
395 | * invalidated. | ||
396 | */ | ||
397 | int kvm_mips_mkclean_gpa_pt(struct kvm *kvm, gfn_t start_gfn, gfn_t end_gfn) | ||
398 | { | ||
399 | return kvm_mips_mkclean_pgd(kvm->arch.gpa_mm.pgd, | ||
400 | start_gfn << PAGE_SHIFT, | ||
401 | end_gfn << PAGE_SHIFT); | ||
402 | } | ||
403 | |||
404 | /** | ||
405 | * kvm_arch_mmu_enable_log_dirty_pt_masked() - write protect dirty pages | ||
406 | * @kvm: The KVM pointer | ||
407 | * @slot: The memory slot associated with mask | ||
408 | * @gfn_offset: The gfn offset in memory slot | ||
409 | * @mask: The mask of dirty pages at offset 'gfn_offset' in this memory | ||
410 | * slot to be write protected | ||
411 | * | ||
412 | * Walks the bits set in @mask and write protects the associated PTEs. | ||
413 | * Caller must acquire @kvm->mmu_lock. | ||
414 | */ | ||
415 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, | ||
416 | struct kvm_memory_slot *slot, | ||
417 | gfn_t gfn_offset, unsigned long mask) | ||
418 | { | ||
419 | gfn_t base_gfn = slot->base_gfn + gfn_offset; | ||
420 | gfn_t start = base_gfn + __ffs(mask); | ||
421 | gfn_t end = base_gfn + __fls(mask); | ||
422 | |||
423 | kvm_mips_mkclean_gpa_pt(kvm, start, end); | ||
424 | } | ||
425 | |||
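/*
 * Worked example of the mask handling above: for mask == 0b00111000,
 * __ffs(mask) == 3 and __fls(mask) == 5, so gfns base_gfn+3..base_gfn+5
 * are cleaned. The range is contiguous, so pages for any zero bits
 * between the lowest and highest set bit are write protected too; that
 * is harmless: any dirtiness was already recorded in the bitmap, and a
 * later guest write simply faults and re-dirties the page.
 */
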
426 | /* | ||
427 | * kvm_mips_mkold_gpa_pt. | ||
428 | * Mark a range of guest physical address space old (all accesses fault) in the | ||
429 | * VM's GPA page table to allow detection of commonly used pages. | ||
430 | */ | ||
431 | |||
432 | BUILD_PTE_RANGE_OP(mkold, pte_mkold) | ||
433 | |||
434 | static int kvm_mips_mkold_gpa_pt(struct kvm *kvm, gfn_t start_gfn, | ||
435 | gfn_t end_gfn) | ||
436 | { | ||
437 | return kvm_mips_mkold_pgd(kvm->arch.gpa_mm.pgd, | ||
438 | start_gfn << PAGE_SHIFT, | ||
439 | end_gfn << PAGE_SHIFT); | ||
440 | } | ||
441 | |||
442 | static int handle_hva_to_gpa(struct kvm *kvm, | ||
443 | unsigned long start, | ||
444 | unsigned long end, | ||
445 | int (*handler)(struct kvm *kvm, gfn_t gfn, | ||
446 | gpa_t gfn_end, | ||
447 | struct kvm_memory_slot *memslot, | ||
448 | void *data), | ||
449 | void *data) | ||
450 | { | ||
451 | struct kvm_memslots *slots; | ||
452 | struct kvm_memory_slot *memslot; | ||
453 | int ret = 0; | ||
454 | |||
455 | slots = kvm_memslots(kvm); | ||
456 | |||
457 | /* we only care about the pages that the guest sees */ | ||
458 | kvm_for_each_memslot(memslot, slots) { | ||
459 | unsigned long hva_start, hva_end; | ||
460 | gfn_t gfn, gfn_end; | ||
461 | |||
462 | hva_start = max(start, memslot->userspace_addr); | ||
463 | hva_end = min(end, memslot->userspace_addr + | ||
464 | (memslot->npages << PAGE_SHIFT)); | ||
465 | if (hva_start >= hva_end) | ||
466 | continue; | ||
467 | |||
468 | /* | ||
469 | * {gfn(page) | page intersects with [hva_start, hva_end)} = | ||
470 | * {gfn_start, gfn_start+1, ..., gfn_end-1}. | ||
471 | */ | ||
472 | gfn = hva_to_gfn_memslot(hva_start, memslot); | ||
473 | gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot); | ||
474 | |||
475 | ret |= handler(kvm, gfn, gfn_end, memslot, data); | ||
476 | } | ||
477 | |||
478 | return ret; | ||
479 | } | ||
480 | |||
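/*
 * handle_hva_to_gpa() is the common trampoline for the MMU notifier
 * hooks below: it clips [start, end) against each memslot's HVA range,
 * converts the overlap to gfns, and ORs together the per-slot handler
 * results, so callers learn whether any slot was affected at all.
 */
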
481 | |||
482 | static int kvm_unmap_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, | ||
483 | struct kvm_memory_slot *memslot, void *data) | ||
484 | { | ||
485 | kvm_mips_flush_gpa_pt(kvm, gfn, gfn_end); | ||
486 | return 1; | ||
487 | } | ||
488 | |||
489 | int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end, | ||
490 | unsigned flags) | ||
491 | { | ||
492 | handle_hva_to_gpa(kvm, start, end, &kvm_unmap_hva_handler, NULL); | ||
493 | |||
494 | kvm_mips_callbacks->flush_shadow_all(kvm); | ||
495 | return 0; | ||
496 | } | ||
497 | |||
498 | static int kvm_set_spte_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, | ||
499 | struct kvm_memory_slot *memslot, void *data) | ||
500 | { | ||
501 | gpa_t gpa = gfn << PAGE_SHIFT; | ||
502 | pte_t hva_pte = *(pte_t *)data; | ||
503 | pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); | ||
504 | pte_t old_pte; | ||
505 | |||
506 | if (!gpa_pte) | ||
507 | return 0; | ||
508 | |||
509 | /* Mapping may need adjusting depending on memslot flags */ | ||
510 | old_pte = *gpa_pte; | ||
511 | if (memslot->flags & KVM_MEM_LOG_DIRTY_PAGES && !pte_dirty(old_pte)) | ||
512 | hva_pte = pte_mkclean(hva_pte); | ||
513 | else if (memslot->flags & KVM_MEM_READONLY) | ||
514 | hva_pte = pte_wrprotect(hva_pte); | ||
515 | |||
516 | set_pte(gpa_pte, hva_pte); | ||
517 | |||
518 | /* Replacing an absent or old page doesn't need flushes */ | ||
519 | if (!pte_present(old_pte) || !pte_young(old_pte)) | ||
520 | return 0; | ||
521 | |||
522 | /* Pages swapped, aged, moved, or cleaned require flushes */ | ||
523 | return !pte_present(hva_pte) || | ||
524 | !pte_young(hva_pte) || | ||
525 | pte_pfn(old_pte) != pte_pfn(hva_pte) || | ||
526 | (pte_dirty(old_pte) && !pte_dirty(hva_pte)); | ||
527 | } | ||
528 | |||
529 | int kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte) | ||
530 | { | ||
531 | unsigned long end = hva + PAGE_SIZE; | ||
532 | int ret; | ||
533 | |||
534 | ret = handle_hva_to_gpa(kvm, hva, end, &kvm_set_spte_handler, &pte); | ||
535 | if (ret) | ||
536 | kvm_mips_callbacks->flush_shadow_all(kvm); | ||
537 | return 0; | ||
538 | } | ||
539 | |||
540 | static int kvm_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, | ||
541 | struct kvm_memory_slot *memslot, void *data) | ||
542 | { | ||
543 | return kvm_mips_mkold_gpa_pt(kvm, gfn, gfn_end); | ||
544 | } | ||
545 | |||
546 | static int kvm_test_age_hva_handler(struct kvm *kvm, gfn_t gfn, gfn_t gfn_end, | ||
547 | struct kvm_memory_slot *memslot, void *data) | ||
548 | { | ||
549 | gpa_t gpa = gfn << PAGE_SHIFT; | ||
550 | pte_t *gpa_pte = kvm_mips_pte_for_gpa(kvm, NULL, gpa); | ||
551 | |||
552 | if (!gpa_pte) | ||
553 | return 0; | ||
554 | return pte_young(*gpa_pte); | ||
555 | } | ||
556 | |||
557 | int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end) | ||
558 | { | ||
559 | return handle_hva_to_gpa(kvm, start, end, kvm_age_hva_handler, NULL); | ||
560 | } | ||
561 | |||
562 | int kvm_test_age_hva(struct kvm *kvm, unsigned long hva) | ||
563 | { | ||
564 | return handle_hva_to_gpa(kvm, hva, hva, kvm_test_age_hva_handler, NULL); | ||
565 | } | ||
566 | |||
567 | /** | ||
568 | * _kvm_mips_map_page_fast() - Fast path GPA fault handler. | ||
569 | * @vcpu: VCPU pointer. | ||
570 | * @gpa: Guest physical address of fault. | ||
571 | * @write_fault: Whether the fault was due to a write. | ||
572 | * @out_entry: New PTE for @gpa (written on success unless NULL). | ||
573 | * @out_buddy: New PTE for @gpa's buddy (written on success unless | ||
574 | * NULL). | ||
575 | * | ||
576 | * Perform fast path GPA fault handling, doing all that can be done without | ||
577 | * calling into KVM. This handles marking old pages young (for idle page | ||
578 | * tracking), and dirtying of clean pages (for dirty page logging). | ||
579 | * | ||
580 | * Returns: 0 on success, in which case we can update derived mappings and | ||
581 | * resume guest execution. | ||
582 | * -EFAULT on failure due to absent GPA mapping or write to | ||
583 | * read-only page, in which case KVM must be consulted. | ||
584 | */ | ||
585 | static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa, | ||
586 | bool write_fault, | ||
587 | pte_t *out_entry, pte_t *out_buddy) | ||
588 | { | ||
589 | struct kvm *kvm = vcpu->kvm; | ||
590 | gfn_t gfn = gpa >> PAGE_SHIFT; | ||
591 | pte_t *ptep; | ||
592 | kvm_pfn_t pfn = 0; /* silence bogus GCC warning */ | ||
593 | bool pfn_valid = false; | ||
594 | int ret = 0; | ||
595 | |||
596 | spin_lock(&kvm->mmu_lock); | ||
597 | |||
598 | /* Fast path - just check GPA page table for an existing entry */ | ||
599 | ptep = kvm_mips_pte_for_gpa(kvm, NULL, gpa); | ||
600 | if (!ptep || !pte_present(*ptep)) { | ||
601 | ret = -EFAULT; | ||
602 | goto out; | ||
603 | } | ||
604 | |||
605 | /* Track access to pages marked old */ | ||
606 | if (!pte_young(*ptep)) { | ||
607 | set_pte(ptep, pte_mkyoung(*ptep)); | ||
608 | pfn = pte_pfn(*ptep); | ||
609 | pfn_valid = true; | ||
610 | /* call kvm_set_pfn_accessed() after unlock */ | ||
611 | } | ||
612 | if (write_fault && !pte_dirty(*ptep)) { | ||
613 | if (!pte_write(*ptep)) { | ||
614 | ret = -EFAULT; | ||
615 | goto out; | ||
616 | } | ||
617 | |||
618 | /* Track dirtying of writeable pages */ | ||
619 | set_pte(ptep, pte_mkdirty(*ptep)); | ||
620 | pfn = pte_pfn(*ptep); | ||
621 | mark_page_dirty(kvm, gfn); | ||
622 | kvm_set_pfn_dirty(pfn); | ||
623 | } | ||
624 | |||
625 | if (out_entry) | ||
626 | *out_entry = *ptep; | ||
627 | if (out_buddy) | ||
628 | *out_buddy = *ptep_buddy(ptep); | ||
629 | |||
630 | out: | ||
631 | spin_unlock(&kvm->mmu_lock); | ||
632 | if (pfn_valid) | ||
633 | kvm_set_pfn_accessed(pfn); | ||
634 | return ret; | ||
635 | } | ||
636 | |||
637 | /** | ||
638 | * kvm_mips_map_page() - Map a guest physical page. | ||
639 | * @vcpu: VCPU pointer. | ||
640 | * @gpa: Guest physical address of fault. | ||
641 | * @write_fault: Whether the fault was due to a write. | ||
642 | * @out_entry: New PTE for @gpa (written on success unless NULL). | ||
643 | * @out_buddy: New PTE for @gpa's buddy (written on success unless | ||
644 | * NULL). | ||
645 | * | ||
646 | * Handle GPA faults by creating a new GPA mapping (or updating an existing | ||
647 | * one). | ||
648 | * | ||
649 | * This takes care of marking pages young or dirty (idle/dirty page tracking), | ||
650 | * asking KVM for the corresponding PFN, and creating a mapping in the GPA page | ||
651 | * tables. Derived mappings (GVA page tables and TLBs) must be handled by the | ||
652 | * caller. | ||
653 | * | ||
654 | * Returns: 0 on success, in which case the caller may use the @out_entry | ||
655 | * and @out_buddy PTEs to update derived mappings and resume guest | ||
656 | * execution. | ||
657 | * -EFAULT if there is no memory region at @gpa or a write was | ||
658 | * attempted to a read-only memory region. This is usually handled | ||
659 | * as an MMIO access. | ||
660 | */ | ||
661 | static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, | ||
662 | bool write_fault, | ||
663 | pte_t *out_entry, pte_t *out_buddy) | ||
664 | { | ||
665 | struct kvm *kvm = vcpu->kvm; | ||
666 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; | ||
667 | gfn_t gfn = gpa >> PAGE_SHIFT; | ||
668 | int srcu_idx, err; | ||
669 | kvm_pfn_t pfn; | ||
670 | pte_t *ptep, entry, old_pte; | ||
671 | bool writeable; | ||
672 | unsigned long prot_bits; | ||
673 | unsigned long mmu_seq; | ||
674 | |||
675 | /* Try the fast path to handle old / clean pages */ | ||
676 | srcu_idx = srcu_read_lock(&kvm->srcu); | ||
677 | err = _kvm_mips_map_page_fast(vcpu, gpa, write_fault, out_entry, | ||
678 | out_buddy); | ||
679 | if (!err) | ||
680 | goto out; | ||
681 | |||
682 | /* We need a minimum of cached pages ready for page table creation */ | ||
683 | err = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES); | ||
684 | if (err) | ||
685 | goto out; | ||
686 | |||
687 | retry: | ||
688 | /* | ||
689 | * Used to check for invalidations in progress, of the pfn that is | ||
690 | * returned by gfn_to_pfn_prot() below. | ||
691 | */ | ||
692 | mmu_seq = kvm->mmu_notifier_seq; | ||
693 | /* | ||
694 | * Ensure the read of mmu_notifier_seq isn't reordered with PTE reads in | ||
695 | * gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't | ||
696 | * risk the page we get a reference to getting unmapped before we have a | ||
697 | * chance to grab the mmu_lock without mmu_notifier_retry() noticing. | ||
698 | * | ||
699 | * This smp_rmb() pairs with the effective smp_wmb() of the combination | ||
700 | * of the pte_unmap_unlock() after the PTE is zapped, and the | ||
701 | * spin_lock() in kvm_mmu_notifier_invalidate_<page|range_end>() before | ||
702 | * mmu_notifier_seq is incremented. | ||
703 | */ | ||
704 | smp_rmb(); | ||
705 | |||
706 | /* Slow path - ask KVM core whether we can access this GPA */ | ||
707 | pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable); | ||
708 | if (is_error_noslot_pfn(pfn)) { | ||
709 | err = -EFAULT; | ||
710 | goto out; | ||
711 | } | ||
712 | |||
713 | spin_lock(&kvm->mmu_lock); | ||
714 | /* Check if an invalidation has taken place since we got pfn */ | ||
715 | if (mmu_notifier_retry(kvm, mmu_seq)) { | ||
716 | /* | ||
717 | * This can happen when mappings are changed asynchronously, but | ||
718 | * also synchronously if a COW is triggered by | ||
719 | * gfn_to_pfn_prot(). | ||
720 | */ | ||
721 | spin_unlock(&kvm->mmu_lock); | ||
722 | kvm_release_pfn_clean(pfn); | ||
723 | goto retry; | ||
724 | } | ||
725 | |||
726 | /* Ensure page tables are allocated */ | ||
727 | ptep = kvm_mips_pte_for_gpa(kvm, memcache, gpa); | ||
728 | |||
729 | /* Set up the PTE */ | ||
730 | prot_bits = _PAGE_PRESENT | __READABLE | _page_cachable_default; | ||
731 | if (writeable) { | ||
732 | prot_bits |= _PAGE_WRITE; | ||
733 | if (write_fault) { | ||
734 | prot_bits |= __WRITEABLE; | ||
735 | mark_page_dirty(kvm, gfn); | ||
736 | kvm_set_pfn_dirty(pfn); | ||
737 | } | ||
738 | } | ||
739 | entry = pfn_pte(pfn, __pgprot(prot_bits)); | ||
740 | |||
741 | /* Write the PTE */ | ||
742 | old_pte = *ptep; | ||
743 | set_pte(ptep, entry); | ||
744 | |||
745 | err = 0; | ||
746 | if (out_entry) | ||
747 | *out_entry = *ptep; | ||
748 | if (out_buddy) | ||
749 | *out_buddy = *ptep_buddy(ptep); | ||
750 | |||
751 | spin_unlock(&kvm->mmu_lock); | ||
752 | kvm_release_pfn_clean(pfn); | ||
753 | kvm_set_pfn_accessed(pfn); | ||
754 | out: | ||
755 | srcu_read_unlock(&kvm->srcu, srcu_idx); | ||
756 | return err; | ||
757 | } | ||
758 | |||
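/*
 * The slow path above follows the usual mmu_notifier race protocol, in
 * outline (mirroring the code, error handling elided):
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable); // may sleep
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;	// pfn may be stale, drop it and start over
 *
 * Only once the sequence check passes under mmu_lock is the PTE written,
 * guaranteeing no invalidation slipped in between pfn lookup and mapping.
 */
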
759 | static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu, | ||
760 | unsigned long addr) | ||
761 | { | ||
762 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; | ||
763 | pgd_t *pgdp; | ||
764 | int ret; | ||
765 | |||
766 | /* We need a minimum of cached pages ready for page table creation */ | ||
767 | ret = kvm_mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES); | ||
768 | if (ret) | ||
769 | return NULL; | ||
770 | |||
771 | if (KVM_GUEST_KERNEL_MODE(vcpu)) | ||
772 | pgdp = vcpu->arch.guest_kernel_mm.pgd; | ||
773 | else | ||
774 | pgdp = vcpu->arch.guest_user_mm.pgd; | ||
775 | |||
776 | return kvm_mips_walk_pgd(pgdp, memcache, addr); | ||
777 | } | ||
778 | |||
779 | void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr, | ||
780 | bool user) | ||
781 | { | ||
782 | pgd_t *pgdp; | ||
783 | pte_t *ptep; | ||
784 | |||
785 | addr &= PAGE_MASK << 1; | ||
786 | |||
787 | pgdp = vcpu->arch.guest_kernel_mm.pgd; | ||
788 | ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); | ||
789 | if (ptep) { | ||
790 | ptep[0] = pfn_pte(0, __pgprot(0)); | ||
791 | ptep[1] = pfn_pte(0, __pgprot(0)); | ||
792 | } | ||
793 | |||
794 | if (user) { | ||
795 | pgdp = vcpu->arch.guest_user_mm.pgd; | ||
796 | ptep = kvm_mips_walk_pgd(pgdp, NULL, addr); | ||
797 | if (ptep) { | ||
798 | ptep[0] = pfn_pte(0, __pgprot(0)); | ||
799 | ptep[1] = pfn_pte(0, __pgprot(0)); | ||
800 | } | ||
801 | } | ||
802 | } | ||
803 | |||
804 | /* | ||
805 | * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}. | ||
806 | * Flush a range of guest virtual address space from the VM's GVA page tables. | ||
807 | */ | ||
808 | |||
809 | static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva, | ||
810 | unsigned long end_gva) | ||
811 | { | ||
812 | int i_min = pte_index(start_gva); | ||
813 | int i_max = pte_index(end_gva); | ||
814 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1); | ||
815 | int i; | ||
816 | |||
817 | /* | ||
818 | * There's no freeing to do, so there's no point clearing individual | ||
819 | * entries unless only part of the last level page table needs flushing. | ||
820 | */ | ||
821 | if (safe_to_remove) | ||
822 | return true; | ||
823 | |||
824 | for (i = i_min; i <= i_max; ++i) { | ||
825 | if (!pte_present(pte[i])) | ||
826 | continue; | ||
827 | |||
828 | set_pte(pte + i, __pte(0)); | ||
829 | } | ||
830 | return false; | ||
831 | } | ||
832 | |||
833 | static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva, | ||
834 | unsigned long end_gva) | ||
835 | { | ||
836 | pte_t *pte; | ||
837 | unsigned long end = ~0ul; | ||
838 | int i_min = pmd_index(start_gva); | ||
839 | int i_max = pmd_index(end_gva); | ||
840 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1); | ||
841 | int i; | ||
842 | |||
843 | for (i = i_min; i <= i_max; ++i, start_gva = 0) { | ||
844 | if (!pmd_present(pmd[i])) | ||
845 | continue; | ||
846 | |||
847 | pte = pte_offset_kernel(pmd + i, 0); | ||
848 | if (i == i_max) | ||
849 | end = end_gva; | ||
850 | |||
851 | if (kvm_mips_flush_gva_pte(pte, start_gva, end)) { | ||
852 | pmd_clear(pmd + i); | ||
853 | pte_free_kernel(NULL, pte); | ||
854 | } else { | ||
855 | safe_to_remove = false; | ||
856 | } | ||
857 | } | ||
858 | return safe_to_remove; | ||
859 | } | ||
860 | |||
861 | static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva, | ||
862 | unsigned long end_gva) | ||
863 | { | ||
864 | pmd_t *pmd; | ||
865 | unsigned long end = ~0ul; | ||
866 | int i_min = pud_index(start_gva); | ||
867 | int i_max = pud_index(end_gva); | ||
868 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1); | ||
869 | int i; | ||
870 | |||
871 | for (i = i_min; i <= i_max; ++i, start_gva = 0) { | ||
872 | if (!pud_present(pud[i])) | ||
873 | continue; | ||
874 | |||
875 | pmd = pmd_offset(pud + i, 0); | ||
876 | if (i == i_max) | ||
877 | end = end_gva; | ||
878 | |||
879 | if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) { | ||
880 | pud_clear(pud + i); | ||
881 | pmd_free(NULL, pmd); | ||
882 | } else { | ||
883 | safe_to_remove = false; | ||
884 | } | ||
885 | } | ||
886 | return safe_to_remove; | ||
887 | } | ||
888 | |||
889 | static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva, | ||
890 | unsigned long end_gva) | ||
891 | { | ||
892 | p4d_t *p4d; | ||
893 | pud_t *pud; | ||
894 | unsigned long end = ~0ul; | ||
895 | int i_min = pgd_index(start_gva); | ||
896 | int i_max = pgd_index(end_gva); | ||
897 | bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1); | ||
898 | int i; | ||
899 | |||
900 | for (i = i_min; i <= i_max; ++i, start_gva = 0) { | ||
901 | if (!pgd_present(pgd[i])) | ||
902 | continue; | ||
903 | |||
904 | p4d = p4d_offset(pgd, 0); | ||
905 | pud = pud_offset(p4d + i, 0); | ||
906 | if (i == i_max) | ||
907 | end = end_gva; | ||
908 | |||
909 | if (kvm_mips_flush_gva_pud(pud, start_gva, end)) { | ||
910 | pgd_clear(pgd + i); | ||
911 | pud_free(NULL, pud); | ||
912 | } else { | ||
913 | safe_to_remove = false; | ||
914 | } | ||
915 | } | ||
916 | return safe_to_remove; | ||
917 | } | ||
918 | |||
919 | void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags) | ||
920 | { | ||
921 | if (flags & KMF_GPA) { | ||
922 | /* all of guest virtual address space could be affected */ | ||
923 | if (flags & KMF_KERN) | ||
924 | /* useg, kseg0, seg2/3 */ | ||
925 | kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff); | ||
926 | else | ||
927 | /* useg */ | ||
928 | kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); | ||
929 | } else { | ||
930 | /* useg */ | ||
931 | kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); | ||
932 | |||
933 | /* kseg2/3 */ | ||
934 | if (flags & KMF_KERN) | ||
935 | kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff); | ||
936 | } | ||
937 | } | ||
938 | |||
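/*
 * Flag semantics for kvm_mips_flush_gva_pt() above: KMF_GPA means the
 * underlying GPA mappings changed, so any derived GVA mapping could be
 * stale and the directory is flushed across the whole affected range,
 * kernel segments included when KMF_KERN is set. Without KMF_GPA, useg
 * is always flushed and KMF_KERN additionally selects the kseg2/3 range.
 */
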
939 | static pte_t kvm_mips_gpa_pte_to_gva_unmapped(pte_t pte) | ||
940 | { | ||
941 | /* | ||
942 | * Don't leak writeable but clean entries from GPA page tables. We don't | ||
943 | * want the normal Linux tlbmod handler to handle dirtying when KVM | ||
944 | * accesses guest memory. | ||
945 | */ | ||
946 | if (!pte_dirty(pte)) | ||
947 | pte = pte_wrprotect(pte); | ||
948 | |||
949 | return pte; | ||
950 | } | ||
951 | |||
952 | static pte_t kvm_mips_gpa_pte_to_gva_mapped(pte_t pte, long entrylo) | ||
953 | { | ||
954 | /* Guest EntryLo overrides host EntryLo */ | ||
955 | if (!(entrylo & ENTRYLO_D)) | ||
956 | pte = pte_mkclean(pte); | ||
957 | |||
958 | return kvm_mips_gpa_pte_to_gva_unmapped(pte); | ||
959 | } | ||
960 | |||
961 | #ifdef CONFIG_KVM_MIPS_VZ | ||
962 | int kvm_mips_handle_vz_root_tlb_fault(unsigned long badvaddr, | ||
963 | struct kvm_vcpu *vcpu, | ||
964 | bool write_fault) | ||
965 | { | ||
966 | int ret; | ||
967 | |||
968 | ret = kvm_mips_map_page(vcpu, badvaddr, write_fault, NULL, NULL); | ||
969 | if (ret) | ||
970 | return ret; | ||
971 | |||
972 | /* Invalidate this entry in the TLB */ | ||
973 | return kvm_vz_host_tlb_inv(vcpu, badvaddr); | ||
974 | } | ||
975 | #endif | ||
976 | |||
977 | /* XXXKYMA: Must be called with interrupts disabled */ | ||
978 | int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr, | ||
979 | struct kvm_vcpu *vcpu, | ||
980 | bool write_fault) | ||
981 | { | ||
982 | unsigned long gpa; | ||
983 | pte_t pte_gpa[2], *ptep_gva; | ||
984 | int idx; | ||
985 | |||
986 | if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) { | ||
987 | kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr); | ||
988 | kvm_mips_dump_host_tlbs(); | ||
989 | return -1; | ||
990 | } | ||
991 | |||
992 | /* Get the GPA page table entry */ | ||
993 | gpa = KVM_GUEST_CPHYSADDR(badvaddr); | ||
994 | idx = (badvaddr >> PAGE_SHIFT) & 1; | ||
995 | if (kvm_mips_map_page(vcpu, gpa, write_fault, &pte_gpa[idx], | ||
996 | &pte_gpa[!idx]) < 0) | ||
997 | return -1; | ||
998 | |||
999 | /* Get the GVA page table entry */ | ||
1000 | ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, badvaddr & ~PAGE_SIZE); | ||
1001 | if (!ptep_gva) { | ||
1002 | kvm_err("No ptep for gva %lx\n", badvaddr); | ||
1003 | return -1; | ||
1004 | } | ||
1005 | |||
1006 | /* Copy a pair of entries from GPA page table to GVA page table */ | ||
1007 | ptep_gva[0] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[0]); | ||
1008 | ptep_gva[1] = kvm_mips_gpa_pte_to_gva_unmapped(pte_gpa[1]); | ||
1009 | |||
1010 | /* Invalidate this entry in the TLB, guest kernel ASID only */ | ||
1011 | kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); | ||
1012 | return 0; | ||
1013 | } | ||
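Since a MIPS TLB entry maps an even/odd pair of adjacent pages (EntryLo0/EntryLo1), the handler above always fills both halves of the GVA PTE pair. A purely illustrative walk-through, assuming 4 KiB pages:

    /* Hypothetical values, assuming PAGE_SHIFT == 12 (4 KiB pages): */
    unsigned long badvaddr = 0x40003000;   /* faulting guest kseg0 address */
    int idx = (badvaddr >> 12) & 1;        /* == 1: the odd page of the pair */

    /* The pair spans 0x40002000 (even, index 0) and 0x40003000 (odd,
     * index 1). kvm_mips_map_page() fills pte_gpa[idx] for the faulting
     * page and pte_gpa[!idx] for its buddy, so the single TLB entry
     * covering both pages is written with two valid EntryLo values. */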
1014 | |||
1015 | int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu, | ||
1016 | struct kvm_mips_tlb *tlb, | ||
1017 | unsigned long gva, | ||
1018 | bool write_fault) | ||
1019 | { | ||
1020 | struct kvm *kvm = vcpu->kvm; | ||
1021 | long tlb_lo[2]; | ||
1022 | pte_t pte_gpa[2], *ptep_buddy, *ptep_gva; | ||
1023 | unsigned int idx = TLB_LO_IDX(*tlb, gva); | ||
1024 | bool kernel = KVM_GUEST_KERNEL_MODE(vcpu); | ||
1025 | |||
1026 | tlb_lo[0] = tlb->tlb_lo[0]; | ||
1027 | tlb_lo[1] = tlb->tlb_lo[1]; | ||
1028 | |||
1029 | /* | ||
1030 | * The commpage address must not be mapped to anything else if the guest | ||
1031 | * TLB contains entries nearby, or commpage accesses will break. | ||
1032 | */ | ||
1033 | if (!((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1))) | ||
1034 | tlb_lo[TLB_LO_IDX(*tlb, KVM_GUEST_COMMPAGE_ADDR)] = 0; | ||
1035 | |||
1036 | /* Get the GPA page table entry */ | ||
1037 | if (kvm_mips_map_page(vcpu, mips3_tlbpfn_to_paddr(tlb_lo[idx]), | ||
1038 | write_fault, &pte_gpa[idx], NULL) < 0) | ||
1039 | return -1; | ||
1040 | |||
1041 | /* And its GVA buddy's GPA page table entry if it also exists */ | ||
1042 | pte_gpa[!idx] = pfn_pte(0, __pgprot(0)); | ||
1043 | if (tlb_lo[!idx] & ENTRYLO_V) { | ||
1044 | spin_lock(&kvm->mmu_lock); | ||
1045 | ptep_buddy = kvm_mips_pte_for_gpa(kvm, NULL, | ||
1046 | mips3_tlbpfn_to_paddr(tlb_lo[!idx])); | ||
1047 | if (ptep_buddy) | ||
1048 | pte_gpa[!idx] = *ptep_buddy; | ||
1049 | spin_unlock(&kvm->mmu_lock); | ||
1050 | } | ||
1051 | |||
1052 | /* Get the GVA page table entry pair */ | ||
1053 | ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva & ~PAGE_SIZE); | ||
1054 | if (!ptep_gva) { | ||
1055 | kvm_err("No ptep for gva %lx\n", gva); | ||
1056 | return -1; | ||
1057 | } | ||
1058 | |||
1059 | /* Copy a pair of entries from GPA page table to GVA page table */ | ||
1060 | ptep_gva[0] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[0], tlb_lo[0]); | ||
1061 | ptep_gva[1] = kvm_mips_gpa_pte_to_gva_mapped(pte_gpa[1], tlb_lo[1]); | ||
1062 | |||
1063 | /* Invalidate this entry in the TLB, current guest mode ASID only */ | ||
1064 | kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel); | ||
1065 | |||
1066 | kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc, | ||
1067 | tlb->tlb_lo[0], tlb->tlb_lo[1]); | ||
1068 | |||
1069 | return 0; | ||
1070 | } | ||
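The commpage guard near the top of this function is worth unpacking; a hedged reading, assuming 4 KiB pages:

    /*
     * (gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & (PAGE_MASK << 1)
     *
     * With 4 KiB pages, (PAGE_MASK << 1) clears the low 13 bits, so the
     * expression is zero exactly when gva and the commpage share a VPN2,
     * i.e. fall in the same even/odd page pair of one TLB entry. In that
     * case the buddy's EntryLo is zeroed (marked invalid) so a guest
     * mapping can never shadow the commpage.
     */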
1071 | |||
1072 | int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr, | ||
1073 | struct kvm_vcpu *vcpu) | ||
1074 | { | ||
1075 | kvm_pfn_t pfn; | ||
1076 | pte_t *ptep; | ||
1077 | |||
1078 | ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr); | ||
1079 | if (!ptep) { | ||
1080 | kvm_err("No ptep for commpage %lx\n", badvaddr); | ||
1081 | return -1; | ||
1082 | } | ||
1083 | |||
1084 | pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage)); | ||
1085 | /* Also set valid and dirty, so refill handler doesn't have to */ | ||
1086 | *ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED))); | ||
1087 | |||
1088 | /* Invalidate this entry in the TLB, guest kernel ASID only */ | ||
1089 | kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true); | ||
1090 | return 0; | ||
1091 | } | ||
1092 | |||
1093 | /** | ||
1094 | * kvm_mips_migrate_count() - Migrate timer. | ||
1095 | * @vcpu: Virtual CPU. | ||
1096 | * | ||
1097 | * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it | ||
1098 | * if it was running prior to being cancelled. | ||
1099 | * | ||
1100 | * Must be called when the VCPU is migrated to a different CPU to ensure that | ||
1101 | * timer expiry during guest execution interrupts the guest and causes the | ||
1102 | * interrupt to be delivered in a timely manner. | ||
1103 | */ | ||
1104 | static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu) | ||
1105 | { | ||
1106 | if (hrtimer_cancel(&vcpu->arch.comparecount_timer)) | ||
1107 | hrtimer_restart(&vcpu->arch.comparecount_timer); | ||
1108 | } | ||
1109 | |||
1110 | /* Restore ASID once we are scheduled back after preemption */ | ||
1111 | void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
1112 | { | ||
1113 | unsigned long flags; | ||
1114 | |||
1115 | kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu); | ||
1116 | |||
1117 | local_irq_save(flags); | ||
1118 | |||
1119 | vcpu->cpu = cpu; | ||
1120 | if (vcpu->arch.last_sched_cpu != cpu) { | ||
1121 | kvm_debug("[%d->%d]KVM VCPU[%d] switch\n", | ||
1122 | vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id); | ||
1123 | /* | ||
1124 | * Migrate the timer interrupt to the current CPU so that it | ||
1125 | * always interrupts the guest and synchronously triggers a | ||
1126 | * guest timer interrupt. | ||
1127 | */ | ||
1128 | kvm_mips_migrate_count(vcpu); | ||
1129 | } | ||
1130 | |||
1131 | /* restore guest state to registers */ | ||
1132 | kvm_mips_callbacks->vcpu_load(vcpu, cpu); | ||
1133 | |||
1134 | local_irq_restore(flags); | ||
1135 | } | ||
1136 | |||
1137 | /* ASID can change if another task is scheduled during preemption */ | ||
1138 | void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu) | ||
1139 | { | ||
1140 | unsigned long flags; | ||
1141 | int cpu; | ||
1142 | |||
1143 | local_irq_save(flags); | ||
1144 | |||
1145 | cpu = smp_processor_id(); | ||
1146 | vcpu->arch.last_sched_cpu = cpu; | ||
1147 | vcpu->cpu = -1; | ||
1148 | |||
1149 | /* save guest state in registers */ | ||
1150 | kvm_mips_callbacks->vcpu_put(vcpu, cpu); | ||
1151 | |||
1152 | local_irq_restore(flags); | ||
1153 | } | ||
1154 | |||
1155 | /** | ||
1156 | * kvm_trap_emul_gva_fault() - Safely attempt to handle a GVA access fault. | ||
1157 | * @vcpu: Virtual CPU. | ||
1158 | * @gva: Guest virtual address to be accessed. | ||
1159 | * @write: True if write attempted (must be dirtied and made writable). | ||
1160 | * | ||
1161 | * Safely attempt to handle a GVA fault, mapping GVA pages if necessary, and | ||
1162 | * dirtying the page if @write so that guest instructions can be modified. | ||
1163 | * | ||
1164 | * Returns: KVM_MIPS_MAPPED on success. | ||
1165 | * KVM_MIPS_GVA if bad guest virtual address. | ||
1166 | * KVM_MIPS_GPA if bad guest physical address. | ||
1167 | * KVM_MIPS_TLB if guest TLB not present. | ||
1168 | * KVM_MIPS_TLBINV if guest TLB present but not valid. | ||
1169 | * KVM_MIPS_TLBMOD if guest TLB read only. | ||
1170 | */ | ||
1171 | enum kvm_mips_fault_result kvm_trap_emul_gva_fault(struct kvm_vcpu *vcpu, | ||
1172 | unsigned long gva, | ||
1173 | bool write) | ||
1174 | { | ||
1175 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1176 | struct kvm_mips_tlb *tlb; | ||
1177 | int index; | ||
1178 | |||
1179 | if (KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG0) { | ||
1180 | if (kvm_mips_handle_kseg0_tlb_fault(gva, vcpu, write) < 0) | ||
1181 | return KVM_MIPS_GPA; | ||
1182 | } else if ((KVM_GUEST_KSEGX(gva) < KVM_GUEST_KSEG0) || | ||
1183 | KVM_GUEST_KSEGX(gva) == KVM_GUEST_KSEG23) { | ||
1184 | /* Address should be in the guest TLB */ | ||
1185 | index = kvm_mips_guest_tlb_lookup(vcpu, (gva & VPN2_MASK) | | ||
1186 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID)); | ||
1187 | if (index < 0) | ||
1188 | return KVM_MIPS_TLB; | ||
1189 | tlb = &vcpu->arch.guest_tlb[index]; | ||
1190 | |||
1191 | /* Entry should be valid, and dirty for writes */ | ||
1192 | if (!TLB_IS_VALID(*tlb, gva)) | ||
1193 | return KVM_MIPS_TLBINV; | ||
1194 | if (write && !TLB_IS_DIRTY(*tlb, gva)) | ||
1195 | return KVM_MIPS_TLBMOD; | ||
1196 | |||
1197 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, gva, write)) | ||
1198 | return KVM_MIPS_GPA; | ||
1199 | } else { | ||
1200 | return KVM_MIPS_GVA; | ||
1201 | } | ||
1202 | |||
1203 | return KVM_MIPS_MAPPED; | ||
1204 | } | ||
1205 | |||
1206 | int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out) | ||
1207 | { | ||
1208 | int err; | ||
1209 | |||
1210 | if (WARN(IS_ENABLED(CONFIG_KVM_MIPS_VZ), | ||
1211 | "Expect BadInstr/BadInstrP registers to be used with VZ\n")) | ||
1212 | return -EINVAL; | ||
1213 | |||
1214 | retry: | ||
1215 | kvm_trap_emul_gva_lockless_begin(vcpu); | ||
1216 | err = get_user(*out, opc); | ||
1217 | kvm_trap_emul_gva_lockless_end(vcpu); | ||
1218 | |||
1219 | if (unlikely(err)) { | ||
1220 | /* | ||
1221 | * Try to handle the fault, maybe we just raced with a GVA | ||
1222 | * invalidation. | ||
1223 | */ | ||
1224 | err = kvm_trap_emul_gva_fault(vcpu, (unsigned long)opc, | ||
1225 | false); | ||
1226 | if (unlikely(err)) { | ||
1227 | kvm_err("%s: illegal address: %p\n", | ||
1228 | __func__, opc); | ||
1229 | return -EFAULT; | ||
1230 | } | ||
1231 | |||
1232 | /* Hopefully it'll work now */ | ||
1233 | goto retry; | ||
1234 | } | ||
1235 | return 0; | ||
1236 | } | ||
diff --git a/arch/mips/kvm/msa.S b/arch/mips/kvm/msa.S new file mode 100644 index 000000000..d02f0c6cc --- /dev/null +++ b/arch/mips/kvm/msa.S | |||
@@ -0,0 +1,161 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * MIPS SIMD Architecture (MSA) context handling code for KVM. | ||
7 | * | ||
8 | * Copyright (C) 2015 Imagination Technologies Ltd. | ||
9 | */ | ||
10 | |||
11 | #include <asm/asm.h> | ||
12 | #include <asm/asm-offsets.h> | ||
13 | #include <asm/asmmacro.h> | ||
14 | #include <asm/regdef.h> | ||
15 | |||
16 | .set noreorder | ||
17 | .set noat | ||
18 | |||
19 | LEAF(__kvm_save_msa) | ||
20 | st_d 0, VCPU_FPR0, a0 | ||
21 | st_d 1, VCPU_FPR1, a0 | ||
22 | st_d 2, VCPU_FPR2, a0 | ||
23 | st_d 3, VCPU_FPR3, a0 | ||
24 | st_d 4, VCPU_FPR4, a0 | ||
25 | st_d 5, VCPU_FPR5, a0 | ||
26 | st_d 6, VCPU_FPR6, a0 | ||
27 | st_d 7, VCPU_FPR7, a0 | ||
28 | st_d 8, VCPU_FPR8, a0 | ||
29 | st_d 9, VCPU_FPR9, a0 | ||
30 | st_d 10, VCPU_FPR10, a0 | ||
31 | st_d 11, VCPU_FPR11, a0 | ||
32 | st_d 12, VCPU_FPR12, a0 | ||
33 | st_d 13, VCPU_FPR13, a0 | ||
34 | st_d 14, VCPU_FPR14, a0 | ||
35 | st_d 15, VCPU_FPR15, a0 | ||
36 | st_d 16, VCPU_FPR16, a0 | ||
37 | st_d 17, VCPU_FPR17, a0 | ||
38 | st_d 18, VCPU_FPR18, a0 | ||
39 | st_d 19, VCPU_FPR19, a0 | ||
40 | st_d 20, VCPU_FPR20, a0 | ||
41 | st_d 21, VCPU_FPR21, a0 | ||
42 | st_d 22, VCPU_FPR22, a0 | ||
43 | st_d 23, VCPU_FPR23, a0 | ||
44 | st_d 24, VCPU_FPR24, a0 | ||
45 | st_d 25, VCPU_FPR25, a0 | ||
46 | st_d 26, VCPU_FPR26, a0 | ||
47 | st_d 27, VCPU_FPR27, a0 | ||
48 | st_d 28, VCPU_FPR28, a0 | ||
49 | st_d 29, VCPU_FPR29, a0 | ||
50 | st_d 30, VCPU_FPR30, a0 | ||
51 | st_d 31, VCPU_FPR31, a0 | ||
52 | jr ra | ||
53 | nop | ||
54 | END(__kvm_save_msa) | ||
55 | |||
56 | LEAF(__kvm_restore_msa) | ||
57 | ld_d 0, VCPU_FPR0, a0 | ||
58 | ld_d 1, VCPU_FPR1, a0 | ||
59 | ld_d 2, VCPU_FPR2, a0 | ||
60 | ld_d 3, VCPU_FPR3, a0 | ||
61 | ld_d 4, VCPU_FPR4, a0 | ||
62 | ld_d 5, VCPU_FPR5, a0 | ||
63 | ld_d 6, VCPU_FPR6, a0 | ||
64 | ld_d 7, VCPU_FPR7, a0 | ||
65 | ld_d 8, VCPU_FPR8, a0 | ||
66 | ld_d 9, VCPU_FPR9, a0 | ||
67 | ld_d 10, VCPU_FPR10, a0 | ||
68 | ld_d 11, VCPU_FPR11, a0 | ||
69 | ld_d 12, VCPU_FPR12, a0 | ||
70 | ld_d 13, VCPU_FPR13, a0 | ||
71 | ld_d 14, VCPU_FPR14, a0 | ||
72 | ld_d 15, VCPU_FPR15, a0 | ||
73 | ld_d 16, VCPU_FPR16, a0 | ||
74 | ld_d 17, VCPU_FPR17, a0 | ||
75 | ld_d 18, VCPU_FPR18, a0 | ||
76 | ld_d 19, VCPU_FPR19, a0 | ||
77 | ld_d 20, VCPU_FPR20, a0 | ||
78 | ld_d 21, VCPU_FPR21, a0 | ||
79 | ld_d 22, VCPU_FPR22, a0 | ||
80 | ld_d 23, VCPU_FPR23, a0 | ||
81 | ld_d 24, VCPU_FPR24, a0 | ||
82 | ld_d 25, VCPU_FPR25, a0 | ||
83 | ld_d 26, VCPU_FPR26, a0 | ||
84 | ld_d 27, VCPU_FPR27, a0 | ||
85 | ld_d 28, VCPU_FPR28, a0 | ||
86 | ld_d 29, VCPU_FPR29, a0 | ||
87 | ld_d 30, VCPU_FPR30, a0 | ||
88 | ld_d 31, VCPU_FPR31, a0 | ||
89 | jr ra | ||
90 | nop | ||
91 | END(__kvm_restore_msa) | ||
92 | |||
93 | .macro kvm_restore_msa_upper wr, off, base | ||
94 | .set push | ||
95 | .set noat | ||
96 | #ifdef CONFIG_64BIT | ||
97 | ld $1, \off(\base) | ||
98 | insert_d \wr, 1 | ||
99 | #elif defined(CONFIG_CPU_LITTLE_ENDIAN) | ||
100 | lw $1, \off(\base) | ||
101 | insert_w \wr, 2 | ||
102 | lw $1, (\off+4)(\base) | ||
103 | insert_w \wr, 3 | ||
104 | #else /* CONFIG_CPU_BIG_ENDIAN */ | ||
105 | lw $1, (\off+4)(\base) | ||
106 | insert_w \wr, 2 | ||
107 | lw $1, \off(\base) | ||
108 | insert_w \wr, 3 | ||
109 | #endif | ||
110 | .set pop | ||
111 | .endm | ||
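A brief reading of the endian handling in the macro above (a hedged note, not from the source): word element 2 holds the less significant half of the vector register's upper doubleword and element 3 the more significant half, so the two 32-bit loads swap offsets between endiannesses.

    /*
     * 64-bit:    one ld of the upper doubleword, inserted whole.
     * 32-bit LE: word at off   -> element 2, word at off+4 -> element 3.
     * 32-bit BE: word at off+4 -> element 2, word at off   -> element 3,
     * because big-endian keeps the more significant word at the lower
     * address while the MSA element numbering itself does not change.
     */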
112 | |||
113 | LEAF(__kvm_restore_msa_upper) | ||
114 | kvm_restore_msa_upper 0, VCPU_FPR0 +8, a0 | ||
115 | kvm_restore_msa_upper 1, VCPU_FPR1 +8, a0 | ||
116 | kvm_restore_msa_upper 2, VCPU_FPR2 +8, a0 | ||
117 | kvm_restore_msa_upper 3, VCPU_FPR3 +8, a0 | ||
118 | kvm_restore_msa_upper 4, VCPU_FPR4 +8, a0 | ||
119 | kvm_restore_msa_upper 5, VCPU_FPR5 +8, a0 | ||
120 | kvm_restore_msa_upper 6, VCPU_FPR6 +8, a0 | ||
121 | kvm_restore_msa_upper 7, VCPU_FPR7 +8, a0 | ||
122 | kvm_restore_msa_upper 8, VCPU_FPR8 +8, a0 | ||
123 | kvm_restore_msa_upper 9, VCPU_FPR9 +8, a0 | ||
124 | kvm_restore_msa_upper 10, VCPU_FPR10+8, a0 | ||
125 | kvm_restore_msa_upper 11, VCPU_FPR11+8, a0 | ||
126 | kvm_restore_msa_upper 12, VCPU_FPR12+8, a0 | ||
127 | kvm_restore_msa_upper 13, VCPU_FPR13+8, a0 | ||
128 | kvm_restore_msa_upper 14, VCPU_FPR14+8, a0 | ||
129 | kvm_restore_msa_upper 15, VCPU_FPR15+8, a0 | ||
130 | kvm_restore_msa_upper 16, VCPU_FPR16+8, a0 | ||
131 | kvm_restore_msa_upper 17, VCPU_FPR17+8, a0 | ||
132 | kvm_restore_msa_upper 18, VCPU_FPR18+8, a0 | ||
133 | kvm_restore_msa_upper 19, VCPU_FPR19+8, a0 | ||
134 | kvm_restore_msa_upper 20, VCPU_FPR20+8, a0 | ||
135 | kvm_restore_msa_upper 21, VCPU_FPR21+8, a0 | ||
136 | kvm_restore_msa_upper 22, VCPU_FPR22+8, a0 | ||
137 | kvm_restore_msa_upper 23, VCPU_FPR23+8, a0 | ||
138 | kvm_restore_msa_upper 24, VCPU_FPR24+8, a0 | ||
139 | kvm_restore_msa_upper 25, VCPU_FPR25+8, a0 | ||
140 | kvm_restore_msa_upper 26, VCPU_FPR26+8, a0 | ||
141 | kvm_restore_msa_upper 27, VCPU_FPR27+8, a0 | ||
142 | kvm_restore_msa_upper 28, VCPU_FPR28+8, a0 | ||
143 | kvm_restore_msa_upper 29, VCPU_FPR29+8, a0 | ||
144 | kvm_restore_msa_upper 30, VCPU_FPR30+8, a0 | ||
145 | kvm_restore_msa_upper 31, VCPU_FPR31+8, a0 | ||
146 | jr ra | ||
147 | nop | ||
148 | END(__kvm_restore_msa_upper) | ||
149 | |||
150 | LEAF(__kvm_restore_msacsr) | ||
151 | lw t0, VCPU_MSA_CSR(a0) | ||
152 | /* | ||
153 | * The ctcmsa must stay at this offset in __kvm_restore_msacsr. | ||
154 | * See kvm_mips_csr_die_notify() which handles t0 containing a value | ||
155 | * which triggers an MSA FP Exception, which must be stepped over and | ||
156 | * ignored since the set cause bits must remain there for the guest. | ||
157 | */ | ||
158 | _ctcmsa MSA_CSR, t0 | ||
159 | jr ra | ||
160 | nop | ||
161 | END(__kvm_restore_msacsr) | ||
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c new file mode 100644 index 000000000..53f851a61 --- /dev/null +++ b/arch/mips/kvm/stats.c | |||
@@ -0,0 +1,63 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: COP0 access histogram | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/kvm_host.h> | ||
13 | |||
14 | char *kvm_cop0_str[N_MIPS_COPROC_REGS] = { | ||
15 | "Index", | ||
16 | "Random", | ||
17 | "EntryLo0", | ||
18 | "EntryLo1", | ||
19 | "Context", | ||
20 | "PG Mask", | ||
21 | "Wired", | ||
22 | "HWREna", | ||
23 | "BadVAddr", | ||
24 | "Count", | ||
25 | "EntryHI", | ||
26 | "Compare", | ||
27 | "Status", | ||
28 | "Cause", | ||
29 | "EXC PC", | ||
30 | "PRID", | ||
31 | "Config", | ||
32 | "LLAddr", | ||
33 | "Watch Lo", | ||
34 | "Watch Hi", | ||
35 | "X Context", | ||
36 | "Reserved", | ||
37 | "Impl Dep", | ||
38 | "Debug", | ||
39 | "DEPC", | ||
40 | "PerfCnt", | ||
41 | "ErrCtl", | ||
42 | "CacheErr", | ||
43 | "TagLo", | ||
44 | "TagHi", | ||
45 | "ErrorEPC", | ||
46 | "DESAVE" | ||
47 | }; | ||
48 | |||
49 | void kvm_mips_dump_stats(struct kvm_vcpu *vcpu) | ||
50 | { | ||
51 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | ||
52 | int i, j; | ||
53 | |||
54 | kvm_info("\nKVM VCPU[%d] COP0 Access Profile:\n", vcpu->vcpu_id); | ||
55 | for (i = 0; i < N_MIPS_COPROC_REGS; i++) { | ||
56 | for (j = 0; j < N_MIPS_COPROC_SEL; j++) { | ||
57 | if (vcpu->arch.cop0->stat[i][j]) | ||
58 | kvm_info("%s[%d]: %lu\n", kvm_cop0_str[i], j, | ||
59 | vcpu->arch.cop0->stat[i][j]); | ||
60 | } | ||
61 | } | ||
62 | #endif | ||
63 | } | ||
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c new file mode 100644 index 000000000..1c1fbce3f --- /dev/null +++ b/arch/mips/kvm/tlb.c | |||
@@ -0,0 +1,700 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS TLB handling, this file is part of the Linux host kernel so that | ||
7 | * TLB handlers run from KSEG0 | ||
8 | * | ||
9 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
10 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
11 | */ | ||
12 | |||
13 | #include <linux/sched.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/export.h> | ||
18 | #include <linux/kvm_host.h> | ||
19 | #include <linux/srcu.h> | ||
20 | |||
21 | #include <asm/cpu.h> | ||
22 | #include <asm/bootinfo.h> | ||
23 | #include <asm/mipsregs.h> | ||
24 | #include <asm/mmu_context.h> | ||
25 | #include <asm/cacheflush.h> | ||
26 | #include <asm/tlb.h> | ||
27 | #include <asm/tlbdebug.h> | ||
28 | |||
29 | #undef CONFIG_MIPS_MT | ||
30 | #include <asm/r4kcache.h> | ||
31 | #define CONFIG_MIPS_MT | ||
32 | |||
33 | #define KVM_GUEST_PC_TLB 0 | ||
34 | #define KVM_GUEST_SP_TLB 1 | ||
35 | |||
36 | #ifdef CONFIG_KVM_MIPS_VZ | ||
37 | unsigned long GUESTID_MASK; | ||
38 | EXPORT_SYMBOL_GPL(GUESTID_MASK); | ||
39 | unsigned long GUESTID_FIRST_VERSION; | ||
40 | EXPORT_SYMBOL_GPL(GUESTID_FIRST_VERSION); | ||
41 | unsigned long GUESTID_VERSION_MASK; | ||
42 | EXPORT_SYMBOL_GPL(GUESTID_VERSION_MASK); | ||
43 | |||
44 | static u32 kvm_mips_get_root_asid(struct kvm_vcpu *vcpu) | ||
45 | { | ||
46 | struct mm_struct *gpa_mm = &vcpu->kvm->arch.gpa_mm; | ||
47 | |||
48 | if (cpu_has_guestid) | ||
49 | return 0; | ||
50 | else | ||
51 | return cpu_asid(smp_processor_id(), gpa_mm); | ||
52 | } | ||
53 | #endif | ||
54 | |||
55 | static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) | ||
56 | { | ||
57 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
58 | int cpu = smp_processor_id(); | ||
59 | |||
60 | return cpu_asid(cpu, kern_mm); | ||
61 | } | ||
62 | |||
63 | static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) | ||
64 | { | ||
65 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
66 | int cpu = smp_processor_id(); | ||
67 | |||
68 | return cpu_asid(cpu, user_mm); | ||
69 | } | ||
70 | |||
71 | /* Structure defining a TLB entry data set. */ | ||
72 | |||
73 | void kvm_mips_dump_host_tlbs(void) | ||
74 | { | ||
75 | unsigned long flags; | ||
76 | |||
77 | local_irq_save(flags); | ||
78 | |||
79 | kvm_info("HOST TLBs:\n"); | ||
80 | dump_tlb_regs(); | ||
81 | pr_info("\n"); | ||
82 | dump_tlb_all(); | ||
83 | |||
84 | local_irq_restore(flags); | ||
85 | } | ||
86 | EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs); | ||
87 | |||
88 | void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu) | ||
89 | { | ||
90 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
91 | struct kvm_mips_tlb tlb; | ||
92 | int i; | ||
93 | |||
94 | kvm_info("Guest TLBs:\n"); | ||
95 | kvm_info("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0)); | ||
96 | |||
97 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { | ||
98 | tlb = vcpu->arch.guest_tlb[i]; | ||
99 | kvm_info("TLB%c%3d Hi 0x%08lx ", | ||
100 | (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V | ||
101 | ? ' ' : '*', | ||
102 | i, tlb.tlb_hi); | ||
103 | kvm_info("Lo0=0x%09llx %c%c attr %lx ", | ||
104 | (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]), | ||
105 | (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ', | ||
106 | (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ', | ||
107 | (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT); | ||
108 | kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n", | ||
109 | (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]), | ||
110 | (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ', | ||
111 | (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ', | ||
112 | (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT, | ||
113 | tlb.tlb_mask); | ||
114 | } | ||
115 | } | ||
116 | EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs); | ||
117 | |||
118 | int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi) | ||
119 | { | ||
120 | int i; | ||
121 | int index = -1; | ||
122 | struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb; | ||
123 | |||
124 | for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) { | ||
125 | if (TLB_HI_VPN2_HIT(tlb[i], entryhi) && | ||
126 | TLB_HI_ASID_HIT(tlb[i], entryhi)) { | ||
127 | index = i; | ||
128 | break; | ||
129 | } | ||
130 | } | ||
131 | if (index >= 0) | ||
132 | kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n", | ||
133 | __func__, entryhi, index, tlb[index].tlb_lo[0], tlb[index].tlb_lo[1]); | ||
134 | |||
135 | return index; | ||
136 | } | ||
137 | EXPORT_SYMBOL_GPL(kvm_mips_guest_tlb_lookup); | ||
138 | |||
139 | static int _kvm_mips_host_tlb_inv(unsigned long entryhi) | ||
140 | { | ||
141 | int idx; | ||
142 | |||
143 | write_c0_entryhi(entryhi); | ||
144 | mtc0_tlbw_hazard(); | ||
145 | |||
146 | tlb_probe(); | ||
147 | tlb_probe_hazard(); | ||
148 | idx = read_c0_index(); | ||
149 | |||
150 | if (idx >= current_cpu_data.tlbsize) | ||
151 | BUG(); | ||
152 | |||
153 | if (idx >= 0) { | ||
154 | write_c0_entryhi(UNIQUE_ENTRYHI(idx)); | ||
155 | write_c0_entrylo0(0); | ||
156 | write_c0_entrylo1(0); | ||
157 | mtc0_tlbw_hazard(); | ||
158 | |||
159 | tlb_write_indexed(); | ||
160 | tlbw_use_hazard(); | ||
161 | } | ||
162 | |||
163 | return idx; | ||
164 | } | ||
165 | |||
166 | int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va, | ||
167 | bool user, bool kernel) | ||
168 | { | ||
169 | /* | ||
170 | * Initialize idx_user and idx_kernel to workaround bogus | ||
171 | * maybe-initialized warning when using GCC 6. | ||
172 | */ | ||
173 | int idx_user = 0, idx_kernel = 0; | ||
174 | unsigned long flags, old_entryhi; | ||
175 | |||
176 | local_irq_save(flags); | ||
177 | |||
178 | old_entryhi = read_c0_entryhi(); | ||
179 | |||
180 | if (user) | ||
181 | idx_user = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | | ||
182 | kvm_mips_get_user_asid(vcpu)); | ||
183 | if (kernel) | ||
184 | idx_kernel = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | | ||
185 | kvm_mips_get_kernel_asid(vcpu)); | ||
186 | |||
187 | write_c0_entryhi(old_entryhi); | ||
188 | mtc0_tlbw_hazard(); | ||
189 | |||
190 | local_irq_restore(flags); | ||
191 | |||
192 | /* | ||
193 | * We don't want to get reserved instruction exceptions for missing tlb | ||
194 | * entries. | ||
195 | */ | ||
196 | if (cpu_has_vtag_icache) | ||
197 | flush_icache_all(); | ||
198 | |||
199 | if (user && idx_user >= 0) | ||
200 | kvm_debug("%s: Invalidated guest user entryhi %#lx @ idx %d\n", | ||
201 | __func__, (va & VPN2_MASK) | | ||
202 | kvm_mips_get_user_asid(vcpu), idx_user); | ||
203 | if (kernel && idx_kernel >= 0) | ||
204 | kvm_debug("%s: Invalidated guest kernel entryhi %#lx @ idx %d\n", | ||
205 | __func__, (va & VPN2_MASK) | | ||
206 | kvm_mips_get_kernel_asid(vcpu), idx_kernel); | ||
207 | |||
208 | return 0; | ||
209 | } | ||
210 | EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_inv); | ||
211 | |||
212 | #ifdef CONFIG_KVM_MIPS_VZ | ||
213 | |||
214 | /* GuestID management */ | ||
215 | |||
216 | /** | ||
217 | * clear_root_gid() - Set GuestCtl1.RID for normal root operation. | ||
218 | */ | ||
219 | static inline void clear_root_gid(void) | ||
220 | { | ||
221 | if (cpu_has_guestid) { | ||
222 | clear_c0_guestctl1(MIPS_GCTL1_RID); | ||
223 | mtc0_tlbw_hazard(); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | /** | ||
228 | * set_root_gid_to_guest_gid() - Set GuestCtl1.RID to match GuestCtl1.ID. | ||
229 | * | ||
230 | * Sets the root GuestID to match the current guest GuestID, for TLB operation | ||
231 | * on the GPA->RPA mappings in the root TLB. | ||
232 | * | ||
233 | * The caller must be sure to disable HTW while the root GID is set, and | ||
234 | * possibly longer if TLB registers are modified. | ||
235 | */ | ||
236 | static inline void set_root_gid_to_guest_gid(void) | ||
237 | { | ||
238 | unsigned int guestctl1; | ||
239 | |||
240 | if (cpu_has_guestid) { | ||
241 | back_to_back_c0_hazard(); | ||
242 | guestctl1 = read_c0_guestctl1(); | ||
243 | guestctl1 = (guestctl1 & ~MIPS_GCTL1_RID) | | ||
244 | ((guestctl1 & MIPS_GCTL1_ID) >> MIPS_GCTL1_ID_SHIFT) | ||
245 | << MIPS_GCTL1_RID_SHIFT; | ||
246 | write_c0_guestctl1(guestctl1); | ||
247 | mtc0_tlbw_hazard(); | ||
248 | } | ||
249 | } | ||
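A worked example of the RID update, assuming the usual GuestCtl1 field layout from mipsregs.h (ID in bits 7:0, RID in bits 23:16; the concrete shifts here are assumptions for illustration):

    unsigned int guestctl1 = 0x00000005;                /* ID = 5, RID = 0 */
    unsigned int id = guestctl1 & 0x000000ff;           /* == 5 */

    guestctl1 = (guestctl1 & ~0x00ff0000) | (id << 16);
    /* guestctl1 == 0x00050005: root TLB probes and writes now operate
     * on the guest's GPA->RPA entries tagged with GuestID 5. */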
250 | |||
251 | int kvm_vz_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va) | ||
252 | { | ||
253 | int idx; | ||
254 | unsigned long flags, old_entryhi; | ||
255 | |||
256 | local_irq_save(flags); | ||
257 | htw_stop(); | ||
258 | |||
259 | /* Set root GuestID for root probe and write of guest TLB entry */ | ||
260 | set_root_gid_to_guest_gid(); | ||
261 | |||
262 | old_entryhi = read_c0_entryhi(); | ||
263 | |||
264 | idx = _kvm_mips_host_tlb_inv((va & VPN2_MASK) | | ||
265 | kvm_mips_get_root_asid(vcpu)); | ||
266 | |||
267 | write_c0_entryhi(old_entryhi); | ||
268 | clear_root_gid(); | ||
269 | mtc0_tlbw_hazard(); | ||
270 | |||
271 | htw_start(); | ||
272 | local_irq_restore(flags); | ||
273 | |||
274 | /* | ||
275 | * We don't want to get reserved instruction exceptions for missing tlb | ||
276 | * entries. | ||
277 | */ | ||
278 | if (cpu_has_vtag_icache) | ||
279 | flush_icache_all(); | ||
280 | |||
281 | if (idx >= 0) | ||
282 | kvm_debug("%s: Invalidated root entryhi %#lx @ idx %d\n", | ||
283 | __func__, (va & VPN2_MASK) | | ||
284 | kvm_mips_get_root_asid(vcpu), idx); | ||
285 | |||
286 | return 0; | ||
287 | } | ||
288 | EXPORT_SYMBOL_GPL(kvm_vz_host_tlb_inv); | ||
289 | |||
290 | /** | ||
291 | * kvm_vz_guest_tlb_lookup() - Lookup a guest VZ TLB mapping. | ||
292 | * @vcpu: KVM VCPU pointer. | ||
293 | * @gva: Guest virtual address in a TLB mapped guest segment. | ||
294 | * @gpa: Pointer to output guest physical address it maps to. | ||
295 | * | ||
296 | * Converts a guest virtual address in a guest TLB mapped segment to a guest | ||
297 | * physical address, by probing the guest TLB. | ||
298 | * | ||
299 | * Returns: 0 if guest TLB mapping exists for @gva. *@gpa will have been | ||
300 | * written. | ||
301 | * -EFAULT if no guest TLB mapping exists for @gva. *@gpa may not | ||
302 | * have been written. | ||
303 | */ | ||
304 | int kvm_vz_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long gva, | ||
305 | unsigned long *gpa) | ||
306 | { | ||
307 | unsigned long o_entryhi, o_entrylo[2], o_pagemask; | ||
308 | unsigned int o_index; | ||
309 | unsigned long entrylo[2], pagemask, pagemaskbit, pa; | ||
310 | unsigned long flags; | ||
311 | int index; | ||
312 | |||
313 | /* Probe the guest TLB for a mapping */ | ||
314 | local_irq_save(flags); | ||
315 | /* Set root GuestID for root probe of guest TLB entry */ | ||
316 | htw_stop(); | ||
317 | set_root_gid_to_guest_gid(); | ||
318 | |||
319 | o_entryhi = read_gc0_entryhi(); | ||
320 | o_index = read_gc0_index(); | ||
321 | |||
322 | write_gc0_entryhi((o_entryhi & 0x3ff) | (gva & ~0xfffl)); | ||
323 | mtc0_tlbw_hazard(); | ||
324 | guest_tlb_probe(); | ||
325 | tlb_probe_hazard(); | ||
326 | |||
327 | index = read_gc0_index(); | ||
328 | if (index < 0) { | ||
329 | /* No match, fail */ | ||
330 | write_gc0_entryhi(o_entryhi); | ||
331 | write_gc0_index(o_index); | ||
332 | |||
333 | clear_root_gid(); | ||
334 | htw_start(); | ||
335 | local_irq_restore(flags); | ||
336 | return -EFAULT; | ||
337 | } | ||
338 | |||
339 | /* Match! read the TLB entry */ | ||
340 | o_entrylo[0] = read_gc0_entrylo0(); | ||
341 | o_entrylo[1] = read_gc0_entrylo1(); | ||
342 | o_pagemask = read_gc0_pagemask(); | ||
343 | |||
344 | mtc0_tlbr_hazard(); | ||
345 | guest_tlb_read(); | ||
346 | tlb_read_hazard(); | ||
347 | |||
348 | entrylo[0] = read_gc0_entrylo0(); | ||
349 | entrylo[1] = read_gc0_entrylo1(); | ||
350 | pagemask = ~read_gc0_pagemask() & ~0x1fffl; | ||
351 | |||
352 | write_gc0_entryhi(o_entryhi); | ||
353 | write_gc0_index(o_index); | ||
354 | write_gc0_entrylo0(o_entrylo[0]); | ||
355 | write_gc0_entrylo1(o_entrylo[1]); | ||
356 | write_gc0_pagemask(o_pagemask); | ||
357 | |||
358 | clear_root_gid(); | ||
359 | htw_start(); | ||
360 | local_irq_restore(flags); | ||
361 | |||
362 | /* Select one of the EntryLo values and interpret the GPA */ | ||
363 | pagemaskbit = (pagemask ^ (pagemask & (pagemask - 1))) >> 1; | ||
364 | pa = entrylo[!!(gva & pagemaskbit)]; | ||
365 | |||
366 | /* | ||
367 | * TLB entry may have become invalid since TLB probe if physical FTLB | ||
368 | * entries are shared between threads (e.g. I6400). | ||
369 | */ | ||
370 | if (!(pa & ENTRYLO_V)) | ||
371 | return -EFAULT; | ||
372 | |||
373 | /* | ||
374 | * Note, this doesn't take guest MIPS32 XPA into account, where PFN is | ||
375 | * split with XI/RI in the middle. | ||
376 | */ | ||
377 | pa = (pa << 6) & ~0xfffl; | ||
378 | pa |= gva & ~(pagemask | pagemaskbit); | ||
379 | |||
380 | *gpa = pa; | ||
381 | return 0; | ||
382 | } | ||
383 | EXPORT_SYMBOL_GPL(kvm_vz_guest_tlb_lookup); | ||
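The pagemask arithmetic above is compact; a worked example for 4 KiB pages (PageMask register reads 0; 32-bit values shown for brevity):

    /*
     * pagemask    = ~0 & ~0x1fffl                          = 0xffffe000
     * lowest bit  = pagemask ^ (pagemask & (pagemask - 1)) = 0x00002000
     * pagemaskbit = 0x2000 >> 1                            = 0x00001000
     *
     * Bit 12 of the GVA therefore selects EntryLo1 (odd page) or
     * EntryLo0 (even page), and the final
     *     pa |= gva & ~(pagemask | pagemaskbit);
     * keeps only the in-page offset bits (0xfff here).
     */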
384 | |||
385 | /** | ||
386 | * kvm_vz_local_flush_roottlb_all_guests() - Flush all root TLB entries for | ||
387 | * guests. | ||
388 | * | ||
389 | * Invalidate all entries in root tlb which are GPA mappings. | ||
390 | */ | ||
391 | void kvm_vz_local_flush_roottlb_all_guests(void) | ||
392 | { | ||
393 | unsigned long flags; | ||
394 | unsigned long old_entryhi, old_pagemask, old_guestctl1; | ||
395 | int entry; | ||
396 | |||
397 | if (WARN_ON(!cpu_has_guestid)) | ||
398 | return; | ||
399 | |||
400 | local_irq_save(flags); | ||
401 | htw_stop(); | ||
402 | |||
403 | /* TLBR may clobber EntryHi.ASID, PageMask, and GuestCtl1.RID */ | ||
404 | old_entryhi = read_c0_entryhi(); | ||
405 | old_pagemask = read_c0_pagemask(); | ||
406 | old_guestctl1 = read_c0_guestctl1(); | ||
407 | |||
408 | /* | ||
409 | * Invalidate guest entries in root TLB while leaving root entries | ||
410 | * intact when possible. | ||
411 | */ | ||
412 | for (entry = 0; entry < current_cpu_data.tlbsize; entry++) { | ||
413 | write_c0_index(entry); | ||
414 | mtc0_tlbw_hazard(); | ||
415 | tlb_read(); | ||
416 | tlb_read_hazard(); | ||
417 | |||
418 | /* Don't invalidate non-guest (RVA) mappings in the root TLB */ | ||
419 | if (!(read_c0_guestctl1() & MIPS_GCTL1_RID)) | ||
420 | continue; | ||
421 | |||
422 | /* Make sure all entries differ. */ | ||
423 | write_c0_entryhi(UNIQUE_ENTRYHI(entry)); | ||
424 | write_c0_entrylo0(0); | ||
425 | write_c0_entrylo1(0); | ||
426 | write_c0_guestctl1(0); | ||
427 | mtc0_tlbw_hazard(); | ||
428 | tlb_write_indexed(); | ||
429 | } | ||
430 | |||
431 | write_c0_entryhi(old_entryhi); | ||
432 | write_c0_pagemask(old_pagemask); | ||
433 | write_c0_guestctl1(old_guestctl1); | ||
434 | tlbw_use_hazard(); | ||
435 | |||
436 | htw_start(); | ||
437 | local_irq_restore(flags); | ||
438 | } | ||
439 | EXPORT_SYMBOL_GPL(kvm_vz_local_flush_roottlb_all_guests); | ||
440 | |||
441 | /** | ||
442 | * kvm_vz_local_flush_guesttlb_all() - Flush all guest TLB entries. | ||
443 | * | ||
444 | * Invalidate all entries in guest tlb irrespective of guestid. | ||
445 | */ | ||
446 | void kvm_vz_local_flush_guesttlb_all(void) | ||
447 | { | ||
448 | unsigned long flags; | ||
449 | unsigned long old_index; | ||
450 | unsigned long old_entryhi; | ||
451 | unsigned long old_entrylo[2]; | ||
452 | unsigned long old_pagemask; | ||
453 | int entry; | ||
454 | u64 cvmmemctl2 = 0; | ||
455 | |||
456 | local_irq_save(flags); | ||
457 | |||
458 | /* Preserve all clobbered guest registers */ | ||
459 | old_index = read_gc0_index(); | ||
460 | old_entryhi = read_gc0_entryhi(); | ||
461 | old_entrylo[0] = read_gc0_entrylo0(); | ||
462 | old_entrylo[1] = read_gc0_entrylo1(); | ||
463 | old_pagemask = read_gc0_pagemask(); | ||
464 | |||
465 | switch (current_cpu_type()) { | ||
466 | case CPU_CAVIUM_OCTEON3: | ||
467 | /* Inhibit machine check due to multiple matching TLB entries */ | ||
468 | cvmmemctl2 = read_c0_cvmmemctl2(); | ||
469 | cvmmemctl2 |= CVMMEMCTL2_INHIBITTS; | ||
470 | write_c0_cvmmemctl2(cvmmemctl2); | ||
471 | break; | ||
472 | } | ||
473 | |||
474 | /* Invalidate guest entries in guest TLB */ | ||
475 | write_gc0_entrylo0(0); | ||
476 | write_gc0_entrylo1(0); | ||
477 | write_gc0_pagemask(0); | ||
478 | for (entry = 0; entry < current_cpu_data.guest.tlbsize; entry++) { | ||
479 | /* Make sure all entries differ. */ | ||
480 | write_gc0_index(entry); | ||
481 | write_gc0_entryhi(UNIQUE_GUEST_ENTRYHI(entry)); | ||
482 | mtc0_tlbw_hazard(); | ||
483 | guest_tlb_write_indexed(); | ||
484 | } | ||
485 | |||
486 | if (cvmmemctl2) { | ||
487 | cvmmemctl2 &= ~CVMMEMCTL2_INHIBITTS; | ||
488 | write_c0_cvmmemctl2(cvmmemctl2); | ||
489 | } | ||
490 | |||
491 | write_gc0_index(old_index); | ||
492 | write_gc0_entryhi(old_entryhi); | ||
493 | write_gc0_entrylo0(old_entrylo[0]); | ||
494 | write_gc0_entrylo1(old_entrylo[1]); | ||
495 | write_gc0_pagemask(old_pagemask); | ||
496 | tlbw_use_hazard(); | ||
497 | |||
498 | local_irq_restore(flags); | ||
499 | } | ||
500 | EXPORT_SYMBOL_GPL(kvm_vz_local_flush_guesttlb_all); | ||
501 | |||
502 | /** | ||
503 | * kvm_vz_save_guesttlb() - Save a range of guest TLB entries. | ||
504 | * @buf: Buffer to write TLB entries into. | ||
505 | * @index: Start index. | ||
506 | * @count: Number of entries to save. | ||
507 | * | ||
508 | * Save a range of guest TLB entries. The caller must ensure interrupts are | ||
509 | * disabled. | ||
510 | */ | ||
511 | void kvm_vz_save_guesttlb(struct kvm_mips_tlb *buf, unsigned int index, | ||
512 | unsigned int count) | ||
513 | { | ||
514 | unsigned int end = index + count; | ||
515 | unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask; | ||
516 | unsigned int guestctl1 = 0; | ||
517 | int old_index, i; | ||
518 | |||
519 | /* Save registers we're about to clobber */ | ||
520 | old_index = read_gc0_index(); | ||
521 | old_entryhi = read_gc0_entryhi(); | ||
522 | old_entrylo0 = read_gc0_entrylo0(); | ||
523 | old_entrylo1 = read_gc0_entrylo1(); | ||
524 | old_pagemask = read_gc0_pagemask(); | ||
525 | |||
526 | /* Set root GuestID for root probe */ | ||
527 | htw_stop(); | ||
528 | set_root_gid_to_guest_gid(); | ||
529 | if (cpu_has_guestid) | ||
530 | guestctl1 = read_c0_guestctl1(); | ||
531 | |||
532 | /* Read each entry from guest TLB */ | ||
533 | for (i = index; i < end; ++i, ++buf) { | ||
534 | write_gc0_index(i); | ||
535 | |||
536 | mtc0_tlbr_hazard(); | ||
537 | guest_tlb_read(); | ||
538 | tlb_read_hazard(); | ||
539 | |||
540 | if (cpu_has_guestid && | ||
541 | (read_c0_guestctl1() ^ guestctl1) & MIPS_GCTL1_RID) { | ||
542 | /* Entry invalid or belongs to another guest */ | ||
543 | buf->tlb_hi = UNIQUE_GUEST_ENTRYHI(i); | ||
544 | buf->tlb_lo[0] = 0; | ||
545 | buf->tlb_lo[1] = 0; | ||
546 | buf->tlb_mask = 0; | ||
547 | } else { | ||
548 | /* Entry belongs to the right guest */ | ||
549 | buf->tlb_hi = read_gc0_entryhi(); | ||
550 | buf->tlb_lo[0] = read_gc0_entrylo0(); | ||
551 | buf->tlb_lo[1] = read_gc0_entrylo1(); | ||
552 | buf->tlb_mask = read_gc0_pagemask(); | ||
553 | } | ||
554 | } | ||
555 | |||
556 | /* Clear root GuestID again */ | ||
557 | clear_root_gid(); | ||
558 | htw_start(); | ||
559 | |||
560 | /* Restore clobbered registers */ | ||
561 | write_gc0_index(old_index); | ||
562 | write_gc0_entryhi(old_entryhi); | ||
563 | write_gc0_entrylo0(old_entrylo0); | ||
564 | write_gc0_entrylo1(old_entrylo1); | ||
565 | write_gc0_pagemask(old_pagemask); | ||
566 | |||
567 | tlbw_use_hazard(); | ||
568 | } | ||
569 | EXPORT_SYMBOL_GPL(kvm_vz_save_guesttlb); | ||
570 | |||
571 | /** | ||
572 | * kvm_vz_load_guesttlb() - Load a range of guest TLB entries. | ||
573 | * @buf: Buffer to read TLB entries from. | ||
574 | * @index: Start index. | ||
575 | * @count: Number of entries to load. | ||
576 | * | ||
577 | * Load a range of guest TLB entries. The caller must ensure interrupts are | ||
578 | * disabled. | ||
579 | */ | ||
580 | void kvm_vz_load_guesttlb(const struct kvm_mips_tlb *buf, unsigned int index, | ||
581 | unsigned int count) | ||
582 | { | ||
583 | unsigned int end = index + count; | ||
584 | unsigned long old_entryhi, old_entrylo0, old_entrylo1, old_pagemask; | ||
585 | int old_index, i; | ||
586 | |||
587 | /* Save registers we're about to clobber */ | ||
588 | old_index = read_gc0_index(); | ||
589 | old_entryhi = read_gc0_entryhi(); | ||
590 | old_entrylo0 = read_gc0_entrylo0(); | ||
591 | old_entrylo1 = read_gc0_entrylo1(); | ||
592 | old_pagemask = read_gc0_pagemask(); | ||
593 | |||
594 | /* Set root GuestID for root probe */ | ||
595 | htw_stop(); | ||
596 | set_root_gid_to_guest_gid(); | ||
597 | |||
598 | /* Write each entry to guest TLB */ | ||
599 | for (i = index; i < end; ++i, ++buf) { | ||
600 | write_gc0_index(i); | ||
601 | write_gc0_entryhi(buf->tlb_hi); | ||
602 | write_gc0_entrylo0(buf->tlb_lo[0]); | ||
603 | write_gc0_entrylo1(buf->tlb_lo[1]); | ||
604 | write_gc0_pagemask(buf->tlb_mask); | ||
605 | |||
606 | mtc0_tlbw_hazard(); | ||
607 | guest_tlb_write_indexed(); | ||
608 | } | ||
609 | |||
610 | /* Clear root GuestID again */ | ||
611 | clear_root_gid(); | ||
612 | htw_start(); | ||
613 | |||
614 | /* Restore clobbered registers */ | ||
615 | write_gc0_index(old_index); | ||
616 | write_gc0_entryhi(old_entryhi); | ||
617 | write_gc0_entrylo0(old_entrylo0); | ||
618 | write_gc0_entrylo1(old_entrylo1); | ||
619 | write_gc0_pagemask(old_pagemask); | ||
620 | |||
621 | tlbw_use_hazard(); | ||
622 | } | ||
623 | EXPORT_SYMBOL_GPL(kvm_vz_load_guesttlb); | ||
624 | |||
625 | #ifdef CONFIG_CPU_LOONGSON64 | ||
626 | void kvm_loongson_clear_guest_vtlb(void) | ||
627 | { | ||
628 | int idx = read_gc0_index(); | ||
629 | |||
630 | /* Set root GuestID for root probe and write of guest TLB entry */ | ||
631 | set_root_gid_to_guest_gid(); | ||
632 | |||
633 | write_gc0_index(0); | ||
634 | guest_tlbinvf(); | ||
635 | write_gc0_index(idx); | ||
636 | |||
637 | clear_root_gid(); | ||
638 | set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB); | ||
639 | } | ||
640 | EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_vtlb); | ||
641 | |||
642 | void kvm_loongson_clear_guest_ftlb(void) | ||
643 | { | ||
644 | int i; | ||
645 | int idx = read_gc0_index(); | ||
646 | |||
647 | /* Set root GuestID for root probe and write of guest TLB entry */ | ||
648 | set_root_gid_to_guest_gid(); | ||
649 | |||
650 | for (i = current_cpu_data.tlbsizevtlb; | ||
651 | i < (current_cpu_data.tlbsizevtlb + | ||
652 | current_cpu_data.tlbsizeftlbsets); | ||
653 | i++) { | ||
654 | write_gc0_index(i); | ||
655 | guest_tlbinvf(); | ||
656 | } | ||
657 | write_gc0_index(idx); | ||
658 | |||
659 | clear_root_gid(); | ||
660 | set_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB); | ||
661 | } | ||
662 | EXPORT_SYMBOL_GPL(kvm_loongson_clear_guest_ftlb); | ||
663 | #endif | ||
664 | |||
665 | #endif | ||
666 | |||
667 | /** | ||
668 | * kvm_mips_suspend_mm() - Suspend the active mm. | ||
669 | * @cpu: The CPU we're running on. | ||
670 | * | ||
671 | * Suspend the active_mm, ready for a switch to a KVM guest virtual address | ||
672 | * space. This is left active for the duration of guest context, including time | ||
673 | * with interrupts enabled, so we need to be careful not to confuse e.g. cache | ||
674 | * management IPIs. | ||
675 | * | ||
676 | * kvm_mips_resume_mm() should be called before context switching to a different | ||
677 | * process so we don't need to worry about reference counting. | ||
678 | * | ||
679 | * This needs to be in static kernel code to avoid exporting init_mm. | ||
680 | */ | ||
681 | void kvm_mips_suspend_mm(int cpu) | ||
682 | { | ||
683 | cpumask_clear_cpu(cpu, mm_cpumask(current->active_mm)); | ||
684 | current->active_mm = &init_mm; | ||
685 | } | ||
686 | EXPORT_SYMBOL_GPL(kvm_mips_suspend_mm); | ||
687 | |||
688 | /** | ||
689 | * kvm_mips_resume_mm() - Resume the current process mm. | ||
690 | * @cpu: The CPU we're running on. | ||
691 | * | ||
692 | * Resume the mm of the current process, after a switch back from a KVM guest | ||
693 | * virtual address space (see kvm_mips_suspend_mm()). | ||
694 | */ | ||
695 | void kvm_mips_resume_mm(int cpu) | ||
696 | { | ||
697 | cpumask_set_cpu(cpu, mm_cpumask(current->mm)); | ||
698 | current->active_mm = current->mm; | ||
699 | } | ||
700 | EXPORT_SYMBOL_GPL(kvm_mips_resume_mm); | ||
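A minimal pairing sketch (hypothetical caller, not from this file), showing how the two helpers bracket a stretch of guest-context execution on one CPU:

    int cpu = smp_processor_id();

    kvm_mips_suspend_mm(cpu);   /* active_mm -> init_mm; CPU dropped from
                                   the old mm's cpumask */
    /* ... run with the guest virtual address space loaded; cache
     * management IPIs now see init_mm rather than the user mm ... */
    kvm_mips_resume_mm(cpu);    /* active_mm -> current->mm; CPU added
                                   back to its cpumask */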
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h new file mode 100644 index 000000000..a8c7fd7bf --- /dev/null +++ b/arch/mips/kvm/trace.h | |||
@@ -0,0 +1,346 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
8 | */ | ||
9 | |||
10 | #if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ) | ||
11 | #define _TRACE_KVM_H | ||
12 | |||
13 | #include <linux/tracepoint.h> | ||
14 | |||
15 | #undef TRACE_SYSTEM | ||
16 | #define TRACE_SYSTEM kvm | ||
17 | #define TRACE_INCLUDE_PATH . | ||
18 | #define TRACE_INCLUDE_FILE trace | ||
19 | |||
20 | /* | ||
21 | * arch/mips/kvm/mips.c | ||
22 | */ | ||
23 | extern bool kvm_trace_guest_mode_change; | ||
24 | int kvm_guest_mode_change_trace_reg(void); | ||
25 | void kvm_guest_mode_change_trace_unreg(void); | ||
26 | |||
27 | /* | ||
28 | * Tracepoints for VM enters | ||
29 | */ | ||
30 | DECLARE_EVENT_CLASS(kvm_transition, | ||
31 | TP_PROTO(struct kvm_vcpu *vcpu), | ||
32 | TP_ARGS(vcpu), | ||
33 | TP_STRUCT__entry( | ||
34 | __field(unsigned long, pc) | ||
35 | ), | ||
36 | |||
37 | TP_fast_assign( | ||
38 | __entry->pc = vcpu->arch.pc; | ||
39 | ), | ||
40 | |||
41 | TP_printk("PC: 0x%08lx", | ||
42 | __entry->pc) | ||
43 | ); | ||
44 | |||
45 | DEFINE_EVENT(kvm_transition, kvm_enter, | ||
46 | TP_PROTO(struct kvm_vcpu *vcpu), | ||
47 | TP_ARGS(vcpu)); | ||
48 | |||
49 | DEFINE_EVENT(kvm_transition, kvm_reenter, | ||
50 | TP_PROTO(struct kvm_vcpu *vcpu), | ||
51 | TP_ARGS(vcpu)); | ||
52 | |||
53 | DEFINE_EVENT(kvm_transition, kvm_out, | ||
54 | TP_PROTO(struct kvm_vcpu *vcpu), | ||
55 | TP_ARGS(vcpu)); | ||
56 | |||
57 | /* The first 32 exit reasons correspond to Cause.ExcCode */ | ||
58 | #define KVM_TRACE_EXIT_INT 0 | ||
59 | #define KVM_TRACE_EXIT_TLBMOD 1 | ||
60 | #define KVM_TRACE_EXIT_TLBMISS_LD 2 | ||
61 | #define KVM_TRACE_EXIT_TLBMISS_ST 3 | ||
62 | #define KVM_TRACE_EXIT_ADDRERR_LD 4 | ||
63 | #define KVM_TRACE_EXIT_ADDRERR_ST 5 | ||
64 | #define KVM_TRACE_EXIT_SYSCALL 8 | ||
65 | #define KVM_TRACE_EXIT_BREAK_INST 9 | ||
66 | #define KVM_TRACE_EXIT_RESVD_INST 10 | ||
67 | #define KVM_TRACE_EXIT_COP_UNUSABLE 11 | ||
68 | #define KVM_TRACE_EXIT_TRAP_INST 13 | ||
69 | #define KVM_TRACE_EXIT_MSA_FPE 14 | ||
70 | #define KVM_TRACE_EXIT_FPE 15 | ||
71 | #define KVM_TRACE_EXIT_MSA_DISABLED 21 | ||
72 | #define KVM_TRACE_EXIT_GUEST_EXIT 27 | ||
73 | /* Further exit reasons */ | ||
74 | #define KVM_TRACE_EXIT_WAIT 32 | ||
75 | #define KVM_TRACE_EXIT_CACHE 33 | ||
76 | #define KVM_TRACE_EXIT_SIGNAL 34 | ||
77 | /* 32 exit reasons correspond to GuestCtl0.GExcCode (VZ) */ | ||
78 | #define KVM_TRACE_EXIT_GEXCCODE_BASE 64 | ||
79 | #define KVM_TRACE_EXIT_GPSI 64 /* 0 */ | ||
80 | #define KVM_TRACE_EXIT_GSFC 65 /* 1 */ | ||
81 | #define KVM_TRACE_EXIT_HC 66 /* 2 */ | ||
82 | #define KVM_TRACE_EXIT_GRR 67 /* 3 */ | ||
83 | #define KVM_TRACE_EXIT_GVA 72 /* 8 */ | ||
84 | #define KVM_TRACE_EXIT_GHFC 73 /* 9 */ | ||
85 | #define KVM_TRACE_EXIT_GPA 74 /* 10 */ | ||
86 | |||
87 | /* Tracepoints for VM exits */ | ||
88 | #define kvm_trace_symbol_exit_types \ | ||
89 | { KVM_TRACE_EXIT_INT, "Interrupt" }, \ | ||
90 | { KVM_TRACE_EXIT_TLBMOD, "TLB Mod" }, \ | ||
91 | { KVM_TRACE_EXIT_TLBMISS_LD, "TLB Miss (LD)" }, \ | ||
92 | { KVM_TRACE_EXIT_TLBMISS_ST, "TLB Miss (ST)" }, \ | ||
93 | { KVM_TRACE_EXIT_ADDRERR_LD, "Address Error (LD)" }, \ | ||
94 | { KVM_TRACE_EXIT_ADDRERR_ST, "Address Error (ST)" }, \ | ||
95 | { KVM_TRACE_EXIT_SYSCALL, "System Call" }, \ | ||
96 | { KVM_TRACE_EXIT_BREAK_INST, "Break Inst" }, \ | ||
97 | { KVM_TRACE_EXIT_RESVD_INST, "Reserved Inst" }, \ | ||
98 | { KVM_TRACE_EXIT_COP_UNUSABLE, "COP0/1 Unusable" }, \ | ||
99 | { KVM_TRACE_EXIT_TRAP_INST, "Trap Inst" }, \ | ||
100 | { KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \ | ||
101 | { KVM_TRACE_EXIT_FPE, "FPE" }, \ | ||
102 | { KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \ | ||
103 | { KVM_TRACE_EXIT_GUEST_EXIT, "Guest Exit" }, \ | ||
104 | { KVM_TRACE_EXIT_WAIT, "WAIT" }, \ | ||
105 | { KVM_TRACE_EXIT_CACHE, "CACHE" }, \ | ||
106 | { KVM_TRACE_EXIT_SIGNAL, "Signal" }, \ | ||
107 | { KVM_TRACE_EXIT_GPSI, "GPSI" }, \ | ||
108 | { KVM_TRACE_EXIT_GSFC, "GSFC" }, \ | ||
109 | { KVM_TRACE_EXIT_HC, "HC" }, \ | ||
110 | { KVM_TRACE_EXIT_GRR, "GRR" }, \ | ||
111 | { KVM_TRACE_EXIT_GVA, "GVA" }, \ | ||
112 | { KVM_TRACE_EXIT_GHFC, "GHFC" }, \ | ||
113 | { KVM_TRACE_EXIT_GPA, "GPA" } | ||
114 | |||
115 | TRACE_EVENT(kvm_exit, | ||
116 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason), | ||
117 | TP_ARGS(vcpu, reason), | ||
118 | TP_STRUCT__entry( | ||
119 | __field(unsigned long, pc) | ||
120 | __field(unsigned int, reason) | ||
121 | ), | ||
122 | |||
123 | TP_fast_assign( | ||
124 | __entry->pc = vcpu->arch.pc; | ||
125 | __entry->reason = reason; | ||
126 | ), | ||
127 | |||
128 | TP_printk("[%s]PC: 0x%08lx", | ||
129 | __print_symbolic(__entry->reason, | ||
130 | kvm_trace_symbol_exit_types), | ||
131 | __entry->pc) | ||
132 | ); | ||
133 | |||
134 | #define KVM_TRACE_MFC0 0 | ||
135 | #define KVM_TRACE_MTC0 1 | ||
136 | #define KVM_TRACE_DMFC0 2 | ||
137 | #define KVM_TRACE_DMTC0 3 | ||
138 | #define KVM_TRACE_RDHWR 4 | ||
139 | |||
140 | #define KVM_TRACE_HWR_COP0 0 | ||
141 | #define KVM_TRACE_HWR_HWR 1 | ||
142 | |||
143 | #define KVM_TRACE_COP0(REG, SEL) ((KVM_TRACE_HWR_COP0 << 8) | \ | ||
144 | ((REG) << 3) | (SEL)) | ||
145 | #define KVM_TRACE_HWR(REG, SEL) ((KVM_TRACE_HWR_HWR << 8) | \ | ||
146 | ((REG) << 3) | (SEL)) | ||
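A worked decode of the packed register identifier (illustrative arithmetic only, using entries from the table below):

    /*
     * KVM_TRACE_COP0(12, 0) == (0 << 8) | (12 << 3) | 0 == 0x060  "Status"
     * KVM_TRACE_HWR(29, 0)  == (1 << 8) | (29 << 3) | 0 == 0x1e8  "ULR"
     *
     * The kvm_hwr TP_printk further down unpacks the same fields:
     *   reg >> 8          -> COP0 vs HWR
     *   (reg >> 3) & 0x1f -> register number
     *   reg & 0x7         -> select
     */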
147 | |||
148 | #define kvm_trace_symbol_hwr_ops \ | ||
149 | { KVM_TRACE_MFC0, "MFC0" }, \ | ||
150 | { KVM_TRACE_MTC0, "MTC0" }, \ | ||
151 | { KVM_TRACE_DMFC0, "DMFC0" }, \ | ||
152 | { KVM_TRACE_DMTC0, "DMTC0" }, \ | ||
153 | { KVM_TRACE_RDHWR, "RDHWR" } | ||
154 | |||
155 | #define kvm_trace_symbol_hwr_cop \ | ||
156 | { KVM_TRACE_HWR_COP0, "COP0" }, \ | ||
157 | { KVM_TRACE_HWR_HWR, "HWR" } | ||
158 | |||
159 | #define kvm_trace_symbol_hwr_regs \ | ||
160 | { KVM_TRACE_COP0( 0, 0), "Index" }, \ | ||
161 | { KVM_TRACE_COP0( 2, 0), "EntryLo0" }, \ | ||
162 | { KVM_TRACE_COP0( 3, 0), "EntryLo1" }, \ | ||
163 | { KVM_TRACE_COP0( 4, 0), "Context" }, \ | ||
164 | { KVM_TRACE_COP0( 4, 2), "UserLocal" }, \ | ||
165 | { KVM_TRACE_COP0( 5, 0), "PageMask" }, \ | ||
166 | { KVM_TRACE_COP0( 6, 0), "Wired" }, \ | ||
167 | { KVM_TRACE_COP0( 7, 0), "HWREna" }, \ | ||
168 | { KVM_TRACE_COP0( 8, 0), "BadVAddr" }, \ | ||
169 | { KVM_TRACE_COP0( 9, 0), "Count" }, \ | ||
170 | { KVM_TRACE_COP0(10, 0), "EntryHi" }, \ | ||
171 | { KVM_TRACE_COP0(11, 0), "Compare" }, \ | ||
172 | { KVM_TRACE_COP0(12, 0), "Status" }, \ | ||
173 | { KVM_TRACE_COP0(12, 1), "IntCtl" }, \ | ||
174 | { KVM_TRACE_COP0(12, 2), "SRSCtl" }, \ | ||
175 | { KVM_TRACE_COP0(13, 0), "Cause" }, \ | ||
176 | { KVM_TRACE_COP0(14, 0), "EPC" }, \ | ||
177 | { KVM_TRACE_COP0(15, 0), "PRId" }, \ | ||
178 | { KVM_TRACE_COP0(15, 1), "EBase" }, \ | ||
179 | { KVM_TRACE_COP0(16, 0), "Config" }, \ | ||
180 | { KVM_TRACE_COP0(16, 1), "Config1" }, \ | ||
181 | { KVM_TRACE_COP0(16, 2), "Config2" }, \ | ||
182 | { KVM_TRACE_COP0(16, 3), "Config3" }, \ | ||
183 | { KVM_TRACE_COP0(16, 4), "Config4" }, \ | ||
184 | { KVM_TRACE_COP0(16, 5), "Config5" }, \ | ||
185 | { KVM_TRACE_COP0(16, 7), "Config7" }, \ | ||
186 | { KVM_TRACE_COP0(17, 1), "MAAR" }, \ | ||
187 | { KVM_TRACE_COP0(17, 2), "MAARI" }, \ | ||
188 | { KVM_TRACE_COP0(26, 0), "ECC" }, \ | ||
189 | { KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \ | ||
190 | { KVM_TRACE_COP0(31, 2), "KScratch1" }, \ | ||
191 | { KVM_TRACE_COP0(31, 3), "KScratch2" }, \ | ||
192 | { KVM_TRACE_COP0(31, 4), "KScratch3" }, \ | ||
193 | { KVM_TRACE_COP0(31, 5), "KScratch4" }, \ | ||
194 | { KVM_TRACE_COP0(31, 6), "KScratch5" }, \ | ||
195 | { KVM_TRACE_COP0(31, 7), "KScratch6" }, \ | ||
196 | { KVM_TRACE_HWR( 0, 0), "CPUNum" }, \ | ||
197 | { KVM_TRACE_HWR( 1, 0), "SYNCI_Step" }, \ | ||
198 | { KVM_TRACE_HWR( 2, 0), "CC" }, \ | ||
199 | { KVM_TRACE_HWR( 3, 0), "CCRes" }, \ | ||
200 | { KVM_TRACE_HWR(29, 0), "ULR" } | ||
201 | |||
202 | TRACE_EVENT(kvm_hwr, | ||
203 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg, | ||
204 | unsigned long val), | ||
205 | TP_ARGS(vcpu, op, reg, val), | ||
206 | TP_STRUCT__entry( | ||
207 | __field(unsigned long, val) | ||
208 | __field(u16, reg) | ||
209 | __field(u8, op) | ||
210 | ), | ||
211 | |||
212 | TP_fast_assign( | ||
213 | __entry->val = val; | ||
214 | __entry->reg = reg; | ||
215 | __entry->op = op; | ||
216 | ), | ||
217 | |||
218 | TP_printk("%s %s (%s:%u:%u) 0x%08lx", | ||
219 | __print_symbolic(__entry->op, | ||
220 | kvm_trace_symbol_hwr_ops), | ||
221 | __print_symbolic(__entry->reg, | ||
222 | kvm_trace_symbol_hwr_regs), | ||
223 | __print_symbolic(__entry->reg >> 8, | ||
224 | kvm_trace_symbol_hwr_cop), | ||
225 | (__entry->reg >> 3) & 0x1f, | ||
226 | __entry->reg & 0x7, | ||
227 | __entry->val) | ||
228 | ); | ||
229 | |||
230 | #define KVM_TRACE_AUX_RESTORE 0 | ||
231 | #define KVM_TRACE_AUX_SAVE 1 | ||
232 | #define KVM_TRACE_AUX_ENABLE 2 | ||
233 | #define KVM_TRACE_AUX_DISABLE 3 | ||
234 | #define KVM_TRACE_AUX_DISCARD 4 | ||
235 | |||
236 | #define KVM_TRACE_AUX_FPU 1 | ||
237 | #define KVM_TRACE_AUX_MSA 2 | ||
238 | #define KVM_TRACE_AUX_FPU_MSA 3 | ||
239 | |||
240 | #define kvm_trace_symbol_aux_op \ | ||
241 | { KVM_TRACE_AUX_RESTORE, "restore" }, \ | ||
242 | { KVM_TRACE_AUX_SAVE, "save" }, \ | ||
243 | { KVM_TRACE_AUX_ENABLE, "enable" }, \ | ||
244 | { KVM_TRACE_AUX_DISABLE, "disable" }, \ | ||
245 | { KVM_TRACE_AUX_DISCARD, "discard" } | ||
246 | |||
247 | #define kvm_trace_symbol_aux_state \ | ||
248 | { KVM_TRACE_AUX_FPU, "FPU" }, \ | ||
249 | { KVM_TRACE_AUX_MSA, "MSA" }, \ | ||
250 | { KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" } | ||
251 | |||
252 | TRACE_EVENT(kvm_aux, | ||
253 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, | ||
254 | unsigned int state), | ||
255 | TP_ARGS(vcpu, op, state), | ||
256 | TP_STRUCT__entry( | ||
257 | __field(unsigned long, pc) | ||
258 | __field(u8, op) | ||
259 | __field(u8, state) | ||
260 | ), | ||
261 | |||
262 | TP_fast_assign( | ||
263 | __entry->pc = vcpu->arch.pc; | ||
264 | __entry->op = op; | ||
265 | __entry->state = state; | ||
266 | ), | ||
267 | |||
268 | TP_printk("%s %s PC: 0x%08lx", | ||
269 | __print_symbolic(__entry->op, | ||
270 | kvm_trace_symbol_aux_op), | ||
271 | __print_symbolic(__entry->state, | ||
272 | kvm_trace_symbol_aux_state), | ||
273 | __entry->pc) | ||
274 | ); | ||
275 | |||
276 | TRACE_EVENT(kvm_asid_change, | ||
277 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid, | ||
278 | unsigned int new_asid), | ||
279 | TP_ARGS(vcpu, old_asid, new_asid), | ||
280 | TP_STRUCT__entry( | ||
281 | __field(unsigned long, pc) | ||
282 | __field(u8, old_asid) | ||
283 | __field(u8, new_asid) | ||
284 | ), | ||
285 | |||
286 | TP_fast_assign( | ||
287 | __entry->pc = vcpu->arch.pc; | ||
288 | __entry->old_asid = old_asid; | ||
289 | __entry->new_asid = new_asid; | ||
290 | ), | ||
291 | |||
292 | TP_printk("PC: 0x%08lx old: 0x%02x new: 0x%02x", | ||
293 | __entry->pc, | ||
294 | __entry->old_asid, | ||
295 | __entry->new_asid) | ||
296 | ); | ||
297 | |||
298 | TRACE_EVENT(kvm_guestid_change, | ||
299 | TP_PROTO(struct kvm_vcpu *vcpu, unsigned int guestid), | ||
300 | TP_ARGS(vcpu, guestid), | ||
301 | TP_STRUCT__entry( | ||
302 | __field(unsigned int, guestid) | ||
303 | ), | ||
304 | |||
305 | TP_fast_assign( | ||
306 | __entry->guestid = guestid; | ||
307 | ), | ||
308 | |||
309 | TP_printk("GuestID: 0x%02x", | ||
310 | __entry->guestid) | ||
311 | ); | ||
312 | |||
313 | TRACE_EVENT_FN(kvm_guest_mode_change, | ||
314 | TP_PROTO(struct kvm_vcpu *vcpu), | ||
315 | TP_ARGS(vcpu), | ||
316 | TP_STRUCT__entry( | ||
317 | __field(unsigned long, epc) | ||
318 | __field(unsigned long, pc) | ||
319 | __field(unsigned long, badvaddr) | ||
320 | __field(unsigned int, status) | ||
321 | __field(unsigned int, cause) | ||
322 | ), | ||
323 | |||
324 | TP_fast_assign( | ||
325 | __entry->epc = kvm_read_c0_guest_epc(vcpu->arch.cop0); | ||
326 | __entry->pc = vcpu->arch.pc; | ||
327 | __entry->badvaddr = kvm_read_c0_guest_badvaddr(vcpu->arch.cop0); | ||
328 | __entry->status = kvm_read_c0_guest_status(vcpu->arch.cop0); | ||
329 | __entry->cause = kvm_read_c0_guest_cause(vcpu->arch.cop0); | ||
330 | ), | ||
331 | |||
332 | TP_printk("EPC: 0x%08lx PC: 0x%08lx Status: 0x%08x Cause: 0x%08x BadVAddr: 0x%08lx", | ||
333 | __entry->epc, | ||
334 | __entry->pc, | ||
335 | __entry->status, | ||
336 | __entry->cause, | ||
337 | __entry->badvaddr), | ||
338 | |||
339 | kvm_guest_mode_change_trace_reg, | ||
340 | kvm_guest_mode_change_trace_unreg | ||
341 | ); | ||
342 | |||
343 | #endif /* _TRACE_KVM_H */ | ||
344 | |||
345 | /* This part must be outside protection */ | ||
346 | #include <trace/define_trace.h> | ||
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c new file mode 100644 index 000000000..0788c00d7 --- /dev/null +++ b/arch/mips/kvm/trap_emul.c | |||
@@ -0,0 +1,1306 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/errno.h> | ||
13 | #include <linux/err.h> | ||
14 | #include <linux/kvm_host.h> | ||
15 | #include <linux/log2.h> | ||
16 | #include <linux/uaccess.h> | ||
17 | #include <linux/vmalloc.h> | ||
18 | #include <asm/mmu_context.h> | ||
19 | #include <asm/pgalloc.h> | ||
20 | |||
21 | #include "interrupt.h" | ||
22 | |||
23 | static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva) | ||
24 | { | ||
25 | gpa_t gpa; | ||
26 | gva_t kseg = KSEGX(gva); | ||
27 | gva_t gkseg = KVM_GUEST_KSEGX(gva); | ||
28 | |||
29 | if ((kseg == CKSEG0) || (kseg == CKSEG1)) | ||
30 | gpa = CPHYSADDR(gva); | ||
31 | else if (gkseg == KVM_GUEST_KSEG0) | ||
32 | gpa = KVM_GUEST_CPHYSADDR(gva); | ||
33 | else { | ||
34 | kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva); | ||
35 | kvm_mips_dump_host_tlbs(); | ||
36 | gpa = KVM_INVALID_ADDR; | ||
37 | } | ||
38 | |||
39 | kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa); | ||
40 | |||
41 | return gpa; | ||
42 | } | ||
43 | |||
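For reference, a minimal user-space sketch of the segment arithmetic used by kvm_trap_emul_gva_to_gpa_cb() above. The masks and bases mirror the MIPS32 definitions of KSEGX()/CPHYSADDR() and the trap & emulate guest KSEG0 base at 0x40000000; treat them as assumptions for illustration, not the kernel's own headers.

#include <stdint.h>
#include <inttypes.h>
#include <stdio.h>

#define PHYS_MASK    0x1fffffffu  /* CPHYSADDR()/KVM_GUEST_CPHYSADDR() mask */
#define CKSEG0       0x80000000u
#define CKSEG1       0xa0000000u
#define GUEST_KSEG0  0x40000000u  /* trap & emulate guest kernel segment */
#define INVALID_GPA  0xffffffffu

static uint32_t gva_to_gpa(uint32_t gva)
{
	uint32_t kseg = gva & 0xe0000000u;   /* KSEGX(): host segment bits */
	uint32_t gkseg = gva & 0x60000000u;  /* KVM_GUEST_KSEGX() */

	if (kseg == CKSEG0 || kseg == CKSEG1)
		return gva & PHYS_MASK;      /* unmapped host-style segments */
	if (gkseg == GUEST_KSEG0)
		return gva & PHYS_MASK;      /* relocated guest KSEG0 */
	return INVALID_GPA;                  /* mapped addresses need the TLB */
}

int main(void)
{
	printf("%08" PRIx32 "\n", gva_to_gpa(0x80001000u)); /* 00001000 */
	printf("%08" PRIx32 "\n", gva_to_gpa(0x40002000u)); /* 00002000 */
	printf("%08" PRIx32 "\n", gva_to_gpa(0x00400000u)); /* ffffffff */
	return 0;
}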
44 | static int kvm_trap_emul_no_handler(struct kvm_vcpu *vcpu) | ||
45 | { | ||
46 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
47 | u32 cause = vcpu->arch.host_cp0_cause; | ||
48 | u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; | ||
49 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
50 | u32 inst = 0; | ||
51 | |||
52 | /* | ||
53 | * Fetch the instruction. | ||
54 | */ | ||
55 | if (cause & CAUSEF_BD) | ||
56 | opc += 1; | ||
57 | kvm_get_badinstr(opc, vcpu, &inst); | ||
58 | |||
59 | kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", | ||
60 | exccode, opc, inst, badvaddr, | ||
61 | kvm_read_c0_guest_status(vcpu->arch.cop0)); | ||
62 | kvm_arch_vcpu_dump_regs(vcpu); | ||
63 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
64 | return RESUME_HOST; | ||
65 | } | ||
66 | |||
67 | static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu) | ||
68 | { | ||
69 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
70 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
71 | u32 cause = vcpu->arch.host_cp0_cause; | ||
72 | enum emulation_result er = EMULATE_DONE; | ||
73 | int ret = RESUME_GUEST; | ||
74 | |||
75 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { | ||
76 | /* FPU Unusable */ | ||
77 | if (!kvm_mips_guest_has_fpu(&vcpu->arch) || | ||
78 | (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) { | ||
79 | /* | ||
80 | * Unusable/no FPU in guest: | ||
81 | * deliver guest COP1 Unusable Exception | ||
82 | */ | ||
83 | er = kvm_mips_emulate_fpu_exc(cause, opc, vcpu); | ||
84 | } else { | ||
85 | /* Restore FPU state */ | ||
86 | kvm_own_fpu(vcpu); | ||
87 | er = EMULATE_DONE; | ||
88 | } | ||
89 | } else { | ||
90 | er = kvm_mips_emulate_inst(cause, opc, vcpu); | ||
91 | } | ||
92 | |||
93 | switch (er) { | ||
94 | case EMULATE_DONE: | ||
95 | ret = RESUME_GUEST; | ||
96 | break; | ||
97 | |||
98 | case EMULATE_FAIL: | ||
99 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
100 | ret = RESUME_HOST; | ||
101 | break; | ||
102 | |||
103 | case EMULATE_WAIT: | ||
104 | vcpu->run->exit_reason = KVM_EXIT_INTR; | ||
105 | ret = RESUME_HOST; | ||
106 | break; | ||
107 | |||
108 | case EMULATE_HYPERCALL: | ||
109 | ret = kvm_mips_handle_hypcall(vcpu); | ||
110 | break; | ||
111 | |||
112 | default: | ||
113 | BUG(); | ||
114 | } | ||
115 | return ret; | ||
116 | } | ||
117 | |||
118 | static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_vcpu *vcpu) | ||
119 | { | ||
120 | enum emulation_result er; | ||
121 | union mips_instruction inst; | ||
122 | int err; | ||
123 | |||
124 | /* A code fetch fault doesn't count as an MMIO */ | ||
125 | if (kvm_is_ifetch_fault(&vcpu->arch)) { | ||
126 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
127 | return RESUME_HOST; | ||
128 | } | ||
129 | |||
130 | /* Fetch the instruction. */ | ||
131 | if (cause & CAUSEF_BD) | ||
132 | opc += 1; | ||
133 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
134 | if (err) { | ||
135 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
136 | return RESUME_HOST; | ||
137 | } | ||
138 | |||
139 | /* Emulate the load */ | ||
140 | er = kvm_mips_emulate_load(inst, cause, vcpu); | ||
141 | if (er == EMULATE_FAIL) { | ||
142 | kvm_err("Emulate load from MMIO space failed\n"); | ||
143 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
144 | } else { | ||
145 | vcpu->run->exit_reason = KVM_EXIT_MMIO; | ||
146 | } | ||
147 | return RESUME_HOST; | ||
148 | } | ||
149 | |||
150 | static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_vcpu *vcpu) | ||
151 | { | ||
152 | enum emulation_result er; | ||
153 | union mips_instruction inst; | ||
154 | int err; | ||
155 | |||
156 | /* Fetch the instruction. */ | ||
157 | if (cause & CAUSEF_BD) | ||
158 | opc += 1; | ||
159 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
160 | if (err) { | ||
161 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
162 | return RESUME_HOST; | ||
163 | } | ||
164 | |||
165 | /* Emulate the store */ | ||
166 | er = kvm_mips_emulate_store(inst, cause, vcpu); | ||
167 | if (er == EMULATE_FAIL) { | ||
168 | kvm_err("Emulate store to MMIO space failed\n"); | ||
169 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
170 | } else { | ||
171 | vcpu->run->exit_reason = KVM_EXIT_MMIO; | ||
172 | } | ||
173 | return RESUME_HOST; | ||
174 | } | ||
175 | |||
176 | static int kvm_mips_bad_access(u32 cause, u32 *opc, | ||
177 | struct kvm_vcpu *vcpu, bool store) | ||
178 | { | ||
179 | if (store) | ||
180 | return kvm_mips_bad_store(cause, opc, vcpu); | ||
181 | else | ||
182 | return kvm_mips_bad_load(cause, opc, vcpu); | ||
183 | } | ||
184 | |||
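Both helpers above finish by returning to userspace with exit_reason set to KVM_EXIT_MMIO, leaving the actual device access to the VMM. A hedged sketch of the userspace side, using only the generic KVM run-structure fields; device_read() and device_write() are hypothetical VMM device-model hooks, and run is assumed to be the mmap'd struct kvm_run:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Hypothetical device-model hooks, named for illustration only. */
void device_read(__u64 addr, void *data, __u32 len);
void device_write(__u64 addr, const void *data, __u32 len);

static void run_once(int vcpu_fd, struct kvm_run *run)
{
	ioctl(vcpu_fd, KVM_RUN, 0);

	if (run->exit_reason == KVM_EXIT_MMIO) {
		if (run->mmio.is_write)
			device_write(run->mmio.phys_addr, run->mmio.data,
				     run->mmio.len);
		else
			/* KVM completes the guest load from data on re-entry */
			device_read(run->mmio.phys_addr, run->mmio.data,
				    run->mmio.len);
	}
}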
185 | static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu) | ||
186 | { | ||
187 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
188 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
189 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
190 | u32 cause = vcpu->arch.host_cp0_cause; | ||
191 | struct kvm_mips_tlb *tlb; | ||
192 | unsigned long entryhi; | ||
193 | int index; | ||
194 | |||
195 | if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | ||
196 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | ||
197 | /* | ||
198 | * First find the mapping in the guest TLB. If the failure to | ||
199 | * write was due to the guest TLB, it should be up to the guest | ||
200 | * to handle it. | ||
201 | */ | ||
202 | entryhi = (badvaddr & VPN2_MASK) | | ||
203 | (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID); | ||
204 | index = kvm_mips_guest_tlb_lookup(vcpu, entryhi); | ||
205 | |||
206 | /* | ||
207 | * These should never happen. | ||
208 | * They would indicate stale host TLB entries. | ||
209 | */ | ||
210 | if (unlikely(index < 0)) { | ||
211 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
212 | return RESUME_HOST; | ||
213 | } | ||
214 | tlb = vcpu->arch.guest_tlb + index; | ||
215 | if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) { | ||
216 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
217 | return RESUME_HOST; | ||
218 | } | ||
219 | |||
220 | /* | ||
221 | * Guest entry not dirty? That would explain the TLB modified | ||
222 | * exception. Relay that on to the guest so it can handle it. | ||
223 | */ | ||
224 | if (!TLB_IS_DIRTY(*tlb, badvaddr)) { | ||
225 | kvm_mips_emulate_tlbmod(cause, opc, vcpu); | ||
226 | return RESUME_GUEST; | ||
227 | } | ||
228 | |||
229 | if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr, | ||
230 | true)) | ||
231 | /* Not writable, needs handling as MMIO */ | ||
232 | return kvm_mips_bad_store(cause, opc, vcpu); | ||
233 | return RESUME_GUEST; | ||
234 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | ||
235 | if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0) | ||
236 | /* Not writable, needs handling as MMIO */ | ||
237 | return kvm_mips_bad_store(cause, opc, vcpu); | ||
238 | return RESUME_GUEST; | ||
239 | } else { | ||
240 | /* host kernel addresses are all handled as MMIO */ | ||
241 | return kvm_mips_bad_store(cause, opc, vcpu); | ||
242 | } | ||
243 | } | ||
244 | |||
245 | static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store) | ||
246 | { | ||
247 | struct kvm_run *run = vcpu->run; | ||
248 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
249 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
250 | u32 cause = vcpu->arch.host_cp0_cause; | ||
251 | enum emulation_result er = EMULATE_DONE; | ||
252 | int ret = RESUME_GUEST; | ||
253 | |||
254 | if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) | ||
255 | && KVM_GUEST_KERNEL_MODE(vcpu)) { | ||
256 | if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) { | ||
257 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
258 | ret = RESUME_HOST; | ||
259 | } | ||
260 | } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0 | ||
261 | || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) { | ||
262 | kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n", | ||
263 | store ? "ST" : "LD", cause, opc, badvaddr); | ||
264 | |||
265 | /* | ||
266 | * User Address (UA) fault; this could happen if | ||
267 | * (1) TLB entry not present/valid in both Guest and shadow host | ||
268 | * TLBs, in this case we pass on the fault to the guest | ||
269 | * kernel and let it handle it. | ||
270 | * (2) TLB entry is present in the Guest TLB but not in the | ||
271 | * shadow, in this case we inject the TLB from the Guest TLB | ||
272 | * into the shadow host TLB | ||
273 | */ | ||
274 | |||
275 | er = kvm_mips_handle_tlbmiss(cause, opc, vcpu, store); | ||
276 | if (er == EMULATE_DONE) | ||
277 | ret = RESUME_GUEST; | ||
278 | else { | ||
279 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
280 | ret = RESUME_HOST; | ||
281 | } | ||
282 | } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) { | ||
283 | /* | ||
284 | * All KSEG0 faults are handled by KVM, as the guest kernel does | ||
285 | * not expect to ever get them | ||
286 | */ | ||
287 | if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0) | ||
288 | ret = kvm_mips_bad_access(cause, opc, vcpu, store); | ||
289 | } else if (KVM_GUEST_KERNEL_MODE(vcpu) | ||
290 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { | ||
291 | /* | ||
292 | * With EVA we may get a TLB exception instead of an address | ||
293 | * error when the guest performs MMIO to KSeg1 addresses. | ||
294 | */ | ||
295 | ret = kvm_mips_bad_access(cause, opc, vcpu, store); | ||
296 | } else { | ||
297 | kvm_err("Illegal TLB %s fault address, cause %#x, PC: %p, BadVaddr: %#lx\n", | ||
298 | store ? "ST" : "LD", cause, opc, badvaddr); | ||
299 | kvm_mips_dump_host_tlbs(); | ||
300 | kvm_arch_vcpu_dump_regs(vcpu); | ||
301 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
302 | ret = RESUME_HOST; | ||
303 | } | ||
304 | return ret; | ||
305 | } | ||
306 | |||
307 | static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | ||
308 | { | ||
309 | return kvm_trap_emul_handle_tlb_miss(vcpu, true); | ||
310 | } | ||
311 | |||
312 | static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) | ||
313 | { | ||
314 | return kvm_trap_emul_handle_tlb_miss(vcpu, false); | ||
315 | } | ||
316 | |||
317 | static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu) | ||
318 | { | ||
319 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
320 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
321 | u32 cause = vcpu->arch.host_cp0_cause; | ||
322 | int ret = RESUME_GUEST; | ||
323 | |||
324 | if (KVM_GUEST_KERNEL_MODE(vcpu) | ||
325 | && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) { | ||
326 | ret = kvm_mips_bad_store(cause, opc, vcpu); | ||
327 | } else { | ||
328 | kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n", | ||
329 | cause, opc, badvaddr); | ||
330 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
331 | ret = RESUME_HOST; | ||
332 | } | ||
333 | return ret; | ||
334 | } | ||
335 | |||
336 | static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu) | ||
337 | { | ||
338 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
339 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
340 | u32 cause = vcpu->arch.host_cp0_cause; | ||
341 | int ret = RESUME_GUEST; | ||
342 | |||
343 | if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) { | ||
344 | ret = kvm_mips_bad_load(cause, opc, vcpu); | ||
345 | } else { | ||
346 | kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n", | ||
347 | cause, opc, badvaddr); | ||
348 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
349 | ret = RESUME_HOST; | ||
350 | } | ||
351 | return ret; | ||
352 | } | ||
353 | |||
354 | static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu) | ||
355 | { | ||
356 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
357 | u32 cause = vcpu->arch.host_cp0_cause; | ||
358 | enum emulation_result er = EMULATE_DONE; | ||
359 | int ret = RESUME_GUEST; | ||
360 | |||
361 | er = kvm_mips_emulate_syscall(cause, opc, vcpu); | ||
362 | if (er == EMULATE_DONE) | ||
363 | ret = RESUME_GUEST; | ||
364 | else { | ||
365 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
366 | ret = RESUME_HOST; | ||
367 | } | ||
368 | return ret; | ||
369 | } | ||
370 | |||
371 | static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu) | ||
372 | { | ||
373 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
374 | u32 cause = vcpu->arch.host_cp0_cause; | ||
375 | enum emulation_result er = EMULATE_DONE; | ||
376 | int ret = RESUME_GUEST; | ||
377 | |||
378 | er = kvm_mips_handle_ri(cause, opc, vcpu); | ||
379 | if (er == EMULATE_DONE) | ||
380 | ret = RESUME_GUEST; | ||
381 | else { | ||
382 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
383 | ret = RESUME_HOST; | ||
384 | } | ||
385 | return ret; | ||
386 | } | ||
387 | |||
388 | static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu) | ||
389 | { | ||
390 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
391 | u32 cause = vcpu->arch.host_cp0_cause; | ||
392 | enum emulation_result er = EMULATE_DONE; | ||
393 | int ret = RESUME_GUEST; | ||
394 | |||
395 | er = kvm_mips_emulate_bp_exc(cause, opc, vcpu); | ||
396 | if (er == EMULATE_DONE) | ||
397 | ret = RESUME_GUEST; | ||
398 | else { | ||
399 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
400 | ret = RESUME_HOST; | ||
401 | } | ||
402 | return ret; | ||
403 | } | ||
404 | |||
405 | static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu) | ||
406 | { | ||
407 | u32 __user *opc = (u32 __user *)vcpu->arch.pc; | ||
408 | u32 cause = vcpu->arch.host_cp0_cause; | ||
409 | enum emulation_result er = EMULATE_DONE; | ||
410 | int ret = RESUME_GUEST; | ||
411 | |||
412 | er = kvm_mips_emulate_trap_exc(cause, opc, vcpu); | ||
413 | if (er == EMULATE_DONE) { | ||
414 | ret = RESUME_GUEST; | ||
415 | } else { | ||
416 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
417 | ret = RESUME_HOST; | ||
418 | } | ||
419 | return ret; | ||
420 | } | ||
421 | |||
422 | static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu) | ||
423 | { | ||
424 | u32 __user *opc = (u32 __user *)vcpu->arch.pc; | ||
425 | u32 cause = vcpu->arch.host_cp0_cause; | ||
426 | enum emulation_result er = EMULATE_DONE; | ||
427 | int ret = RESUME_GUEST; | ||
428 | |||
429 | er = kvm_mips_emulate_msafpe_exc(cause, opc, vcpu); | ||
430 | if (er == EMULATE_DONE) { | ||
431 | ret = RESUME_GUEST; | ||
432 | } else { | ||
433 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
434 | ret = RESUME_HOST; | ||
435 | } | ||
436 | return ret; | ||
437 | } | ||
438 | |||
439 | static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu) | ||
440 | { | ||
441 | u32 __user *opc = (u32 __user *)vcpu->arch.pc; | ||
442 | u32 cause = vcpu->arch.host_cp0_cause; | ||
443 | enum emulation_result er = EMULATE_DONE; | ||
444 | int ret = RESUME_GUEST; | ||
445 | |||
446 | er = kvm_mips_emulate_fpe_exc(cause, opc, vcpu); | ||
447 | if (er == EMULATE_DONE) { | ||
448 | ret = RESUME_GUEST; | ||
449 | } else { | ||
450 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
451 | ret = RESUME_HOST; | ||
452 | } | ||
453 | return ret; | ||
454 | } | ||
455 | |||
456 | /** | ||
457 | * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root. | ||
458 | * @vcpu: Virtual CPU context. | ||
459 | * | ||
460 | * Handle when the guest attempts to use MSA when it is disabled. | ||
461 | */ | ||
462 | static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu) | ||
463 | { | ||
464 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
465 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
466 | u32 cause = vcpu->arch.host_cp0_cause; | ||
467 | enum emulation_result er = EMULATE_DONE; | ||
468 | int ret = RESUME_GUEST; | ||
469 | |||
470 | if (!kvm_mips_guest_has_msa(&vcpu->arch) || | ||
471 | (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) { | ||
472 | /* | ||
473 | * No MSA in guest, or FPU enabled and not in FR=1 mode, | ||
474 | * guest reserved instruction exception | ||
475 | */ | ||
476 | er = kvm_mips_emulate_ri_exc(cause, opc, vcpu); | ||
477 | } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) { | ||
478 | /* MSA disabled by guest, guest MSA disabled exception */ | ||
479 | er = kvm_mips_emulate_msadis_exc(cause, opc, vcpu); | ||
480 | } else { | ||
481 | /* Restore MSA/FPU state */ | ||
482 | kvm_own_msa(vcpu); | ||
483 | er = EMULATE_DONE; | ||
484 | } | ||
485 | |||
486 | switch (er) { | ||
487 | case EMULATE_DONE: | ||
488 | ret = RESUME_GUEST; | ||
489 | break; | ||
490 | |||
491 | case EMULATE_FAIL: | ||
492 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
493 | ret = RESUME_HOST; | ||
494 | break; | ||
495 | |||
496 | default: | ||
497 | BUG(); | ||
498 | } | ||
499 | return ret; | ||
500 | } | ||
501 | |||
502 | static int kvm_trap_emul_hardware_enable(void) | ||
503 | { | ||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | static void kvm_trap_emul_hardware_disable(void) | ||
508 | { | ||
509 | } | ||
510 | |||
511 | static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext) | ||
512 | { | ||
513 | int r; | ||
514 | |||
515 | switch (ext) { | ||
516 | case KVM_CAP_MIPS_TE: | ||
517 | r = 1; | ||
518 | break; | ||
519 | case KVM_CAP_IOEVENTFD: | ||
520 | r = 1; | ||
521 | break; | ||
522 | default: | ||
523 | r = 0; | ||
524 | break; | ||
525 | } | ||
526 | |||
527 | return r; | ||
528 | } | ||
529 | |||
530 | static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu) | ||
531 | { | ||
532 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
533 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
534 | |||
535 | /* | ||
536 | * Allocate GVA -> HPA page tables. | ||
537 | * MIPS doesn't use the mm_struct pointer argument. | ||
538 | */ | ||
539 | kern_mm->pgd = pgd_alloc(kern_mm); | ||
540 | if (!kern_mm->pgd) | ||
541 | return -ENOMEM; | ||
542 | |||
543 | user_mm->pgd = pgd_alloc(user_mm); | ||
544 | if (!user_mm->pgd) { | ||
545 | pgd_free(kern_mm, kern_mm->pgd); | ||
546 | return -ENOMEM; | ||
547 | } | ||
548 | |||
549 | return 0; | ||
550 | } | ||
551 | |||
552 | static void kvm_mips_emul_free_gva_pt(pgd_t *pgd) | ||
553 | { | ||
554 | /* Don't free host kernel page tables copied from init_mm.pgd */ | ||
555 | const unsigned long end = 0x80000000; | ||
556 | unsigned long pgd_va, pud_va, pmd_va; | ||
557 | p4d_t *p4d; | ||
558 | pud_t *pud; | ||
559 | pmd_t *pmd; | ||
560 | pte_t *pte; | ||
561 | int i, j, k; | ||
562 | |||
563 | for (i = 0; i < USER_PTRS_PER_PGD; i++) { | ||
564 | if (pgd_none(pgd[i])) | ||
565 | continue; | ||
566 | |||
567 | pgd_va = (unsigned long)i << PGDIR_SHIFT; | ||
568 | if (pgd_va >= end) | ||
569 | break; | ||
570 | p4d = p4d_offset(pgd, 0); | ||
571 | pud = pud_offset(p4d + i, 0); | ||
572 | for (j = 0; j < PTRS_PER_PUD; j++) { | ||
573 | if (pud_none(pud[j])) | ||
574 | continue; | ||
575 | |||
576 | pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT); | ||
577 | if (pud_va >= end) | ||
578 | break; | ||
579 | pmd = pmd_offset(pud + j, 0); | ||
580 | for (k = 0; k < PTRS_PER_PMD; k++) { | ||
581 | if (pmd_none(pmd[k])) | ||
582 | continue; | ||
583 | |||
584 | pmd_va = pud_va | (k << PMD_SHIFT); | ||
585 | if (pmd_va >= end) | ||
586 | break; | ||
587 | pte = pte_offset_kernel(pmd + k, 0); | ||
588 | pte_free_kernel(NULL, pte); | ||
589 | } | ||
590 | pmd_free(NULL, pmd); | ||
591 | } | ||
592 | pud_free(NULL, pud); | ||
593 | } | ||
594 | pgd_free(NULL, pgd); | ||
595 | } | ||
596 | |||
597 | static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu) | ||
598 | { | ||
599 | kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd); | ||
600 | kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd); | ||
601 | } | ||
602 | |||
603 | static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu) | ||
604 | { | ||
605 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
606 | u32 config, config1; | ||
607 | int vcpu_id = vcpu->vcpu_id; | ||
608 | |||
609 | /* Start off the timer at 100 MHz */ | ||
610 | kvm_mips_init_count(vcpu, 100*1000*1000); | ||
611 | |||
612 | /* | ||
613 | * Arch specific stuff, set up config registers properly so that the | ||
614 | * guest will come up as expected | ||
615 | */ | ||
616 | #ifndef CONFIG_CPU_MIPSR6 | ||
617 | /* r2-r5, simulate a MIPS 24kc */ | ||
618 | kvm_write_c0_guest_prid(cop0, 0x00019300); | ||
619 | #else | ||
620 | /* r6+, simulate a generic QEMU machine */ | ||
621 | kvm_write_c0_guest_prid(cop0, 0x00010000); | ||
622 | #endif | ||
623 | /* | ||
624 | * Have config1, Cacheable, noncoherent, write-back, write allocate. | ||
625 | * Endianness, arch revision & virtually tagged icache should match | ||
626 | * host. | ||
627 | */ | ||
628 | config = read_c0_config() & MIPS_CONF_AR; | ||
629 | config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB; | ||
630 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
631 | config |= CONF_BE; | ||
632 | #endif | ||
633 | if (cpu_has_vtag_icache) | ||
634 | config |= MIPS_CONF_VI; | ||
635 | kvm_write_c0_guest_config(cop0, config); | ||
636 | |||
637 | /* Read the cache characteristics from the host Config1 Register */ | ||
638 | config1 = (read_c0_config1() & ~0x7f); | ||
639 | |||
640 | /* DCache line size not correctly reported in Config1 on Octeon CPUs */ | ||
641 | if (cpu_dcache_line_size()) { | ||
642 | config1 &= ~MIPS_CONF1_DL; | ||
643 | config1 |= ((ilog2(cpu_dcache_line_size()) - 1) << | ||
644 | MIPS_CONF1_DL_SHF) & MIPS_CONF1_DL; | ||
645 | } | ||
646 | |||
647 | /* Set up MMU size */ | ||
648 | config1 &= ~(0x3f << 25); | ||
649 | config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25); | ||
650 | |||
651 | /* We unset some bits that we aren't emulating */ | ||
652 | config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC | | ||
653 | MIPS_CONF1_WR | MIPS_CONF1_CA); | ||
654 | kvm_write_c0_guest_config1(cop0, config1); | ||
655 | |||
656 | /* Have config3, no tertiary/secondary caches implemented */ | ||
657 | kvm_write_c0_guest_config2(cop0, MIPS_CONF_M); | ||
658 | /* MIPS_CONF_M | (read_c0_config2() & 0xfff) */ | ||
659 | |||
660 | /* Have config4, UserLocal */ | ||
661 | kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI); | ||
662 | |||
663 | /* Have config5 */ | ||
664 | kvm_write_c0_guest_config4(cop0, MIPS_CONF_M); | ||
665 | |||
666 | /* No config6 */ | ||
667 | kvm_write_c0_guest_config5(cop0, 0); | ||
668 | |||
669 | /* Set Wait IE/IXMT Ignore in Config7, IAR, AR */ | ||
670 | kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10)); | ||
671 | |||
672 | /* Status */ | ||
673 | kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL); | ||
674 | |||
675 | /* | ||
676 | * Set up IntCtl defaults: compatibility mode for timer interrupts (HW5) | ||
677 | */ | ||
678 | kvm_write_c0_guest_intctl(cop0, 0xFC000000); | ||
679 | |||
680 | /* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */ | ||
681 | kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | | ||
682 | (vcpu_id & MIPS_EBASE_CPUNUM)); | ||
683 | |||
684 | /* Put PC at guest reset vector */ | ||
685 | vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000); | ||
686 | |||
687 | return 0; | ||
688 | } | ||
689 | |||
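The MMU-size update in kvm_trap_emul_vcpu_setup() above encodes the guest TLB size into the Config1 MMUSize-1 field (bits 30:25). A quick check of the arithmetic, assuming the usual KVM_MIPS_GUEST_TLB_SIZE of 64 entries:

/*
 * config1 &= ~(0x3f << 25);    clear MMUSize-1 (bits 30:25)
 * config1 |= (64 - 1) << 25;   MMUSize-1 = 63, i.e. a 64-entry guest TLB
 */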
690 | static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm) | ||
691 | { | ||
692 | /* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs */ | ||
693 | kvm_flush_remote_tlbs(kvm); | ||
694 | } | ||
695 | |||
696 | static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm, | ||
697 | const struct kvm_memory_slot *slot) | ||
698 | { | ||
699 | kvm_trap_emul_flush_shadow_all(kvm); | ||
700 | } | ||
701 | |||
702 | static u64 kvm_trap_emul_get_one_regs[] = { | ||
703 | KVM_REG_MIPS_CP0_INDEX, | ||
704 | KVM_REG_MIPS_CP0_ENTRYLO0, | ||
705 | KVM_REG_MIPS_CP0_ENTRYLO1, | ||
706 | KVM_REG_MIPS_CP0_CONTEXT, | ||
707 | KVM_REG_MIPS_CP0_USERLOCAL, | ||
708 | KVM_REG_MIPS_CP0_PAGEMASK, | ||
709 | KVM_REG_MIPS_CP0_WIRED, | ||
710 | KVM_REG_MIPS_CP0_HWRENA, | ||
711 | KVM_REG_MIPS_CP0_BADVADDR, | ||
712 | KVM_REG_MIPS_CP0_COUNT, | ||
713 | KVM_REG_MIPS_CP0_ENTRYHI, | ||
714 | KVM_REG_MIPS_CP0_COMPARE, | ||
715 | KVM_REG_MIPS_CP0_STATUS, | ||
716 | KVM_REG_MIPS_CP0_INTCTL, | ||
717 | KVM_REG_MIPS_CP0_CAUSE, | ||
718 | KVM_REG_MIPS_CP0_EPC, | ||
719 | KVM_REG_MIPS_CP0_PRID, | ||
720 | KVM_REG_MIPS_CP0_EBASE, | ||
721 | KVM_REG_MIPS_CP0_CONFIG, | ||
722 | KVM_REG_MIPS_CP0_CONFIG1, | ||
723 | KVM_REG_MIPS_CP0_CONFIG2, | ||
724 | KVM_REG_MIPS_CP0_CONFIG3, | ||
725 | KVM_REG_MIPS_CP0_CONFIG4, | ||
726 | KVM_REG_MIPS_CP0_CONFIG5, | ||
727 | KVM_REG_MIPS_CP0_CONFIG7, | ||
728 | KVM_REG_MIPS_CP0_ERROREPC, | ||
729 | KVM_REG_MIPS_CP0_KSCRATCH1, | ||
730 | KVM_REG_MIPS_CP0_KSCRATCH2, | ||
731 | KVM_REG_MIPS_CP0_KSCRATCH3, | ||
732 | KVM_REG_MIPS_CP0_KSCRATCH4, | ||
733 | KVM_REG_MIPS_CP0_KSCRATCH5, | ||
734 | KVM_REG_MIPS_CP0_KSCRATCH6, | ||
735 | |||
736 | KVM_REG_MIPS_COUNT_CTL, | ||
737 | KVM_REG_MIPS_COUNT_RESUME, | ||
738 | KVM_REG_MIPS_COUNT_HZ, | ||
739 | }; | ||
740 | |||
741 | static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu) | ||
742 | { | ||
743 | return ARRAY_SIZE(kvm_trap_emul_get_one_regs); | ||
744 | } | ||
745 | |||
746 | static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu, | ||
747 | u64 __user *indices) | ||
748 | { | ||
749 | if (copy_to_user(indices, kvm_trap_emul_get_one_regs, | ||
750 | sizeof(kvm_trap_emul_get_one_regs))) | ||
751 | return -EFAULT; | ||
752 | indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs); | ||
753 | |||
754 | return 0; | ||
755 | } | ||
756 | |||
757 | static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu, | ||
758 | const struct kvm_one_reg *reg, | ||
759 | s64 *v) | ||
760 | { | ||
761 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
762 | |||
763 | switch (reg->id) { | ||
764 | case KVM_REG_MIPS_CP0_INDEX: | ||
765 | *v = (long)kvm_read_c0_guest_index(cop0); | ||
766 | break; | ||
767 | case KVM_REG_MIPS_CP0_ENTRYLO0: | ||
768 | *v = kvm_read_c0_guest_entrylo0(cop0); | ||
769 | break; | ||
770 | case KVM_REG_MIPS_CP0_ENTRYLO1: | ||
771 | *v = kvm_read_c0_guest_entrylo1(cop0); | ||
772 | break; | ||
773 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
774 | *v = (long)kvm_read_c0_guest_context(cop0); | ||
775 | break; | ||
776 | case KVM_REG_MIPS_CP0_USERLOCAL: | ||
777 | *v = (long)kvm_read_c0_guest_userlocal(cop0); | ||
778 | break; | ||
779 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
780 | *v = (long)kvm_read_c0_guest_pagemask(cop0); | ||
781 | break; | ||
782 | case KVM_REG_MIPS_CP0_WIRED: | ||
783 | *v = (long)kvm_read_c0_guest_wired(cop0); | ||
784 | break; | ||
785 | case KVM_REG_MIPS_CP0_HWRENA: | ||
786 | *v = (long)kvm_read_c0_guest_hwrena(cop0); | ||
787 | break; | ||
788 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
789 | *v = (long)kvm_read_c0_guest_badvaddr(cop0); | ||
790 | break; | ||
791 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
792 | *v = (long)kvm_read_c0_guest_entryhi(cop0); | ||
793 | break; | ||
794 | case KVM_REG_MIPS_CP0_COMPARE: | ||
795 | *v = (long)kvm_read_c0_guest_compare(cop0); | ||
796 | break; | ||
797 | case KVM_REG_MIPS_CP0_STATUS: | ||
798 | *v = (long)kvm_read_c0_guest_status(cop0); | ||
799 | break; | ||
800 | case KVM_REG_MIPS_CP0_INTCTL: | ||
801 | *v = (long)kvm_read_c0_guest_intctl(cop0); | ||
802 | break; | ||
803 | case KVM_REG_MIPS_CP0_CAUSE: | ||
804 | *v = (long)kvm_read_c0_guest_cause(cop0); | ||
805 | break; | ||
806 | case KVM_REG_MIPS_CP0_EPC: | ||
807 | *v = (long)kvm_read_c0_guest_epc(cop0); | ||
808 | break; | ||
809 | case KVM_REG_MIPS_CP0_PRID: | ||
810 | *v = (long)kvm_read_c0_guest_prid(cop0); | ||
811 | break; | ||
812 | case KVM_REG_MIPS_CP0_EBASE: | ||
813 | *v = (long)kvm_read_c0_guest_ebase(cop0); | ||
814 | break; | ||
815 | case KVM_REG_MIPS_CP0_CONFIG: | ||
816 | *v = (long)kvm_read_c0_guest_config(cop0); | ||
817 | break; | ||
818 | case KVM_REG_MIPS_CP0_CONFIG1: | ||
819 | *v = (long)kvm_read_c0_guest_config1(cop0); | ||
820 | break; | ||
821 | case KVM_REG_MIPS_CP0_CONFIG2: | ||
822 | *v = (long)kvm_read_c0_guest_config2(cop0); | ||
823 | break; | ||
824 | case KVM_REG_MIPS_CP0_CONFIG3: | ||
825 | *v = (long)kvm_read_c0_guest_config3(cop0); | ||
826 | break; | ||
827 | case KVM_REG_MIPS_CP0_CONFIG4: | ||
828 | *v = (long)kvm_read_c0_guest_config4(cop0); | ||
829 | break; | ||
830 | case KVM_REG_MIPS_CP0_CONFIG5: | ||
831 | *v = (long)kvm_read_c0_guest_config5(cop0); | ||
832 | break; | ||
833 | case KVM_REG_MIPS_CP0_CONFIG7: | ||
834 | *v = (long)kvm_read_c0_guest_config7(cop0); | ||
835 | break; | ||
836 | case KVM_REG_MIPS_CP0_COUNT: | ||
837 | *v = kvm_mips_read_count(vcpu); | ||
838 | break; | ||
839 | case KVM_REG_MIPS_COUNT_CTL: | ||
840 | *v = vcpu->arch.count_ctl; | ||
841 | break; | ||
842 | case KVM_REG_MIPS_COUNT_RESUME: | ||
843 | *v = ktime_to_ns(vcpu->arch.count_resume); | ||
844 | break; | ||
845 | case KVM_REG_MIPS_COUNT_HZ: | ||
846 | *v = vcpu->arch.count_hz; | ||
847 | break; | ||
848 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
849 | *v = (long)kvm_read_c0_guest_errorepc(cop0); | ||
850 | break; | ||
851 | case KVM_REG_MIPS_CP0_KSCRATCH1: | ||
852 | *v = (long)kvm_read_c0_guest_kscratch1(cop0); | ||
853 | break; | ||
854 | case KVM_REG_MIPS_CP0_KSCRATCH2: | ||
855 | *v = (long)kvm_read_c0_guest_kscratch2(cop0); | ||
856 | break; | ||
857 | case KVM_REG_MIPS_CP0_KSCRATCH3: | ||
858 | *v = (long)kvm_read_c0_guest_kscratch3(cop0); | ||
859 | break; | ||
860 | case KVM_REG_MIPS_CP0_KSCRATCH4: | ||
861 | *v = (long)kvm_read_c0_guest_kscratch4(cop0); | ||
862 | break; | ||
863 | case KVM_REG_MIPS_CP0_KSCRATCH5: | ||
864 | *v = (long)kvm_read_c0_guest_kscratch5(cop0); | ||
865 | break; | ||
866 | case KVM_REG_MIPS_CP0_KSCRATCH6: | ||
867 | *v = (long)kvm_read_c0_guest_kscratch6(cop0); | ||
868 | break; | ||
869 | default: | ||
870 | return -EINVAL; | ||
871 | } | ||
872 | return 0; | ||
873 | } | ||
874 | |||
875 | static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu, | ||
876 | const struct kvm_one_reg *reg, | ||
877 | s64 v) | ||
878 | { | ||
879 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
880 | int ret = 0; | ||
881 | unsigned int cur, change; | ||
882 | |||
883 | switch (reg->id) { | ||
884 | case KVM_REG_MIPS_CP0_INDEX: | ||
885 | kvm_write_c0_guest_index(cop0, v); | ||
886 | break; | ||
887 | case KVM_REG_MIPS_CP0_ENTRYLO0: | ||
888 | kvm_write_c0_guest_entrylo0(cop0, v); | ||
889 | break; | ||
890 | case KVM_REG_MIPS_CP0_ENTRYLO1: | ||
891 | kvm_write_c0_guest_entrylo1(cop0, v); | ||
892 | break; | ||
893 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
894 | kvm_write_c0_guest_context(cop0, v); | ||
895 | break; | ||
896 | case KVM_REG_MIPS_CP0_USERLOCAL: | ||
897 | kvm_write_c0_guest_userlocal(cop0, v); | ||
898 | break; | ||
899 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
900 | kvm_write_c0_guest_pagemask(cop0, v); | ||
901 | break; | ||
902 | case KVM_REG_MIPS_CP0_WIRED: | ||
903 | kvm_write_c0_guest_wired(cop0, v); | ||
904 | break; | ||
905 | case KVM_REG_MIPS_CP0_HWRENA: | ||
906 | kvm_write_c0_guest_hwrena(cop0, v); | ||
907 | break; | ||
908 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
909 | kvm_write_c0_guest_badvaddr(cop0, v); | ||
910 | break; | ||
911 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
912 | kvm_write_c0_guest_entryhi(cop0, v); | ||
913 | break; | ||
914 | case KVM_REG_MIPS_CP0_STATUS: | ||
915 | kvm_write_c0_guest_status(cop0, v); | ||
916 | break; | ||
917 | case KVM_REG_MIPS_CP0_INTCTL: | ||
918 | /* No VInt, so no VS, read-only for now */ | ||
919 | break; | ||
920 | case KVM_REG_MIPS_CP0_EPC: | ||
921 | kvm_write_c0_guest_epc(cop0, v); | ||
922 | break; | ||
923 | case KVM_REG_MIPS_CP0_PRID: | ||
924 | kvm_write_c0_guest_prid(cop0, v); | ||
925 | break; | ||
926 | case KVM_REG_MIPS_CP0_EBASE: | ||
927 | /* | ||
928 | * Allow core number to be written, but the exception base must | ||
929 | * remain in guest KSeg0. | ||
930 | */ | ||
931 | kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM, | ||
932 | v); | ||
933 | break; | ||
934 | case KVM_REG_MIPS_CP0_COUNT: | ||
935 | kvm_mips_write_count(vcpu, v); | ||
936 | break; | ||
937 | case KVM_REG_MIPS_CP0_COMPARE: | ||
938 | kvm_mips_write_compare(vcpu, v, false); | ||
939 | break; | ||
940 | case KVM_REG_MIPS_CP0_CAUSE: | ||
941 | /* | ||
942 | * If the timer is stopped or started (DC bit) it must look | ||
943 | * atomic with changes to the interrupt pending bits (TI, IRQ5). | ||
944 | * A timer interrupt should not happen in between. | ||
945 | */ | ||
946 | if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) { | ||
947 | if (v & CAUSEF_DC) { | ||
948 | /* disable timer first */ | ||
949 | kvm_mips_count_disable_cause(vcpu); | ||
950 | kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, | ||
951 | v); | ||
952 | } else { | ||
953 | /* enable timer last */ | ||
954 | kvm_change_c0_guest_cause(cop0, (u32)~CAUSEF_DC, | ||
955 | v); | ||
956 | kvm_mips_count_enable_cause(vcpu); | ||
957 | } | ||
958 | } else { | ||
959 | kvm_write_c0_guest_cause(cop0, v); | ||
960 | } | ||
961 | break; | ||
962 | case KVM_REG_MIPS_CP0_CONFIG: | ||
963 | /* read-only for now */ | ||
964 | break; | ||
965 | case KVM_REG_MIPS_CP0_CONFIG1: | ||
966 | cur = kvm_read_c0_guest_config1(cop0); | ||
967 | change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu); | ||
968 | if (change) { | ||
969 | v = cur ^ change; | ||
970 | kvm_write_c0_guest_config1(cop0, v); | ||
971 | } | ||
972 | break; | ||
973 | case KVM_REG_MIPS_CP0_CONFIG2: | ||
974 | /* read-only for now */ | ||
975 | break; | ||
976 | case KVM_REG_MIPS_CP0_CONFIG3: | ||
977 | cur = kvm_read_c0_guest_config3(cop0); | ||
978 | change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu); | ||
979 | if (change) { | ||
980 | v = cur ^ change; | ||
981 | kvm_write_c0_guest_config3(cop0, v); | ||
982 | } | ||
983 | break; | ||
984 | case KVM_REG_MIPS_CP0_CONFIG4: | ||
985 | cur = kvm_read_c0_guest_config4(cop0); | ||
986 | change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu); | ||
987 | if (change) { | ||
988 | v = cur ^ change; | ||
989 | kvm_write_c0_guest_config4(cop0, v); | ||
990 | } | ||
991 | break; | ||
992 | case KVM_REG_MIPS_CP0_CONFIG5: | ||
993 | cur = kvm_read_c0_guest_config5(cop0); | ||
994 | change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu); | ||
995 | if (change) { | ||
996 | v = cur ^ change; | ||
997 | kvm_write_c0_guest_config5(cop0, v); | ||
998 | } | ||
999 | break; | ||
1000 | case KVM_REG_MIPS_CP0_CONFIG7: | ||
1001 | /* writes ignored */ | ||
1002 | break; | ||
1003 | case KVM_REG_MIPS_COUNT_CTL: | ||
1004 | ret = kvm_mips_set_count_ctl(vcpu, v); | ||
1005 | break; | ||
1006 | case KVM_REG_MIPS_COUNT_RESUME: | ||
1007 | ret = kvm_mips_set_count_resume(vcpu, v); | ||
1008 | break; | ||
1009 | case KVM_REG_MIPS_COUNT_HZ: | ||
1010 | ret = kvm_mips_set_count_hz(vcpu, v); | ||
1011 | break; | ||
1012 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
1013 | kvm_write_c0_guest_errorepc(cop0, v); | ||
1014 | break; | ||
1015 | case KVM_REG_MIPS_CP0_KSCRATCH1: | ||
1016 | kvm_write_c0_guest_kscratch1(cop0, v); | ||
1017 | break; | ||
1018 | case KVM_REG_MIPS_CP0_KSCRATCH2: | ||
1019 | kvm_write_c0_guest_kscratch2(cop0, v); | ||
1020 | break; | ||
1021 | case KVM_REG_MIPS_CP0_KSCRATCH3: | ||
1022 | kvm_write_c0_guest_kscratch3(cop0, v); | ||
1023 | break; | ||
1024 | case KVM_REG_MIPS_CP0_KSCRATCH4: | ||
1025 | kvm_write_c0_guest_kscratch4(cop0, v); | ||
1026 | break; | ||
1027 | case KVM_REG_MIPS_CP0_KSCRATCH5: | ||
1028 | kvm_write_c0_guest_kscratch5(cop0, v); | ||
1029 | break; | ||
1030 | case KVM_REG_MIPS_CP0_KSCRATCH6: | ||
1031 | kvm_write_c0_guest_kscratch6(cop0, v); | ||
1032 | break; | ||
1033 | default: | ||
1034 | return -EINVAL; | ||
1035 | } | ||
1036 | return ret; | ||
1037 | } | ||
1038 | |||
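The Config1/3/4/5 cases above all rely on the same masking idiom: change = (cur ^ v) & wrmask isolates the writable bits that differ from the current value, and cur ^ change flips exactly those bits, so read-only bits survive whatever value userspace supplies. A worked example with made-up values:

/*
 * cur    = 0b1010   current guest register
 * v      = 0b0101   value requested via KVM_SET_ONE_REG
 * wrmask = 0b0011   only the low two bits are writable
 *
 * change = (cur ^ v) & wrmask = 0b1111 & 0b0011 = 0b0011
 * result = cur ^ change       = 0b1010 ^ 0b0011 = 0b1001
 *
 * The low two bits now follow v; the high bits still follow cur.
 */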
1039 | static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
1040 | { | ||
1041 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
1042 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
1043 | struct mm_struct *mm; | ||
1044 | |||
1045 | /* | ||
1046 | * Were we in guest context? If so, restore the appropriate ASID based | ||
1047 | * on the mode of the Guest (Kernel/User). | ||
1048 | */ | ||
1049 | if (current->flags & PF_VCPU) { | ||
1050 | mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; | ||
1051 | check_switch_mmu_context(mm); | ||
1052 | kvm_mips_suspend_mm(cpu); | ||
1053 | ehb(); | ||
1054 | } | ||
1055 | |||
1056 | return 0; | ||
1057 | } | ||
1058 | |||
1059 | static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu) | ||
1060 | { | ||
1061 | kvm_lose_fpu(vcpu); | ||
1062 | |||
1063 | if (current->flags & PF_VCPU) { | ||
1064 | /* Restore normal Linux process memory map */ | ||
1065 | check_switch_mmu_context(current->mm); | ||
1066 | kvm_mips_resume_mm(cpu); | ||
1067 | ehb(); | ||
1068 | } | ||
1069 | |||
1070 | return 0; | ||
1071 | } | ||
1072 | |||
1073 | static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu, | ||
1074 | bool reload_asid) | ||
1075 | { | ||
1076 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
1077 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
1078 | struct mm_struct *mm; | ||
1079 | int i; | ||
1080 | |||
1081 | if (likely(!kvm_request_pending(vcpu))) | ||
1082 | return; | ||
1083 | |||
1084 | if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { | ||
1085 | /* | ||
1086 | * Both kernel & user GVA mappings must be invalidated. The | ||
1087 | * caller is just about to check whether the ASID is stale | ||
1088 | * anyway so no need to reload it here. | ||
1089 | */ | ||
1090 | kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN); | ||
1091 | kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER); | ||
1092 | for_each_possible_cpu(i) { | ||
1093 | set_cpu_context(i, kern_mm, 0); | ||
1094 | set_cpu_context(i, user_mm, 0); | ||
1095 | } | ||
1096 | |||
1097 | /* Generate new ASID for current mode */ | ||
1098 | if (reload_asid) { | ||
1099 | mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm; | ||
1100 | get_new_mmu_context(mm); | ||
1101 | htw_stop(); | ||
1102 | write_c0_entryhi(cpu_asid(cpu, mm)); | ||
1103 | TLBMISS_HANDLER_SETUP_PGD(mm->pgd); | ||
1104 | htw_start(); | ||
1105 | } | ||
1106 | } | ||
1107 | } | ||
1108 | |||
1109 | /** | ||
1110 | * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space. | ||
1111 | * @vcpu: VCPU pointer. | ||
1112 | * | ||
1113 | * Call before a GVA space access outside of guest mode, to ensure that | ||
1114 | * asynchronous TLB flush requests are handled or delayed until completion of | ||
1115 | * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()). | ||
1116 | * | ||
1117 | * Should be called with IRQs already enabled. | ||
1118 | */ | ||
1119 | void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu) | ||
1120 | { | ||
1121 | /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */ | ||
1122 | WARN_ON_ONCE(irqs_disabled()); | ||
1123 | |||
1124 | /* | ||
1125 | * The caller is about to access the GVA space, so we set the mode to | ||
1126 | * force TLB flush requests to send an IPI, and also disable IRQs to | ||
1127 | * delay IPI handling until kvm_trap_emul_gva_lockless_end(). | ||
1128 | */ | ||
1129 | local_irq_disable(); | ||
1130 | |||
1131 | /* | ||
1132 | * Make sure the read of VCPU requests is not reordered ahead of the | ||
1133 | * write to vcpu->mode, or we could miss a TLB flush request while | ||
1134 | * the requester sees the VCPU as outside of guest mode and not needing | ||
1135 | * an IPI. | ||
1136 | */ | ||
1137 | smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES); | ||
1138 | |||
1139 | /* | ||
1140 | * If a TLB flush has been requested (potentially while | ||
1141 | * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it | ||
1142 | * before accessing the GVA space, and be sure to reload the ASID if | ||
1143 | * necessary as it'll be immediately used. | ||
1144 | * | ||
1145 | * TLB flush requests after this check will trigger an IPI due to the | ||
1146 | * mode change above, which will be delayed due to IRQs disabled. | ||
1147 | */ | ||
1148 | kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true); | ||
1149 | } | ||
1150 | |||
1151 | /** | ||
1152 | * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space. | ||
1153 | * @vcpu: VCPU pointer. | ||
1154 | * | ||
1155 | * Called after a GVA space access outside of guest mode. Should have a matching | ||
1156 | * call to kvm_trap_emul_gva_lockless_begin(). | ||
1157 | */ | ||
1158 | void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu) | ||
1159 | { | ||
1160 | /* | ||
1161 | * Make sure the write to vcpu->mode is not reordered in front of GVA | ||
1162 | * accesses, or a TLB flush requester may not think it necessary to send | ||
1163 | * an IPI. | ||
1164 | */ | ||
1165 | smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE); | ||
1166 | |||
1167 | /* | ||
1168 | * Now that the access to GVA space is complete, it's safe for pending | ||
1169 | * TLB flush request IPIs to be handled (which indicates completion). | ||
1170 | */ | ||
1171 | local_irq_enable(); | ||
1172 | } | ||
1173 | |||
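Taken together, the begin/end pair brackets any GVA access performed outside guest mode so that a concurrent TLB-flush request cannot be missed. A minimal sketch of a caller (the function name is hypothetical; real callers access guest memory with user accessors in just this way):

static int example_read_guest_word(struct kvm_vcpu *vcpu,
				   u32 __user *gva, u32 *out)
{
	int err;

	kvm_trap_emul_gva_lockless_begin(vcpu);
	err = get_user(*out, gva); /* resolved via the guest's GVA mappings */
	kvm_trap_emul_gva_lockless_end(vcpu);

	return err;
}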
1174 | static void kvm_trap_emul_vcpu_reenter(struct kvm_vcpu *vcpu) | ||
1175 | { | ||
1176 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; | ||
1177 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; | ||
1178 | struct mm_struct *mm; | ||
1179 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1180 | int i, cpu = smp_processor_id(); | ||
1181 | unsigned int gasid; | ||
1182 | |||
1183 | /* | ||
1184 | * No need to reload ASID, IRQs are disabled already so there's no rush, | ||
1185 | * and we'll check if we need to regenerate below anyway before | ||
1186 | * re-entering the guest. | ||
1187 | */ | ||
1188 | kvm_trap_emul_check_requests(vcpu, cpu, false); | ||
1189 | |||
1190 | if (KVM_GUEST_KERNEL_MODE(vcpu)) { | ||
1191 | mm = kern_mm; | ||
1192 | } else { | ||
1193 | mm = user_mm; | ||
1194 | |||
1195 | /* | ||
1196 | * Lazy host ASID regeneration / PT flush for guest user mode. | ||
1197 | * If the guest ASID has changed since the last guest usermode | ||
1198 | * execution, invalidate the stale TLB entries and flush GVA PT | ||
1199 | * entries too. | ||
1200 | */ | ||
1201 | gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID; | ||
1202 | if (gasid != vcpu->arch.last_user_gasid) { | ||
1203 | kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER); | ||
1204 | for_each_possible_cpu(i) | ||
1205 | set_cpu_context(i, user_mm, 0); | ||
1206 | vcpu->arch.last_user_gasid = gasid; | ||
1207 | } | ||
1208 | } | ||
1209 | |||
1210 | /* | ||
1211 | * Check if ASID is stale. This may happen due to a TLB flush request or | ||
1212 | * a lazy user MM invalidation. | ||
1213 | */ | ||
1214 | check_mmu_context(mm); | ||
1215 | } | ||
1216 | |||
1217 | static int kvm_trap_emul_vcpu_run(struct kvm_vcpu *vcpu) | ||
1218 | { | ||
1219 | int cpu = smp_processor_id(); | ||
1220 | int r; | ||
1221 | |||
1222 | /* Check if we have any exceptions/interrupts pending */ | ||
1223 | kvm_mips_deliver_interrupts(vcpu, | ||
1224 | kvm_read_c0_guest_cause(vcpu->arch.cop0)); | ||
1225 | |||
1226 | kvm_trap_emul_vcpu_reenter(vcpu); | ||
1227 | |||
1228 | /* | ||
1229 | * We use user accessors to access guest memory, but we don't want to | ||
1230 | * invoke Linux page faulting. | ||
1231 | */ | ||
1232 | pagefault_disable(); | ||
1233 | |||
1234 | /* Disable hardware page table walking while in guest */ | ||
1235 | htw_stop(); | ||
1236 | |||
1237 | /* | ||
1238 | * While in guest context we're in the guest's address space, not the | ||
1239 | * host process address space, so we need to be careful not to confuse | ||
1240 | * e.g. cache management IPIs. | ||
1241 | */ | ||
1242 | kvm_mips_suspend_mm(cpu); | ||
1243 | |||
1244 | r = vcpu->arch.vcpu_run(vcpu); | ||
1245 | |||
1246 | /* We may have migrated while handling guest exits */ | ||
1247 | cpu = smp_processor_id(); | ||
1248 | |||
1249 | /* Restore normal Linux process memory map */ | ||
1250 | check_switch_mmu_context(current->mm); | ||
1251 | kvm_mips_resume_mm(cpu); | ||
1252 | |||
1253 | htw_start(); | ||
1254 | |||
1255 | pagefault_enable(); | ||
1256 | |||
1257 | return r; | ||
1258 | } | ||
1259 | |||
1260 | static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { | ||
1261 | /* exit handlers */ | ||
1262 | .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable, | ||
1263 | .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod, | ||
1264 | .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss, | ||
1265 | .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss, | ||
1266 | .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st, | ||
1267 | .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld, | ||
1268 | .handle_syscall = kvm_trap_emul_handle_syscall, | ||
1269 | .handle_res_inst = kvm_trap_emul_handle_res_inst, | ||
1270 | .handle_break = kvm_trap_emul_handle_break, | ||
1271 | .handle_trap = kvm_trap_emul_handle_trap, | ||
1272 | .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe, | ||
1273 | .handle_fpe = kvm_trap_emul_handle_fpe, | ||
1274 | .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled, | ||
1275 | .handle_guest_exit = kvm_trap_emul_no_handler, | ||
1276 | |||
1277 | .hardware_enable = kvm_trap_emul_hardware_enable, | ||
1278 | .hardware_disable = kvm_trap_emul_hardware_disable, | ||
1279 | .check_extension = kvm_trap_emul_check_extension, | ||
1280 | .vcpu_init = kvm_trap_emul_vcpu_init, | ||
1281 | .vcpu_uninit = kvm_trap_emul_vcpu_uninit, | ||
1282 | .vcpu_setup = kvm_trap_emul_vcpu_setup, | ||
1283 | .flush_shadow_all = kvm_trap_emul_flush_shadow_all, | ||
1284 | .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot, | ||
1285 | .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb, | ||
1286 | .queue_timer_int = kvm_mips_queue_timer_int_cb, | ||
1287 | .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb, | ||
1288 | .queue_io_int = kvm_mips_queue_io_int_cb, | ||
1289 | .dequeue_io_int = kvm_mips_dequeue_io_int_cb, | ||
1290 | .irq_deliver = kvm_mips_irq_deliver_cb, | ||
1291 | .irq_clear = kvm_mips_irq_clear_cb, | ||
1292 | .num_regs = kvm_trap_emul_num_regs, | ||
1293 | .copy_reg_indices = kvm_trap_emul_copy_reg_indices, | ||
1294 | .get_one_reg = kvm_trap_emul_get_one_reg, | ||
1295 | .set_one_reg = kvm_trap_emul_set_one_reg, | ||
1296 | .vcpu_load = kvm_trap_emul_vcpu_load, | ||
1297 | .vcpu_put = kvm_trap_emul_vcpu_put, | ||
1298 | .vcpu_run = kvm_trap_emul_vcpu_run, | ||
1299 | .vcpu_reenter = kvm_trap_emul_vcpu_reenter, | ||
1300 | }; | ||
1301 | |||
1302 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) | ||
1303 | { | ||
1304 | *install_callbacks = &kvm_trap_emul_callbacks; | ||
1305 | return 0; | ||
1306 | } | ||
diff --git a/arch/mips/kvm/vz.c b/arch/mips/kvm/vz.c new file mode 100644 index 000000000..2ffbe9264 --- /dev/null +++ b/arch/mips/kvm/vz.c | |||
@@ -0,0 +1,3331 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: Support for hardware virtualization extensions | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Yann Le Du <ledu@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/errno.h> | ||
13 | #include <linux/err.h> | ||
14 | #include <linux/module.h> | ||
15 | #include <linux/preempt.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <asm/cacheflush.h> | ||
18 | #include <asm/cacheops.h> | ||
19 | #include <asm/cmpxchg.h> | ||
20 | #include <asm/fpu.h> | ||
21 | #include <asm/hazards.h> | ||
22 | #include <asm/inst.h> | ||
23 | #include <asm/mmu_context.h> | ||
24 | #include <asm/r4kcache.h> | ||
25 | #include <asm/time.h> | ||
26 | #include <asm/tlb.h> | ||
27 | #include <asm/tlbex.h> | ||
28 | |||
29 | #include <linux/kvm_host.h> | ||
30 | |||
31 | #include "interrupt.h" | ||
32 | #ifdef CONFIG_CPU_LOONGSON64 | ||
33 | #include "loongson_regs.h" | ||
34 | #endif | ||
35 | |||
36 | #include "trace.h" | ||
37 | |||
38 | /* Pointers to last VCPU loaded on each physical CPU */ | ||
39 | static struct kvm_vcpu *last_vcpu[NR_CPUS]; | ||
40 | /* Pointers to last VCPU executed on each physical CPU */ | ||
41 | static struct kvm_vcpu *last_exec_vcpu[NR_CPUS]; | ||
42 | |||
43 | /* | ||
44 | * Number of guest VTLB entries to use, so we can catch inconsistency between | ||
45 | * CPUs. | ||
46 | */ | ||
47 | static unsigned int kvm_vz_guest_vtlb_size; | ||
48 | |||
49 | static inline long kvm_vz_read_gc0_ebase(void) | ||
50 | { | ||
51 | if (sizeof(long) == 8 && cpu_has_ebase_wg) | ||
52 | return read_gc0_ebase_64(); | ||
53 | else | ||
54 | return read_gc0_ebase(); | ||
55 | } | ||
56 | |||
57 | static inline void kvm_vz_write_gc0_ebase(long v) | ||
58 | { | ||
59 | /* | ||
60 | * First write with WG=1 to write upper bits, then write again in case | ||
61 | * WG should be left at 0. | ||
62 | * write_gc0_ebase_64() is no longer UNDEFINED since R6. | ||
63 | */ | ||
64 | if (sizeof(long) == 8 && | ||
65 | (cpu_has_mips64r6 || cpu_has_ebase_wg)) { | ||
66 | write_gc0_ebase_64(v | MIPS_EBASE_WG); | ||
67 | write_gc0_ebase_64(v); | ||
68 | } else { | ||
69 | write_gc0_ebase(v | MIPS_EBASE_WG); | ||
70 | write_gc0_ebase(v); | ||
71 | } | ||
72 | } | ||
73 | |||
74 | /* | ||
75 | * These Config bits may be writable by the guest: | ||
76 | * Config: [K23, KU] (!TLB), K0 | ||
77 | * Config1: (none) | ||
78 | * Config2: [TU, SU] (impl) | ||
79 | * Config3: ISAOnExc | ||
80 | * Config4: FTLBPageSize | ||
81 | * Config5: K, CV, MSAEn, UFE, FRE, SBRI, UFR | ||
82 | */ | ||
83 | |||
84 | static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu) | ||
85 | { | ||
86 | return CONF_CM_CMASK; | ||
87 | } | ||
88 | |||
89 | static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu) | ||
90 | { | ||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu) | ||
95 | { | ||
96 | return 0; | ||
97 | } | ||
98 | |||
99 | static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu) | ||
100 | { | ||
101 | return MIPS_CONF3_ISA_OE; | ||
102 | } | ||
103 | |||
104 | static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu) | ||
105 | { | ||
106 | /* no need to be exact */ | ||
107 | return MIPS_CONF4_VFTLBPAGESIZE; | ||
108 | } | ||
109 | |||
110 | static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu) | ||
111 | { | ||
112 | unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI; | ||
113 | |||
114 | /* Permit MSAEn changes if MSA supported and enabled */ | ||
115 | if (kvm_mips_guest_has_msa(&vcpu->arch)) | ||
116 | mask |= MIPS_CONF5_MSAEN; | ||
117 | |||
118 | /* | ||
119 | * Permit guest FPU mode changes if FPU is enabled and the relevant | ||
120 | * feature exists according to FIR register. | ||
121 | */ | ||
122 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { | ||
123 | if (cpu_has_ufr) | ||
124 | mask |= MIPS_CONF5_UFR; | ||
125 | if (cpu_has_fre) | ||
126 | mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE; | ||
127 | } | ||
128 | |||
129 | return mask; | ||
130 | } | ||
131 | |||
132 | static inline unsigned int kvm_vz_config6_guest_wrmask(struct kvm_vcpu *vcpu) | ||
133 | { | ||
134 | return LOONGSON_CONF6_INTIMER | LOONGSON_CONF6_EXTIMER; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * VZ optionally allows these additional Config bits to be written by root: | ||
139 | * Config: M, [MT] | ||
140 | * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP | ||
141 | * Config2: M | ||
142 | * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC, | ||
143 | * VInt, SP, CDMM, MT, SM, TL] | ||
144 | * Config4: M, [VTLBSizeExt, MMUSizeExt] | ||
145 | * Config5: MRP | ||
146 | */ | ||
147 | |||
148 | static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu) | ||
149 | { | ||
150 | return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M; | ||
151 | } | ||
152 | |||
153 | static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu) | ||
154 | { | ||
155 | unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M; | ||
156 | |||
157 | /* Permit FPU to be present if FPU is supported */ | ||
158 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) | ||
159 | mask |= MIPS_CONF1_FP; | ||
160 | |||
161 | return mask; | ||
162 | } | ||
163 | |||
164 | static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu) | ||
165 | { | ||
166 | return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M; | ||
167 | } | ||
168 | |||
169 | static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu) | ||
170 | { | ||
171 | unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M | | ||
172 | MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC; | ||
173 | |||
174 | /* Permit MSA to be present if MSA is supported */ | ||
175 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) | ||
176 | mask |= MIPS_CONF3_MSA; | ||
177 | |||
178 | return mask; | ||
179 | } | ||
180 | |||
181 | static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu) | ||
182 | { | ||
183 | return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M; | ||
184 | } | ||
185 | |||
186 | static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu) | ||
187 | { | ||
188 | return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP; | ||
189 | } | ||
190 | |||
191 | static inline unsigned int kvm_vz_config6_user_wrmask(struct kvm_vcpu *vcpu) | ||
192 | { | ||
193 | return kvm_vz_config6_guest_wrmask(vcpu) | | ||
194 | LOONGSON_CONF6_SFBEN | LOONGSON_CONF6_FTLBDIS; | ||
195 | } | ||
196 | |||
197 | static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva) | ||
198 | { | ||
199 | /* VZ guest has already converted gva to gpa */ | ||
200 | return gva; | ||
201 | } | ||
202 | |||
203 | static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority) | ||
204 | { | ||
205 | set_bit(priority, &vcpu->arch.pending_exceptions); | ||
206 | clear_bit(priority, &vcpu->arch.pending_exceptions_clr); | ||
207 | } | ||
208 | |||
209 | static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority) | ||
210 | { | ||
211 | clear_bit(priority, &vcpu->arch.pending_exceptions); | ||
212 | set_bit(priority, &vcpu->arch.pending_exceptions_clr); | ||
213 | } | ||
214 | |||
215 | static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu) | ||
216 | { | ||
217 | /* | ||
218 | * timer expiry is asynchronous to vcpu execution; therefore, defer guest | ||
219 | * cp0 accesses | ||
220 | */ | ||
221 | kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER); | ||
222 | } | ||
223 | |||
224 | static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu) | ||
225 | { | ||
226 | /* | ||
227 | * timer expiry is asynchronous to vcpu execution; therefore, defer guest | ||
228 | * cp0 accesses | ||
229 | */ | ||
230 | kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER); | ||
231 | } | ||
232 | |||
233 | static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu, | ||
234 | struct kvm_mips_interrupt *irq) | ||
235 | { | ||
236 | int intr = (int)irq->irq; | ||
237 | |||
238 | /* | ||
239 | * interrupts are asynchronous to vcpu execution; therefore, defer guest | ||
240 | * cp0 accesses | ||
241 | */ | ||
242 | kvm_vz_queue_irq(vcpu, kvm_irq_to_priority(intr)); | ||
243 | } | ||
244 | |||
245 | static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu, | ||
246 | struct kvm_mips_interrupt *irq) | ||
247 | { | ||
248 | int intr = (int)irq->irq; | ||
249 | |||
250 | /* | ||
251 | * interrupts are asynchronous to vcpu execution therefore defer guest | ||
252 | * cp0 accesses | ||
253 | */ | ||
254 | kvm_vz_dequeue_irq(vcpu, kvm_irq_to_priority(-intr)); | ||
255 | } | ||
256 | |||
257 | static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority, | ||
258 | u32 cause) | ||
259 | { | ||
260 | u32 irq = (priority < MIPS_EXC_MAX) ? | ||
261 | kvm_priority_to_irq[priority] : 0; | ||
262 | |||
263 | switch (priority) { | ||
264 | case MIPS_EXC_INT_TIMER: | ||
265 | set_gc0_cause(C_TI); | ||
266 | break; | ||
267 | |||
268 | case MIPS_EXC_INT_IO_1: | ||
269 | case MIPS_EXC_INT_IO_2: | ||
270 | case MIPS_EXC_INT_IPI_1: | ||
271 | case MIPS_EXC_INT_IPI_2: | ||
272 | if (cpu_has_guestctl2) | ||
273 | set_c0_guestctl2(irq); | ||
274 | else | ||
275 | set_gc0_cause(irq); | ||
276 | break; | ||
277 | |||
278 | default: | ||
279 | break; | ||
280 | } | ||
281 | |||
282 | clear_bit(priority, &vcpu->arch.pending_exceptions); | ||
283 | return 1; | ||
284 | } | ||
285 | |||
286 | static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority, | ||
287 | u32 cause) | ||
288 | { | ||
289 | u32 irq = (priority < MIPS_EXC_MAX) ? | ||
290 | kvm_priority_to_irq[priority] : 0; | ||
291 | |||
292 | switch (priority) { | ||
293 | case MIPS_EXC_INT_TIMER: | ||
294 | /* | ||
295 | * Call to kvm_write_c0_guest_compare() clears Cause.TI in | ||
296 | * kvm_mips_emulate_CP0(). Explicitly clear irq associated with | ||
297 | * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not | ||
298 | * supported or if not using GuestCtl2 Hardware Clear. | ||
299 | */ | ||
300 | if (cpu_has_guestctl2) { | ||
301 | if (!(read_c0_guestctl2() & (irq << 14))) | ||
302 | clear_c0_guestctl2(irq); | ||
303 | } else { | ||
304 | clear_gc0_cause(irq); | ||
305 | } | ||
306 | break; | ||
307 | |||
308 | case MIPS_EXC_INT_IO_1: | ||
309 | case MIPS_EXC_INT_IO_2: | ||
310 | case MIPS_EXC_INT_IPI_1: | ||
311 | case MIPS_EXC_INT_IPI_2: | ||
312 | /* Clear GuestCtl2.VIP irq if not using Hardware Clear */ | ||
313 | if (cpu_has_guestctl2) { | ||
314 | if (!(read_c0_guestctl2() & (irq << 14))) | ||
315 | clear_c0_guestctl2(irq); | ||
316 | } else { | ||
317 | clear_gc0_cause(irq); | ||
318 | } | ||
319 | break; | ||
320 | |||
321 | default: | ||
322 | break; | ||
323 | } | ||
324 | |||
325 | clear_bit(priority, &vcpu->arch.pending_exceptions_clr); | ||
326 | return 1; | ||
327 | } | ||
328 | |||
329 | /* | ||
330 | * VZ guest timer handling. | ||
331 | */ | ||
332 | |||
333 | /** | ||
334 | * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer. | ||
335 | * @vcpu: Virtual CPU. | ||
336 | * | ||
337 | * Returns: true if the VZ GTOffset & real guest CP0_Count should be used | ||
338 | * instead of software emulation of guest timer. | ||
339 | * false otherwise. | ||
340 | */ | ||
341 | static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu) | ||
342 | { | ||
343 | if (kvm_mips_count_disabled(vcpu)) | ||
344 | return false; | ||
345 | |||
346 | /* Chosen frequency must match real frequency */ | ||
347 | if (mips_hpt_frequency != vcpu->arch.count_hz) | ||
348 | return false; | ||
349 | |||
350 | /* We don't support a CP0_GTOffset with fewer bits than CP0_Count */ | ||
351 | if (current_cpu_data.gtoffset_mask != 0xffffffff) | ||
352 | return false; | ||
353 | |||
354 | return true; | ||
355 | } | ||
356 | |||
357 | /** | ||
358 | * _kvm_vz_restore_stimer() - Restore soft timer state. | ||
359 | * @vcpu: Virtual CPU. | ||
360 | * @compare: CP0_Compare register value, restored by caller. | ||
361 | * @cause: CP0_Cause register to restore. | ||
362 | * | ||
363 | * Restore VZ state relating to the soft timer. The hard timer can be enabled | ||
364 | * later. | ||
365 | */ | ||
366 | static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare, | ||
367 | u32 cause) | ||
368 | { | ||
369 | /* | ||
370 | * Avoid spurious counter interrupts by setting Guest CP0_Count to just | ||
371 | * after Guest CP0_Compare. | ||
372 | */ | ||
373 | write_c0_gtoffset(compare - read_c0_count()); | ||
374 | |||
375 | back_to_back_c0_hazard(); | ||
376 | write_gc0_cause(cause); | ||
377 | } | ||
378 | |||
379 | /** | ||
380 | * _kvm_vz_restore_htimer() - Restore hard timer state. | ||
381 | * @vcpu: Virtual CPU. | ||
382 | * @compare: CP0_Compare register value, restored by caller. | ||
383 | * @cause: CP0_Cause register to restore. | ||
384 | * | ||
385 | * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the | ||
386 | * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause. | ||
387 | */ | ||
388 | static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu, | ||
389 | u32 compare, u32 cause) | ||
390 | { | ||
391 | u32 start_count, after_count; | ||
392 | ktime_t freeze_time; | ||
393 | unsigned long flags; | ||
394 | |||
395 | /* | ||
396 | * Freeze the soft-timer and sync the guest CP0_Count with it. We do | ||
397 | * this with interrupts disabled to avoid latency. | ||
398 | */ | ||
399 | local_irq_save(flags); | ||
400 | freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count); | ||
401 | write_c0_gtoffset(start_count - read_c0_count()); | ||
402 | local_irq_restore(flags); | ||
403 | |||
404 | /* restore guest CP0_Cause, as TI may already be set */ | ||
405 | back_to_back_c0_hazard(); | ||
406 | write_gc0_cause(cause); | ||
407 | |||
408 | /* | ||
409 | * The above sequence isn't atomic and would result in lost timer | ||
410 | * interrupts if we're not careful. Detect if a timer interrupt is due | ||
411 | * and assert it. | ||
412 | */ | ||
413 | back_to_back_c0_hazard(); | ||
414 | after_count = read_gc0_count(); | ||
415 | if (after_count - start_count > compare - start_count - 1) | ||
416 | kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER); | ||
417 | } | ||
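The unsigned comparison "after_count - start_count > compare - start_count - 1" above is a wraparound-safe test for whether CP0_Compare lies in the modular interval (start_count, after_count], i.e. whether a timer interrupt became due while the registers were being restored. A standalone userspace sketch of the same check (helper name assumed, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	/* Nonzero if target lies in the modular interval (start, end]. */
	static int count_reached(uint32_t start, uint32_t end, uint32_t target)
	{
		return (uint32_t)(end - start) > (uint32_t)(target - start - 1);
	}

	int main(void)
	{
		/* CP0_Count wraps past zero between the two samples. */
		printf("%d\n", count_reached(0xfffffff0, 0x10, 0x01)); /* 1: due */
		printf("%d\n", count_reached(0xfffffff0, 0x10, 0x40)); /* 0: not due */
		return 0;
	}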
418 | |||
419 | /** | ||
420 | * kvm_vz_restore_timer() - Restore timer state. | ||
421 | * @vcpu: Virtual CPU. | ||
422 | * | ||
423 | * Restore soft timer state from saved context. | ||
424 | */ | ||
425 | static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu) | ||
426 | { | ||
427 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
428 | u32 cause, compare; | ||
429 | |||
430 | compare = kvm_read_sw_gc0_compare(cop0); | ||
431 | cause = kvm_read_sw_gc0_cause(cop0); | ||
432 | |||
433 | write_gc0_compare(compare); | ||
434 | _kvm_vz_restore_stimer(vcpu, compare, cause); | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * kvm_vz_acquire_htimer() - Switch to hard timer state. | ||
439 | * @vcpu: Virtual CPU. | ||
440 | * | ||
441 | * Restore hard timer state on top of existing soft timer state if possible. | ||
442 | * | ||
443 | * Since hard timer won't remain active over preemption, preemption should be | ||
444 | * disabled by the caller. | ||
445 | */ | ||
446 | void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu) | ||
447 | { | ||
448 | u32 gctl0; | ||
449 | |||
450 | gctl0 = read_c0_guestctl0(); | ||
451 | if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) { | ||
452 | /* enable guest access to hard timer */ | ||
453 | write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT); | ||
454 | |||
455 | _kvm_vz_restore_htimer(vcpu, read_gc0_compare(), | ||
456 | read_gc0_cause()); | ||
457 | } | ||
458 | } | ||
459 | |||
460 | /** | ||
461 | * _kvm_vz_save_htimer() - Switch to software emulation of guest timer. | ||
462 | * @vcpu: Virtual CPU. | ||
463 | * @compare: Pointer to write compare value to. | ||
464 | * @cause: Pointer to write cause value to. | ||
465 | * | ||
466 | * Save VZ guest timer state and switch to software emulation of guest CP0 | ||
467 | * timer. The hard timer must already be in use, so preemption should be | ||
468 | * disabled. | ||
469 | */ | ||
470 | static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu, | ||
471 | u32 *out_compare, u32 *out_cause) | ||
472 | { | ||
473 | u32 cause, compare, before_count, end_count; | ||
474 | ktime_t before_time; | ||
475 | |||
476 | compare = read_gc0_compare(); | ||
477 | *out_compare = compare; | ||
478 | |||
479 | before_time = ktime_get(); | ||
480 | |||
481 | /* | ||
482 | * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time | ||
483 | * at which no pending timer interrupt is missing. | ||
484 | */ | ||
485 | before_count = read_gc0_count(); | ||
486 | back_to_back_c0_hazard(); | ||
487 | cause = read_gc0_cause(); | ||
488 | *out_cause = cause; | ||
489 | |||
490 | /* | ||
491 | * Record a final CP0_Count which we will transfer to the soft-timer. | ||
492 | * This is recorded *after* saving CP0_Cause, so we don't get any timer | ||
493 | * interrupts from just after the final CP0_Count point. | ||
494 | */ | ||
495 | back_to_back_c0_hazard(); | ||
496 | end_count = read_gc0_count(); | ||
497 | |||
498 | /* | ||
499 | * The above sequence isn't atomic, so we could miss a timer interrupt | ||
500 | * between reading CP0_Cause and end_count. Detect and record any timer | ||
501 | * interrupt due between before_count and end_count. | ||
502 | */ | ||
503 | if (end_count - before_count > compare - before_count - 1) | ||
504 | kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER); | ||
505 | |||
506 | /* | ||
507 | * Restore soft-timer, ignoring a small amount of negative drift due to | ||
508 | * delay between freeze_hrtimer and setting CP0_GTOffset. | ||
509 | */ | ||
510 | kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000); | ||
511 | } | ||
512 | |||
513 | /** | ||
514 | * kvm_vz_save_timer() - Save guest timer state. | ||
515 | * @vcpu: Virtual CPU. | ||
516 | * | ||
517 | * Save VZ guest timer state and switch to soft guest timer if hard timer was in | ||
518 | * use. | ||
519 | */ | ||
520 | static void kvm_vz_save_timer(struct kvm_vcpu *vcpu) | ||
521 | { | ||
522 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
523 | u32 gctl0, compare, cause; | ||
524 | |||
525 | gctl0 = read_c0_guestctl0(); | ||
526 | if (gctl0 & MIPS_GCTL0_GT) { | ||
527 | /* disable guest use of hard timer */ | ||
528 | write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT); | ||
529 | |||
530 | /* save hard timer state */ | ||
531 | _kvm_vz_save_htimer(vcpu, &compare, &cause); | ||
532 | } else { | ||
533 | compare = read_gc0_compare(); | ||
534 | cause = read_gc0_cause(); | ||
535 | } | ||
536 | |||
537 | /* save timer-related state to VCPU context */ | ||
538 | kvm_write_sw_gc0_cause(cop0, cause); | ||
539 | kvm_write_sw_gc0_compare(cop0, compare); | ||
540 | } | ||
541 | |||
542 | /** | ||
543 | * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use. | ||
544 | * @vcpu: Virtual CPU. | ||
545 | * | ||
546 | * Transfers the state of the hard guest timer to the soft guest timer, leaving | ||
547 | * guest state intact so it can continue to be used with the soft timer. | ||
548 | */ | ||
549 | void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu) | ||
550 | { | ||
551 | u32 gctl0, compare, cause; | ||
552 | |||
553 | preempt_disable(); | ||
554 | gctl0 = read_c0_guestctl0(); | ||
555 | if (gctl0 & MIPS_GCTL0_GT) { | ||
556 | /* disable guest use of timer */ | ||
557 | write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT); | ||
558 | |||
559 | /* switch to soft timer */ | ||
560 | _kvm_vz_save_htimer(vcpu, &compare, &cause); | ||
561 | |||
562 | /* leave soft timer in usable state */ | ||
563 | _kvm_vz_restore_stimer(vcpu, compare, cause); | ||
564 | } | ||
565 | preempt_enable(); | ||
566 | } | ||
567 | |||
568 | /** | ||
569 | * is_eva_access() - Find whether an instruction is an EVA memory accessor. | ||
570 | * @inst: 32-bit instruction encoding. | ||
571 | * | ||
572 | * Finds whether @inst encodes an EVA memory access instruction, which would | ||
573 | * indicate that emulation of it should access the user mode address space | ||
574 | * instead of the kernel mode address space. This matters for MUSUK segments | ||
575 | * which are TLB mapped for user mode but unmapped for kernel mode. | ||
576 | * | ||
577 | * Returns: Whether @inst encodes an EVA accessor instruction. | ||
578 | */ | ||
579 | static bool is_eva_access(union mips_instruction inst) | ||
580 | { | ||
581 | if (inst.spec3_format.opcode != spec3_op) | ||
582 | return false; | ||
583 | |||
584 | switch (inst.spec3_format.func) { | ||
585 | case lwle_op: | ||
586 | case lwre_op: | ||
587 | case cachee_op: | ||
588 | case sbe_op: | ||
589 | case she_op: | ||
590 | case sce_op: | ||
591 | case swe_op: | ||
592 | case swle_op: | ||
593 | case swre_op: | ||
594 | case prefe_op: | ||
595 | case lbue_op: | ||
596 | case lhue_op: | ||
597 | case lbe_op: | ||
598 | case lhe_op: | ||
599 | case lle_op: | ||
600 | case lwe_op: | ||
601 | return true; | ||
602 | default: | ||
603 | return false; | ||
604 | } | ||
605 | } | ||
606 | |||
607 | /** | ||
608 | * is_eva_am_mapped() - Find whether an access mode is mapped. | ||
609 | * @vcpu: KVM VCPU state. | ||
610 | * @am: 3-bit encoded access mode. | ||
611 | * @eu: Segment becomes unmapped and uncached when Status.ERL=1. | ||
612 | * | ||
613 | * Decode @am to find whether it encodes a mapped segment for the current VCPU | ||
614 | * state. Where necessary @eu and the actual instruction causing the fault are | ||
615 | * taken into account to make the decision. | ||
616 | * | ||
617 | * Returns: Whether the VCPU faulted on a TLB mapped address. | ||
618 | */ | ||
619 | static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu) | ||
620 | { | ||
621 | u32 am_lookup; | ||
622 | int err; | ||
623 | |||
624 | /* | ||
625 | * Interpret access control mode. We assume address errors will already | ||
626 | * have been caught by the guest, leaving us with: | ||
627 | 	 *       AM  UM  SM  KM  31..24 23..16 | ||
628 | 	 * UK    0 000          Unm    0      0 | ||
629 | 	 * MK    1 001          TLB    1 | ||
630 | 	 * MSK   2 010      TLB TLB    1 | ||
631 | 	 * MUSK  3 011  TLB TLB TLB    1 | ||
632 | 	 * MUSUK 4 100  TLB TLB Unm    0      1 | ||
633 | 	 * USK   5 101      Unm Unm    0      0 | ||
634 | 	 * -     6 110                 0      0 | ||
635 | 	 * UUSK  7 111  Unm Unm Unm    0      0 | ||
636 | * | ||
637 | * We shift a magic value by AM across the sign bit to find if always | ||
638 | * TLB mapped, and if not shift by 8 again to find if it depends on KM. | ||
639 | */ | ||
640 | am_lookup = 0x70080000 << am; | ||
641 | if ((s32)am_lookup < 0) { | ||
642 | /* | ||
643 | * MK, MSK, MUSK | ||
644 | * Always TLB mapped, unless SegCtl.EU && ERL | ||
645 | */ | ||
646 | if (!eu || !(read_gc0_status() & ST0_ERL)) | ||
647 | return true; | ||
648 | } else { | ||
649 | am_lookup <<= 8; | ||
650 | if ((s32)am_lookup < 0) { | ||
651 | union mips_instruction inst; | ||
652 | unsigned int status; | ||
653 | u32 *opc; | ||
654 | |||
655 | /* | ||
656 | * MUSUK | ||
657 | * TLB mapped if not in kernel mode | ||
658 | */ | ||
659 | status = read_gc0_status(); | ||
660 | if (!(status & (ST0_EXL | ST0_ERL)) && | ||
661 | (status & ST0_KSU)) | ||
662 | return true; | ||
663 | /* | ||
664 | * EVA access instructions in kernel | ||
665 | * mode access user address space. | ||
666 | */ | ||
667 | opc = (u32 *)vcpu->arch.pc; | ||
668 | if (vcpu->arch.host_cp0_cause & CAUSEF_BD) | ||
669 | opc += 1; | ||
670 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
671 | if (!err && is_eva_access(inst)) | ||
672 | return true; | ||
673 | } | ||
674 | } | ||
675 | |||
676 | return false; | ||
677 | } | ||
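The magic value 0x70080000 encodes the table above: bit 31-am is set for the always-mapped modes (MK/MSK/MUSK), and bit 23-am for the kernel-mode-dependent one (MUSUK), so two sign-bit tests classify all eight modes. A standalone sketch reproducing the classification (for illustration only, not kernel code):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		static const char * const names[8] = {
			"UK", "MK", "MSK", "MUSK", "MUSUK", "USK", "-", "UUSK"
		};
		unsigned int am;

		for (am = 0; am < 8; am++) {
			uint32_t lookup = 0x70080000u << am;

			if ((int32_t)lookup < 0)
				printf("%-5s always TLB mapped\n", names[am]);
			else if ((int32_t)(lookup << 8) < 0)
				printf("%-5s mapped unless kernel mode\n", names[am]);
			else
				printf("%-5s unmapped\n", names[am]);
		}
		return 0;
	}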
678 | |||
679 | /** | ||
680 | * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA. | ||
681 | * @vcpu: KVM VCPU state. | ||
682 | * @gva: Guest virtual address to convert. | ||
683 | * @gpa: Output guest physical address. | ||
684 | * | ||
685 | * Convert a guest virtual address (GVA) which is valid according to the guest | ||
686 | * context, to a guest physical address (GPA). | ||
687 | * | ||
688 | * Returns: 0 on success. | ||
689 | * -errno on failure. | ||
690 | */ | ||
691 | static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva, | ||
692 | unsigned long *gpa) | ||
693 | { | ||
694 | u32 gva32 = gva; | ||
695 | unsigned long segctl; | ||
696 | |||
697 | if ((long)gva == (s32)gva32) { | ||
698 | /* Handle canonical 32-bit virtual address */ | ||
699 | if (cpu_guest_has_segments) { | ||
700 | unsigned long mask, pa; | ||
701 | |||
702 | switch (gva32 >> 29) { | ||
703 | case 0: | ||
704 | case 1: /* CFG5 (1GB) */ | ||
705 | segctl = read_gc0_segctl2() >> 16; | ||
706 | mask = (unsigned long)0xfc0000000ull; | ||
707 | break; | ||
708 | case 2: | ||
709 | case 3: /* CFG4 (1GB) */ | ||
710 | segctl = read_gc0_segctl2(); | ||
711 | mask = (unsigned long)0xfc0000000ull; | ||
712 | break; | ||
713 | case 4: /* CFG3 (512MB) */ | ||
714 | segctl = read_gc0_segctl1() >> 16; | ||
715 | mask = (unsigned long)0xfe0000000ull; | ||
716 | break; | ||
717 | case 5: /* CFG2 (512MB) */ | ||
718 | segctl = read_gc0_segctl1(); | ||
719 | mask = (unsigned long)0xfe0000000ull; | ||
720 | break; | ||
721 | case 6: /* CFG1 (512MB) */ | ||
722 | segctl = read_gc0_segctl0() >> 16; | ||
723 | mask = (unsigned long)0xfe0000000ull; | ||
724 | break; | ||
725 | case 7: /* CFG0 (512MB) */ | ||
726 | segctl = read_gc0_segctl0(); | ||
727 | mask = (unsigned long)0xfe0000000ull; | ||
728 | break; | ||
729 | default: | ||
730 | /* | ||
731 | * GCC 4.9 isn't smart enough to figure out that | ||
732 | * segctl and mask are always initialised. | ||
733 | */ | ||
734 | unreachable(); | ||
735 | } | ||
736 | |||
737 | if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7, | ||
738 | segctl & 0x0008)) | ||
739 | goto tlb_mapped; | ||
740 | |||
741 | /* Unmapped, find guest physical address */ | ||
742 | pa = (segctl << 20) & mask; | ||
743 | pa |= gva32 & ~mask; | ||
744 | *gpa = pa; | ||
745 | return 0; | ||
746 | } else if ((s32)gva32 < (s32)0xc0000000) { | ||
747 | /* legacy unmapped KSeg0 or KSeg1 */ | ||
748 | *gpa = gva32 & 0x1fffffff; | ||
749 | return 0; | ||
750 | } | ||
751 | #ifdef CONFIG_64BIT | ||
752 | } else if ((gva & 0xc000000000000000) == 0x8000000000000000) { | ||
753 | /* XKPHYS */ | ||
754 | if (cpu_guest_has_segments) { | ||
755 | /* | ||
756 | * Each of the 8 regions can be overridden by SegCtl2.XR | ||
757 | * to use SegCtl1.XAM. | ||
758 | */ | ||
759 | segctl = read_gc0_segctl2(); | ||
760 | if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) { | ||
761 | segctl = read_gc0_segctl1(); | ||
762 | if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7, | ||
763 | 0)) | ||
764 | goto tlb_mapped; | ||
765 | } | ||
766 | |||
767 | } | ||
768 | /* | ||
769 | * Traditionally fully unmapped. | ||
770 | * Bits 61:59 specify the CCA, which we can just mask off here. | ||
771 | * Bits 58:PABITS should be zero, but we shouldn't have got here | ||
772 | 	 * if they weren't. | ||
773 | */ | ||
774 | *gpa = gva & 0x07ffffffffffffff; | ||
775 | return 0; | ||
776 | #endif | ||
777 | } | ||
778 | |||
779 | tlb_mapped: | ||
780 | return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa); | ||
781 | } | ||
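For the unmapped-segment case above, the physical base comes from the SegCtl PA field: "(segctl << 20) & mask" moves the PA bits of the 16-bit CFG half into position, and the low bits of the GVA supply the offset. A worked sketch with hypothetical values (a CFG0 segment, gva32 >> 29 == 7, placed at PA 0x20000000; assumes a 64-bit host):

	#include <stdio.h>

	int main(void)
	{
		unsigned long segctl = 0x0200;		/* assumed PA field, bits 15:9 */
		unsigned long mask = 0xfe0000000ul;	/* 512MB segment */
		unsigned int gva32 = 0xe0012345;	/* CFG0 region */
		unsigned long pa;

		pa = (segctl << 20) & mask;		/* 0x20000000 */
		pa |= gva32 & ~mask;			/* in-segment offset */
		printf("gpa = %#lx\n", pa);		/* 0x20012345 */
		return 0;
	}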
782 | |||
783 | /** | ||
784 | * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA. | ||
785 | * @vcpu: KVM VCPU state. | ||
786 | * @badvaddr: Root BadVAddr. | ||
787 | * @gpa: Output guest physical address. | ||
788 | * | ||
789 | * VZ implementations are permitted to report guest virtual addresses (GVA) in | ||
790 | * BadVAddr on a root exception during guest execution, instead of the more | ||
791 | * convenient guest physical addresses (GPA). When we get a GVA, this function | ||
792 | * converts it to a GPA, taking into account guest segmentation and guest TLB | ||
793 | * state. | ||
794 | * | ||
795 | * Returns: 0 on success. | ||
796 | * -errno on failure. | ||
797 | */ | ||
798 | static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr, | ||
799 | unsigned long *gpa) | ||
800 | { | ||
801 | unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 & | ||
802 | MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT; | ||
803 | |||
804 | /* If BadVAddr is GPA, then all is well in the world */ | ||
805 | if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) { | ||
806 | *gpa = badvaddr; | ||
807 | return 0; | ||
808 | } | ||
809 | |||
810 | /* Otherwise we'd expect it to be GVA ... */ | ||
811 | if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA, | ||
812 | "Unexpected gexccode %#x\n", gexccode)) | ||
813 | return -EINVAL; | ||
814 | |||
815 | /* ... and we need to perform the GVA->GPA translation in software */ | ||
816 | return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa); | ||
817 | } | ||
818 | |||
819 | static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu) | ||
820 | { | ||
821 | u32 *opc = (u32 *) vcpu->arch.pc; | ||
822 | u32 cause = vcpu->arch.host_cp0_cause; | ||
823 | u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; | ||
824 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
825 | u32 inst = 0; | ||
826 | |||
827 | /* | ||
828 | * Fetch the instruction. | ||
829 | */ | ||
830 | if (cause & CAUSEF_BD) | ||
831 | opc += 1; | ||
832 | kvm_get_badinstr(opc, vcpu, &inst); | ||
833 | |||
834 | kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", | ||
835 | exccode, opc, inst, badvaddr, | ||
836 | read_gc0_status()); | ||
837 | kvm_arch_vcpu_dump_regs(vcpu); | ||
838 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
839 | return RESUME_HOST; | ||
840 | } | ||
841 | |||
842 | static unsigned long mips_process_maar(unsigned int op, unsigned long val) | ||
843 | { | ||
844 | /* Mask off unused bits */ | ||
845 | unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL; | ||
846 | |||
847 | if (read_gc0_pagegrain() & PG_ELPA) | ||
848 | mask |= 0x00ffffff00000000ull; | ||
849 | if (cpu_guest_has_mvh) | ||
850 | mask |= MIPS_MAAR_VH; | ||
851 | |||
852 | /* Set or clear VH */ | ||
853 | if (op == mtc_op) { | ||
854 | /* clear VH */ | ||
855 | val &= ~MIPS_MAAR_VH; | ||
856 | } else if (op == dmtc_op) { | ||
857 | /* set VH to match VL */ | ||
858 | val &= ~MIPS_MAAR_VH; | ||
859 | if (val & MIPS_MAAR_VL) | ||
860 | val |= MIPS_MAAR_VH; | ||
861 | } | ||
862 | |||
863 | return val & mask; | ||
864 | } | ||
865 | |||
866 | static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val) | ||
867 | { | ||
868 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
869 | |||
870 | val &= MIPS_MAARI_INDEX; | ||
871 | if (val == MIPS_MAARI_INDEX) | ||
872 | kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1); | ||
873 | else if (val < ARRAY_SIZE(vcpu->arch.maar)) | ||
874 | kvm_write_sw_gc0_maari(cop0, val); | ||
875 | } | ||
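kvm_write_maari() implements the architectural MAARI write semantics: an all-ones index selects the highest implemented MAAR, in-range values are stored, and out-of-range values leave the index unchanged. A standalone sketch of that behaviour (constants assumed: a 6-bit index field and 6 guest MAARs):

	#include <stdio.h>

	#define MAARI_INDEX 0x3f	/* assumed 6-bit index field */
	#define NUM_MAAR    6		/* assumed guest MAAR count */

	static unsigned int maari_write(unsigned int cur, unsigned long val)
	{
		val &= MAARI_INDEX;
		if (val == MAARI_INDEX)
			return NUM_MAAR - 1;	/* all ones: top index */
		if (val < NUM_MAAR)
			return val;		/* in range: stored */
		return cur;			/* out of range: unchanged */
	}

	int main(void)
	{
		printf("%u\n", maari_write(0, 0x3f));	/* 5 */
		printf("%u\n", maari_write(0, 3));	/* 3 */
		printf("%u\n", maari_write(2, 40));	/* 2 */
		return 0;
	}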
876 | |||
877 | static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst, | ||
878 | u32 *opc, u32 cause, | ||
879 | struct kvm_vcpu *vcpu) | ||
880 | { | ||
881 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
882 | enum emulation_result er = EMULATE_DONE; | ||
883 | u32 rt, rd, sel; | ||
884 | unsigned long curr_pc; | ||
885 | unsigned long val; | ||
886 | |||
887 | /* | ||
888 | * Update PC and hold onto current PC in case there is | ||
889 | * an error and we want to rollback the PC | ||
890 | */ | ||
891 | curr_pc = vcpu->arch.pc; | ||
892 | er = update_pc(vcpu, cause); | ||
893 | if (er == EMULATE_FAIL) | ||
894 | return er; | ||
895 | |||
896 | if (inst.co_format.co) { | ||
897 | switch (inst.co_format.func) { | ||
898 | case wait_op: | ||
899 | er = kvm_mips_emul_wait(vcpu); | ||
900 | break; | ||
901 | default: | ||
902 | er = EMULATE_FAIL; | ||
903 | } | ||
904 | } else { | ||
905 | rt = inst.c0r_format.rt; | ||
906 | rd = inst.c0r_format.rd; | ||
907 | sel = inst.c0r_format.sel; | ||
908 | |||
909 | switch (inst.c0r_format.rs) { | ||
910 | case dmfc_op: | ||
911 | case mfc_op: | ||
912 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | ||
913 | cop0->stat[rd][sel]++; | ||
914 | #endif | ||
915 | if (rd == MIPS_CP0_COUNT && | ||
916 | sel == 0) { /* Count */ | ||
917 | val = kvm_mips_read_count(vcpu); | ||
918 | } else if (rd == MIPS_CP0_COMPARE && | ||
919 | sel == 0) { /* Compare */ | ||
920 | val = read_gc0_compare(); | ||
921 | } else if (rd == MIPS_CP0_LLADDR && | ||
922 | sel == 0) { /* LLAddr */ | ||
923 | if (cpu_guest_has_rw_llb) | ||
924 | val = read_gc0_lladdr() & | ||
925 | MIPS_LLADDR_LLB; | ||
926 | else | ||
927 | val = 0; | ||
928 | } else if (rd == MIPS_CP0_LLADDR && | ||
929 | sel == 1 && /* MAAR */ | ||
930 | cpu_guest_has_maar && | ||
931 | !cpu_guest_has_dyn_maar) { | ||
932 | /* MAARI must be in range */ | ||
933 | BUG_ON(kvm_read_sw_gc0_maari(cop0) >= | ||
934 | ARRAY_SIZE(vcpu->arch.maar)); | ||
935 | val = vcpu->arch.maar[ | ||
936 | kvm_read_sw_gc0_maari(cop0)]; | ||
937 | } else if ((rd == MIPS_CP0_PRID && | ||
938 | (sel == 0 || /* PRid */ | ||
939 | sel == 2 || /* CDMMBase */ | ||
940 | sel == 3)) || /* CMGCRBase */ | ||
941 | (rd == MIPS_CP0_STATUS && | ||
942 | (sel == 2 || /* SRSCtl */ | ||
943 | sel == 3)) || /* SRSMap */ | ||
944 | (rd == MIPS_CP0_CONFIG && | ||
945 | (sel == 6 || /* Config6 */ | ||
946 | sel == 7)) || /* Config7 */ | ||
947 | (rd == MIPS_CP0_LLADDR && | ||
948 | (sel == 2) && /* MAARI */ | ||
949 | cpu_guest_has_maar && | ||
950 | !cpu_guest_has_dyn_maar) || | ||
951 | (rd == MIPS_CP0_ERRCTL && | ||
952 | (sel == 0))) { /* ErrCtl */ | ||
953 | val = cop0->reg[rd][sel]; | ||
954 | #ifdef CONFIG_CPU_LOONGSON64 | ||
955 | } else if (rd == MIPS_CP0_DIAG && | ||
956 | (sel == 0)) { /* Diag */ | ||
957 | val = cop0->reg[rd][sel]; | ||
958 | #endif | ||
959 | } else { | ||
960 | val = 0; | ||
961 | er = EMULATE_FAIL; | ||
962 | } | ||
963 | |||
964 | if (er != EMULATE_FAIL) { | ||
965 | /* Sign extend */ | ||
966 | if (inst.c0r_format.rs == mfc_op) | ||
967 | val = (int)val; | ||
968 | vcpu->arch.gprs[rt] = val; | ||
969 | } | ||
970 | |||
971 | trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ? | ||
972 | KVM_TRACE_MFC0 : KVM_TRACE_DMFC0, | ||
973 | KVM_TRACE_COP0(rd, sel), val); | ||
974 | break; | ||
975 | |||
976 | case dmtc_op: | ||
977 | case mtc_op: | ||
978 | #ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS | ||
979 | cop0->stat[rd][sel]++; | ||
980 | #endif | ||
981 | val = vcpu->arch.gprs[rt]; | ||
982 | trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ? | ||
983 | KVM_TRACE_MTC0 : KVM_TRACE_DMTC0, | ||
984 | KVM_TRACE_COP0(rd, sel), val); | ||
985 | |||
986 | if (rd == MIPS_CP0_COUNT && | ||
987 | sel == 0) { /* Count */ | ||
988 | kvm_vz_lose_htimer(vcpu); | ||
989 | kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]); | ||
990 | } else if (rd == MIPS_CP0_COMPARE && | ||
991 | sel == 0) { /* Compare */ | ||
992 | kvm_mips_write_compare(vcpu, | ||
993 | vcpu->arch.gprs[rt], | ||
994 | true); | ||
995 | } else if (rd == MIPS_CP0_LLADDR && | ||
996 | sel == 0) { /* LLAddr */ | ||
997 | /* | ||
998 | * P5600 generates GPSI on guest MTC0 LLAddr. | ||
999 | * Only allow the guest to clear LLB. | ||
1000 | */ | ||
1001 | if (cpu_guest_has_rw_llb && | ||
1002 | !(val & MIPS_LLADDR_LLB)) | ||
1003 | write_gc0_lladdr(0); | ||
1004 | } else if (rd == MIPS_CP0_LLADDR && | ||
1005 | sel == 1 && /* MAAR */ | ||
1006 | cpu_guest_has_maar && | ||
1007 | !cpu_guest_has_dyn_maar) { | ||
1008 | val = mips_process_maar(inst.c0r_format.rs, | ||
1009 | val); | ||
1010 | |||
1011 | /* MAARI must be in range */ | ||
1012 | BUG_ON(kvm_read_sw_gc0_maari(cop0) >= | ||
1013 | ARRAY_SIZE(vcpu->arch.maar)); | ||
1014 | vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] = | ||
1015 | val; | ||
1016 | } else if (rd == MIPS_CP0_LLADDR && | ||
1017 | (sel == 2) && /* MAARI */ | ||
1018 | cpu_guest_has_maar && | ||
1019 | !cpu_guest_has_dyn_maar) { | ||
1020 | kvm_write_maari(vcpu, val); | ||
1021 | } else if (rd == MIPS_CP0_CONFIG && | ||
1022 | (sel == 6)) { | ||
1023 | cop0->reg[rd][sel] = (int)val; | ||
1024 | } else if (rd == MIPS_CP0_ERRCTL && | ||
1025 | (sel == 0)) { /* ErrCtl */ | ||
1026 | /* ignore the written value */ | ||
1027 | #ifdef CONFIG_CPU_LOONGSON64 | ||
1028 | } else if (rd == MIPS_CP0_DIAG && | ||
1029 | (sel == 0)) { /* Diag */ | ||
1030 | unsigned long flags; | ||
1031 | |||
1032 | local_irq_save(flags); | ||
1033 | if (val & LOONGSON_DIAG_BTB) { | ||
1034 | /* Flush BTB */ | ||
1035 | set_c0_diag(LOONGSON_DIAG_BTB); | ||
1036 | } | ||
1037 | if (val & LOONGSON_DIAG_ITLB) { | ||
1038 | /* Flush ITLB */ | ||
1039 | set_c0_diag(LOONGSON_DIAG_ITLB); | ||
1040 | } | ||
1041 | if (val & LOONGSON_DIAG_DTLB) { | ||
1042 | /* Flush DTLB */ | ||
1043 | set_c0_diag(LOONGSON_DIAG_DTLB); | ||
1044 | } | ||
1045 | if (val & LOONGSON_DIAG_VTLB) { | ||
1046 | /* Flush VTLB */ | ||
1047 | kvm_loongson_clear_guest_vtlb(); | ||
1048 | } | ||
1049 | if (val & LOONGSON_DIAG_FTLB) { | ||
1050 | /* Flush FTLB */ | ||
1051 | kvm_loongson_clear_guest_ftlb(); | ||
1052 | } | ||
1053 | local_irq_restore(flags); | ||
1054 | #endif | ||
1055 | } else { | ||
1056 | er = EMULATE_FAIL; | ||
1057 | } | ||
1058 | break; | ||
1059 | |||
1060 | default: | ||
1061 | er = EMULATE_FAIL; | ||
1062 | break; | ||
1063 | } | ||
1064 | } | ||
1065 | /* Rollback PC only if emulation was unsuccessful */ | ||
1066 | if (er == EMULATE_FAIL) { | ||
1067 | kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n", | ||
1068 | curr_pc, __func__, inst.word); | ||
1069 | |||
1070 | vcpu->arch.pc = curr_pc; | ||
1071 | } | ||
1072 | |||
1073 | return er; | ||
1074 | } | ||
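One detail worth noting in the MFC0 path above: a 32-bit mfc0 result is sign-extended into the full-width GPR ("val = (int)val"), while dmfc0 stores the value unmodified. A tiny sketch of the difference, assuming a 64-bit host:

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		uint64_t val = 0x80000000;

		/* mfc0: sign-extend the 32-bit value into the 64-bit GPR */
		printf("%llx\n", (unsigned long long)(int64_t)(int32_t)val);
		/* dmfc0: store the 64-bit value as-is */
		printf("%llx\n", (unsigned long long)val);
		return 0;
	}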
1075 | |||
1076 | static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst, | ||
1077 | u32 *opc, u32 cause, | ||
1078 | struct kvm_vcpu *vcpu) | ||
1079 | { | ||
1080 | enum emulation_result er = EMULATE_DONE; | ||
1081 | u32 cache, op_inst, op, base; | ||
1082 | s16 offset; | ||
1083 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
1084 | unsigned long va, curr_pc; | ||
1085 | |||
1086 | /* | ||
1087 | * Update PC and hold onto current PC in case there is | ||
1088 | * an error and we want to rollback the PC | ||
1089 | */ | ||
1090 | curr_pc = vcpu->arch.pc; | ||
1091 | er = update_pc(vcpu, cause); | ||
1092 | if (er == EMULATE_FAIL) | ||
1093 | return er; | ||
1094 | |||
1095 | base = inst.i_format.rs; | ||
1096 | op_inst = inst.i_format.rt; | ||
1097 | if (cpu_has_mips_r6) | ||
1098 | offset = inst.spec3_format.simmediate; | ||
1099 | else | ||
1100 | offset = inst.i_format.simmediate; | ||
1101 | cache = op_inst & CacheOp_Cache; | ||
1102 | op = op_inst & CacheOp_Op; | ||
1103 | |||
1104 | va = arch->gprs[base] + offset; | ||
1105 | |||
1106 | 	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n", | ||
1107 | cache, op, base, arch->gprs[base], offset); | ||
1108 | |||
1109 | 	/* Secondary or tertiary cache ops ignored */ | ||
1110 | if (cache != Cache_I && cache != Cache_D) | ||
1111 | return EMULATE_DONE; | ||
1112 | |||
1113 | switch (op_inst) { | ||
1114 | case Index_Invalidate_I: | ||
1115 | flush_icache_line_indexed(va); | ||
1116 | return EMULATE_DONE; | ||
1117 | case Index_Writeback_Inv_D: | ||
1118 | flush_dcache_line_indexed(va); | ||
1119 | return EMULATE_DONE; | ||
1120 | case Hit_Invalidate_I: | ||
1121 | case Hit_Invalidate_D: | ||
1122 | case Hit_Writeback_Inv_D: | ||
1123 | if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) { | ||
1124 | /* We can just flush entire icache */ | ||
1125 | local_flush_icache_range(0, 0); | ||
1126 | return EMULATE_DONE; | ||
1127 | } | ||
1128 | |||
1129 | /* So far, other platforms support guest hit cache ops */ | ||
1130 | break; | ||
1131 | default: | ||
1132 | break; | ||
1133 | } | ||
1134 | |||
1135 | kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n", | ||
1136 | curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base], | ||
1137 | offset); | ||
1138 | /* Rollback PC */ | ||
1139 | vcpu->arch.pc = curr_pc; | ||
1140 | |||
1141 | return EMULATE_FAIL; | ||
1142 | } | ||
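The decode above splits the instruction's 5-bit rt field into a target cache and an operation. A sketch of that field layout, with constants assumed to follow the usual MIPS cacheops encoding (cache in bits 1:0, op in bits 4:2):

	#include <stdio.h>

	#define CacheOp_Cache	0x03	/* assumed */
	#define CacheOp_Op	0x1c	/* assumed */

	int main(void)
	{
		unsigned int op_inst = 0x15;	/* Hit_Writeback_Inv_D (assumed value) */

		printf("cache=%#x op=%#x\n",
		       op_inst & CacheOp_Cache,		/* 0x1: D-cache */
		       op_inst & CacheOp_Op);		/* 0x14: hit writeback inv */
		return 0;
	}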
1143 | |||
1144 | #ifdef CONFIG_CPU_LOONGSON64 | ||
1145 | static enum emulation_result kvm_vz_gpsi_lwc2(union mips_instruction inst, | ||
1146 | u32 *opc, u32 cause, | ||
1147 | struct kvm_vcpu *vcpu) | ||
1148 | { | ||
1149 | unsigned int rs, rd; | ||
1150 | unsigned int hostcfg; | ||
1151 | unsigned long curr_pc; | ||
1152 | enum emulation_result er = EMULATE_DONE; | ||
1153 | |||
1154 | /* | ||
1155 | * Update PC and hold onto current PC in case there is | ||
1156 | * an error and we want to rollback the PC | ||
1157 | */ | ||
1158 | curr_pc = vcpu->arch.pc; | ||
1159 | er = update_pc(vcpu, cause); | ||
1160 | if (er == EMULATE_FAIL) | ||
1161 | return er; | ||
1162 | |||
1163 | rs = inst.loongson3_lscsr_format.rs; | ||
1164 | rd = inst.loongson3_lscsr_format.rd; | ||
1165 | switch (inst.loongson3_lscsr_format.fr) { | ||
1166 | case 0x8: /* Read CPUCFG */ | ||
1167 | ++vcpu->stat.vz_cpucfg_exits; | ||
1168 | hostcfg = read_cpucfg(vcpu->arch.gprs[rs]); | ||
1169 | |||
1170 | switch (vcpu->arch.gprs[rs]) { | ||
1171 | case LOONGSON_CFG0: | ||
1172 | vcpu->arch.gprs[rd] = 0x14c000; | ||
1173 | break; | ||
1174 | case LOONGSON_CFG1: | ||
1175 | hostcfg &= (LOONGSON_CFG1_FP | LOONGSON_CFG1_MMI | | ||
1176 | LOONGSON_CFG1_MSA1 | LOONGSON_CFG1_MSA2 | | ||
1177 | LOONGSON_CFG1_SFBP); | ||
1178 | vcpu->arch.gprs[rd] = hostcfg; | ||
1179 | break; | ||
1180 | case LOONGSON_CFG2: | ||
1181 | hostcfg &= (LOONGSON_CFG2_LEXT1 | LOONGSON_CFG2_LEXT2 | | ||
1182 | LOONGSON_CFG2_LEXT3 | LOONGSON_CFG2_LSPW); | ||
1183 | vcpu->arch.gprs[rd] = hostcfg; | ||
1184 | break; | ||
1185 | case LOONGSON_CFG3: | ||
1186 | vcpu->arch.gprs[rd] = hostcfg; | ||
1187 | break; | ||
1188 | default: | ||
1189 | /* Don't export any other advanced features to guest */ | ||
1190 | vcpu->arch.gprs[rd] = 0; | ||
1191 | break; | ||
1192 | } | ||
1193 | break; | ||
1194 | |||
1195 | default: | ||
1196 | kvm_err("lwc2 emulate not impl %d rs %lx @%lx\n", | ||
1197 | inst.loongson3_lscsr_format.fr, vcpu->arch.gprs[rs], curr_pc); | ||
1198 | er = EMULATE_FAIL; | ||
1199 | break; | ||
1200 | } | ||
1201 | |||
1202 | /* Rollback PC only if emulation was unsuccessful */ | ||
1203 | if (er == EMULATE_FAIL) { | ||
1204 | kvm_err("[%#lx]%s: unsupported lwc2 instruction 0x%08x 0x%08x\n", | ||
1205 | curr_pc, __func__, inst.word, inst.loongson3_lscsr_format.fr); | ||
1206 | |||
1207 | vcpu->arch.pc = curr_pc; | ||
1208 | } | ||
1209 | |||
1210 | return er; | ||
1211 | } | ||
1212 | #endif | ||
1213 | |||
1214 | static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc, | ||
1215 | struct kvm_vcpu *vcpu) | ||
1216 | { | ||
1217 | enum emulation_result er = EMULATE_DONE; | ||
1218 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
1219 | union mips_instruction inst; | ||
1220 | int rd, rt, sel; | ||
1221 | int err; | ||
1222 | |||
1223 | /* | ||
1224 | * Fetch the instruction. | ||
1225 | */ | ||
1226 | if (cause & CAUSEF_BD) | ||
1227 | opc += 1; | ||
1228 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
1229 | if (err) | ||
1230 | return EMULATE_FAIL; | ||
1231 | |||
1232 | switch (inst.r_format.opcode) { | ||
1233 | case cop0_op: | ||
1234 | er = kvm_vz_gpsi_cop0(inst, opc, cause, vcpu); | ||
1235 | break; | ||
1236 | #ifndef CONFIG_CPU_MIPSR6 | ||
1237 | case cache_op: | ||
1238 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); | ||
1239 | er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu); | ||
1240 | break; | ||
1241 | #endif | ||
1242 | #ifdef CONFIG_CPU_LOONGSON64 | ||
1243 | case lwc2_op: | ||
1244 | er = kvm_vz_gpsi_lwc2(inst, opc, cause, vcpu); | ||
1245 | break; | ||
1246 | #endif | ||
1247 | case spec3_op: | ||
1248 | switch (inst.spec3_format.func) { | ||
1249 | #ifdef CONFIG_CPU_MIPSR6 | ||
1250 | case cache6_op: | ||
1251 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE); | ||
1252 | er = kvm_vz_gpsi_cache(inst, opc, cause, vcpu); | ||
1253 | break; | ||
1254 | #endif | ||
1255 | case rdhwr_op: | ||
1256 | if (inst.r_format.rs || (inst.r_format.re >> 3)) | ||
1257 | goto unknown; | ||
1258 | |||
1259 | rd = inst.r_format.rd; | ||
1260 | rt = inst.r_format.rt; | ||
1261 | sel = inst.r_format.re & 0x7; | ||
1262 | |||
1263 | switch (rd) { | ||
1264 | case MIPS_HWR_CC: /* Read count register */ | ||
1265 | arch->gprs[rt] = | ||
1266 | (long)(int)kvm_mips_read_count(vcpu); | ||
1267 | break; | ||
1268 | default: | ||
1269 | trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, | ||
1270 | KVM_TRACE_HWR(rd, sel), 0); | ||
1271 | goto unknown; | ||
1272 | } | ||
1273 | |||
1274 | trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, | ||
1275 | KVM_TRACE_HWR(rd, sel), arch->gprs[rt]); | ||
1276 | |||
1277 | er = update_pc(vcpu, cause); | ||
1278 | break; | ||
1279 | default: | ||
1280 | goto unknown; | ||
1281 | } | ||
1282 | break; | ||
1283 | unknown: | ||
1284 | |||
1285 | default: | ||
1286 | kvm_err("GPSI exception not supported (%p/%#x)\n", | ||
1287 | opc, inst.word); | ||
1288 | kvm_arch_vcpu_dump_regs(vcpu); | ||
1289 | er = EMULATE_FAIL; | ||
1290 | break; | ||
1291 | } | ||
1292 | |||
1293 | return er; | ||
1294 | } | ||
1295 | |||
1296 | static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc, | ||
1297 | struct kvm_vcpu *vcpu) | ||
1298 | { | ||
1299 | enum emulation_result er = EMULATE_DONE; | ||
1300 | struct kvm_vcpu_arch *arch = &vcpu->arch; | ||
1301 | union mips_instruction inst; | ||
1302 | int err; | ||
1303 | |||
1304 | /* | ||
1305 | * Fetch the instruction. | ||
1306 | */ | ||
1307 | if (cause & CAUSEF_BD) | ||
1308 | opc += 1; | ||
1309 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
1310 | if (err) | ||
1311 | return EMULATE_FAIL; | ||
1312 | |||
1313 | /* complete MTC0 on behalf of guest and advance EPC */ | ||
1314 | if (inst.c0r_format.opcode == cop0_op && | ||
1315 | inst.c0r_format.rs == mtc_op && | ||
1316 | inst.c0r_format.z == 0) { | ||
1317 | int rt = inst.c0r_format.rt; | ||
1318 | int rd = inst.c0r_format.rd; | ||
1319 | int sel = inst.c0r_format.sel; | ||
1320 | unsigned int val = arch->gprs[rt]; | ||
1321 | unsigned int old_val, change; | ||
1322 | |||
1323 | trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel), | ||
1324 | val); | ||
1325 | |||
1326 | if ((rd == MIPS_CP0_STATUS) && (sel == 0)) { | ||
1327 | /* FR bit should read as zero if no FPU */ | ||
1328 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
1329 | val &= ~(ST0_CU1 | ST0_FR); | ||
1330 | |||
1331 | /* | ||
1332 | * Also don't allow FR to be set if host doesn't support | ||
1333 | * it. | ||
1334 | */ | ||
1335 | if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64)) | ||
1336 | val &= ~ST0_FR; | ||
1337 | |||
1338 | old_val = read_gc0_status(); | ||
1339 | change = val ^ old_val; | ||
1340 | |||
1341 | if (change & ST0_FR) { | ||
1342 | /* | ||
1343 | * FPU and Vector register state is made | ||
1344 | * UNPREDICTABLE by a change of FR, so don't | ||
1345 | * even bother saving it. | ||
1346 | */ | ||
1347 | kvm_drop_fpu(vcpu); | ||
1348 | } | ||
1349 | |||
1350 | /* | ||
1351 | * If MSA state is already live, it is undefined how it | ||
1352 | * interacts with FR=0 FPU state, and we don't want to | ||
1353 | * hit reserved instruction exceptions trying to save | ||
1354 | * the MSA state later when CU=1 && FR=1, so play it | ||
1355 | * safe and save it first. | ||
1356 | */ | ||
1357 | if (change & ST0_CU1 && !(val & ST0_FR) && | ||
1358 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) | ||
1359 | kvm_lose_fpu(vcpu); | ||
1360 | |||
1361 | write_gc0_status(val); | ||
1362 | } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) { | ||
1363 | u32 old_cause = read_gc0_cause(); | ||
1364 | u32 change = old_cause ^ val; | ||
1365 | |||
1366 | /* DC bit enabling/disabling timer? */ | ||
1367 | if (change & CAUSEF_DC) { | ||
1368 | if (val & CAUSEF_DC) { | ||
1369 | kvm_vz_lose_htimer(vcpu); | ||
1370 | kvm_mips_count_disable_cause(vcpu); | ||
1371 | } else { | ||
1372 | kvm_mips_count_enable_cause(vcpu); | ||
1373 | } | ||
1374 | } | ||
1375 | |||
1376 | /* Only certain bits are RW to the guest */ | ||
1377 | change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP | | ||
1378 | CAUSEF_IP0 | CAUSEF_IP1); | ||
1379 | |||
1380 | /* WP can only be cleared */ | ||
1381 | change &= ~CAUSEF_WP | old_cause; | ||
1382 | |||
1383 | write_gc0_cause(old_cause ^ change); | ||
1384 | } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */ | ||
1385 | write_gc0_intctl(val); | ||
1386 | } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) { | ||
1387 | old_val = read_gc0_config5(); | ||
1388 | change = val ^ old_val; | ||
1389 | /* Handle changes in FPU/MSA modes */ | ||
1390 | preempt_disable(); | ||
1391 | |||
1392 | /* | ||
1393 | * Propagate FRE changes immediately if the FPU | ||
1394 | * context is already loaded. | ||
1395 | */ | ||
1396 | if (change & MIPS_CONF5_FRE && | ||
1397 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) | ||
1398 | change_c0_config5(MIPS_CONF5_FRE, val); | ||
1399 | |||
1400 | preempt_enable(); | ||
1401 | |||
1402 | val = old_val ^ | ||
1403 | (change & kvm_vz_config5_guest_wrmask(vcpu)); | ||
1404 | write_gc0_config5(val); | ||
1405 | } else { | ||
1406 | kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n", | ||
1407 | opc, inst.word); | ||
1408 | er = EMULATE_FAIL; | ||
1409 | } | ||
1410 | |||
1411 | if (er != EMULATE_FAIL) | ||
1412 | er = update_pc(vcpu, cause); | ||
1413 | } else { | ||
1414 | kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n", | ||
1415 | opc, inst.word); | ||
1416 | er = EMULATE_FAIL; | ||
1417 | } | ||
1418 | |||
1419 | return er; | ||
1420 | } | ||
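The Cause handling above uses a masking trick to make WP clear-only: "change &= ~CAUSEF_WP | old_cause" drops a 0->1 WP transition unless WP was already set. A standalone sketch of the idiom (the WP bit position is assumed, not taken from this file):

	#include <stdio.h>
	#include <stdint.h>

	#define CAUSEF_WP (1u << 22)	/* assumed Cause.WP position */

	static uint32_t apply_cause_write(uint32_t old_cause, uint32_t val,
					  uint32_t rw_mask)
	{
		uint32_t change = (old_cause ^ val) & rw_mask;

		/* Drop a WP 0->1 change unless WP was already set. */
		change &= ~CAUSEF_WP | old_cause;
		return old_cause ^ change;
	}

	int main(void)
	{
		/* Guest tries to set WP: ignored. */
		printf("%#x\n", apply_cause_write(0, CAUSEF_WP, CAUSEF_WP));	/* 0 */
		/* Guest clears WP: honoured. */
		printf("%#x\n", apply_cause_write(CAUSEF_WP, 0, CAUSEF_WP));	/* 0 */
		return 0;
	}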
1421 | |||
1422 | static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc, | ||
1423 | struct kvm_vcpu *vcpu) | ||
1424 | { | ||
1425 | /* | ||
1426 | 	 * Presumably this is due to MC (guest mode change), so let's trace some | ||
1427 | * relevant info. | ||
1428 | */ | ||
1429 | trace_kvm_guest_mode_change(vcpu); | ||
1430 | |||
1431 | return EMULATE_DONE; | ||
1432 | } | ||
1433 | |||
1434 | static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc, | ||
1435 | struct kvm_vcpu *vcpu) | ||
1436 | { | ||
1437 | enum emulation_result er; | ||
1438 | union mips_instruction inst; | ||
1439 | unsigned long curr_pc; | ||
1440 | int err; | ||
1441 | |||
1442 | if (cause & CAUSEF_BD) | ||
1443 | opc += 1; | ||
1444 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
1445 | if (err) | ||
1446 | return EMULATE_FAIL; | ||
1447 | |||
1448 | /* | ||
1449 | * Update PC and hold onto current PC in case there is | ||
1450 | * an error and we want to rollback the PC | ||
1451 | */ | ||
1452 | curr_pc = vcpu->arch.pc; | ||
1453 | er = update_pc(vcpu, cause); | ||
1454 | if (er == EMULATE_FAIL) | ||
1455 | return er; | ||
1456 | |||
1457 | er = kvm_mips_emul_hypcall(vcpu, inst); | ||
1458 | if (er == EMULATE_FAIL) | ||
1459 | vcpu->arch.pc = curr_pc; | ||
1460 | |||
1461 | return er; | ||
1462 | } | ||
1463 | |||
1464 | static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode, | ||
1465 | u32 cause, | ||
1466 | u32 *opc, | ||
1467 | struct kvm_vcpu *vcpu) | ||
1468 | { | ||
1469 | u32 inst; | ||
1470 | |||
1471 | /* | ||
1472 | * Fetch the instruction. | ||
1473 | */ | ||
1474 | if (cause & CAUSEF_BD) | ||
1475 | opc += 1; | ||
1476 | kvm_get_badinstr(opc, vcpu, &inst); | ||
1477 | |||
1478 | kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n", | ||
1479 | gexccode, opc, inst, read_gc0_status()); | ||
1480 | |||
1481 | return EMULATE_FAIL; | ||
1482 | } | ||
1483 | |||
1484 | static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu) | ||
1485 | { | ||
1486 | u32 *opc = (u32 *) vcpu->arch.pc; | ||
1487 | u32 cause = vcpu->arch.host_cp0_cause; | ||
1488 | enum emulation_result er = EMULATE_DONE; | ||
1489 | u32 gexccode = (vcpu->arch.host_cp0_guestctl0 & | ||
1490 | MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT; | ||
1491 | int ret = RESUME_GUEST; | ||
1492 | |||
1493 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode); | ||
1494 | switch (gexccode) { | ||
1495 | case MIPS_GCTL0_GEXC_GPSI: | ||
1496 | ++vcpu->stat.vz_gpsi_exits; | ||
1497 | er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu); | ||
1498 | break; | ||
1499 | case MIPS_GCTL0_GEXC_GSFC: | ||
1500 | ++vcpu->stat.vz_gsfc_exits; | ||
1501 | er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu); | ||
1502 | break; | ||
1503 | case MIPS_GCTL0_GEXC_HC: | ||
1504 | ++vcpu->stat.vz_hc_exits; | ||
1505 | er = kvm_trap_vz_handle_hc(cause, opc, vcpu); | ||
1506 | break; | ||
1507 | case MIPS_GCTL0_GEXC_GRR: | ||
1508 | ++vcpu->stat.vz_grr_exits; | ||
1509 | er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc, | ||
1510 | vcpu); | ||
1511 | break; | ||
1512 | case MIPS_GCTL0_GEXC_GVA: | ||
1513 | ++vcpu->stat.vz_gva_exits; | ||
1514 | er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc, | ||
1515 | vcpu); | ||
1516 | break; | ||
1517 | case MIPS_GCTL0_GEXC_GHFC: | ||
1518 | ++vcpu->stat.vz_ghfc_exits; | ||
1519 | er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu); | ||
1520 | break; | ||
1521 | case MIPS_GCTL0_GEXC_GPA: | ||
1522 | ++vcpu->stat.vz_gpa_exits; | ||
1523 | er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc, | ||
1524 | vcpu); | ||
1525 | break; | ||
1526 | default: | ||
1527 | ++vcpu->stat.vz_resvd_exits; | ||
1528 | er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc, | ||
1529 | vcpu); | ||
1530 | break; | ||
1531 | |||
1532 | } | ||
1533 | |||
1534 | if (er == EMULATE_DONE) { | ||
1535 | ret = RESUME_GUEST; | ||
1536 | } else if (er == EMULATE_HYPERCALL) { | ||
1537 | ret = kvm_mips_handle_hypcall(vcpu); | ||
1538 | } else { | ||
1539 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1540 | ret = RESUME_HOST; | ||
1541 | } | ||
1542 | return ret; | ||
1543 | } | ||
1544 | |||
1545 | /** | ||
1546 | * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor. | ||
1547 | * @vcpu: Virtual CPU context. | ||
1548 | * | ||
1549 | * Handle when the guest attempts to use a coprocessor which hasn't been allowed | ||
1550 | * by the root context. | ||
1551 | */ | ||
1552 | static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu) | ||
1553 | { | ||
1554 | u32 cause = vcpu->arch.host_cp0_cause; | ||
1555 | enum emulation_result er = EMULATE_FAIL; | ||
1556 | int ret = RESUME_GUEST; | ||
1557 | |||
1558 | if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) { | ||
1559 | /* | ||
1560 | * If guest FPU not present, the FPU operation should have been | ||
1561 | * treated as a reserved instruction! | ||
1562 | * If FPU already in use, we shouldn't get this at all. | ||
1563 | */ | ||
1564 | if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) || | ||
1565 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { | ||
1566 | preempt_enable(); | ||
1567 | return EMULATE_FAIL; | ||
1568 | } | ||
1569 | |||
1570 | kvm_own_fpu(vcpu); | ||
1571 | er = EMULATE_DONE; | ||
1572 | } | ||
1573 | /* other coprocessors not handled */ | ||
1574 | |||
1575 | switch (er) { | ||
1576 | case EMULATE_DONE: | ||
1577 | ret = RESUME_GUEST; | ||
1578 | break; | ||
1579 | |||
1580 | case EMULATE_FAIL: | ||
1581 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1582 | ret = RESUME_HOST; | ||
1583 | break; | ||
1584 | |||
1585 | default: | ||
1586 | BUG(); | ||
1587 | } | ||
1588 | return ret; | ||
1589 | } | ||
1590 | |||
1591 | /** | ||
1592 | * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root. | ||
1593 | * @vcpu: Virtual CPU context. | ||
1594 | * | ||
1595 | * Handle when the guest attempts to use MSA when it is disabled in the root | ||
1596 | * context. | ||
1597 | */ | ||
1598 | static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu) | ||
1599 | { | ||
1600 | /* | ||
1601 | * If MSA not present or not exposed to guest or FR=0, the MSA operation | ||
1602 | * should have been treated as a reserved instruction! | ||
1603 | * Same if CU1=1, FR=0. | ||
1604 | * If MSA already in use, we shouldn't get this at all. | ||
1605 | */ | ||
1606 | if (!kvm_mips_guest_has_msa(&vcpu->arch) || | ||
1607 | (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 || | ||
1608 | !(read_gc0_config5() & MIPS_CONF5_MSAEN) || | ||
1609 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { | ||
1610 | vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1611 | return RESUME_HOST; | ||
1612 | } | ||
1613 | |||
1614 | kvm_own_msa(vcpu); | ||
1615 | |||
1616 | return RESUME_GUEST; | ||
1617 | } | ||
1618 | |||
1619 | static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu) | ||
1620 | { | ||
1621 | struct kvm_run *run = vcpu->run; | ||
1622 | u32 *opc = (u32 *) vcpu->arch.pc; | ||
1623 | u32 cause = vcpu->arch.host_cp0_cause; | ||
1624 | ulong badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
1625 | union mips_instruction inst; | ||
1626 | enum emulation_result er = EMULATE_DONE; | ||
1627 | int err, ret = RESUME_GUEST; | ||
1628 | |||
1629 | if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) { | ||
1630 | /* A code fetch fault doesn't count as an MMIO */ | ||
1631 | if (kvm_is_ifetch_fault(&vcpu->arch)) { | ||
1632 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1633 | return RESUME_HOST; | ||
1634 | } | ||
1635 | |||
1636 | /* Fetch the instruction */ | ||
1637 | if (cause & CAUSEF_BD) | ||
1638 | opc += 1; | ||
1639 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
1640 | if (err) { | ||
1641 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1642 | return RESUME_HOST; | ||
1643 | } | ||
1644 | |||
1645 | /* Treat as MMIO */ | ||
1646 | er = kvm_mips_emulate_load(inst, cause, vcpu); | ||
1647 | if (er == EMULATE_FAIL) { | ||
1648 | kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n", | ||
1649 | opc, badvaddr); | ||
1650 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1651 | } | ||
1652 | } | ||
1653 | |||
1654 | if (er == EMULATE_DONE) { | ||
1655 | ret = RESUME_GUEST; | ||
1656 | } else if (er == EMULATE_DO_MMIO) { | ||
1657 | run->exit_reason = KVM_EXIT_MMIO; | ||
1658 | ret = RESUME_HOST; | ||
1659 | } else { | ||
1660 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1661 | ret = RESUME_HOST; | ||
1662 | } | ||
1663 | return ret; | ||
1664 | } | ||
1665 | |||
1666 | static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu) | ||
1667 | { | ||
1668 | struct kvm_run *run = vcpu->run; | ||
1669 | u32 *opc = (u32 *) vcpu->arch.pc; | ||
1670 | u32 cause = vcpu->arch.host_cp0_cause; | ||
1671 | ulong badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
1672 | union mips_instruction inst; | ||
1673 | enum emulation_result er = EMULATE_DONE; | ||
1674 | int err; | ||
1675 | int ret = RESUME_GUEST; | ||
1676 | |||
1677 | /* Just try the access again if we couldn't do the translation */ | ||
1678 | if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr)) | ||
1679 | return RESUME_GUEST; | ||
1680 | vcpu->arch.host_cp0_badvaddr = badvaddr; | ||
1681 | |||
1682 | if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) { | ||
1683 | /* Fetch the instruction */ | ||
1684 | if (cause & CAUSEF_BD) | ||
1685 | opc += 1; | ||
1686 | err = kvm_get_badinstr(opc, vcpu, &inst.word); | ||
1687 | if (err) { | ||
1688 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1689 | return RESUME_HOST; | ||
1690 | } | ||
1691 | |||
1692 | /* Treat as MMIO */ | ||
1693 | er = kvm_mips_emulate_store(inst, cause, vcpu); | ||
1694 | if (er == EMULATE_FAIL) { | ||
1695 | kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n", | ||
1696 | opc, badvaddr); | ||
1697 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1698 | } | ||
1699 | } | ||
1700 | |||
1701 | if (er == EMULATE_DONE) { | ||
1702 | ret = RESUME_GUEST; | ||
1703 | } else if (er == EMULATE_DO_MMIO) { | ||
1704 | run->exit_reason = KVM_EXIT_MMIO; | ||
1705 | ret = RESUME_HOST; | ||
1706 | } else { | ||
1707 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1708 | ret = RESUME_HOST; | ||
1709 | } | ||
1710 | return ret; | ||
1711 | } | ||
1712 | |||
1713 | static u64 kvm_vz_get_one_regs[] = { | ||
1714 | KVM_REG_MIPS_CP0_INDEX, | ||
1715 | KVM_REG_MIPS_CP0_ENTRYLO0, | ||
1716 | KVM_REG_MIPS_CP0_ENTRYLO1, | ||
1717 | KVM_REG_MIPS_CP0_CONTEXT, | ||
1718 | KVM_REG_MIPS_CP0_PAGEMASK, | ||
1719 | KVM_REG_MIPS_CP0_PAGEGRAIN, | ||
1720 | KVM_REG_MIPS_CP0_WIRED, | ||
1721 | KVM_REG_MIPS_CP0_HWRENA, | ||
1722 | KVM_REG_MIPS_CP0_BADVADDR, | ||
1723 | KVM_REG_MIPS_CP0_COUNT, | ||
1724 | KVM_REG_MIPS_CP0_ENTRYHI, | ||
1725 | KVM_REG_MIPS_CP0_COMPARE, | ||
1726 | KVM_REG_MIPS_CP0_STATUS, | ||
1727 | KVM_REG_MIPS_CP0_INTCTL, | ||
1728 | KVM_REG_MIPS_CP0_CAUSE, | ||
1729 | KVM_REG_MIPS_CP0_EPC, | ||
1730 | KVM_REG_MIPS_CP0_PRID, | ||
1731 | KVM_REG_MIPS_CP0_EBASE, | ||
1732 | KVM_REG_MIPS_CP0_CONFIG, | ||
1733 | KVM_REG_MIPS_CP0_CONFIG1, | ||
1734 | KVM_REG_MIPS_CP0_CONFIG2, | ||
1735 | KVM_REG_MIPS_CP0_CONFIG3, | ||
1736 | KVM_REG_MIPS_CP0_CONFIG4, | ||
1737 | KVM_REG_MIPS_CP0_CONFIG5, | ||
1738 | KVM_REG_MIPS_CP0_CONFIG6, | ||
1739 | #ifdef CONFIG_64BIT | ||
1740 | KVM_REG_MIPS_CP0_XCONTEXT, | ||
1741 | #endif | ||
1742 | KVM_REG_MIPS_CP0_ERROREPC, | ||
1743 | |||
1744 | KVM_REG_MIPS_COUNT_CTL, | ||
1745 | KVM_REG_MIPS_COUNT_RESUME, | ||
1746 | KVM_REG_MIPS_COUNT_HZ, | ||
1747 | }; | ||
1748 | |||
1749 | static u64 kvm_vz_get_one_regs_contextconfig[] = { | ||
1750 | KVM_REG_MIPS_CP0_CONTEXTCONFIG, | ||
1751 | #ifdef CONFIG_64BIT | ||
1752 | KVM_REG_MIPS_CP0_XCONTEXTCONFIG, | ||
1753 | #endif | ||
1754 | }; | ||
1755 | |||
1756 | static u64 kvm_vz_get_one_regs_segments[] = { | ||
1757 | KVM_REG_MIPS_CP0_SEGCTL0, | ||
1758 | KVM_REG_MIPS_CP0_SEGCTL1, | ||
1759 | KVM_REG_MIPS_CP0_SEGCTL2, | ||
1760 | }; | ||
1761 | |||
1762 | static u64 kvm_vz_get_one_regs_htw[] = { | ||
1763 | KVM_REG_MIPS_CP0_PWBASE, | ||
1764 | KVM_REG_MIPS_CP0_PWFIELD, | ||
1765 | KVM_REG_MIPS_CP0_PWSIZE, | ||
1766 | KVM_REG_MIPS_CP0_PWCTL, | ||
1767 | }; | ||
1768 | |||
1769 | static u64 kvm_vz_get_one_regs_kscratch[] = { | ||
1770 | KVM_REG_MIPS_CP0_KSCRATCH1, | ||
1771 | KVM_REG_MIPS_CP0_KSCRATCH2, | ||
1772 | KVM_REG_MIPS_CP0_KSCRATCH3, | ||
1773 | KVM_REG_MIPS_CP0_KSCRATCH4, | ||
1774 | KVM_REG_MIPS_CP0_KSCRATCH5, | ||
1775 | KVM_REG_MIPS_CP0_KSCRATCH6, | ||
1776 | }; | ||
1777 | |||
1778 | static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu) | ||
1779 | { | ||
1780 | unsigned long ret; | ||
1781 | |||
1782 | ret = ARRAY_SIZE(kvm_vz_get_one_regs); | ||
1783 | if (cpu_guest_has_userlocal) | ||
1784 | ++ret; | ||
1785 | if (cpu_guest_has_badinstr) | ||
1786 | ++ret; | ||
1787 | if (cpu_guest_has_badinstrp) | ||
1788 | ++ret; | ||
1789 | if (cpu_guest_has_contextconfig) | ||
1790 | ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig); | ||
1791 | if (cpu_guest_has_segments) | ||
1792 | ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments); | ||
1793 | if (cpu_guest_has_htw || cpu_guest_has_ldpte) | ||
1794 | ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw); | ||
1795 | if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) | ||
1796 | ret += 1 + ARRAY_SIZE(vcpu->arch.maar); | ||
1797 | ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask); | ||
1798 | |||
1799 | return ret; | ||
1800 | } | ||
1801 | |||
1802 | static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) | ||
1803 | { | ||
1804 | u64 index; | ||
1805 | unsigned int i; | ||
1806 | |||
1807 | if (copy_to_user(indices, kvm_vz_get_one_regs, | ||
1808 | sizeof(kvm_vz_get_one_regs))) | ||
1809 | return -EFAULT; | ||
1810 | indices += ARRAY_SIZE(kvm_vz_get_one_regs); | ||
1811 | |||
1812 | if (cpu_guest_has_userlocal) { | ||
1813 | index = KVM_REG_MIPS_CP0_USERLOCAL; | ||
1814 | if (copy_to_user(indices, &index, sizeof(index))) | ||
1815 | return -EFAULT; | ||
1816 | ++indices; | ||
1817 | } | ||
1818 | if (cpu_guest_has_badinstr) { | ||
1819 | index = KVM_REG_MIPS_CP0_BADINSTR; | ||
1820 | if (copy_to_user(indices, &index, sizeof(index))) | ||
1821 | return -EFAULT; | ||
1822 | ++indices; | ||
1823 | } | ||
1824 | if (cpu_guest_has_badinstrp) { | ||
1825 | index = KVM_REG_MIPS_CP0_BADINSTRP; | ||
1826 | if (copy_to_user(indices, &index, sizeof(index))) | ||
1827 | return -EFAULT; | ||
1828 | ++indices; | ||
1829 | } | ||
1830 | if (cpu_guest_has_contextconfig) { | ||
1831 | if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig, | ||
1832 | sizeof(kvm_vz_get_one_regs_contextconfig))) | ||
1833 | return -EFAULT; | ||
1834 | indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig); | ||
1835 | } | ||
1836 | if (cpu_guest_has_segments) { | ||
1837 | if (copy_to_user(indices, kvm_vz_get_one_regs_segments, | ||
1838 | sizeof(kvm_vz_get_one_regs_segments))) | ||
1839 | return -EFAULT; | ||
1840 | indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments); | ||
1841 | } | ||
1842 | if (cpu_guest_has_htw || cpu_guest_has_ldpte) { | ||
1843 | if (copy_to_user(indices, kvm_vz_get_one_regs_htw, | ||
1844 | sizeof(kvm_vz_get_one_regs_htw))) | ||
1845 | return -EFAULT; | ||
1846 | indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw); | ||
1847 | } | ||
1848 | if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) { | ||
1849 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) { | ||
1850 | index = KVM_REG_MIPS_CP0_MAAR(i); | ||
1851 | if (copy_to_user(indices, &index, sizeof(index))) | ||
1852 | return -EFAULT; | ||
1853 | ++indices; | ||
1854 | } | ||
1855 | |||
1856 | index = KVM_REG_MIPS_CP0_MAARI; | ||
1857 | if (copy_to_user(indices, &index, sizeof(index))) | ||
1858 | return -EFAULT; | ||
1859 | ++indices; | ||
1860 | } | ||
1861 | for (i = 0; i < 6; ++i) { | ||
1862 | if (!cpu_guest_has_kscr(i + 2)) | ||
1863 | continue; | ||
1864 | |||
1865 | if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i], | ||
1866 | sizeof(kvm_vz_get_one_regs_kscratch[i]))) | ||
1867 | return -EFAULT; | ||
1868 | ++indices; | ||
1869 | } | ||
1870 | |||
1871 | return 0; | ||
1872 | } | ||
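The two helpers above back the generic KVM_GET_REG_LIST ioctl: num_regs() sizes the list and copy_reg_indices() fills it. A hedged userspace sketch of the standard call pattern (vcpu_fd and dump_reg_list() are illustrative names; the probe-then-fetch dance via E2BIG is the documented KVM convention):

#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static void dump_reg_list(int vcpu_fd)
{
	struct kvm_reg_list probe = { .n = 0 }, *list;
	__u64 i;

	/* With n == 0 the ioctl fails with E2BIG but fills in the count. */
	ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe);

	list = calloc(1, sizeof(*list) + probe.n * sizeof(list->reg[0]));
	if (!list)
		return;
	list->n = probe.n;

	if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) == 0)
		for (i = 0; i < list->n; i++)
			printf("reg[%llu] = 0x%016llx\n",
			       (unsigned long long)i,
			       (unsigned long long)list->reg[i]);
	free(list);
}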
1873 | |||
1874 | static inline s64 entrylo_kvm_to_user(unsigned long v) | ||
1875 | { | ||
1876 | s64 mask, ret = v; | ||
1877 | |||
1878 | if (BITS_PER_LONG == 32) { | ||
1879 | /* | ||
1880 | * KVM API exposes 64-bit version of the register, so move the | ||
1881 | * RI/XI bits up into place. | ||
1882 | */ | ||
1883 | mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI; | ||
1884 | ret &= ~mask; | ||
1885 | ret |= ((s64)v & mask) << 32; | ||
1886 | } | ||
1887 | return ret; | ||
1888 | } | ||
1889 | |||
1890 | static inline unsigned long entrylo_user_to_kvm(s64 v) | ||
1891 | { | ||
1892 | unsigned long mask, ret = v; | ||
1893 | |||
1894 | if (BITS_PER_LONG == 32) { | ||
1895 | /* | ||
1896 | * KVM API exposes 64-bit version of the register, so move the | ||
1897 | * RI/XI bits down into place. | ||
1898 | */ | ||
1899 | mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI; | ||
1900 | ret &= ~mask; | ||
1901 | ret |= (v >> 32) & mask; | ||
1902 | } | ||
1903 | return ret; | ||
1904 | } | ||
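The pair of helpers above exists because a 32-bit host kernel keeps EntryLo's RI/XI bits at positions 31:30, while the KVM ONE_REG API always exposes a 64-bit register with RI/XI at bits 63:62. A standalone round-trip sketch of the repositioning (bit positions per the MIPS architecture; unsigned types are used to keep the shifts well defined):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ENTRYLO_RI (1u << 31)	/* 32-bit EntryLo.RI */
#define ENTRYLO_XI (1u << 30)	/* 32-bit EntryLo.XI */

static uint64_t kvm_to_user32(uint32_t v)
{
	uint64_t mask = ENTRYLO_RI | ENTRYLO_XI, ret = v;

	ret &= ~mask;			   /* clear RI/XI at bits 31:30 */
	ret |= ((uint64_t)v & mask) << 32; /* re-expose them at bits 63:62 */
	return ret;
}

static uint32_t user_to_kvm32(uint64_t v)
{
	uint32_t mask = ENTRYLO_RI | ENTRYLO_XI, ret = (uint32_t)v;

	ret &= ~mask;			/* drop stray bits at 31:30 */
	ret |= (v >> 32) & mask;	/* pull RI/XI back down from 63:62 */
	return ret;
}

int main(void)
{
	uint32_t lo = ENTRYLO_RI | 0x123456; /* RI plus some PFN/flag bits */
	uint64_t user = kvm_to_user32(lo);

	assert(user >> 63);		   /* RI now sits at bit 63 */
	assert(user_to_kvm32(user) == lo); /* round trip is lossless */
	printf("kvm %08x <-> user %016llx\n", lo, (unsigned long long)user);
	return 0;
}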
1905 | |||
1906 | static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu, | ||
1907 | const struct kvm_one_reg *reg, | ||
1908 | s64 *v) | ||
1909 | { | ||
1910 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1911 | unsigned int idx; | ||
1912 | |||
1913 | switch (reg->id) { | ||
1914 | case KVM_REG_MIPS_CP0_INDEX: | ||
1915 | *v = (long)read_gc0_index(); | ||
1916 | break; | ||
1917 | case KVM_REG_MIPS_CP0_ENTRYLO0: | ||
1918 | *v = entrylo_kvm_to_user(read_gc0_entrylo0()); | ||
1919 | break; | ||
1920 | case KVM_REG_MIPS_CP0_ENTRYLO1: | ||
1921 | *v = entrylo_kvm_to_user(read_gc0_entrylo1()); | ||
1922 | break; | ||
1923 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
1924 | *v = (long)read_gc0_context(); | ||
1925 | break; | ||
1926 | case KVM_REG_MIPS_CP0_CONTEXTCONFIG: | ||
1927 | if (!cpu_guest_has_contextconfig) | ||
1928 | return -EINVAL; | ||
1929 | *v = read_gc0_contextconfig(); | ||
1930 | break; | ||
1931 | case KVM_REG_MIPS_CP0_USERLOCAL: | ||
1932 | if (!cpu_guest_has_userlocal) | ||
1933 | return -EINVAL; | ||
1934 | *v = read_gc0_userlocal(); | ||
1935 | break; | ||
1936 | #ifdef CONFIG_64BIT | ||
1937 | case KVM_REG_MIPS_CP0_XCONTEXTCONFIG: | ||
1938 | if (!cpu_guest_has_contextconfig) | ||
1939 | return -EINVAL; | ||
1940 | *v = read_gc0_xcontextconfig(); | ||
1941 | break; | ||
1942 | #endif | ||
1943 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
1944 | *v = (long)read_gc0_pagemask(); | ||
1945 | break; | ||
1946 | case KVM_REG_MIPS_CP0_PAGEGRAIN: | ||
1947 | *v = (long)read_gc0_pagegrain(); | ||
1948 | break; | ||
1949 | case KVM_REG_MIPS_CP0_SEGCTL0: | ||
1950 | if (!cpu_guest_has_segments) | ||
1951 | return -EINVAL; | ||
1952 | *v = read_gc0_segctl0(); | ||
1953 | break; | ||
1954 | case KVM_REG_MIPS_CP0_SEGCTL1: | ||
1955 | if (!cpu_guest_has_segments) | ||
1956 | return -EINVAL; | ||
1957 | *v = read_gc0_segctl1(); | ||
1958 | break; | ||
1959 | case KVM_REG_MIPS_CP0_SEGCTL2: | ||
1960 | if (!cpu_guest_has_segments) | ||
1961 | return -EINVAL; | ||
1962 | *v = read_gc0_segctl2(); | ||
1963 | break; | ||
1964 | case KVM_REG_MIPS_CP0_PWBASE: | ||
1965 | if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) | ||
1966 | return -EINVAL; | ||
1967 | *v = read_gc0_pwbase(); | ||
1968 | break; | ||
1969 | case KVM_REG_MIPS_CP0_PWFIELD: | ||
1970 | if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) | ||
1971 | return -EINVAL; | ||
1972 | *v = read_gc0_pwfield(); | ||
1973 | break; | ||
1974 | case KVM_REG_MIPS_CP0_PWSIZE: | ||
1975 | if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) | ||
1976 | return -EINVAL; | ||
1977 | *v = read_gc0_pwsize(); | ||
1978 | break; | ||
1979 | case KVM_REG_MIPS_CP0_WIRED: | ||
1980 | *v = (long)read_gc0_wired(); | ||
1981 | break; | ||
1982 | case KVM_REG_MIPS_CP0_PWCTL: | ||
1983 | if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) | ||
1984 | return -EINVAL; | ||
1985 | *v = read_gc0_pwctl(); | ||
1986 | break; | ||
1987 | case KVM_REG_MIPS_CP0_HWRENA: | ||
1988 | *v = (long)read_gc0_hwrena(); | ||
1989 | break; | ||
1990 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
1991 | *v = (long)read_gc0_badvaddr(); | ||
1992 | break; | ||
1993 | case KVM_REG_MIPS_CP0_BADINSTR: | ||
1994 | if (!cpu_guest_has_badinstr) | ||
1995 | return -EINVAL; | ||
1996 | *v = read_gc0_badinstr(); | ||
1997 | break; | ||
1998 | case KVM_REG_MIPS_CP0_BADINSTRP: | ||
1999 | if (!cpu_guest_has_badinstrp) | ||
2000 | return -EINVAL; | ||
2001 | *v = read_gc0_badinstrp(); | ||
2002 | break; | ||
2003 | case KVM_REG_MIPS_CP0_COUNT: | ||
2004 | *v = kvm_mips_read_count(vcpu); | ||
2005 | break; | ||
2006 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
2007 | *v = (long)read_gc0_entryhi(); | ||
2008 | break; | ||
2009 | case KVM_REG_MIPS_CP0_COMPARE: | ||
2010 | *v = (long)read_gc0_compare(); | ||
2011 | break; | ||
2012 | case KVM_REG_MIPS_CP0_STATUS: | ||
2013 | *v = (long)read_gc0_status(); | ||
2014 | break; | ||
2015 | case KVM_REG_MIPS_CP0_INTCTL: | ||
2016 | *v = read_gc0_intctl(); | ||
2017 | break; | ||
2018 | case KVM_REG_MIPS_CP0_CAUSE: | ||
2019 | *v = (long)read_gc0_cause(); | ||
2020 | break; | ||
2021 | case KVM_REG_MIPS_CP0_EPC: | ||
2022 | *v = (long)read_gc0_epc(); | ||
2023 | break; | ||
2024 | case KVM_REG_MIPS_CP0_PRID: | ||
2025 | switch (boot_cpu_type()) { | ||
2026 | case CPU_CAVIUM_OCTEON3: | ||
2027 | /* Octeon III has a read-only guest.PRid */ | ||
2028 | *v = read_gc0_prid(); | ||
2029 | break; | ||
2030 | default: | ||
2031 | *v = (long)kvm_read_c0_guest_prid(cop0); | ||
2032 | break; | ||
2033 | } | ||
2034 | break; | ||
2035 | case KVM_REG_MIPS_CP0_EBASE: | ||
2036 | *v = kvm_vz_read_gc0_ebase(); | ||
2037 | break; | ||
2038 | case KVM_REG_MIPS_CP0_CONFIG: | ||
2039 | *v = read_gc0_config(); | ||
2040 | break; | ||
2041 | case KVM_REG_MIPS_CP0_CONFIG1: | ||
2042 | if (!cpu_guest_has_conf1) | ||
2043 | return -EINVAL; | ||
2044 | *v = read_gc0_config1(); | ||
2045 | break; | ||
2046 | case KVM_REG_MIPS_CP0_CONFIG2: | ||
2047 | if (!cpu_guest_has_conf2) | ||
2048 | return -EINVAL; | ||
2049 | *v = read_gc0_config2(); | ||
2050 | break; | ||
2051 | case KVM_REG_MIPS_CP0_CONFIG3: | ||
2052 | if (!cpu_guest_has_conf3) | ||
2053 | return -EINVAL; | ||
2054 | *v = read_gc0_config3(); | ||
2055 | break; | ||
2056 | case KVM_REG_MIPS_CP0_CONFIG4: | ||
2057 | if (!cpu_guest_has_conf4) | ||
2058 | return -EINVAL; | ||
2059 | *v = read_gc0_config4(); | ||
2060 | break; | ||
2061 | case KVM_REG_MIPS_CP0_CONFIG5: | ||
2062 | if (!cpu_guest_has_conf5) | ||
2063 | return -EINVAL; | ||
2064 | *v = read_gc0_config5(); | ||
2065 | break; | ||
2066 | case KVM_REG_MIPS_CP0_CONFIG6: | ||
2067 | *v = kvm_read_sw_gc0_config6(cop0); | ||
2068 | break; | ||
2069 | case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f): | ||
2070 | if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) | ||
2071 | return -EINVAL; | ||
2072 | idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); | ||
2073 | if (idx >= ARRAY_SIZE(vcpu->arch.maar)) | ||
2074 | return -EINVAL; | ||
2075 | *v = vcpu->arch.maar[idx]; | ||
2076 | break; | ||
2077 | case KVM_REG_MIPS_CP0_MAARI: | ||
2078 | if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) | ||
2079 | return -EINVAL; | ||
2080 | *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0); | ||
2081 | break; | ||
2082 | #ifdef CONFIG_64BIT | ||
2083 | case KVM_REG_MIPS_CP0_XCONTEXT: | ||
2084 | *v = read_gc0_xcontext(); | ||
2085 | break; | ||
2086 | #endif | ||
2087 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
2088 | *v = (long)read_gc0_errorepc(); | ||
2089 | break; | ||
2090 | case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6: | ||
2091 | idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; | ||
2092 | if (!cpu_guest_has_kscr(idx)) | ||
2093 | return -EINVAL; | ||
2094 | switch (idx) { | ||
2095 | case 2: | ||
2096 | *v = (long)read_gc0_kscratch1(); | ||
2097 | break; | ||
2098 | case 3: | ||
2099 | *v = (long)read_gc0_kscratch2(); | ||
2100 | break; | ||
2101 | case 4: | ||
2102 | *v = (long)read_gc0_kscratch3(); | ||
2103 | break; | ||
2104 | case 5: | ||
2105 | *v = (long)read_gc0_kscratch4(); | ||
2106 | break; | ||
2107 | case 6: | ||
2108 | *v = (long)read_gc0_kscratch5(); | ||
2109 | break; | ||
2110 | case 7: | ||
2111 | *v = (long)read_gc0_kscratch6(); | ||
2112 | break; | ||
2113 | } | ||
2114 | break; | ||
2115 | case KVM_REG_MIPS_COUNT_CTL: | ||
2116 | *v = vcpu->arch.count_ctl; | ||
2117 | break; | ||
2118 | case KVM_REG_MIPS_COUNT_RESUME: | ||
2119 | *v = ktime_to_ns(vcpu->arch.count_resume); | ||
2120 | break; | ||
2121 | case KVM_REG_MIPS_COUNT_HZ: | ||
2122 | *v = vcpu->arch.count_hz; | ||
2123 | break; | ||
2124 | default: | ||
2125 | return -EINVAL; | ||
2126 | } | ||
2127 | return 0; | ||
2128 | } | ||
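Userspace reaches kvm_vz_get_one_reg() through the generic KVM_GET_ONE_REG ioctl, passing a register id (for instance one returned by KVM_GET_REG_LIST above) and a pointer the kernel writes the value to. A minimal sketch, assuming a valid vcpu_fd:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Returns 0 on success; *out receives the 64-bit register value. */
static int get_mips_reg(int vcpu_fd, uint64_t id, uint64_t *out)
{
	struct kvm_one_reg reg = {
		.id = id,
		.addr = (uintptr_t)out,	/* kernel writes the value here */
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}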
2129 | |||
2130 | static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu, | ||
2131 | const struct kvm_one_reg *reg, | ||
2132 | s64 v) | ||
2133 | { | ||
2134 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2135 | unsigned int idx; | ||
2136 | int ret = 0; | ||
2137 | unsigned int cur, change; | ||
2138 | |||
2139 | switch (reg->id) { | ||
2140 | case KVM_REG_MIPS_CP0_INDEX: | ||
2141 | write_gc0_index(v); | ||
2142 | break; | ||
2143 | case KVM_REG_MIPS_CP0_ENTRYLO0: | ||
2144 | write_gc0_entrylo0(entrylo_user_to_kvm(v)); | ||
2145 | break; | ||
2146 | case KVM_REG_MIPS_CP0_ENTRYLO1: | ||
2147 | write_gc0_entrylo1(entrylo_user_to_kvm(v)); | ||
2148 | break; | ||
2149 | case KVM_REG_MIPS_CP0_CONTEXT: | ||
2150 | write_gc0_context(v); | ||
2151 | break; | ||
2152 | case KVM_REG_MIPS_CP0_CONTEXTCONFIG: | ||
2153 | if (!cpu_guest_has_contextconfig) | ||
2154 | return -EINVAL; | ||
2155 | write_gc0_contextconfig(v); | ||
2156 | break; | ||
2157 | case KVM_REG_MIPS_CP0_USERLOCAL: | ||
2158 | if (!cpu_guest_has_userlocal) | ||
2159 | return -EINVAL; | ||
2160 | write_gc0_userlocal(v); | ||
2161 | break; | ||
2162 | #ifdef CONFIG_64BIT | ||
2163 | case KVM_REG_MIPS_CP0_XCONTEXTCONFIG: | ||
2164 | if (!cpu_guest_has_contextconfig) | ||
2165 | return -EINVAL; | ||
2166 | write_gc0_xcontextconfig(v); | ||
2167 | break; | ||
2168 | #endif | ||
2169 | case KVM_REG_MIPS_CP0_PAGEMASK: | ||
2170 | write_gc0_pagemask(v); | ||
2171 | break; | ||
2172 | case KVM_REG_MIPS_CP0_PAGEGRAIN: | ||
2173 | write_gc0_pagegrain(v); | ||
2174 | break; | ||
2175 | case KVM_REG_MIPS_CP0_SEGCTL0: | ||
2176 | if (!cpu_guest_has_segments) | ||
2177 | return -EINVAL; | ||
2178 | write_gc0_segctl0(v); | ||
2179 | break; | ||
2180 | case KVM_REG_MIPS_CP0_SEGCTL1: | ||
2181 | if (!cpu_guest_has_segments) | ||
2182 | return -EINVAL; | ||
2183 | write_gc0_segctl1(v); | ||
2184 | break; | ||
2185 | case KVM_REG_MIPS_CP0_SEGCTL2: | ||
2186 | if (!cpu_guest_has_segments) | ||
2187 | return -EINVAL; | ||
2188 | write_gc0_segctl2(v); | ||
2189 | break; | ||
2190 | case KVM_REG_MIPS_CP0_PWBASE: | ||
2191 | if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) | ||
2192 | return -EINVAL; | ||
2193 | write_gc0_pwbase(v); | ||
2194 | break; | ||
2195 | case KVM_REG_MIPS_CP0_PWFIELD: | ||
2196 | if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) | ||
2197 | return -EINVAL; | ||
2198 | write_gc0_pwfield(v); | ||
2199 | break; | ||
2200 | case KVM_REG_MIPS_CP0_PWSIZE: | ||
2201 | if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) | ||
2202 | return -EINVAL; | ||
2203 | write_gc0_pwsize(v); | ||
2204 | break; | ||
2205 | case KVM_REG_MIPS_CP0_WIRED: | ||
2206 | change_gc0_wired(MIPSR6_WIRED_WIRED, v); | ||
2207 | break; | ||
2208 | case KVM_REG_MIPS_CP0_PWCTL: | ||
2209 | if (!cpu_guest_has_htw && !cpu_guest_has_ldpte) | ||
2210 | return -EINVAL; | ||
2211 | write_gc0_pwctl(v); | ||
2212 | break; | ||
2213 | case KVM_REG_MIPS_CP0_HWRENA: | ||
2214 | write_gc0_hwrena(v); | ||
2215 | break; | ||
2216 | case KVM_REG_MIPS_CP0_BADVADDR: | ||
2217 | write_gc0_badvaddr(v); | ||
2218 | break; | ||
2219 | case KVM_REG_MIPS_CP0_BADINSTR: | ||
2220 | if (!cpu_guest_has_badinstr) | ||
2221 | return -EINVAL; | ||
2222 | write_gc0_badinstr(v); | ||
2223 | break; | ||
2224 | case KVM_REG_MIPS_CP0_BADINSTRP: | ||
2225 | if (!cpu_guest_has_badinstrp) | ||
2226 | return -EINVAL; | ||
2227 | write_gc0_badinstrp(v); | ||
2228 | break; | ||
2229 | case KVM_REG_MIPS_CP0_COUNT: | ||
2230 | kvm_mips_write_count(vcpu, v); | ||
2231 | break; | ||
2232 | case KVM_REG_MIPS_CP0_ENTRYHI: | ||
2233 | write_gc0_entryhi(v); | ||
2234 | break; | ||
2235 | case KVM_REG_MIPS_CP0_COMPARE: | ||
2236 | kvm_mips_write_compare(vcpu, v, false); | ||
2237 | break; | ||
2238 | case KVM_REG_MIPS_CP0_STATUS: | ||
2239 | write_gc0_status(v); | ||
2240 | break; | ||
2241 | case KVM_REG_MIPS_CP0_INTCTL: | ||
2242 | write_gc0_intctl(v); | ||
2243 | break; | ||
2244 | case KVM_REG_MIPS_CP0_CAUSE: | ||
2245 | /* | ||
2246 | * If the timer is stopped or started (DC bit) it must look | ||
2247 | * atomic with changes to the timer interrupt pending bit (TI). | ||
2248 | * A timer interrupt should not happen in between. | ||
2249 | */ | ||
2250 | if ((read_gc0_cause() ^ v) & CAUSEF_DC) { | ||
2251 | if (v & CAUSEF_DC) { | ||
2252 | /* disable timer first */ | ||
2253 | kvm_mips_count_disable_cause(vcpu); | ||
2254 | change_gc0_cause((u32)~CAUSEF_DC, v); | ||
2255 | } else { | ||
2256 | /* enable timer last */ | ||
2257 | change_gc0_cause((u32)~CAUSEF_DC, v); | ||
2258 | kvm_mips_count_enable_cause(vcpu); | ||
2259 | } | ||
2260 | } else { | ||
2261 | write_gc0_cause(v); | ||
2262 | } | ||
2263 | break; | ||
2264 | case KVM_REG_MIPS_CP0_EPC: | ||
2265 | write_gc0_epc(v); | ||
2266 | break; | ||
2267 | case KVM_REG_MIPS_CP0_PRID: | ||
2268 | switch (boot_cpu_type()) { | ||
2269 | case CPU_CAVIUM_OCTEON3: | ||
2270 | /* Octeon III has a guest.PRid, but it's read-only */ | ||
2271 | break; | ||
2272 | default: | ||
2273 | kvm_write_c0_guest_prid(cop0, v); | ||
2274 | break; | ||
2275 | } | ||
2276 | break; | ||
2277 | case KVM_REG_MIPS_CP0_EBASE: | ||
2278 | kvm_vz_write_gc0_ebase(v); | ||
2279 | break; | ||
2280 | case KVM_REG_MIPS_CP0_CONFIG: | ||
2281 | cur = read_gc0_config(); | ||
2282 | change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu); | ||
2283 | if (change) { | ||
2284 | v = cur ^ change; | ||
2285 | write_gc0_config(v); | ||
2286 | } | ||
2287 | break; | ||
2288 | case KVM_REG_MIPS_CP0_CONFIG1: | ||
2289 | if (!cpu_guest_has_conf1) | ||
2290 | break; | ||
2291 | cur = read_gc0_config1(); | ||
2292 | change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu); | ||
2293 | if (change) { | ||
2294 | v = cur ^ change; | ||
2295 | write_gc0_config1(v); | ||
2296 | } | ||
2297 | break; | ||
2298 | case KVM_REG_MIPS_CP0_CONFIG2: | ||
2299 | if (!cpu_guest_has_conf2) | ||
2300 | break; | ||
2301 | cur = read_gc0_config2(); | ||
2302 | change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu); | ||
2303 | if (change) { | ||
2304 | v = cur ^ change; | ||
2305 | write_gc0_config2(v); | ||
2306 | } | ||
2307 | break; | ||
2308 | case KVM_REG_MIPS_CP0_CONFIG3: | ||
2309 | if (!cpu_guest_has_conf3) | ||
2310 | break; | ||
2311 | cur = read_gc0_config3(); | ||
2312 | change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu); | ||
2313 | if (change) { | ||
2314 | v = cur ^ change; | ||
2315 | write_gc0_config3(v); | ||
2316 | } | ||
2317 | break; | ||
2318 | case KVM_REG_MIPS_CP0_CONFIG4: | ||
2319 | if (!cpu_guest_has_conf4) | ||
2320 | break; | ||
2321 | cur = read_gc0_config4(); | ||
2322 | change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu); | ||
2323 | if (change) { | ||
2324 | v = cur ^ change; | ||
2325 | write_gc0_config4(v); | ||
2326 | } | ||
2327 | break; | ||
2328 | case KVM_REG_MIPS_CP0_CONFIG5: | ||
2329 | if (!cpu_guest_has_conf5) | ||
2330 | break; | ||
2331 | cur = read_gc0_config5(); | ||
2332 | change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu); | ||
2333 | if (change) { | ||
2334 | v = cur ^ change; | ||
2335 | write_gc0_config5(v); | ||
2336 | } | ||
2337 | break; | ||
2338 | case KVM_REG_MIPS_CP0_CONFIG6: | ||
2339 | cur = kvm_read_sw_gc0_config6(cop0); | ||
2340 | change = (cur ^ v) & kvm_vz_config6_user_wrmask(vcpu); | ||
2341 | if (change) { | ||
2342 | v = cur ^ change; | ||
2343 | kvm_write_sw_gc0_config6(cop0, (int)v); | ||
2344 | } | ||
2345 | break; | ||
2346 | case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f): | ||
2347 | if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) | ||
2348 | return -EINVAL; | ||
2349 | idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); | ||
2350 | if (idx >= ARRAY_SIZE(vcpu->arch.maar)) | ||
2351 | return -EINVAL; | ||
2352 | vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v); | ||
2353 | break; | ||
2354 | case KVM_REG_MIPS_CP0_MAARI: | ||
2355 | if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) | ||
2356 | return -EINVAL; | ||
2357 | kvm_write_maari(vcpu, v); | ||
2358 | break; | ||
2359 | #ifdef CONFIG_64BIT | ||
2360 | case KVM_REG_MIPS_CP0_XCONTEXT: | ||
2361 | write_gc0_xcontext(v); | ||
2362 | break; | ||
2363 | #endif | ||
2364 | case KVM_REG_MIPS_CP0_ERROREPC: | ||
2365 | write_gc0_errorepc(v); | ||
2366 | break; | ||
2367 | case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6: | ||
2368 | idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; | ||
2369 | if (!cpu_guest_has_kscr(idx)) | ||
2370 | return -EINVAL; | ||
2371 | switch (idx) { | ||
2372 | case 2: | ||
2373 | write_gc0_kscratch1(v); | ||
2374 | break; | ||
2375 | case 3: | ||
2376 | write_gc0_kscratch2(v); | ||
2377 | break; | ||
2378 | case 4: | ||
2379 | write_gc0_kscratch3(v); | ||
2380 | break; | ||
2381 | case 5: | ||
2382 | write_gc0_kscratch4(v); | ||
2383 | break; | ||
2384 | case 6: | ||
2385 | write_gc0_kscratch5(v); | ||
2386 | break; | ||
2387 | case 7: | ||
2388 | write_gc0_kscratch6(v); | ||
2389 | break; | ||
2390 | } | ||
2391 | break; | ||
2392 | case KVM_REG_MIPS_COUNT_CTL: | ||
2393 | ret = kvm_mips_set_count_ctl(vcpu, v); | ||
2394 | break; | ||
2395 | case KVM_REG_MIPS_COUNT_RESUME: | ||
2396 | ret = kvm_mips_set_count_resume(vcpu, v); | ||
2397 | break; | ||
2398 | case KVM_REG_MIPS_COUNT_HZ: | ||
2399 | ret = kvm_mips_set_count_hz(vcpu, v); | ||
2400 | break; | ||
2401 | default: | ||
2402 | return -EINVAL; | ||
2403 | } | ||
2404 | return ret; | ||
2405 | } | ||
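The set path is symmetric: KVM_SET_ONE_REG hands the kernel an id and a pointer to read from, and ordering subtleties such as the Cause.DC/TI sequencing above stay entirely on the kernel side. A minimal sketch, again assuming a valid vcpu_fd:

#include <linux/kvm.h>
#include <stdint.h>
#include <sys/ioctl.h>

/* Returns 0 on success, -1 with errno set (e.g. EINVAL) on failure. */
static int set_mips_reg(int vcpu_fd, uint64_t id, uint64_t val)
{
	struct kvm_one_reg reg = {
		.id = id,
		.addr = (uintptr_t)&val, /* kernel reads the value from here */
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}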
2406 | |||
2407 | #define guestid_cache(cpu) (cpu_data[cpu].guestid_cache) | ||
2408 | static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu) | ||
2409 | { | ||
2410 | unsigned long guestid = guestid_cache(cpu); | ||
2411 | |||
2412 | if (!(++guestid & GUESTID_MASK)) { | ||
2413 | if (cpu_has_vtag_icache) | ||
2414 | flush_icache_all(); | ||
2415 | |||
2416 | if (!guestid) /* fix version if needed */ | ||
2417 | guestid = GUESTID_FIRST_VERSION; | ||
2418 | |||
2419 | ++guestid; /* guestid 0 reserved for root */ | ||
2420 | |||
2421 | /* start new guestid cycle */ | ||
2422 | kvm_vz_local_flush_roottlb_all_guests(); | ||
2423 | kvm_vz_local_flush_guesttlb_all(); | ||
2424 | } | ||
2425 | |||
2426 | guestid_cache(cpu) = guestid; | ||
2427 | } | ||
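kvm_vz_get_new_guestid() is a classic versioned-ID allocator: the low GUESTID_MASK bits go to hardware, the bits above count generations, and a wrap of the low field triggers a flush and a new generation so stale stored IDs become cheaply detectable. A standalone sketch of the arithmetic, assuming a hypothetical 8-bit GuestID field:

#include <assert.h>
#include <stdio.h>

#define GUESTID_MASK          0xfful
#define GUESTID_FIRST_VERSION (GUESTID_MASK + 1)	/* 0x100 */
#define GUESTID_VERSION_MASK  (~GUESTID_MASK)

static unsigned long guestid_cache = GUESTID_FIRST_VERSION;

static unsigned long next_guestid(void)
{
	unsigned long guestid = guestid_cache;

	if (!(++guestid & GUESTID_MASK)) {	/* low field wrapped to 0 */
		if (!guestid)			/* whole counter wrapped */
			guestid = GUESTID_FIRST_VERSION;
		++guestid;			/* id 0 is reserved for root */
		/* a real implementation flushes guest TLBs here */
	}
	return guestid_cache = guestid;
}

int main(void)
{
	unsigned long stale = next_guestid();
	int i;

	for (i = 0; i < 256; i++)		/* force a generation change */
		next_guestid();
	/* versions differ, so the stale id is recognisably invalid */
	assert((stale ^ guestid_cache) & GUESTID_VERSION_MASK);
	printf("stale=%#lx now=%#lx\n", stale, guestid_cache);
	return 0;
}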
2428 | |||
2429 | /* Returns 1 if the guest TLB may be clobbered */ | ||
2430 | static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu) | ||
2431 | { | ||
2432 | int ret = 0; | ||
2433 | int i; | ||
2434 | |||
2435 | if (!kvm_request_pending(vcpu)) | ||
2436 | return 0; | ||
2437 | |||
2438 | if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { | ||
2439 | if (cpu_has_guestid) { | ||
2440 | /* Drop all GuestIDs for this VCPU */ | ||
2441 | for_each_possible_cpu(i) | ||
2442 | vcpu->arch.vzguestid[i] = 0; | ||
2443 | /* This will clobber guest TLB contents too */ | ||
2444 | ret = 1; | ||
2445 | } | ||
2446 | /* | ||
2447 | * For Root ASID Dealias (RAD) we don't do anything here, but we | ||
2448 | * still need the request to ensure we recheck asid_flush_mask. | ||
2449 | * We can still return 0 as only the root TLB will be affected | ||
2450 | * by a root ASID flush. | ||
2451 | */ | ||
2452 | } | ||
2453 | |||
2454 | return ret; | ||
2455 | } | ||
2456 | |||
2457 | static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu) | ||
2458 | { | ||
2459 | unsigned int wired = read_gc0_wired(); | ||
2460 | struct kvm_mips_tlb *tlbs; | ||
2461 | int i; | ||
2462 | |||
2463 | /* Expand the wired TLB array if necessary */ | ||
2464 | wired &= MIPSR6_WIRED_WIRED; | ||
2465 | if (wired > vcpu->arch.wired_tlb_limit) { | ||
2466 | tlbs = krealloc(vcpu->arch.wired_tlb, wired * | ||
2467 | sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC); | ||
2468 | if (WARN_ON(!tlbs)) { | ||
2469 | /* Save whatever we can */ | ||
2470 | wired = vcpu->arch.wired_tlb_limit; | ||
2471 | } else { | ||
2472 | vcpu->arch.wired_tlb = tlbs; | ||
2473 | vcpu->arch.wired_tlb_limit = wired; | ||
2474 | } | ||
2475 | } | ||
2476 | |||
2477 | if (wired) | ||
2478 | /* Save wired entries from the guest TLB */ | ||
2479 | kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired); | ||
2480 | /* Invalidate any dropped entries since last time */ | ||
2481 | for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) { | ||
2482 | vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i); | ||
2483 | vcpu->arch.wired_tlb[i].tlb_lo[0] = 0; | ||
2484 | vcpu->arch.wired_tlb[i].tlb_lo[1] = 0; | ||
2485 | vcpu->arch.wired_tlb[i].tlb_mask = 0; | ||
2486 | } | ||
2487 | vcpu->arch.wired_tlb_used = wired; | ||
2488 | } | ||
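kvm_vz_vcpu_save_wired() combines two idioms: grow the backing array lazily with krealloc() (falling back to saving only what fits on allocation failure) and poison entries dropped since the last save so stale mappings can't be reloaded later. A userspace-flavoured sketch of the same pattern (struct names and UNIQUE_HI() are illustrative stand-ins, not the kernel's definitions):

#include <stdlib.h>

struct tlb_entry { unsigned long hi, lo0, lo1, mask; };

struct wired_cache {
	struct tlb_entry *tlb;
	unsigned int limit;	/* allocated entries */
	unsigned int used;	/* valid entries from the last save */
};

#define UNIQUE_HI(i) (0x80000000ul + ((unsigned long)(i) << 13))

static void save_wired(struct wired_cache *c, unsigned int wired)
{
	unsigned int i;

	if (wired > c->limit) {		/* grow on demand, keep old data */
		struct tlb_entry *t = realloc(c->tlb, wired * sizeof(*t));

		if (!t)
			wired = c->limit;	/* save what fits */
		else {
			c->tlb = t;
			c->limit = wired;
		}
	}
	/* ... copy the 'wired' live entries into c->tlb here ... */
	for (i = wired; i < c->used; i++) {	/* poison the dropped tail */
		c->tlb[i].hi = UNIQUE_HI(i);
		c->tlb[i].lo0 = c->tlb[i].lo1 = c->tlb[i].mask = 0;
	}
	c->used = wired;
}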
2489 | |||
2490 | static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu) | ||
2491 | { | ||
2492 | /* Load wired entries into the guest TLB */ | ||
2493 | if (vcpu->arch.wired_tlb) | ||
2494 | kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0, | ||
2495 | vcpu->arch.wired_tlb_used); | ||
2496 | } | ||
2497 | |||
2498 | static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu) | ||
2499 | { | ||
2500 | struct kvm *kvm = vcpu->kvm; | ||
2501 | struct mm_struct *gpa_mm = &kvm->arch.gpa_mm; | ||
2502 | bool migrated; | ||
2503 | |||
2504 | /* | ||
2505 | * Are we entering guest context on a different CPU to last time? | ||
2506 | * If so, the VCPU's guest TLB state on this CPU may be stale. | ||
2507 | */ | ||
2508 | migrated = (vcpu->arch.last_exec_cpu != cpu); | ||
2509 | vcpu->arch.last_exec_cpu = cpu; | ||
2510 | |||
2511 | /* | ||
2512 | * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and | ||
2513 | * remains set until another vcpu is loaded in. As a rule GuestRID | ||
2514 | * remains zeroed when in root context unless the kernel is busy | ||
2515 | * manipulating guest tlb entries. | ||
2516 | */ | ||
2517 | if (cpu_has_guestid) { | ||
2518 | /* | ||
2519 | * Check if our GuestID is of an older version and thus invalid. | ||
2520 | * | ||
2521 | * We also discard the stored GuestID if we've executed on | ||
2522 | * another CPU, as the guest mappings may have changed without | ||
2523 | * hypervisor knowledge. | ||
2524 | */ | ||
2525 | if (migrated || | ||
2526 | (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) & | ||
2527 | GUESTID_VERSION_MASK) { | ||
2528 | kvm_vz_get_new_guestid(cpu, vcpu); | ||
2529 | vcpu->arch.vzguestid[cpu] = guestid_cache(cpu); | ||
2530 | trace_kvm_guestid_change(vcpu, | ||
2531 | vcpu->arch.vzguestid[cpu]); | ||
2532 | } | ||
2533 | |||
2534 | /* Restore GuestID */ | ||
2535 | change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]); | ||
2536 | } else { | ||
2537 | /* | ||
2538 | * The Guest TLB only stores a single guest's TLB state, so | ||
2539 | * flush it if another VCPU has executed on this CPU. | ||
2540 | * | ||
2541 | * We also flush if we've executed on another CPU, as the guest | ||
2542 | * mappings may have changed without hypervisor knowledge. | ||
2543 | */ | ||
2544 | if (migrated || last_exec_vcpu[cpu] != vcpu) | ||
2545 | kvm_vz_local_flush_guesttlb_all(); | ||
2546 | last_exec_vcpu[cpu] = vcpu; | ||
2547 | |||
2548 | /* | ||
2549 | * Root ASID dealiases guest GPA mappings in the root TLB. | ||
2550 | * Allocate new root ASID if needed. | ||
2551 | */ | ||
2552 | if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)) | ||
2553 | get_new_mmu_context(gpa_mm); | ||
2554 | else | ||
2555 | check_mmu_context(gpa_mm); | ||
2556 | } | ||
2557 | } | ||
2558 | |||
2559 | static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | ||
2560 | { | ||
2561 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2562 | bool migrated, all; | ||
2563 | |||
2564 | /* | ||
2565 | * Have we migrated to a different CPU? | ||
2566 | * If so, any old guest TLB state may be stale. | ||
2567 | */ | ||
2568 | migrated = (vcpu->arch.last_sched_cpu != cpu); | ||
2569 | |||
2570 | /* | ||
2571 | * Was this the last VCPU to run on this CPU? | ||
2572 | * If not, any old guest state from this VCPU will have been clobbered. | ||
2573 | */ | ||
2574 | all = migrated || (last_vcpu[cpu] != vcpu); | ||
2575 | last_vcpu[cpu] = vcpu; | ||
2576 | |||
2577 | /* | ||
2578 | * Restore CP0_Wired unconditionally as we clear it after use, and | ||
2579 | * restore wired guest TLB entries (while in guest context). | ||
2580 | */ | ||
2581 | kvm_restore_gc0_wired(cop0); | ||
2582 | if (current->flags & PF_VCPU) { | ||
2583 | tlbw_use_hazard(); | ||
2584 | kvm_vz_vcpu_load_tlb(vcpu, cpu); | ||
2585 | kvm_vz_vcpu_load_wired(vcpu); | ||
2586 | } | ||
2587 | |||
2588 | /* | ||
2589 | * Restore timer state regardless, as e.g. Cause.TI can change over time | ||
2590 | * if left unmaintained. | ||
2591 | */ | ||
2592 | kvm_vz_restore_timer(vcpu); | ||
2593 | |||
2594 | /* Set MC bit if we want to trace guest mode changes */ | ||
2595 | if (kvm_trace_guest_mode_change) | ||
2596 | set_c0_guestctl0(MIPS_GCTL0_MC); | ||
2597 | else | ||
2598 | clear_c0_guestctl0(MIPS_GCTL0_MC); | ||
2599 | |||
2600 | /* Don't bother restoring registers multiple times unless necessary */ | ||
2601 | if (!all) | ||
2602 | return 0; | ||
2603 | |||
2604 | /* | ||
2605 | * Restore config registers first, as some implementations restrict | ||
2606 | * writes to other registers when the corresponding feature bits aren't | ||
2607 | * set. For example Status.CU1 cannot be set unless Config1.FP is set. | ||
2608 | */ | ||
2609 | kvm_restore_gc0_config(cop0); | ||
2610 | if (cpu_guest_has_conf1) | ||
2611 | kvm_restore_gc0_config1(cop0); | ||
2612 | if (cpu_guest_has_conf2) | ||
2613 | kvm_restore_gc0_config2(cop0); | ||
2614 | if (cpu_guest_has_conf3) | ||
2615 | kvm_restore_gc0_config3(cop0); | ||
2616 | if (cpu_guest_has_conf4) | ||
2617 | kvm_restore_gc0_config4(cop0); | ||
2618 | if (cpu_guest_has_conf5) | ||
2619 | kvm_restore_gc0_config5(cop0); | ||
2620 | if (cpu_guest_has_conf6) | ||
2621 | kvm_restore_gc0_config6(cop0); | ||
2622 | if (cpu_guest_has_conf7) | ||
2623 | kvm_restore_gc0_config7(cop0); | ||
2624 | |||
2625 | kvm_restore_gc0_index(cop0); | ||
2626 | kvm_restore_gc0_entrylo0(cop0); | ||
2627 | kvm_restore_gc0_entrylo1(cop0); | ||
2628 | kvm_restore_gc0_context(cop0); | ||
2629 | if (cpu_guest_has_contextconfig) | ||
2630 | kvm_restore_gc0_contextconfig(cop0); | ||
2631 | #ifdef CONFIG_64BIT | ||
2632 | kvm_restore_gc0_xcontext(cop0); | ||
2633 | if (cpu_guest_has_contextconfig) | ||
2634 | kvm_restore_gc0_xcontextconfig(cop0); | ||
2635 | #endif | ||
2636 | kvm_restore_gc0_pagemask(cop0); | ||
2637 | kvm_restore_gc0_pagegrain(cop0); | ||
2638 | kvm_restore_gc0_hwrena(cop0); | ||
2639 | kvm_restore_gc0_badvaddr(cop0); | ||
2640 | kvm_restore_gc0_entryhi(cop0); | ||
2641 | kvm_restore_gc0_status(cop0); | ||
2642 | kvm_restore_gc0_intctl(cop0); | ||
2643 | kvm_restore_gc0_epc(cop0); | ||
2644 | kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0)); | ||
2645 | if (cpu_guest_has_userlocal) | ||
2646 | kvm_restore_gc0_userlocal(cop0); | ||
2647 | |||
2648 | kvm_restore_gc0_errorepc(cop0); | ||
2649 | |||
2650 | /* restore KScratch registers if enabled in guest */ | ||
2651 | if (cpu_guest_has_conf4) { | ||
2652 | if (cpu_guest_has_kscr(2)) | ||
2653 | kvm_restore_gc0_kscratch1(cop0); | ||
2654 | if (cpu_guest_has_kscr(3)) | ||
2655 | kvm_restore_gc0_kscratch2(cop0); | ||
2656 | if (cpu_guest_has_kscr(4)) | ||
2657 | kvm_restore_gc0_kscratch3(cop0); | ||
2658 | if (cpu_guest_has_kscr(5)) | ||
2659 | kvm_restore_gc0_kscratch4(cop0); | ||
2660 | if (cpu_guest_has_kscr(6)) | ||
2661 | kvm_restore_gc0_kscratch5(cop0); | ||
2662 | if (cpu_guest_has_kscr(7)) | ||
2663 | kvm_restore_gc0_kscratch6(cop0); | ||
2664 | } | ||
2665 | |||
2666 | if (cpu_guest_has_badinstr) | ||
2667 | kvm_restore_gc0_badinstr(cop0); | ||
2668 | if (cpu_guest_has_badinstrp) | ||
2669 | kvm_restore_gc0_badinstrp(cop0); | ||
2670 | |||
2671 | if (cpu_guest_has_segments) { | ||
2672 | kvm_restore_gc0_segctl0(cop0); | ||
2673 | kvm_restore_gc0_segctl1(cop0); | ||
2674 | kvm_restore_gc0_segctl2(cop0); | ||
2675 | } | ||
2676 | |||
2677 | /* restore HTW registers */ | ||
2678 | if (cpu_guest_has_htw || cpu_guest_has_ldpte) { | ||
2679 | kvm_restore_gc0_pwbase(cop0); | ||
2680 | kvm_restore_gc0_pwfield(cop0); | ||
2681 | kvm_restore_gc0_pwsize(cop0); | ||
2682 | kvm_restore_gc0_pwctl(cop0); | ||
2683 | } | ||
2684 | |||
2685 | /* restore Root.GuestCtl2 from unused Guest guestctl2 register */ | ||
2686 | if (cpu_has_guestctl2) | ||
2687 | write_c0_guestctl2( | ||
2688 | cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]); | ||
2689 | |||
2690 | /* | ||
2691 | * We should clear the linked load bit to break interrupted atomics. This | ||
2692 | * prevents a SC on the next VCPU from succeeding by matching a LL on | ||
2693 | * the previous VCPU. | ||
2694 | */ | ||
2695 | if (vcpu->kvm->created_vcpus > 1) | ||
2696 | write_gc0_lladdr(0); | ||
2697 | |||
2698 | return 0; | ||
2699 | } | ||
2700 | |||
2701 | static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu) | ||
2702 | { | ||
2703 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
2704 | |||
2705 | if (current->flags & PF_VCPU) | ||
2706 | kvm_vz_vcpu_save_wired(vcpu); | ||
2707 | |||
2708 | kvm_lose_fpu(vcpu); | ||
2709 | |||
2710 | kvm_save_gc0_index(cop0); | ||
2711 | kvm_save_gc0_entrylo0(cop0); | ||
2712 | kvm_save_gc0_entrylo1(cop0); | ||
2713 | kvm_save_gc0_context(cop0); | ||
2714 | if (cpu_guest_has_contextconfig) | ||
2715 | kvm_save_gc0_contextconfig(cop0); | ||
2716 | #ifdef CONFIG_64BIT | ||
2717 | kvm_save_gc0_xcontext(cop0); | ||
2718 | if (cpu_guest_has_contextconfig) | ||
2719 | kvm_save_gc0_xcontextconfig(cop0); | ||
2720 | #endif | ||
2721 | kvm_save_gc0_pagemask(cop0); | ||
2722 | kvm_save_gc0_pagegrain(cop0); | ||
2723 | kvm_save_gc0_wired(cop0); | ||
2724 | /* allow wired TLB entries to be overwritten */ | ||
2725 | clear_gc0_wired(MIPSR6_WIRED_WIRED); | ||
2726 | kvm_save_gc0_hwrena(cop0); | ||
2727 | kvm_save_gc0_badvaddr(cop0); | ||
2728 | kvm_save_gc0_entryhi(cop0); | ||
2729 | kvm_save_gc0_status(cop0); | ||
2730 | kvm_save_gc0_intctl(cop0); | ||
2731 | kvm_save_gc0_epc(cop0); | ||
2732 | kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase()); | ||
2733 | if (cpu_guest_has_userlocal) | ||
2734 | kvm_save_gc0_userlocal(cop0); | ||
2735 | |||
2736 | /* only save implemented config registers */ | ||
2737 | kvm_save_gc0_config(cop0); | ||
2738 | if (cpu_guest_has_conf1) | ||
2739 | kvm_save_gc0_config1(cop0); | ||
2740 | if (cpu_guest_has_conf2) | ||
2741 | kvm_save_gc0_config2(cop0); | ||
2742 | if (cpu_guest_has_conf3) | ||
2743 | kvm_save_gc0_config3(cop0); | ||
2744 | if (cpu_guest_has_conf4) | ||
2745 | kvm_save_gc0_config4(cop0); | ||
2746 | if (cpu_guest_has_conf5) | ||
2747 | kvm_save_gc0_config5(cop0); | ||
2748 | if (cpu_guest_has_conf6) | ||
2749 | kvm_save_gc0_config6(cop0); | ||
2750 | if (cpu_guest_has_conf7) | ||
2751 | kvm_save_gc0_config7(cop0); | ||
2752 | |||
2753 | kvm_save_gc0_errorepc(cop0); | ||
2754 | |||
2755 | /* save KScratch registers if enabled in guest */ | ||
2756 | if (cpu_guest_has_conf4) { | ||
2757 | if (cpu_guest_has_kscr(2)) | ||
2758 | kvm_save_gc0_kscratch1(cop0); | ||
2759 | if (cpu_guest_has_kscr(3)) | ||
2760 | kvm_save_gc0_kscratch2(cop0); | ||
2761 | if (cpu_guest_has_kscr(4)) | ||
2762 | kvm_save_gc0_kscratch3(cop0); | ||
2763 | if (cpu_guest_has_kscr(5)) | ||
2764 | kvm_save_gc0_kscratch4(cop0); | ||
2765 | if (cpu_guest_has_kscr(6)) | ||
2766 | kvm_save_gc0_kscratch5(cop0); | ||
2767 | if (cpu_guest_has_kscr(7)) | ||
2768 | kvm_save_gc0_kscratch6(cop0); | ||
2769 | } | ||
2770 | |||
2771 | if (cpu_guest_has_badinstr) | ||
2772 | kvm_save_gc0_badinstr(cop0); | ||
2773 | if (cpu_guest_has_badinstrp) | ||
2774 | kvm_save_gc0_badinstrp(cop0); | ||
2775 | |||
2776 | if (cpu_guest_has_segments) { | ||
2777 | kvm_save_gc0_segctl0(cop0); | ||
2778 | kvm_save_gc0_segctl1(cop0); | ||
2779 | kvm_save_gc0_segctl2(cop0); | ||
2780 | } | ||
2781 | |||
2782 | /* save HTW registers if enabled in guest */ | ||
2783 | if (cpu_guest_has_ldpte || (cpu_guest_has_htw && | ||
2784 | kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW)) { | ||
2785 | kvm_save_gc0_pwbase(cop0); | ||
2786 | kvm_save_gc0_pwfield(cop0); | ||
2787 | kvm_save_gc0_pwsize(cop0); | ||
2788 | kvm_save_gc0_pwctl(cop0); | ||
2789 | } | ||
2790 | |||
2791 | kvm_vz_save_timer(vcpu); | ||
2792 | |||
2793 | /* save Root.GuestCtl2 in unused Guest guestctl2 register */ | ||
2794 | if (cpu_has_guestctl2) | ||
2795 | cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = | ||
2796 | read_c0_guestctl2(); | ||
2797 | |||
2798 | return 0; | ||
2799 | } | ||
2800 | |||
2801 | /** | ||
2802 | * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB. | ||
2803 | * @size: Number of guest VTLB entries (0 < @size <= root VTLB entries). | ||
2804 | * | ||
2805 | * Attempt to resize the guest VTLB by writing guest Config registers. This is | ||
2806 | * necessary for cores with a shared root/guest TLB to avoid overlap with wired | ||
2807 | * entries in the root VTLB. | ||
2808 | * | ||
2809 | * Returns: The resulting guest VTLB size. | ||
2810 | */ | ||
2811 | static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size) | ||
2812 | { | ||
2813 | unsigned int config4 = 0, ret = 0, limit; | ||
2814 | |||
2815 | /* Write MMUSize - 1 into guest Config registers */ | ||
2816 | if (cpu_guest_has_conf1) | ||
2817 | change_gc0_config1(MIPS_CONF1_TLBS, | ||
2818 | (size - 1) << MIPS_CONF1_TLBS_SHIFT); | ||
2819 | if (cpu_guest_has_conf4) { | ||
2820 | config4 = read_gc0_config4(); | ||
2821 | if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) == | ||
2822 | MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) { | ||
2823 | config4 &= ~MIPS_CONF4_VTLBSIZEEXT; | ||
2824 | config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << | ||
2825 | MIPS_CONF4_VTLBSIZEEXT_SHIFT; | ||
2826 | } else if ((config4 & MIPS_CONF4_MMUEXTDEF) == | ||
2827 | MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) { | ||
2828 | config4 &= ~MIPS_CONF4_MMUSIZEEXT; | ||
2829 | config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << | ||
2830 | MIPS_CONF4_MMUSIZEEXT_SHIFT; | ||
2831 | } | ||
2832 | write_gc0_config4(config4); | ||
2833 | } | ||
2834 | |||
2835 | /* | ||
2836 | * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it | ||
2837 | * would exceed Root.Wired.Limit (Guest.Wired.Wired is cleared so the | ||
2838 | * write isn't dropped). | ||
2839 | */ | ||
2840 | if (cpu_has_mips_r6) { | ||
2841 | limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >> | ||
2842 | MIPSR6_WIRED_LIMIT_SHIFT; | ||
2843 | if (size - 1 <= limit) | ||
2844 | limit = 0; | ||
2845 | write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT); | ||
2846 | } | ||
2847 | |||
2848 | /* Read back MMUSize - 1 */ | ||
2849 | back_to_back_c0_hazard(); | ||
2850 | if (cpu_guest_has_conf1) | ||
2851 | ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >> | ||
2852 | MIPS_CONF1_TLBS_SHIFT; | ||
2853 | if (config4) { | ||
2854 | if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) == | ||
2855 | MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) | ||
2856 | ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >> | ||
2857 | MIPS_CONF4_VTLBSIZEEXT_SHIFT) << | ||
2858 | MIPS_CONF1_TLBS_SIZE; | ||
2859 | else if ((config4 & MIPS_CONF4_MMUEXTDEF) == | ||
2860 | MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) | ||
2861 | ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >> | ||
2862 | MIPS_CONF4_MMUSIZEEXT_SHIFT) << | ||
2863 | MIPS_CONF1_TLBS_SIZE; | ||
2864 | } | ||
2865 | return ret + 1; | ||
2866 | } | ||
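The function above splits MMUSize - 1 across Config1.MMUSize (a 6-bit field) and a Config4 extension field, then reads the same encoding back to learn what the hardware accepted. A standalone sketch of that split, with the shifts simplified to show the arithmetic rather than the exact register layout:

#include <assert.h>
#include <stdio.h>

#define CONF1_TLBS_SIZE 6	/* width of Config1.MMUSize */

static void encode(unsigned int size, unsigned int *mmusize,
		   unsigned int *vtlbsizeext)
{
	*mmusize = (size - 1) & ((1 << CONF1_TLBS_SIZE) - 1);
	*vtlbsizeext = (size - 1) >> CONF1_TLBS_SIZE;
}

static unsigned int decode(unsigned int mmusize, unsigned int vtlbsizeext)
{
	return ((vtlbsizeext << CONF1_TLBS_SIZE) | mmusize) + 1;
}

int main(void)
{
	unsigned int mmusize, ext;

	encode(128, &mmusize, &ext);	/* 128 entries: 127 = 0b1_111111 */
	assert(mmusize == 63 && ext == 1);
	assert(decode(mmusize, ext) == 128);
	printf("size 128 -> MMUSize=%u VTLBSizeExt=%u\n", mmusize, ext);
	return 0;
}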
2867 | |||
2868 | static int kvm_vz_hardware_enable(void) | ||
2869 | { | ||
2870 | unsigned int mmu_size, guest_mmu_size, ftlb_size; | ||
2871 | u64 guest_cvmctl, cvmvmconfig; | ||
2872 | |||
2873 | switch (current_cpu_type()) { | ||
2874 | case CPU_CAVIUM_OCTEON3: | ||
2875 | /* Set up guest timer/perfcount IRQ lines */ | ||
2876 | guest_cvmctl = read_gc0_cvmctl(); | ||
2877 | guest_cvmctl &= ~CVMCTL_IPTI; | ||
2878 | guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT; | ||
2879 | guest_cvmctl &= ~CVMCTL_IPPCI; | ||
2880 | guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT; | ||
2881 | write_gc0_cvmctl(guest_cvmctl); | ||
2882 | |||
2883 | cvmvmconfig = read_c0_cvmvmconfig(); | ||
2884 | /* No I/O hole translation. */ | ||
2885 | cvmvmconfig |= CVMVMCONF_DGHT; | ||
2886 | /* Halve the root MMU size */ | ||
2887 | mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1) | ||
2888 | >> CVMVMCONF_MMUSIZEM1_S) + 1; | ||
2889 | guest_mmu_size = mmu_size / 2; | ||
2890 | mmu_size -= guest_mmu_size; | ||
2891 | cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1; | ||
2892 | cvmvmconfig |= mmu_size - 1; | ||
2893 | write_c0_cvmvmconfig(cvmvmconfig); | ||
2894 | |||
2895 | /* Update our records */ | ||
2896 | current_cpu_data.tlbsize = mmu_size; | ||
2897 | current_cpu_data.tlbsizevtlb = mmu_size; | ||
2898 | current_cpu_data.guest.tlbsize = guest_mmu_size; | ||
2899 | |||
2900 | /* Flush moved entries in new (guest) context */ | ||
2901 | kvm_vz_local_flush_guesttlb_all(); | ||
2902 | break; | ||
2903 | default: | ||
2904 | /* | ||
2905 | * ImgTec cores tend to use a shared root/guest TLB. To avoid | ||
2906 | * overlap of root wired and guest entries, the guest TLB may | ||
2907 | * need resizing. | ||
2908 | */ | ||
2909 | mmu_size = current_cpu_data.tlbsizevtlb; | ||
2910 | ftlb_size = current_cpu_data.tlbsize - mmu_size; | ||
2911 | |||
2912 | /* Try switching to maximum guest VTLB size for flush */ | ||
2913 | guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size); | ||
2914 | current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size; | ||
2915 | kvm_vz_local_flush_guesttlb_all(); | ||
2916 | |||
2917 | /* | ||
2918 | * Reduce to make space for root wired entries and at least 2 | ||
2919 | * root non-wired entries. This does assume that long-term wired | ||
2920 | * entries won't be added later. | ||
2921 | */ | ||
2922 | guest_mmu_size = mmu_size - num_wired_entries() - 2; | ||
2923 | guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size); | ||
2924 | current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size; | ||
2925 | |||
2926 | /* | ||
2927 | * Write the VTLB size, but if another CPU has already written, | ||
2928 | * check it matches or we won't provide a consistent view to the | ||
2929 | * guest. If this ever happens it suggests an asymmetric number | ||
2930 | * of wired entries. | ||
2931 | */ | ||
2932 | if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) && | ||
2933 | WARN(guest_mmu_size != kvm_vz_guest_vtlb_size, | ||
2934 | "Available guest VTLB size mismatch")) | ||
2935 | return -EINVAL; | ||
2936 | break; | ||
2937 | } | ||
2938 | |||
2939 | /* | ||
2940 | * Enable virtualization features granting guest direct control of | ||
2941 | * certain features: | ||
2942 | * CP0=1: Guest coprocessor 0 context. | ||
2943 | * AT=Guest: Guest MMU. | ||
2944 | * CG=1: Hit (virtual address) CACHE operations (optional). | ||
2945 | * CF=1: Guest Config registers. | ||
2946 | * CGI=1: Indexed flush CACHE operations (optional). | ||
2947 | */ | ||
2948 | write_c0_guestctl0(MIPS_GCTL0_CP0 | | ||
2949 | (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) | | ||
2950 | MIPS_GCTL0_CG | MIPS_GCTL0_CF); | ||
2951 | if (cpu_has_guestctl0ext) { | ||
2952 | if (current_cpu_type() != CPU_LOONGSON64) | ||
2953 | set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI); | ||
2954 | else | ||
2955 | clear_c0_guestctl0ext(MIPS_GCTL0EXT_CGI); | ||
2956 | } | ||
2957 | |||
2958 | if (cpu_has_guestid) { | ||
2959 | write_c0_guestctl1(0); | ||
2960 | kvm_vz_local_flush_roottlb_all_guests(); | ||
2961 | |||
2962 | GUESTID_MASK = current_cpu_data.guestid_mask; | ||
2963 | GUESTID_FIRST_VERSION = GUESTID_MASK + 1; | ||
2964 | GUESTID_VERSION_MASK = ~GUESTID_MASK; | ||
2965 | |||
2966 | current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION; | ||
2967 | } | ||
2968 | |||
2969 | /* clear any pending injected virtual guest interrupts */ | ||
2970 | if (cpu_has_guestctl2) | ||
2971 | clear_c0_guestctl2(0x3f << 10); | ||
2972 | |||
2973 | #ifdef CONFIG_CPU_LOONGSON64 | ||
2974 | /* Control guest CCA attribute */ | ||
2975 | if (cpu_has_csr()) | ||
2976 | csr_writel(csr_readl(0xffffffec) | 0x1, 0xffffffec); | ||
2977 | #endif | ||
2978 | |||
2979 | return 0; | ||
2980 | } | ||
2981 | |||
2982 | static void kvm_vz_hardware_disable(void) | ||
2983 | { | ||
2984 | u64 cvmvmconfig; | ||
2985 | unsigned int mmu_size; | ||
2986 | |||
2987 | /* Flush any remaining guest TLB entries */ | ||
2988 | kvm_vz_local_flush_guesttlb_all(); | ||
2989 | |||
2990 | switch (current_cpu_type()) { | ||
2991 | case CPU_CAVIUM_OCTEON3: | ||
2992 | /* | ||
2993 | * Allocate whole TLB for root. Existing guest TLB entries will | ||
2994 | * change ownership to the root TLB. We should be safe though as | ||
2995 | * they've already been flushed above while in guest TLB. | ||
2996 | */ | ||
2997 | cvmvmconfig = read_c0_cvmvmconfig(); | ||
2998 | mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1) | ||
2999 | >> CVMVMCONF_MMUSIZEM1_S) + 1; | ||
3000 | cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1; | ||
3001 | cvmvmconfig |= mmu_size - 1; | ||
3002 | write_c0_cvmvmconfig(cvmvmconfig); | ||
3003 | |||
3004 | /* Update our records */ | ||
3005 | current_cpu_data.tlbsize = mmu_size; | ||
3006 | current_cpu_data.tlbsizevtlb = mmu_size; | ||
3007 | current_cpu_data.guest.tlbsize = 0; | ||
3008 | |||
3009 | /* Flush moved entries in new (root) context */ | ||
3010 | local_flush_tlb_all(); | ||
3011 | break; | ||
3012 | } | ||
3013 | |||
3014 | if (cpu_has_guestid) { | ||
3015 | write_c0_guestctl1(0); | ||
3016 | kvm_vz_local_flush_roottlb_all_guests(); | ||
3017 | } | ||
3018 | } | ||
3019 | |||
3020 | static int kvm_vz_check_extension(struct kvm *kvm, long ext) | ||
3021 | { | ||
3022 | int r; | ||
3023 | |||
3024 | switch (ext) { | ||
3025 | case KVM_CAP_MIPS_VZ: | ||
3026 | /* we wouldn't be here unless cpu_has_vz */ | ||
3027 | r = 1; | ||
3028 | break; | ||
3029 | #ifdef CONFIG_64BIT | ||
3030 | case KVM_CAP_MIPS_64BIT: | ||
3031 | /* We support 64-bit registers/operations and addresses */ | ||
3032 | r = 2; | ||
3033 | break; | ||
3034 | #endif | ||
3035 | case KVM_CAP_IOEVENTFD: | ||
3036 | r = 1; | ||
3037 | break; | ||
3038 | default: | ||
3039 | r = 0; | ||
3040 | break; | ||
3041 | } | ||
3042 | |||
3043 | return r; | ||
3044 | } | ||
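Userspace sees these answers through the standard KVM_CHECK_EXTENSION ioctl. A hedged sketch, assuming vm_fd is an open VM file descriptor (the MIPS capability constants are part of the mainline KVM UAPI):

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void probe_vz_caps(int vm_fd)
{
	/* 1 if the VZ backend answered this VM's creation */
	printf("MIPS VZ:     %d\n",
	       ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_VZ));
	/* 2 means 64-bit registers/operations *and* 64-bit addresses */
	printf("MIPS 64-bit: %d\n",
	       ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_MIPS_64BIT));
}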
3045 | |||
3046 | static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu) | ||
3047 | { | ||
3048 | int i; | ||
3049 | |||
3050 | for_each_possible_cpu(i) | ||
3051 | vcpu->arch.vzguestid[i] = 0; | ||
3052 | |||
3053 | return 0; | ||
3054 | } | ||
3055 | |||
3056 | static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu) | ||
3057 | { | ||
3058 | int cpu; | ||
3059 | |||
3060 | /* | ||
3061 | * If the VCPU is freed and reused as another VCPU, we don't want the | ||
3062 | * matching pointer wrongly hanging around in last_vcpu[] or | ||
3063 | * last_exec_vcpu[]. | ||
3064 | */ | ||
3065 | for_each_possible_cpu(cpu) { | ||
3066 | if (last_vcpu[cpu] == vcpu) | ||
3067 | last_vcpu[cpu] = NULL; | ||
3068 | if (last_exec_vcpu[cpu] == vcpu) | ||
3069 | last_exec_vcpu[cpu] = NULL; | ||
3070 | } | ||
3071 | } | ||
3072 | |||
3073 | static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu) | ||
3074 | { | ||
3075 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
3076 | unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */ | ||
3077 | |||
3078 | /* | ||
3079 | * Start off the timer at the same frequency as the host timer, but the | ||
3080 | * soft timer doesn't handle frequencies greater than 1GHz yet. | ||
3081 | */ | ||
3082 | if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC) | ||
3083 | count_hz = mips_hpt_frequency; | ||
3084 | kvm_mips_init_count(vcpu, count_hz); | ||
3085 | |||
3086 | /* | ||
3087 | * Initialize guest register state to valid architectural reset state. | ||
3088 | */ | ||
3089 | |||
3090 | /* PageGrain */ | ||
3091 | if (cpu_has_mips_r5 || cpu_has_mips_r6) | ||
3092 | kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC); | ||
3093 | /* Wired */ | ||
3094 | if (cpu_has_mips_r6) | ||
3095 | kvm_write_sw_gc0_wired(cop0, | ||
3096 | read_gc0_wired() & MIPSR6_WIRED_LIMIT); | ||
3097 | /* Status */ | ||
3098 | kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL); | ||
3099 | if (cpu_has_mips_r5 || cpu_has_mips_r6) | ||
3100 | kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status()); | ||
3101 | /* IntCtl */ | ||
3102 | kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() & | ||
3103 | (INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI)); | ||
3104 | /* PRId */ | ||
3105 | kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id); | ||
3106 | /* EBase */ | ||
3107 | kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id); | ||
3108 | /* Config */ | ||
3109 | kvm_save_gc0_config(cop0); | ||
3110 | /* architecturally writable (e.g. from guest) */ | ||
3111 | kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK, | ||
3112 | _page_cachable_default >> _CACHE_SHIFT); | ||
3113 | /* architecturally read only, but maybe writable from root */ | ||
3114 | kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config()); | ||
3115 | if (cpu_guest_has_conf1) { | ||
3116 | kvm_set_sw_gc0_config(cop0, MIPS_CONF_M); | ||
3117 | /* Config1 */ | ||
3118 | kvm_save_gc0_config1(cop0); | ||
3119 | /* architecturally read only, but maybe writable from root */ | ||
3120 | kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 | | ||
3121 | MIPS_CONF1_MD | | ||
3122 | MIPS_CONF1_PC | | ||
3123 | MIPS_CONF1_WR | | ||
3124 | MIPS_CONF1_CA | | ||
3125 | MIPS_CONF1_FP); | ||
3126 | } | ||
3127 | if (cpu_guest_has_conf2) { | ||
3128 | kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M); | ||
3129 | /* Config2 */ | ||
3130 | kvm_save_gc0_config2(cop0); | ||
3131 | } | ||
3132 | if (cpu_guest_has_conf3) { | ||
3133 | kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M); | ||
3134 | /* Config3 */ | ||
3135 | kvm_save_gc0_config3(cop0); | ||
3136 | /* architecturally writable (e.g. from guest) */ | ||
3137 | kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE); | ||
3138 | /* architecturally read only, but maybe writable from root */ | ||
3139 | kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA | | ||
3140 | MIPS_CONF3_BPG | | ||
3141 | MIPS_CONF3_ULRI | | ||
3142 | MIPS_CONF3_DSP | | ||
3143 | MIPS_CONF3_CTXTC | | ||
3144 | MIPS_CONF3_ITL | | ||
3145 | MIPS_CONF3_LPA | | ||
3146 | MIPS_CONF3_VEIC | | ||
3147 | MIPS_CONF3_VINT | | ||
3148 | MIPS_CONF3_SP | | ||
3149 | MIPS_CONF3_CDMM | | ||
3150 | MIPS_CONF3_MT | | ||
3151 | MIPS_CONF3_SM | | ||
3152 | MIPS_CONF3_TL); | ||
3153 | } | ||
3154 | if (cpu_guest_has_conf4) { | ||
3155 | kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M); | ||
3156 | /* Config4 */ | ||
3157 | kvm_save_gc0_config4(cop0); | ||
3158 | } | ||
3159 | if (cpu_guest_has_conf5) { | ||
3160 | kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M); | ||
3161 | /* Config5 */ | ||
3162 | kvm_save_gc0_config5(cop0); | ||
3163 | /* architecturally writable (e.g. from guest) */ | ||
3164 | kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K | | ||
3165 | MIPS_CONF5_CV | | ||
3166 | MIPS_CONF5_MSAEN | | ||
3167 | MIPS_CONF5_UFE | | ||
3168 | MIPS_CONF5_FRE | | ||
3169 | MIPS_CONF5_SBRI | | ||
3170 | MIPS_CONF5_UFR); | ||
3171 | /* architecturally read only, but maybe writable from root */ | ||
3172 | kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP); | ||
3173 | } | ||
3174 | |||
3175 | if (cpu_guest_has_contextconfig) { | ||
3176 | /* ContextConfig */ | ||
3177 | kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0); | ||
3178 | #ifdef CONFIG_64BIT | ||
3179 | /* XContextConfig */ | ||
3180 | /* bits SEGBITS-13+3:4 set */ | ||
3181 | kvm_write_sw_gc0_xcontextconfig(cop0, | ||
3182 | ((1ull << (cpu_vmbits - 13)) - 1) << 4); | ||
3183 | #endif | ||
3184 | } | ||
3185 | |||
3186 | /* Implementation dependent, use the legacy layout */ | ||
3187 | if (cpu_guest_has_segments) { | ||
3188 | /* SegCtl0, SegCtl1, SegCtl2 */ | ||
3189 | kvm_write_sw_gc0_segctl0(cop0, 0x00200010); | ||
3190 | kvm_write_sw_gc0_segctl1(cop0, 0x00000002 | | ||
3191 | (_page_cachable_default >> _CACHE_SHIFT) << | ||
3192 | (16 + MIPS_SEGCFG_C_SHIFT)); | ||
3193 | kvm_write_sw_gc0_segctl2(cop0, 0x00380438); | ||
3194 | } | ||
3195 | |||
3196 | /* reset HTW registers */ | ||
3197 | if (cpu_guest_has_htw && (cpu_has_mips_r5 || cpu_has_mips_r6)) { | ||
3198 | /* PWField */ | ||
3199 | kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302); | ||
3200 | /* PWSize */ | ||
3201 | kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT); | ||
3202 | } | ||
3203 | |||
3204 | /* start with no pending virtual guest interrupts */ | ||
3205 | if (cpu_has_guestctl2) | ||
3206 | cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0; | ||
3207 | |||
3208 | /* Put PC at reset vector */ | ||
3209 | vcpu->arch.pc = CKSEG1ADDR(0x1fc00000); | ||
3210 | |||
3211 | return 0; | ||
3212 | } | ||
3213 | |||
3214 | static void kvm_vz_flush_shadow_all(struct kvm *kvm) | ||
3215 | { | ||
3216 | if (cpu_has_guestid) { | ||
3217 | /* Flush GuestID for each VCPU individually */ | ||
3218 | kvm_flush_remote_tlbs(kvm); | ||
3219 | } else { | ||
3220 | /* | ||
3221 | * For each CPU there is a single GPA ASID used by all VCPUs in | ||
3222 | * the VM, so it doesn't make sense for the VCPUs to handle | ||
3223 | * invalidation of these ASIDs individually. | ||
3224 | * | ||
3225 | * Instead mark all CPUs as needing ASID invalidation in | ||
3226 | * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to | ||
3227 | * kick any running VCPUs so they check asid_flush_mask. | ||
3228 | */ | ||
3229 | cpumask_setall(&kvm->arch.asid_flush_mask); | ||
3230 | kvm_flush_remote_tlbs(kvm); | ||
3231 | } | ||
3232 | } | ||
3233 | |||
3234 | static void kvm_vz_flush_shadow_memslot(struct kvm *kvm, | ||
3235 | const struct kvm_memory_slot *slot) | ||
3236 | { | ||
3237 | kvm_vz_flush_shadow_all(kvm); | ||
3238 | } | ||
3239 | |||
3240 | static void kvm_vz_vcpu_reenter(struct kvm_vcpu *vcpu) | ||
3241 | { | ||
3242 | int cpu = smp_processor_id(); | ||
3243 | int preserve_guest_tlb; | ||
3244 | |||
3245 | preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu); | ||
3246 | |||
3247 | if (preserve_guest_tlb) | ||
3248 | kvm_vz_vcpu_save_wired(vcpu); | ||
3249 | |||
3250 | kvm_vz_vcpu_load_tlb(vcpu, cpu); | ||
3251 | |||
3252 | if (preserve_guest_tlb) | ||
3253 | kvm_vz_vcpu_load_wired(vcpu); | ||
3254 | } | ||
3255 | |||
3256 | static int kvm_vz_vcpu_run(struct kvm_vcpu *vcpu) | ||
3257 | { | ||
3258 | int cpu = smp_processor_id(); | ||
3259 | int r; | ||
3260 | |||
3261 | kvm_vz_acquire_htimer(vcpu); | ||
3262 | /* Check if we have any exceptions/interrupts pending */ | ||
3263 | kvm_mips_deliver_interrupts(vcpu, read_gc0_cause()); | ||
3264 | |||
3265 | kvm_vz_check_requests(vcpu, cpu); | ||
3266 | kvm_vz_vcpu_load_tlb(vcpu, cpu); | ||
3267 | kvm_vz_vcpu_load_wired(vcpu); | ||
3268 | |||
3269 | r = vcpu->arch.vcpu_run(vcpu); | ||
3270 | |||
3271 | kvm_vz_vcpu_save_wired(vcpu); | ||
3272 | |||
3273 | return r; | ||
3274 | } | ||
3275 | |||
3276 | static struct kvm_mips_callbacks kvm_vz_callbacks = { | ||
3277 | .handle_cop_unusable = kvm_trap_vz_handle_cop_unusable, | ||
3278 | .handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss, | ||
3279 | .handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss, | ||
3280 | .handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss, | ||
3281 | .handle_addr_err_st = kvm_trap_vz_no_handler, | ||
3282 | .handle_addr_err_ld = kvm_trap_vz_no_handler, | ||
3283 | .handle_syscall = kvm_trap_vz_no_handler, | ||
3284 | .handle_res_inst = kvm_trap_vz_no_handler, | ||
3285 | .handle_break = kvm_trap_vz_no_handler, | ||
3286 | .handle_msa_disabled = kvm_trap_vz_handle_msa_disabled, | ||
3287 | .handle_guest_exit = kvm_trap_vz_handle_guest_exit, | ||
3288 | |||
3289 | .hardware_enable = kvm_vz_hardware_enable, | ||
3290 | .hardware_disable = kvm_vz_hardware_disable, | ||
3291 | .check_extension = kvm_vz_check_extension, | ||
3292 | .vcpu_init = kvm_vz_vcpu_init, | ||
3293 | .vcpu_uninit = kvm_vz_vcpu_uninit, | ||
3294 | .vcpu_setup = kvm_vz_vcpu_setup, | ||
3295 | .flush_shadow_all = kvm_vz_flush_shadow_all, | ||
3296 | .flush_shadow_memslot = kvm_vz_flush_shadow_memslot, | ||
3297 | .gva_to_gpa = kvm_vz_gva_to_gpa_cb, | ||
3298 | .queue_timer_int = kvm_vz_queue_timer_int_cb, | ||
3299 | .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb, | ||
3300 | .queue_io_int = kvm_vz_queue_io_int_cb, | ||
3301 | .dequeue_io_int = kvm_vz_dequeue_io_int_cb, | ||
3302 | .irq_deliver = kvm_vz_irq_deliver_cb, | ||
3303 | .irq_clear = kvm_vz_irq_clear_cb, | ||
3304 | .num_regs = kvm_vz_num_regs, | ||
3305 | .copy_reg_indices = kvm_vz_copy_reg_indices, | ||
3306 | .get_one_reg = kvm_vz_get_one_reg, | ||
3307 | .set_one_reg = kvm_vz_set_one_reg, | ||
3308 | .vcpu_load = kvm_vz_vcpu_load, | ||
3309 | .vcpu_put = kvm_vz_vcpu_put, | ||
3310 | .vcpu_run = kvm_vz_vcpu_run, | ||
3311 | .vcpu_reenter = kvm_vz_vcpu_reenter, | ||
3312 | }; | ||
3313 | |||
3314 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) | ||
3315 | { | ||
3316 | if (!cpu_has_vz) | ||
3317 | return -ENODEV; | ||
3318 | |||
3319 | /* | ||
3320 | * VZ requires at least 2 KScratch registers, so it should have been | ||
3321 | * possible to allocate pgd_reg. | ||
3322 | */ | ||
3323 | if (WARN(pgd_reg == -1, | ||
3324 | "pgd_reg not allocated even though cpu_has_vz\n")) | ||
3325 | return -ENODEV; | ||
3326 | |||
3327 | pr_info("Starting KVM with MIPS VZ extensions\n"); | ||
3328 | |||
3329 | *install_callbacks = &kvm_vz_callbacks; | ||
3330 | return 0; | ||
3331 | } | ||