author     2025-03-08 22:04:20 +0800
committer  2025-03-08 22:04:20 +0800
commit     a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a (patch)
tree       84f21bd0bf7071bc5fc7dd989e77d7ceb5476682 /arch/mips/kvm/mips.c
download   ohosKernel-a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a.tar.gz
           ohosKernel-a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a.zip
Initial commit: OpenHarmony-v4.0-Release
Diffstat (limited to 'arch/mips/kvm/mips.c')
-rw-r--r--   arch/mips/kvm/mips.c   1701
1 file changed, 1701 insertions, 0 deletions
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
new file mode 100644
index 000000000..3d6a7f582
--- /dev/null
+++ b/arch/mips/kvm/mips.c
@@ -0,0 +1,1701 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * KVM/MIPS: MIPS specific KVM APIs | ||
7 | * | ||
8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. | ||
9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> | ||
10 | */ | ||
11 | |||
12 | #include <linux/bitops.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/err.h> | ||
15 | #include <linux/kdebug.h> | ||
16 | #include <linux/module.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | #include <linux/vmalloc.h> | ||
19 | #include <linux/sched/signal.h> | ||
20 | #include <linux/fs.h> | ||
21 | #include <linux/memblock.h> | ||
22 | #include <linux/pgtable.h> | ||
23 | |||
24 | #include <asm/fpu.h> | ||
25 | #include <asm/page.h> | ||
26 | #include <asm/cacheflush.h> | ||
27 | #include <asm/mmu_context.h> | ||
28 | #include <asm/pgalloc.h> | ||
29 | |||
30 | #include <linux/kvm_host.h> | ||
31 | |||
32 | #include "interrupt.h" | ||
33 | #include "commpage.h" | ||
34 | |||
35 | #define CREATE_TRACE_POINTS | ||
36 | #include "trace.h" | ||
37 | |||
38 | #ifndef VECTORSPACING | ||
39 | #define VECTORSPACING 0x100 /* for EI/VI mode */ | ||
40 | #endif | ||
41 | |||
42 | struct kvm_stats_debugfs_item debugfs_entries[] = { | ||
43 | VCPU_STAT("wait", wait_exits), | ||
44 | VCPU_STAT("cache", cache_exits), | ||
45 | VCPU_STAT("signal", signal_exits), | ||
46 | VCPU_STAT("interrupt", int_exits), | ||
47 | VCPU_STAT("cop_unusable", cop_unusable_exits), | ||
48 | VCPU_STAT("tlbmod", tlbmod_exits), | ||
49 | VCPU_STAT("tlbmiss_ld", tlbmiss_ld_exits), | ||
50 | VCPU_STAT("tlbmiss_st", tlbmiss_st_exits), | ||
51 | VCPU_STAT("addrerr_st", addrerr_st_exits), | ||
52 | VCPU_STAT("addrerr_ld", addrerr_ld_exits), | ||
53 | VCPU_STAT("syscall", syscall_exits), | ||
54 | VCPU_STAT("resvd_inst", resvd_inst_exits), | ||
55 | VCPU_STAT("break_inst", break_inst_exits), | ||
56 | VCPU_STAT("trap_inst", trap_inst_exits), | ||
57 | VCPU_STAT("msa_fpe", msa_fpe_exits), | ||
58 | VCPU_STAT("fpe", fpe_exits), | ||
59 | VCPU_STAT("msa_disabled", msa_disabled_exits), | ||
60 | VCPU_STAT("flush_dcache", flush_dcache_exits), | ||
61 | #ifdef CONFIG_KVM_MIPS_VZ | ||
62 | VCPU_STAT("vz_gpsi", vz_gpsi_exits), | ||
63 | VCPU_STAT("vz_gsfc", vz_gsfc_exits), | ||
64 | VCPU_STAT("vz_hc", vz_hc_exits), | ||
65 | VCPU_STAT("vz_grr", vz_grr_exits), | ||
66 | VCPU_STAT("vz_gva", vz_gva_exits), | ||
67 | VCPU_STAT("vz_ghfc", vz_ghfc_exits), | ||
68 | VCPU_STAT("vz_gpa", vz_gpa_exits), | ||
69 | VCPU_STAT("vz_resvd", vz_resvd_exits), | ||
70 | #ifdef CONFIG_CPU_LOONGSON64 | ||
71 | VCPU_STAT("vz_cpucfg", vz_cpucfg_exits), | ||
72 | #endif | ||
73 | #endif | ||
74 | VCPU_STAT("halt_successful_poll", halt_successful_poll), | ||
75 | VCPU_STAT("halt_attempted_poll", halt_attempted_poll), | ||
76 | VCPU_STAT("halt_poll_invalid", halt_poll_invalid), | ||
77 | VCPU_STAT("halt_wakeup", halt_wakeup), | ||
78 | VCPU_STAT("halt_poll_success_ns", halt_poll_success_ns), | ||
79 | VCPU_STAT("halt_poll_fail_ns", halt_poll_fail_ns), | ||
80 | {NULL} | ||
81 | }; | ||
82 | |||
83 | bool kvm_trace_guest_mode_change; | ||
84 | |||
85 | int kvm_guest_mode_change_trace_reg(void) | ||
86 | { | ||
87 | kvm_trace_guest_mode_change = true; | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | void kvm_guest_mode_change_trace_unreg(void) | ||
92 | { | ||
93 | kvm_trace_guest_mode_change = false; | ||
94 | } | ||
95 | |||
96 | /* | ||
97 | * XXXKYMA: We are simulating a processor that has the WII bit set in | ||
98 | * Config7, so we are "runnable" if interrupts are pending | ||
99 | */ | ||
100 | int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu) | ||
101 | { | ||
102 | return !!(vcpu->arch.pending_exceptions); | ||
103 | } | ||
104 | |||
105 | bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu) | ||
106 | { | ||
107 | return false; | ||
108 | } | ||
109 | |||
110 | int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu) | ||
111 | { | ||
112 | return 1; | ||
113 | } | ||
114 | |||
115 | int kvm_arch_hardware_enable(void) | ||
116 | { | ||
117 | return kvm_mips_callbacks->hardware_enable(); | ||
118 | } | ||
119 | |||
120 | void kvm_arch_hardware_disable(void) | ||
121 | { | ||
122 | kvm_mips_callbacks->hardware_disable(); | ||
123 | } | ||
124 | |||
125 | int kvm_arch_hardware_setup(void *opaque) | ||
126 | { | ||
127 | return 0; | ||
128 | } | ||
129 | |||
130 | int kvm_arch_check_processor_compat(void *opaque) | ||
131 | { | ||
132 | return 0; | ||
133 | } | ||
134 | |||
135 | extern void kvm_init_loongson_ipi(struct kvm *kvm); | ||
136 | |||
137 | int kvm_arch_init_vm(struct kvm *kvm, unsigned long type) | ||
138 | { | ||
139 | switch (type) { | ||
140 | case KVM_VM_MIPS_AUTO: | ||
141 | break; | ||
142 | #ifdef CONFIG_KVM_MIPS_VZ | ||
143 | case KVM_VM_MIPS_VZ: | ||
144 | #else | ||
145 | case KVM_VM_MIPS_TE: | ||
146 | #endif | ||
147 | break; | ||
148 | default: | ||
149 | /* Unsupported KVM type */ | ||
150 | return -EINVAL; | ||
151 | }; | ||
152 | |||
153 | /* Allocate page table to map GPA -> RPA */ | ||
154 | kvm->arch.gpa_mm.pgd = kvm_pgd_alloc(); | ||
155 | if (!kvm->arch.gpa_mm.pgd) | ||
156 | return -ENOMEM; | ||
157 | |||
158 | #ifdef CONFIG_CPU_LOONGSON64 | ||
159 | kvm_init_loongson_ipi(kvm); | ||
160 | #endif | ||
161 | |||
162 | return 0; | ||
163 | } | ||
164 | |||
165 | void kvm_mips_free_vcpus(struct kvm *kvm) | ||
166 | { | ||
167 | unsigned int i; | ||
168 | struct kvm_vcpu *vcpu; | ||
169 | |||
170 | kvm_for_each_vcpu(i, vcpu, kvm) { | ||
171 | kvm_vcpu_destroy(vcpu); | ||
172 | } | ||
173 | |||
174 | mutex_lock(&kvm->lock); | ||
175 | |||
176 | for (i = 0; i < atomic_read(&kvm->online_vcpus); i++) | ||
177 | kvm->vcpus[i] = NULL; | ||
178 | |||
179 | atomic_set(&kvm->online_vcpus, 0); | ||
180 | |||
181 | mutex_unlock(&kvm->lock); | ||
182 | } | ||
183 | |||
184 | static void kvm_mips_free_gpa_pt(struct kvm *kvm) | ||
185 | { | ||
186 | /* It should always be safe to remove after flushing the whole range */ | ||
187 | WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0)); | ||
188 | pgd_free(NULL, kvm->arch.gpa_mm.pgd); | ||
189 | } | ||
190 | |||
191 | void kvm_arch_destroy_vm(struct kvm *kvm) | ||
192 | { | ||
193 | kvm_mips_free_vcpus(kvm); | ||
194 | kvm_mips_free_gpa_pt(kvm); | ||
195 | } | ||
196 | |||
197 | long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl, | ||
198 | unsigned long arg) | ||
199 | { | ||
200 | return -ENOIOCTLCMD; | ||
201 | } | ||
202 | |||
203 | void kvm_arch_flush_shadow_all(struct kvm *kvm) | ||
204 | { | ||
205 | /* Flush whole GPA */ | ||
206 | kvm_mips_flush_gpa_pt(kvm, 0, ~0); | ||
207 | |||
208 | /* Let implementation do the rest */ | ||
209 | kvm_mips_callbacks->flush_shadow_all(kvm); | ||
210 | } | ||
211 | |||
212 | void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | ||
213 | struct kvm_memory_slot *slot) | ||
214 | { | ||
215 | /* | ||
216 | * The slot has been made invalid (ready for moving or deletion), so we | ||
217 | * need to ensure that it can no longer be accessed by any guest VCPUs. | ||
218 | */ | ||
219 | |||
220 | spin_lock(&kvm->mmu_lock); | ||
221 | /* Flush slot from GPA */ | ||
222 | kvm_mips_flush_gpa_pt(kvm, slot->base_gfn, | ||
223 | slot->base_gfn + slot->npages - 1); | ||
224 | /* Let implementation do the rest */ | ||
225 | kvm_mips_callbacks->flush_shadow_memslot(kvm, slot); | ||
226 | spin_unlock(&kvm->mmu_lock); | ||
227 | } | ||
228 | |||
229 | int kvm_arch_prepare_memory_region(struct kvm *kvm, | ||
230 | struct kvm_memory_slot *memslot, | ||
231 | const struct kvm_userspace_memory_region *mem, | ||
232 | enum kvm_mr_change change) | ||
233 | { | ||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | void kvm_arch_commit_memory_region(struct kvm *kvm, | ||
238 | const struct kvm_userspace_memory_region *mem, | ||
239 | struct kvm_memory_slot *old, | ||
240 | const struct kvm_memory_slot *new, | ||
241 | enum kvm_mr_change change) | ||
242 | { | ||
243 | int needs_flush; | ||
244 | |||
245 | kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n", | ||
246 | __func__, kvm, mem->slot, mem->guest_phys_addr, | ||
247 | mem->memory_size, mem->userspace_addr); | ||
248 | |||
249 | /* | ||
250 | * If dirty page logging is enabled, write protect all pages in the slot | ||
251 | * ready for dirty logging. | ||
252 | * | ||
253 | * There is no need to do this in any of the following cases: | ||
254 | * CREATE: No dirty mappings will already exist. | ||
255 | * MOVE/DELETE: The old mappings will already have been cleaned up by | ||
256 | * kvm_arch_flush_shadow_memslot() | ||
257 | */ | ||
258 | if (change == KVM_MR_FLAGS_ONLY && | ||
259 | (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) && | ||
260 | new->flags & KVM_MEM_LOG_DIRTY_PAGES)) { | ||
261 | spin_lock(&kvm->mmu_lock); | ||
262 | /* Write protect GPA page table entries */ | ||
263 | needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn, | ||
264 | new->base_gfn + new->npages - 1); | ||
265 | /* Let implementation do the rest */ | ||
266 | if (needs_flush) | ||
267 | kvm_mips_callbacks->flush_shadow_memslot(kvm, new); | ||
268 | spin_unlock(&kvm->mmu_lock); | ||
269 | } | ||
270 | } | ||
271 | |||
272 | static inline void dump_handler(const char *symbol, void *start, void *end) | ||
273 | { | ||
274 | u32 *p; | ||
275 | |||
276 | pr_debug("LEAF(%s)\n", symbol); | ||
277 | |||
278 | pr_debug("\t.set push\n"); | ||
279 | pr_debug("\t.set noreorder\n"); | ||
280 | |||
281 | for (p = start; p < (u32 *)end; ++p) | ||
282 | pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p); | ||
283 | |||
284 | pr_debug("\t.set\tpop\n"); | ||
285 | |||
286 | pr_debug("\tEND(%s)\n", symbol); | ||
287 | } | ||
288 | |||
289 | /* low level hrtimer wake routine */ | ||
290 | static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer) | ||
291 | { | ||
292 | struct kvm_vcpu *vcpu; | ||
293 | |||
294 | vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer); | ||
295 | |||
296 | kvm_mips_callbacks->queue_timer_int(vcpu); | ||
297 | |||
298 | vcpu->arch.wait = 0; | ||
299 | rcuwait_wake_up(&vcpu->wait); | ||
300 | |||
301 | return kvm_mips_count_timeout(vcpu); | ||
302 | } | ||
303 | |||
304 | int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id) | ||
305 | { | ||
306 | return 0; | ||
307 | } | ||
308 | |||
309 | int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu) | ||
310 | { | ||
311 | int err, size; | ||
312 | void *gebase, *p, *handler, *refill_start, *refill_end; | ||
313 | int i; | ||
314 | |||
315 | kvm_debug("kvm @ %p: create cpu %d at %p\n", | ||
316 | vcpu->kvm, vcpu->vcpu_id, vcpu); | ||
317 | |||
318 | err = kvm_mips_callbacks->vcpu_init(vcpu); | ||
319 | if (err) | ||
320 | return err; | ||
321 | |||
322 | hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC, | ||
323 | HRTIMER_MODE_REL); | ||
324 | vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup; | ||
325 | |||
326 | /* | ||
327 | * Allocate space for host mode exception handlers that handle | ||
328 | * guest mode exits | ||
329 | */ | ||
330 | if (cpu_has_veic || cpu_has_vint) | ||
331 | size = 0x200 + VECTORSPACING * 64; | ||
332 | else | ||
333 | size = 0x4000; | ||
334 | |||
335 | gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL); | ||
336 | |||
337 | if (!gebase) { | ||
338 | err = -ENOMEM; | ||
339 | goto out_uninit_vcpu; | ||
340 | } | ||
341 | kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n", | ||
342 | ALIGN(size, PAGE_SIZE), gebase); | ||
343 | |||
344 | /* | ||
345 | * Check new ebase actually fits in CP0_EBase. The lack of a write gate | ||
346 | * limits us to the low 512MB of physical address space. If the memory | ||
347 | * we allocate is out of range, just give up now. | ||
348 | */ | ||
349 | if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) { | ||
350 | kvm_err("CP0_EBase.WG required for guest exception base %pK\n", | ||
351 | gebase); | ||
352 | err = -ENOMEM; | ||
353 | goto out_free_gebase; | ||
354 | } | ||
355 | |||
356 | /* Save new ebase */ | ||
357 | vcpu->arch.guest_ebase = gebase; | ||
358 | |||
359 | /* Build guest exception vectors dynamically in unmapped memory */ | ||
360 | handler = gebase + 0x2000; | ||
361 | |||
362 | /* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */ | ||
363 | refill_start = gebase; | ||
364 | if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT)) | ||
365 | refill_start += 0x080; | ||
366 | refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler); | ||
367 | |||
368 | /* General Exception Entry point */ | ||
369 | kvm_mips_build_exception(gebase + 0x180, handler); | ||
370 | |||
371 | /* For vectored interrupts poke the exception code @ all offsets 0-7 */ | ||
372 | for (i = 0; i < 8; i++) { | ||
373 | kvm_debug("L1 Vectored handler @ %p\n", | ||
374 | gebase + 0x200 + (i * VECTORSPACING)); | ||
375 | kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING, | ||
376 | handler); | ||
377 | } | ||
378 | |||
379 | /* General exit handler */ | ||
380 | p = handler; | ||
381 | p = kvm_mips_build_exit(p); | ||
382 | |||
383 | /* Guest entry routine */ | ||
384 | vcpu->arch.vcpu_run = p; | ||
385 | p = kvm_mips_build_vcpu_run(p); | ||
386 | |||
387 | /* Dump the generated code */ | ||
388 | pr_debug("#include <asm/asm.h>\n"); | ||
389 | pr_debug("#include <asm/regdef.h>\n"); | ||
390 | pr_debug("\n"); | ||
391 | dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p); | ||
392 | dump_handler("kvm_tlb_refill", refill_start, refill_end); | ||
393 | dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200); | ||
394 | dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run); | ||
395 | |||
396 | /* Invalidate the icache for these ranges */ | ||
397 | flush_icache_range((unsigned long)gebase, | ||
398 | (unsigned long)gebase + ALIGN(size, PAGE_SIZE)); | ||
399 | |||
400 | /* | ||
401 | * Allocate comm page for guest kernel, a TLB will be reserved for | ||
402 | * mapping GVA @ 0xFFFF8000 to this page | ||
403 | */ | ||
404 | vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL); | ||
405 | |||
406 | if (!vcpu->arch.kseg0_commpage) { | ||
407 | err = -ENOMEM; | ||
408 | goto out_free_gebase; | ||
409 | } | ||
410 | |||
411 | kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage); | ||
412 | kvm_mips_commpage_init(vcpu); | ||
413 | |||
414 | /* Init */ | ||
415 | vcpu->arch.last_sched_cpu = -1; | ||
416 | vcpu->arch.last_exec_cpu = -1; | ||
417 | |||
418 | /* Initial guest state */ | ||
419 | err = kvm_mips_callbacks->vcpu_setup(vcpu); | ||
420 | if (err) | ||
421 | goto out_free_commpage; | ||
422 | |||
423 | return 0; | ||
424 | |||
425 | out_free_commpage: | ||
426 | kfree(vcpu->arch.kseg0_commpage); | ||
427 | out_free_gebase: | ||
428 | kfree(gebase); | ||
429 | out_uninit_vcpu: | ||
430 | kvm_mips_callbacks->vcpu_uninit(vcpu); | ||
431 | return err; | ||
432 | } | ||
433 | |||
434 | void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu) | ||
435 | { | ||
436 | hrtimer_cancel(&vcpu->arch.comparecount_timer); | ||
437 | |||
438 | kvm_mips_dump_stats(vcpu); | ||
439 | |||
440 | kvm_mmu_free_memory_caches(vcpu); | ||
441 | kfree(vcpu->arch.guest_ebase); | ||
442 | kfree(vcpu->arch.kseg0_commpage); | ||
443 | |||
444 | kvm_mips_callbacks->vcpu_uninit(vcpu); | ||
445 | } | ||
446 | |||
447 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, | ||
448 | struct kvm_guest_debug *dbg) | ||
449 | { | ||
450 | return -ENOIOCTLCMD; | ||
451 | } | ||
452 | |||
453 | int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) | ||
454 | { | ||
455 | int r = -EINTR; | ||
456 | |||
457 | vcpu_load(vcpu); | ||
458 | |||
459 | kvm_sigset_activate(vcpu); | ||
460 | |||
461 | if (vcpu->mmio_needed) { | ||
462 | if (!vcpu->mmio_is_write) | ||
463 | kvm_mips_complete_mmio_load(vcpu); | ||
464 | vcpu->mmio_needed = 0; | ||
465 | } | ||
466 | |||
467 | if (vcpu->run->immediate_exit) | ||
468 | goto out; | ||
469 | |||
470 | lose_fpu(1); | ||
471 | |||
472 | local_irq_disable(); | ||
473 | guest_enter_irqoff(); | ||
474 | trace_kvm_enter(vcpu); | ||
475 | |||
476 | /* | ||
477 | * Make sure the read of VCPU requests in vcpu_run() callback is not | ||
478 | * reordered ahead of the write to vcpu->mode, or we could miss a TLB | ||
479 | * flush request while the requester sees the VCPU as outside of guest | ||
480 | * mode and not needing an IPI. | ||
481 | */ | ||
482 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); | ||
483 | |||
484 | r = kvm_mips_callbacks->vcpu_run(vcpu); | ||
485 | |||
486 | trace_kvm_out(vcpu); | ||
487 | guest_exit_irqoff(); | ||
488 | local_irq_enable(); | ||
489 | |||
490 | out: | ||
491 | kvm_sigset_deactivate(vcpu); | ||
492 | |||
493 | vcpu_put(vcpu); | ||
494 | return r; | ||
495 | } | ||
496 | |||
497 | int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, | ||
498 | struct kvm_mips_interrupt *irq) | ||
499 | { | ||
500 | int intr = (int)irq->irq; | ||
501 | struct kvm_vcpu *dvcpu = NULL; | ||
502 | |||
503 | if (intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_1] || | ||
504 | intr == kvm_priority_to_irq[MIPS_EXC_INT_IPI_2] || | ||
505 | intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_1]) || | ||
506 | intr == (-kvm_priority_to_irq[MIPS_EXC_INT_IPI_2])) | ||
507 | kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu, | ||
508 | (int)intr); | ||
509 | |||
510 | if (irq->cpu == -1) | ||
511 | dvcpu = vcpu; | ||
512 | else | ||
513 | dvcpu = vcpu->kvm->vcpus[irq->cpu]; | ||
514 | |||
515 | if (intr == 2 || intr == 3 || intr == 4 || intr == 6) { | ||
516 | kvm_mips_callbacks->queue_io_int(dvcpu, irq); | ||
517 | |||
518 | } else if (intr == -2 || intr == -3 || intr == -4 || intr == -6) { | ||
519 | kvm_mips_callbacks->dequeue_io_int(dvcpu, irq); | ||
520 | } else { | ||
521 | kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__, | ||
522 | irq->cpu, irq->irq); | ||
523 | return -EINVAL; | ||
524 | } | ||
525 | |||
526 | dvcpu->arch.wait = 0; | ||
527 | |||
528 | rcuwait_wake_up(&dvcpu->wait); | ||
529 | |||
530 | return 0; | ||
531 | } | ||
532 | |||
533 | int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, | ||
534 | struct kvm_mp_state *mp_state) | ||
535 | { | ||
536 | return -ENOIOCTLCMD; | ||
537 | } | ||
538 | |||
539 | int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, | ||
540 | struct kvm_mp_state *mp_state) | ||
541 | { | ||
542 | return -ENOIOCTLCMD; | ||
543 | } | ||
544 | |||
545 | static u64 kvm_mips_get_one_regs[] = { | ||
546 | KVM_REG_MIPS_R0, | ||
547 | KVM_REG_MIPS_R1, | ||
548 | KVM_REG_MIPS_R2, | ||
549 | KVM_REG_MIPS_R3, | ||
550 | KVM_REG_MIPS_R4, | ||
551 | KVM_REG_MIPS_R5, | ||
552 | KVM_REG_MIPS_R6, | ||
553 | KVM_REG_MIPS_R7, | ||
554 | KVM_REG_MIPS_R8, | ||
555 | KVM_REG_MIPS_R9, | ||
556 | KVM_REG_MIPS_R10, | ||
557 | KVM_REG_MIPS_R11, | ||
558 | KVM_REG_MIPS_R12, | ||
559 | KVM_REG_MIPS_R13, | ||
560 | KVM_REG_MIPS_R14, | ||
561 | KVM_REG_MIPS_R15, | ||
562 | KVM_REG_MIPS_R16, | ||
563 | KVM_REG_MIPS_R17, | ||
564 | KVM_REG_MIPS_R18, | ||
565 | KVM_REG_MIPS_R19, | ||
566 | KVM_REG_MIPS_R20, | ||
567 | KVM_REG_MIPS_R21, | ||
568 | KVM_REG_MIPS_R22, | ||
569 | KVM_REG_MIPS_R23, | ||
570 | KVM_REG_MIPS_R24, | ||
571 | KVM_REG_MIPS_R25, | ||
572 | KVM_REG_MIPS_R26, | ||
573 | KVM_REG_MIPS_R27, | ||
574 | KVM_REG_MIPS_R28, | ||
575 | KVM_REG_MIPS_R29, | ||
576 | KVM_REG_MIPS_R30, | ||
577 | KVM_REG_MIPS_R31, | ||
578 | |||
579 | #ifndef CONFIG_CPU_MIPSR6 | ||
580 | KVM_REG_MIPS_HI, | ||
581 | KVM_REG_MIPS_LO, | ||
582 | #endif | ||
583 | KVM_REG_MIPS_PC, | ||
584 | }; | ||
585 | |||
586 | static u64 kvm_mips_get_one_regs_fpu[] = { | ||
587 | KVM_REG_MIPS_FCR_IR, | ||
588 | KVM_REG_MIPS_FCR_CSR, | ||
589 | }; | ||
590 | |||
591 | static u64 kvm_mips_get_one_regs_msa[] = { | ||
592 | KVM_REG_MIPS_MSA_IR, | ||
593 | KVM_REG_MIPS_MSA_CSR, | ||
594 | }; | ||
595 | |||
596 | static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu) | ||
597 | { | ||
598 | unsigned long ret; | ||
599 | |||
600 | ret = ARRAY_SIZE(kvm_mips_get_one_regs); | ||
601 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { | ||
602 | ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48; | ||
603 | /* odd doubles */ | ||
604 | if (boot_cpu_data.fpu_id & MIPS_FPIR_F64) | ||
605 | ret += 16; | ||
606 | } | ||
607 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) | ||
608 | ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32; | ||
609 | ret += kvm_mips_callbacks->num_regs(vcpu); | ||
610 | |||
611 | return ret; | ||
612 | } | ||
613 | |||
614 | static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) | ||
615 | { | ||
616 | u64 index; | ||
617 | unsigned int i; | ||
618 | |||
619 | if (copy_to_user(indices, kvm_mips_get_one_regs, | ||
620 | sizeof(kvm_mips_get_one_regs))) | ||
621 | return -EFAULT; | ||
622 | indices += ARRAY_SIZE(kvm_mips_get_one_regs); | ||
623 | |||
624 | if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) { | ||
625 | if (copy_to_user(indices, kvm_mips_get_one_regs_fpu, | ||
626 | sizeof(kvm_mips_get_one_regs_fpu))) | ||
627 | return -EFAULT; | ||
628 | indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu); | ||
629 | |||
630 | for (i = 0; i < 32; ++i) { | ||
631 | index = KVM_REG_MIPS_FPR_32(i); | ||
632 | if (copy_to_user(indices, &index, sizeof(index))) | ||
633 | return -EFAULT; | ||
634 | ++indices; | ||
635 | |||
636 | /* skip odd doubles if no F64 */ | ||
637 | if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64)) | ||
638 | continue; | ||
639 | |||
640 | index = KVM_REG_MIPS_FPR_64(i); | ||
641 | if (copy_to_user(indices, &index, sizeof(index))) | ||
642 | return -EFAULT; | ||
643 | ++indices; | ||
644 | } | ||
645 | } | ||
646 | |||
647 | if (kvm_mips_guest_can_have_msa(&vcpu->arch)) { | ||
648 | if (copy_to_user(indices, kvm_mips_get_one_regs_msa, | ||
649 | sizeof(kvm_mips_get_one_regs_msa))) | ||
650 | return -EFAULT; | ||
651 | indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa); | ||
652 | |||
653 | for (i = 0; i < 32; ++i) { | ||
654 | index = KVM_REG_MIPS_VEC_128(i); | ||
655 | if (copy_to_user(indices, &index, sizeof(index))) | ||
656 | return -EFAULT; | ||
657 | ++indices; | ||
658 | } | ||
659 | } | ||
660 | |||
661 | return kvm_mips_callbacks->copy_reg_indices(vcpu, indices); | ||
662 | } | ||
663 | |||
664 | static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, | ||
665 | const struct kvm_one_reg *reg) | ||
666 | { | ||
667 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
668 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; | ||
669 | int ret; | ||
670 | s64 v; | ||
671 | s64 vs[2]; | ||
672 | unsigned int idx; | ||
673 | |||
674 | switch (reg->id) { | ||
675 | /* General purpose registers */ | ||
676 | case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31: | ||
677 | v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0]; | ||
678 | break; | ||
679 | #ifndef CONFIG_CPU_MIPSR6 | ||
680 | case KVM_REG_MIPS_HI: | ||
681 | v = (long)vcpu->arch.hi; | ||
682 | break; | ||
683 | case KVM_REG_MIPS_LO: | ||
684 | v = (long)vcpu->arch.lo; | ||
685 | break; | ||
686 | #endif | ||
687 | case KVM_REG_MIPS_PC: | ||
688 | v = (long)vcpu->arch.pc; | ||
689 | break; | ||
690 | |||
691 | /* Floating point registers */ | ||
692 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): | ||
693 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
694 | return -EINVAL; | ||
695 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); | ||
696 | /* Odd singles in top of even double when FR=0 */ | ||
697 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) | ||
698 | v = get_fpr32(&fpu->fpr[idx], 0); | ||
699 | else | ||
700 | v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1); | ||
701 | break; | ||
702 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): | ||
703 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
704 | return -EINVAL; | ||
705 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); | ||
706 | /* Can't access odd doubles in FR=0 mode */ | ||
707 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) | ||
708 | return -EINVAL; | ||
709 | v = get_fpr64(&fpu->fpr[idx], 0); | ||
710 | break; | ||
711 | case KVM_REG_MIPS_FCR_IR: | ||
712 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
713 | return -EINVAL; | ||
714 | v = boot_cpu_data.fpu_id; | ||
715 | break; | ||
716 | case KVM_REG_MIPS_FCR_CSR: | ||
717 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
718 | return -EINVAL; | ||
719 | v = fpu->fcr31; | ||
720 | break; | ||
721 | |||
722 | /* MIPS SIMD Architecture (MSA) registers */ | ||
723 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): | ||
724 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
725 | return -EINVAL; | ||
726 | /* Can't access MSA registers in FR=0 mode */ | ||
727 | if (!(kvm_read_c0_guest_status(cop0) & ST0_FR)) | ||
728 | return -EINVAL; | ||
729 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); | ||
730 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
731 | /* least significant byte first */ | ||
732 | vs[0] = get_fpr64(&fpu->fpr[idx], 0); | ||
733 | vs[1] = get_fpr64(&fpu->fpr[idx], 1); | ||
734 | #else | ||
735 | /* most significant byte first */ | ||
736 | vs[0] = get_fpr64(&fpu->fpr[idx], 1); | ||
737 | vs[1] = get_fpr64(&fpu->fpr[idx], 0); | ||
738 | #endif | ||
739 | break; | ||
740 | case KVM_REG_MIPS_MSA_IR: | ||
741 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
742 | return -EINVAL; | ||
743 | v = boot_cpu_data.msa_id; | ||
744 | break; | ||
745 | case KVM_REG_MIPS_MSA_CSR: | ||
746 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
747 | return -EINVAL; | ||
748 | v = fpu->msacsr; | ||
749 | break; | ||
750 | |||
751 | /* registers to be handled specially */ | ||
752 | default: | ||
753 | ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); | ||
754 | if (ret) | ||
755 | return ret; | ||
756 | break; | ||
757 | } | ||
758 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { | ||
759 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | ||
760 | |||
761 | return put_user(v, uaddr64); | ||
762 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | ||
763 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | ||
764 | u32 v32 = (u32)v; | ||
765 | |||
766 | return put_user(v32, uaddr32); | ||
767 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { | ||
768 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
769 | |||
770 | return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0; | ||
771 | } else { | ||
772 | return -EINVAL; | ||
773 | } | ||
774 | } | ||
775 | |||
776 | static int kvm_mips_set_reg(struct kvm_vcpu *vcpu, | ||
777 | const struct kvm_one_reg *reg) | ||
778 | { | ||
779 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
780 | struct mips_fpu_struct *fpu = &vcpu->arch.fpu; | ||
781 | s64 v; | ||
782 | s64 vs[2]; | ||
783 | unsigned int idx; | ||
784 | |||
785 | if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) { | ||
786 | u64 __user *uaddr64 = (u64 __user *)(long)reg->addr; | ||
787 | |||
788 | if (get_user(v, uaddr64) != 0) | ||
789 | return -EFAULT; | ||
790 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) { | ||
791 | u32 __user *uaddr32 = (u32 __user *)(long)reg->addr; | ||
792 | s32 v32; | ||
793 | |||
794 | if (get_user(v32, uaddr32) != 0) | ||
795 | return -EFAULT; | ||
796 | v = (s64)v32; | ||
797 | } else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) { | ||
798 | void __user *uaddr = (void __user *)(long)reg->addr; | ||
799 | |||
800 | return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0; | ||
801 | } else { | ||
802 | return -EINVAL; | ||
803 | } | ||
804 | |||
805 | switch (reg->id) { | ||
806 | /* General purpose registers */ | ||
807 | case KVM_REG_MIPS_R0: | ||
808 | /* Silently ignore requests to set $0 */ | ||
809 | break; | ||
810 | case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31: | ||
811 | vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v; | ||
812 | break; | ||
813 | #ifndef CONFIG_CPU_MIPSR6 | ||
814 | case KVM_REG_MIPS_HI: | ||
815 | vcpu->arch.hi = v; | ||
816 | break; | ||
817 | case KVM_REG_MIPS_LO: | ||
818 | vcpu->arch.lo = v; | ||
819 | break; | ||
820 | #endif | ||
821 | case KVM_REG_MIPS_PC: | ||
822 | vcpu->arch.pc = v; | ||
823 | break; | ||
824 | |||
825 | /* Floating point registers */ | ||
826 | case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31): | ||
827 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
828 | return -EINVAL; | ||
829 | idx = reg->id - KVM_REG_MIPS_FPR_32(0); | ||
830 | /* Odd singles in top of even double when FR=0 */ | ||
831 | if (kvm_read_c0_guest_status(cop0) & ST0_FR) | ||
832 | set_fpr32(&fpu->fpr[idx], 0, v); | ||
833 | else | ||
834 | set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v); | ||
835 | break; | ||
836 | case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31): | ||
837 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
838 | return -EINVAL; | ||
839 | idx = reg->id - KVM_REG_MIPS_FPR_64(0); | ||
840 | /* Can't access odd doubles in FR=0 mode */ | ||
841 | if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR)) | ||
842 | return -EINVAL; | ||
843 | set_fpr64(&fpu->fpr[idx], 0, v); | ||
844 | break; | ||
845 | case KVM_REG_MIPS_FCR_IR: | ||
846 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
847 | return -EINVAL; | ||
848 | /* Read-only */ | ||
849 | break; | ||
850 | case KVM_REG_MIPS_FCR_CSR: | ||
851 | if (!kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
852 | return -EINVAL; | ||
853 | fpu->fcr31 = v; | ||
854 | break; | ||
855 | |||
856 | /* MIPS SIMD Architecture (MSA) registers */ | ||
857 | case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31): | ||
858 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
859 | return -EINVAL; | ||
860 | idx = reg->id - KVM_REG_MIPS_VEC_128(0); | ||
861 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
862 | /* least significant byte first */ | ||
863 | set_fpr64(&fpu->fpr[idx], 0, vs[0]); | ||
864 | set_fpr64(&fpu->fpr[idx], 1, vs[1]); | ||
865 | #else | ||
866 | /* most significant byte first */ | ||
867 | set_fpr64(&fpu->fpr[idx], 1, vs[0]); | ||
868 | set_fpr64(&fpu->fpr[idx], 0, vs[1]); | ||
869 | #endif | ||
870 | break; | ||
871 | case KVM_REG_MIPS_MSA_IR: | ||
872 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
873 | return -EINVAL; | ||
874 | /* Read-only */ | ||
875 | break; | ||
876 | case KVM_REG_MIPS_MSA_CSR: | ||
877 | if (!kvm_mips_guest_has_msa(&vcpu->arch)) | ||
878 | return -EINVAL; | ||
879 | fpu->msacsr = v; | ||
880 | break; | ||
881 | |||
882 | /* registers to be handled specially */ | ||
883 | default: | ||
884 | return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); | ||
885 | } | ||
886 | return 0; | ||
887 | } | ||
888 | |||
889 | static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu, | ||
890 | struct kvm_enable_cap *cap) | ||
891 | { | ||
892 | int r = 0; | ||
893 | |||
894 | if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap)) | ||
895 | return -EINVAL; | ||
896 | if (cap->flags) | ||
897 | return -EINVAL; | ||
898 | if (cap->args[0]) | ||
899 | return -EINVAL; | ||
900 | |||
901 | switch (cap->cap) { | ||
902 | case KVM_CAP_MIPS_FPU: | ||
903 | vcpu->arch.fpu_enabled = true; | ||
904 | break; | ||
905 | case KVM_CAP_MIPS_MSA: | ||
906 | vcpu->arch.msa_enabled = true; | ||
907 | break; | ||
908 | default: | ||
909 | r = -EINVAL; | ||
910 | break; | ||
911 | } | ||
912 | |||
913 | return r; | ||
914 | } | ||
915 | |||
916 | long kvm_arch_vcpu_async_ioctl(struct file *filp, unsigned int ioctl, | ||
917 | unsigned long arg) | ||
918 | { | ||
919 | struct kvm_vcpu *vcpu = filp->private_data; | ||
920 | void __user *argp = (void __user *)arg; | ||
921 | |||
922 | if (ioctl == KVM_INTERRUPT) { | ||
923 | struct kvm_mips_interrupt irq; | ||
924 | |||
925 | if (copy_from_user(&irq, argp, sizeof(irq))) | ||
926 | return -EFAULT; | ||
927 | kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__, | ||
928 | irq.irq); | ||
929 | |||
930 | return kvm_vcpu_ioctl_interrupt(vcpu, &irq); | ||
931 | } | ||
932 | |||
933 | return -ENOIOCTLCMD; | ||
934 | } | ||
935 | |||
936 | long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl, | ||
937 | unsigned long arg) | ||
938 | { | ||
939 | struct kvm_vcpu *vcpu = filp->private_data; | ||
940 | void __user *argp = (void __user *)arg; | ||
941 | long r; | ||
942 | |||
943 | vcpu_load(vcpu); | ||
944 | |||
945 | switch (ioctl) { | ||
946 | case KVM_SET_ONE_REG: | ||
947 | case KVM_GET_ONE_REG: { | ||
948 | struct kvm_one_reg reg; | ||
949 | |||
950 | r = -EFAULT; | ||
951 | if (copy_from_user(®, argp, sizeof(reg))) | ||
952 | break; | ||
953 | if (ioctl == KVM_SET_ONE_REG) | ||
954 | r = kvm_mips_set_reg(vcpu, ®); | ||
955 | else | ||
956 | r = kvm_mips_get_reg(vcpu, ®); | ||
957 | break; | ||
958 | } | ||
959 | case KVM_GET_REG_LIST: { | ||
960 | struct kvm_reg_list __user *user_list = argp; | ||
961 | struct kvm_reg_list reg_list; | ||
962 | unsigned n; | ||
963 | |||
964 | r = -EFAULT; | ||
965 | if (copy_from_user(®_list, user_list, sizeof(reg_list))) | ||
966 | break; | ||
967 | n = reg_list.n; | ||
968 | reg_list.n = kvm_mips_num_regs(vcpu); | ||
969 | if (copy_to_user(user_list, ®_list, sizeof(reg_list))) | ||
970 | break; | ||
971 | r = -E2BIG; | ||
972 | if (n < reg_list.n) | ||
973 | break; | ||
974 | r = kvm_mips_copy_reg_indices(vcpu, user_list->reg); | ||
975 | break; | ||
976 | } | ||
977 | case KVM_ENABLE_CAP: { | ||
978 | struct kvm_enable_cap cap; | ||
979 | |||
980 | r = -EFAULT; | ||
981 | if (copy_from_user(&cap, argp, sizeof(cap))) | ||
982 | break; | ||
983 | r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap); | ||
984 | break; | ||
985 | } | ||
986 | default: | ||
987 | r = -ENOIOCTLCMD; | ||
988 | } | ||
989 | |||
990 | vcpu_put(vcpu); | ||
991 | return r; | ||
992 | } | ||
993 | |||
994 | void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) | ||
995 | { | ||
996 | |||
997 | } | ||
998 | |||
999 | void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm, | ||
1000 | struct kvm_memory_slot *memslot) | ||
1001 | { | ||
1002 | /* Let implementation handle TLB/GVA invalidation */ | ||
1003 | kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot); | ||
1004 | } | ||
1005 | |||
1006 | long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) | ||
1007 | { | ||
1008 | long r; | ||
1009 | |||
1010 | switch (ioctl) { | ||
1011 | default: | ||
1012 | r = -ENOIOCTLCMD; | ||
1013 | } | ||
1014 | |||
1015 | return r; | ||
1016 | } | ||
1017 | |||
1018 | int kvm_arch_init(void *opaque) | ||
1019 | { | ||
1020 | if (kvm_mips_callbacks) { | ||
1021 | kvm_err("kvm: module already exists\n"); | ||
1022 | return -EEXIST; | ||
1023 | } | ||
1024 | |||
1025 | return kvm_mips_emulation_init(&kvm_mips_callbacks); | ||
1026 | } | ||
1027 | |||
1028 | void kvm_arch_exit(void) | ||
1029 | { | ||
1030 | kvm_mips_callbacks = NULL; | ||
1031 | } | ||
1032 | |||
1033 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, | ||
1034 | struct kvm_sregs *sregs) | ||
1035 | { | ||
1036 | return -ENOIOCTLCMD; | ||
1037 | } | ||
1038 | |||
1039 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | ||
1040 | struct kvm_sregs *sregs) | ||
1041 | { | ||
1042 | return -ENOIOCTLCMD; | ||
1043 | } | ||
1044 | |||
1045 | void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu) | ||
1046 | { | ||
1047 | } | ||
1048 | |||
1049 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
1050 | { | ||
1051 | return -ENOIOCTLCMD; | ||
1052 | } | ||
1053 | |||
1054 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | ||
1055 | { | ||
1056 | return -ENOIOCTLCMD; | ||
1057 | } | ||
1058 | |||
1059 | vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf) | ||
1060 | { | ||
1061 | return VM_FAULT_SIGBUS; | ||
1062 | } | ||
1063 | |||
1064 | int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) | ||
1065 | { | ||
1066 | int r; | ||
1067 | |||
1068 | switch (ext) { | ||
1069 | case KVM_CAP_ONE_REG: | ||
1070 | case KVM_CAP_ENABLE_CAP: | ||
1071 | case KVM_CAP_READONLY_MEM: | ||
1072 | case KVM_CAP_SYNC_MMU: | ||
1073 | case KVM_CAP_IMMEDIATE_EXIT: | ||
1074 | r = 1; | ||
1075 | break; | ||
1076 | case KVM_CAP_NR_VCPUS: | ||
1077 | r = num_online_cpus(); | ||
1078 | break; | ||
1079 | case KVM_CAP_MAX_VCPUS: | ||
1080 | r = KVM_MAX_VCPUS; | ||
1081 | break; | ||
1082 | case KVM_CAP_MAX_VCPU_ID: | ||
1083 | r = KVM_MAX_VCPU_ID; | ||
1084 | break; | ||
1085 | case KVM_CAP_MIPS_FPU: | ||
1086 | /* We don't handle systems with inconsistent cpu_has_fpu */ | ||
1087 | r = !!raw_cpu_has_fpu; | ||
1088 | break; | ||
1089 | case KVM_CAP_MIPS_MSA: | ||
1090 | /* | ||
1091 | * We don't support MSA vector partitioning yet: | ||
1092 | * 1) It would require explicit support which can't be tested | ||
1093 | * yet due to lack of support in current hardware. | ||
1094 | * 2) It extends the state that would need to be saved/restored | ||
1095 | * by e.g. QEMU for migration. | ||
1096 | * | ||
1097 | * When vector partitioning hardware becomes available, support | ||
1098 | * could be added by requiring a flag when enabling | ||
1099 | * KVM_CAP_MIPS_MSA capability to indicate that userland knows | ||
1100 | * to save/restore the appropriate extra state. | ||
1101 | */ | ||
1102 | r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF); | ||
1103 | break; | ||
1104 | default: | ||
1105 | r = kvm_mips_callbacks->check_extension(kvm, ext); | ||
1106 | break; | ||
1107 | } | ||
1108 | return r; | ||
1109 | } | ||
1110 | |||
1111 | int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu) | ||
1112 | { | ||
1113 | return kvm_mips_pending_timer(vcpu) || | ||
1114 | kvm_read_c0_guest_cause(vcpu->arch.cop0) & C_TI; | ||
1115 | } | ||
1116 | |||
1117 | int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu) | ||
1118 | { | ||
1119 | int i; | ||
1120 | struct mips_coproc *cop0; | ||
1121 | |||
1122 | if (!vcpu) | ||
1123 | return -1; | ||
1124 | |||
1125 | kvm_debug("VCPU Register Dump:\n"); | ||
1126 | kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc); | ||
1127 | kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions); | ||
1128 | |||
1129 | for (i = 0; i < 32; i += 4) { | ||
1130 | kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i, | ||
1131 | vcpu->arch.gprs[i], | ||
1132 | vcpu->arch.gprs[i + 1], | ||
1133 | vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]); | ||
1134 | } | ||
1135 | kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi); | ||
1136 | kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo); | ||
1137 | |||
1138 | cop0 = vcpu->arch.cop0; | ||
1139 | kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n", | ||
1140 | kvm_read_c0_guest_status(cop0), | ||
1141 | kvm_read_c0_guest_cause(cop0)); | ||
1142 | |||
1143 | kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0)); | ||
1144 | |||
1145 | return 0; | ||
1146 | } | ||
1147 | |||
1148 | int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
1149 | { | ||
1150 | int i; | ||
1151 | |||
1152 | vcpu_load(vcpu); | ||
1153 | |||
1154 | for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++) | ||
1155 | vcpu->arch.gprs[i] = regs->gpr[i]; | ||
1156 | vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */ | ||
1157 | vcpu->arch.hi = regs->hi; | ||
1158 | vcpu->arch.lo = regs->lo; | ||
1159 | vcpu->arch.pc = regs->pc; | ||
1160 | |||
1161 | vcpu_put(vcpu); | ||
1162 | return 0; | ||
1163 | } | ||
1164 | |||
1165 | int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs) | ||
1166 | { | ||
1167 | int i; | ||
1168 | |||
1169 | vcpu_load(vcpu); | ||
1170 | |||
1171 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++) | ||
1172 | regs->gpr[i] = vcpu->arch.gprs[i]; | ||
1173 | |||
1174 | regs->hi = vcpu->arch.hi; | ||
1175 | regs->lo = vcpu->arch.lo; | ||
1176 | regs->pc = vcpu->arch.pc; | ||
1177 | |||
1178 | vcpu_put(vcpu); | ||
1179 | return 0; | ||
1180 | } | ||
1181 | |||
1182 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, | ||
1183 | struct kvm_translation *tr) | ||
1184 | { | ||
1185 | return 0; | ||
1186 | } | ||
1187 | |||
1188 | static void kvm_mips_set_c0_status(void) | ||
1189 | { | ||
1190 | u32 status = read_c0_status(); | ||
1191 | |||
1192 | if (cpu_has_dsp) | ||
1193 | status |= (ST0_MX); | ||
1194 | |||
1195 | write_c0_status(status); | ||
1196 | ehb(); | ||
1197 | } | ||
1198 | |||
1199 | /* | ||
1200 | * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV) | ||
1201 | */ | ||
1202 | int kvm_mips_handle_exit(struct kvm_vcpu *vcpu) | ||
1203 | { | ||
1204 | struct kvm_run *run = vcpu->run; | ||
1205 | u32 cause = vcpu->arch.host_cp0_cause; | ||
1206 | u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f; | ||
1207 | u32 __user *opc = (u32 __user *) vcpu->arch.pc; | ||
1208 | unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr; | ||
1209 | enum emulation_result er = EMULATE_DONE; | ||
1210 | u32 inst; | ||
1211 | int ret = RESUME_GUEST; | ||
1212 | |||
1213 | vcpu->mode = OUTSIDE_GUEST_MODE; | ||
1214 | |||
1215 | /* re-enable HTW before enabling interrupts */ | ||
1216 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) | ||
1217 | htw_start(); | ||
1218 | |||
1219 | /* Set a default exit reason */ | ||
1220 | run->exit_reason = KVM_EXIT_UNKNOWN; | ||
1221 | run->ready_for_interrupt_injection = 1; | ||
1222 | |||
1223 | /* | ||
1224 | * Set the appropriate status bits based on host CPU features, | ||
1225 | * before we hit the scheduler | ||
1226 | */ | ||
1227 | kvm_mips_set_c0_status(); | ||
1228 | |||
1229 | local_irq_enable(); | ||
1230 | |||
1231 | kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n", | ||
1232 | cause, opc, run, vcpu); | ||
1233 | trace_kvm_exit(vcpu, exccode); | ||
1234 | |||
1235 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { | ||
1236 | /* | ||
1237 | * Do a privilege check, if in UM most of these exit conditions | ||
1238 | * end up causing an exception to be delivered to the Guest | ||
1239 | * Kernel | ||
1240 | */ | ||
1241 | er = kvm_mips_check_privilege(cause, opc, vcpu); | ||
1242 | if (er == EMULATE_PRIV_FAIL) { | ||
1243 | goto skip_emul; | ||
1244 | } else if (er == EMULATE_FAIL) { | ||
1245 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1246 | ret = RESUME_HOST; | ||
1247 | goto skip_emul; | ||
1248 | } | ||
1249 | } | ||
1250 | |||
1251 | switch (exccode) { | ||
1252 | case EXCCODE_INT: | ||
1253 | kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc); | ||
1254 | |||
1255 | ++vcpu->stat.int_exits; | ||
1256 | |||
1257 | if (need_resched()) | ||
1258 | cond_resched(); | ||
1259 | |||
1260 | ret = RESUME_GUEST; | ||
1261 | break; | ||
1262 | |||
1263 | case EXCCODE_CPU: | ||
1264 | kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc); | ||
1265 | |||
1266 | ++vcpu->stat.cop_unusable_exits; | ||
1267 | ret = kvm_mips_callbacks->handle_cop_unusable(vcpu); | ||
1268 | /* XXXKYMA: Might need to return to user space */ | ||
1269 | if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN) | ||
1270 | ret = RESUME_HOST; | ||
1271 | break; | ||
1272 | |||
1273 | case EXCCODE_MOD: | ||
1274 | ++vcpu->stat.tlbmod_exits; | ||
1275 | ret = kvm_mips_callbacks->handle_tlb_mod(vcpu); | ||
1276 | break; | ||
1277 | |||
1278 | case EXCCODE_TLBS: | ||
1279 | kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n", | ||
1280 | cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc, | ||
1281 | badvaddr); | ||
1282 | |||
1283 | ++vcpu->stat.tlbmiss_st_exits; | ||
1284 | ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu); | ||
1285 | break; | ||
1286 | |||
1287 | case EXCCODE_TLBL: | ||
1288 | kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n", | ||
1289 | cause, opc, badvaddr); | ||
1290 | |||
1291 | ++vcpu->stat.tlbmiss_ld_exits; | ||
1292 | ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu); | ||
1293 | break; | ||
1294 | |||
1295 | case EXCCODE_ADES: | ||
1296 | ++vcpu->stat.addrerr_st_exits; | ||
1297 | ret = kvm_mips_callbacks->handle_addr_err_st(vcpu); | ||
1298 | break; | ||
1299 | |||
1300 | case EXCCODE_ADEL: | ||
1301 | ++vcpu->stat.addrerr_ld_exits; | ||
1302 | ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu); | ||
1303 | break; | ||
1304 | |||
1305 | case EXCCODE_SYS: | ||
1306 | ++vcpu->stat.syscall_exits; | ||
1307 | ret = kvm_mips_callbacks->handle_syscall(vcpu); | ||
1308 | break; | ||
1309 | |||
1310 | case EXCCODE_RI: | ||
1311 | ++vcpu->stat.resvd_inst_exits; | ||
1312 | ret = kvm_mips_callbacks->handle_res_inst(vcpu); | ||
1313 | break; | ||
1314 | |||
1315 | case EXCCODE_BP: | ||
1316 | ++vcpu->stat.break_inst_exits; | ||
1317 | ret = kvm_mips_callbacks->handle_break(vcpu); | ||
1318 | break; | ||
1319 | |||
1320 | case EXCCODE_TR: | ||
1321 | ++vcpu->stat.trap_inst_exits; | ||
1322 | ret = kvm_mips_callbacks->handle_trap(vcpu); | ||
1323 | break; | ||
1324 | |||
1325 | case EXCCODE_MSAFPE: | ||
1326 | ++vcpu->stat.msa_fpe_exits; | ||
1327 | ret = kvm_mips_callbacks->handle_msa_fpe(vcpu); | ||
1328 | break; | ||
1329 | |||
1330 | case EXCCODE_FPE: | ||
1331 | ++vcpu->stat.fpe_exits; | ||
1332 | ret = kvm_mips_callbacks->handle_fpe(vcpu); | ||
1333 | break; | ||
1334 | |||
1335 | case EXCCODE_MSADIS: | ||
1336 | ++vcpu->stat.msa_disabled_exits; | ||
1337 | ret = kvm_mips_callbacks->handle_msa_disabled(vcpu); | ||
1338 | break; | ||
1339 | |||
1340 | case EXCCODE_GE: | ||
1341 | /* defer exit accounting to handler */ | ||
1342 | ret = kvm_mips_callbacks->handle_guest_exit(vcpu); | ||
1343 | break; | ||
1344 | |||
1345 | default: | ||
1346 | if (cause & CAUSEF_BD) | ||
1347 | opc += 1; | ||
1348 | inst = 0; | ||
1349 | kvm_get_badinstr(opc, vcpu, &inst); | ||
1350 | kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n", | ||
1351 | exccode, opc, inst, badvaddr, | ||
1352 | kvm_read_c0_guest_status(vcpu->arch.cop0)); | ||
1353 | kvm_arch_vcpu_dump_regs(vcpu); | ||
1354 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | ||
1355 | ret = RESUME_HOST; | ||
1356 | break; | ||
1357 | |||
1358 | } | ||
1359 | |||
1360 | skip_emul: | ||
1361 | local_irq_disable(); | ||
1362 | |||
1363 | if (ret == RESUME_GUEST) | ||
1364 | kvm_vz_acquire_htimer(vcpu); | ||
1365 | |||
1366 | if (er == EMULATE_DONE && !(ret & RESUME_HOST)) | ||
1367 | kvm_mips_deliver_interrupts(vcpu, cause); | ||
1368 | |||
1369 | if (!(ret & RESUME_HOST)) { | ||
1370 | /* Only check for signals if not already exiting to userspace */ | ||
1371 | if (signal_pending(current)) { | ||
1372 | run->exit_reason = KVM_EXIT_INTR; | ||
1373 | ret = (-EINTR << 2) | RESUME_HOST; | ||
1374 | ++vcpu->stat.signal_exits; | ||
1375 | trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL); | ||
1376 | } | ||
1377 | } | ||
1378 | |||
1379 | if (ret == RESUME_GUEST) { | ||
1380 | trace_kvm_reenter(vcpu); | ||
1381 | |||
1382 | /* | ||
1383 | * Make sure the read of VCPU requests in vcpu_reenter() | ||
1384 | * callback is not reordered ahead of the write to vcpu->mode, | ||
1385 | * or we could miss a TLB flush request while the requester sees | ||
1386 | * the VCPU as outside of guest mode and not needing an IPI. | ||
1387 | */ | ||
1388 | smp_store_mb(vcpu->mode, IN_GUEST_MODE); | ||
1389 | |||
1390 | kvm_mips_callbacks->vcpu_reenter(vcpu); | ||
1391 | |||
1392 | /* | ||
1393 | * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context | ||
1394 | * is live), restore FCR31 / MSACSR. | ||
1395 | * | ||
1396 | * This should be before returning to the guest exception | ||
1397 | * vector, as it may well cause an [MSA] FP exception if there | ||
1398 | * are pending exception bits unmasked. (see | ||
1399 | * kvm_mips_csr_die_notifier() for how that is handled). | ||
1400 | */ | ||
1401 | if (kvm_mips_guest_has_fpu(&vcpu->arch) && | ||
1402 | read_c0_status() & ST0_CU1) | ||
1403 | __kvm_restore_fcsr(&vcpu->arch); | ||
1404 | |||
1405 | if (kvm_mips_guest_has_msa(&vcpu->arch) && | ||
1406 | read_c0_config5() & MIPS_CONF5_MSAEN) | ||
1407 | __kvm_restore_msacsr(&vcpu->arch); | ||
1408 | } | ||
1409 | |||
1410 | /* Disable HTW before returning to guest or host */ | ||
1411 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) | ||
1412 | htw_stop(); | ||
1413 | |||
1414 | return ret; | ||
1415 | } | ||
1416 | |||
1417 | /* Enable FPU for guest and restore context */ | ||
1418 | void kvm_own_fpu(struct kvm_vcpu *vcpu) | ||
1419 | { | ||
1420 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1421 | unsigned int sr, cfg5; | ||
1422 | |||
1423 | preempt_disable(); | ||
1424 | |||
1425 | sr = kvm_read_c0_guest_status(cop0); | ||
1426 | |||
1427 | /* | ||
1428 | * If MSA state is already live, it is undefined how it interacts with | ||
1429 | * FR=0 FPU state, and we don't want to hit reserved instruction | ||
1430 | * exceptions trying to save the MSA state later when CU=1 && FR=1, so | ||
1431 | * play it safe and save it first. | ||
1432 | * | ||
1433 | * In theory we shouldn't ever hit this case since kvm_lose_fpu() should | ||
1434 | * get called when guest CU1 is set, however we can't trust the guest | ||
1435 | * not to clobber the status register directly via the commpage. | ||
1436 | */ | ||
1437 | if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) && | ||
1438 | vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) | ||
1439 | kvm_lose_fpu(vcpu); | ||
1440 | |||
1441 | /* | ||
1442 | * Enable FPU for guest | ||
1443 | * We set FR and FRE according to guest context | ||
1444 | */ | ||
1445 | change_c0_status(ST0_CU1 | ST0_FR, sr); | ||
1446 | if (cpu_has_fre) { | ||
1447 | cfg5 = kvm_read_c0_guest_config5(cop0); | ||
1448 | change_c0_config5(MIPS_CONF5_FRE, cfg5); | ||
1449 | } | ||
1450 | enable_fpu_hazard(); | ||
1451 | |||
1452 | /* If guest FPU state not active, restore it now */ | ||
1453 | if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) { | ||
1454 | __kvm_restore_fpu(&vcpu->arch); | ||
1455 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; | ||
1456 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU); | ||
1457 | } else { | ||
1458 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU); | ||
1459 | } | ||
1460 | |||
1461 | preempt_enable(); | ||
1462 | } | ||
1463 | |||
1464 | #ifdef CONFIG_CPU_HAS_MSA | ||
1465 | /* Enable MSA for guest and restore context */ | ||
1466 | void kvm_own_msa(struct kvm_vcpu *vcpu) | ||
1467 | { | ||
1468 | struct mips_coproc *cop0 = vcpu->arch.cop0; | ||
1469 | unsigned int sr, cfg5; | ||
1470 | |||
1471 | preempt_disable(); | ||
1472 | |||
1473 | /* | ||
1474 | * Enable FPU if enabled in guest, since we're restoring FPU context | ||
1475 | * anyway. We set FR and FRE according to guest context. | ||
1476 | */ | ||
1477 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) { | ||
1478 | sr = kvm_read_c0_guest_status(cop0); | ||
1479 | |||
1480 | /* | ||
1481 | * If FR=0 FPU state is already live, it is undefined how it | ||
1482 | * interacts with MSA state, so play it safe and save it first. | ||
1483 | */ | ||
1484 | if (!(sr & ST0_FR) && | ||
1485 | (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | | ||
1486 | KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU) | ||
1487 | kvm_lose_fpu(vcpu); | ||
1488 | |||
1489 | change_c0_status(ST0_CU1 | ST0_FR, sr); | ||
1490 | if (sr & ST0_CU1 && cpu_has_fre) { | ||
1491 | cfg5 = kvm_read_c0_guest_config5(cop0); | ||
1492 | change_c0_config5(MIPS_CONF5_FRE, cfg5); | ||
1493 | } | ||
1494 | } | ||
1495 | |||
1496 | /* Enable MSA for guest */ | ||
1497 | set_c0_config5(MIPS_CONF5_MSAEN); | ||
1498 | enable_fpu_hazard(); | ||
1499 | |||
1500 | switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) { | ||
1501 | case KVM_MIPS_AUX_FPU: | ||
1502 | /* | ||
1503 | * Guest FPU state already loaded, only restore upper MSA state | ||
1504 | */ | ||
1505 | __kvm_restore_msa_upper(&vcpu->arch); | ||
1506 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; | ||
1507 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA); | ||
1508 | break; | ||
1509 | case 0: | ||
1510 | /* Neither FPU or MSA already active, restore full MSA state */ | ||
1511 | __kvm_restore_msa(&vcpu->arch); | ||
1512 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA; | ||
1513 | if (kvm_mips_guest_has_fpu(&vcpu->arch)) | ||
1514 | vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU; | ||
1515 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, | ||
1516 | KVM_TRACE_AUX_FPU_MSA); | ||
1517 | break; | ||
1518 | default: | ||
1519 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA); | ||
1520 | break; | ||
1521 | } | ||
1522 | |||
1523 | preempt_enable(); | ||
1524 | } | ||
1525 | #endif | ||
1526 | |||
1527 | /* Drop FPU & MSA without saving it */ | ||
1528 | void kvm_drop_fpu(struct kvm_vcpu *vcpu) | ||
1529 | { | ||
1530 | preempt_disable(); | ||
1531 | if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { | ||
1532 | disable_msa(); | ||
1533 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA); | ||
1534 | vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA; | ||
1535 | } | ||
1536 | if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { | ||
1537 | clear_c0_status(ST0_CU1 | ST0_FR); | ||
1538 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU); | ||
1539 | vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; | ||
1540 | } | ||
1541 | preempt_enable(); | ||
1542 | } | ||
1543 | |||
1544 | /* Save and disable FPU & MSA */ | ||
1545 | void kvm_lose_fpu(struct kvm_vcpu *vcpu) | ||
1546 | { | ||
1547 | /* | ||
1548 | * With T&E, FPU & MSA get disabled in root context (hardware) when it | ||
1549 | * is disabled in guest context (software), but the register state in | ||
1550 | * the hardware may still be in use. | ||
1551 | * This is why we explicitly re-enable the hardware before saving. | ||
1552 | */ | ||
1553 | |||
1554 | preempt_disable(); | ||
1555 | if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) { | ||
1556 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { | ||
1557 | set_c0_config5(MIPS_CONF5_MSAEN); | ||
1558 | enable_fpu_hazard(); | ||
1559 | } | ||
1560 | |||
1561 | __kvm_save_msa(&vcpu->arch); | ||
1562 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA); | ||
1563 | |||
1564 | /* Disable MSA & FPU */ | ||
1565 | disable_msa(); | ||
1566 | if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { | ||
1567 | clear_c0_status(ST0_CU1 | ST0_FR); | ||
1568 | disable_fpu_hazard(); | ||
1569 | } | ||
1570 | vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA); | ||
1571 | } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) { | ||
1572 | if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) { | ||
1573 | set_c0_status(ST0_CU1); | ||
1574 | enable_fpu_hazard(); | ||
1575 | } | ||
1576 | |||
1577 | __kvm_save_fpu(&vcpu->arch); | ||
1578 | vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU; | ||
1579 | trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU); | ||
1580 | |||
1581 | /* Disable FPU */ | ||
1582 | clear_c0_status(ST0_CU1 | ST0_FR); | ||
1583 | disable_fpu_hazard(); | ||
1584 | } | ||
1585 | preempt_enable(); | ||
1586 | } | ||
1587 | |||
1588 | /* | ||
1589 | * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are | ||
1590 | * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP | ||
1591 | * exception if cause bits are set in the value being written. | ||
1592 | */ | ||
1593 | static int kvm_mips_csr_die_notify(struct notifier_block *self, | ||
1594 | unsigned long cmd, void *ptr) | ||
1595 | { | ||
1596 | struct die_args *args = (struct die_args *)ptr; | ||
1597 | struct pt_regs *regs = args->regs; | ||
1598 | unsigned long pc; | ||
1599 | |||
1600 | /* Only interested in FPE and MSAFPE */ | ||
1601 | if (cmd != DIE_FP && cmd != DIE_MSAFP) | ||
1602 | return NOTIFY_DONE; | ||
1603 | |||
1604 | /* Return immediately if guest context isn't active */ | ||
1605 | if (!(current->flags & PF_VCPU)) | ||
1606 | return NOTIFY_DONE; | ||
1607 | |||
1608 | /* Should never get here from user mode */ | ||
1609 | BUG_ON(user_mode(regs)); | ||
1610 | |||
1611 | pc = instruction_pointer(regs); | ||
1612 | switch (cmd) { | ||
1613 | case DIE_FP: | ||
1614 | /* match 2nd instruction in __kvm_restore_fcsr */ | ||
1615 | if (pc != (unsigned long)&__kvm_restore_fcsr + 4) | ||
1616 | return NOTIFY_DONE; | ||
1617 | break; | ||
1618 | case DIE_MSAFP: | ||
1619 | /* match 2nd/3rd instruction in __kvm_restore_msacsr */ | ||
1620 | if (!cpu_has_msa || | ||
1621 | pc < (unsigned long)&__kvm_restore_msacsr + 4 || | ||
1622 | pc > (unsigned long)&__kvm_restore_msacsr + 8) | ||
1623 | return NOTIFY_DONE; | ||
1624 | break; | ||
1625 | } | ||
1626 | |||
1627 | /* Move PC forward a little and continue executing */ | ||
1628 | instruction_pointer(regs) += 4; | ||
1629 | |||
1630 | return NOTIFY_STOP; | ||
1631 | } | ||
1632 | |||
1633 | static struct notifier_block kvm_mips_csr_die_notifier = { | ||
1634 | .notifier_call = kvm_mips_csr_die_notify, | ||
1635 | }; | ||
1636 | |||
1637 | static u32 kvm_default_priority_to_irq[MIPS_EXC_MAX] = { | ||
1638 | [MIPS_EXC_INT_TIMER] = C_IRQ5, | ||
1639 | [MIPS_EXC_INT_IO_1] = C_IRQ0, | ||
1640 | [MIPS_EXC_INT_IPI_1] = C_IRQ1, | ||
1641 | [MIPS_EXC_INT_IPI_2] = C_IRQ2, | ||
1642 | }; | ||
1643 | |||
1644 | static u32 kvm_loongson3_priority_to_irq[MIPS_EXC_MAX] = { | ||
1645 | [MIPS_EXC_INT_TIMER] = C_IRQ5, | ||
1646 | [MIPS_EXC_INT_IO_1] = C_IRQ0, | ||
1647 | [MIPS_EXC_INT_IO_2] = C_IRQ1, | ||
1648 | [MIPS_EXC_INT_IPI_1] = C_IRQ4, | ||
1649 | }; | ||
1650 | |||
1651 | u32 *kvm_priority_to_irq = kvm_default_priority_to_irq; | ||
1652 | |||
1653 | u32 kvm_irq_to_priority(u32 irq) | ||
1654 | { | ||
1655 | int i; | ||
1656 | |||
1657 | for (i = MIPS_EXC_INT_TIMER; i < MIPS_EXC_MAX; i++) { | ||
1658 | if (kvm_priority_to_irq[i] == (1 << (irq + 8))) | ||
1659 | return i; | ||
1660 | } | ||
1661 | |||
1662 | return MIPS_EXC_MAX; | ||
1663 | } | ||
1664 | |||
1665 | static int __init kvm_mips_init(void) | ||
1666 | { | ||
1667 | int ret; | ||
1668 | |||
1669 | if (cpu_has_mmid) { | ||
1670 | pr_warn("KVM does not yet support MMIDs. KVM Disabled\n"); | ||
1671 | return -EOPNOTSUPP; | ||
1672 | } | ||
1673 | |||
1674 | ret = kvm_mips_entry_setup(); | ||
1675 | if (ret) | ||
1676 | return ret; | ||
1677 | |||
1678 | ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE); | ||
1679 | |||
1680 | if (ret) | ||
1681 | return ret; | ||
1682 | |||
1683 | if (boot_cpu_type() == CPU_LOONGSON64) | ||
1684 | kvm_priority_to_irq = kvm_loongson3_priority_to_irq; | ||
1685 | |||
1686 | register_die_notifier(&kvm_mips_csr_die_notifier); | ||
1687 | |||
1688 | return 0; | ||
1689 | } | ||
1690 | |||
1691 | static void __exit kvm_mips_exit(void) | ||
1692 | { | ||
1693 | kvm_exit(); | ||
1694 | |||
1695 | unregister_die_notifier(&kvm_mips_csr_die_notifier); | ||
1696 | } | ||
1697 | |||
1698 | module_init(kvm_mips_init); | ||
1699 | module_exit(kvm_mips_exit); | ||
1700 | |||
1701 | EXPORT_TRACEPOINT_SYMBOL(kvm_exit); | ||
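
As a rough illustration of the register access path implemented above (kvm_mips_get_reg()/kvm_mips_set_reg() behind the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls), here is a minimal userspace sketch that creates a VM and a vCPU and reads the guest PC. It is not part of the file above; it assumes a MIPS host with /dev/kvm available and uapi headers that define KVM_VM_MIPS_AUTO and KVM_REG_MIPS_PC, and error handling is omitted for brevity.

/* Illustrative userspace sketch - not part of arch/mips/kvm/mips.c. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

int main(void)
{
	uint64_t pc = 0;
	struct kvm_one_reg reg;

	int kvm = open("/dev/kvm", O_RDWR);                    /* KVM control device */
	int vm = ioctl(kvm, KVM_CREATE_VM, KVM_VM_MIPS_AUTO);  /* let KVM pick T&E or VZ, cf. kvm_arch_init_vm() */
	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);              /* ends up in kvm_arch_vcpu_create() */

	reg.id = KVM_REG_MIPS_PC;          /* served by kvm_mips_get_reg() above */
	reg.addr = (uintptr_t)&pc;
	if (ioctl(vcpu, KVM_GET_ONE_REG, &reg) == 0)
		printf("guest PC = 0x%llx\n", (unsigned long long)pc);

	return 0;
}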