commit    a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a (patch)
tree      84f21bd0bf7071bc5fc7dd989e77d7ceb5476682 /arch/mips/kernel
author    2025-03-08 22:04:20 +0800
committer 2025-03-08 22:04:20 +0800
Initial commit: OpenHarmony-v4.0-Release
Diffstat (limited to 'arch/mips/kernel')
118 files changed, 40283 insertions, 0 deletions
diff --git a/arch/mips/kernel/.gitignore b/arch/mips/kernel/.gitignore
new file mode 100644
index 000000000..bbb90f92d
--- /dev/null
+++ b/arch/mips/kernel/.gitignore
@@ -0,0 +1,2 @@
1 | # SPDX-License-Identifier: GPL-2.0-only | ||
2 | vmlinux.lds | ||
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
new file mode 100644
index 000000000..2a05b923f
--- /dev/null
+++ b/arch/mips/kernel/Makefile
@@ -0,0 +1,119 @@
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | # | ||
3 | # Makefile for the Linux/MIPS kernel. | ||
4 | # | ||
5 | |||
6 | extra-y := head.o vmlinux.lds | ||
7 | |||
8 | obj-y += branch.o cmpxchg.o elf.o entry.o genex.o idle.o irq.o \ | ||
9 | process.o prom.o ptrace.o reset.o setup.o signal.o \ | ||
10 | syscall.o time.o topology.o traps.o unaligned.o watch.o \ | ||
11 | vdso.o cacheinfo.o | ||
12 | |||
13 | ifdef CONFIG_CPU_R3K_TLB | ||
14 | obj-y += cpu-r3k-probe.o | ||
15 | else | ||
16 | obj-y += cpu-probe.o | ||
17 | endif | ||
18 | |||
19 | ifdef CONFIG_FUNCTION_TRACER | ||
20 | CFLAGS_REMOVE_ftrace.o = -pg | ||
21 | CFLAGS_REMOVE_early_printk.o = -pg | ||
22 | CFLAGS_REMOVE_perf_event.o = -pg | ||
23 | CFLAGS_REMOVE_perf_event_mipsxx.o = -pg | ||
24 | endif | ||
25 | |||
26 | obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o | ||
27 | obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o | ||
28 | obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o | ||
29 | obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o | ||
30 | obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o | ||
31 | obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o | ||
32 | obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o | ||
33 | obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o | ||
34 | obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o | ||
35 | obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o | ||
36 | obj-$(CONFIG_SYNC_R4K) += sync-r4k.o | ||
37 | |||
38 | obj-$(CONFIG_DEBUG_FS) += segment.o | ||
39 | obj-$(CONFIG_STACKTRACE) += stacktrace.o | ||
40 | obj-$(CONFIG_MODULES) += module.o | ||
41 | |||
42 | obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o | ||
43 | obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o | ||
44 | |||
45 | sw-y := r4k_switch.o | ||
46 | sw-$(CONFIG_CPU_R3000) := r2300_switch.o | ||
47 | sw-$(CONFIG_CPU_TX39XX) := r2300_switch.o | ||
48 | sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o | ||
49 | obj-y += $(sw-y) | ||
50 | |||
51 | obj-$(CONFIG_MIPS_FP_SUPPORT) += fpu-probe.o | ||
52 | obj-$(CONFIG_CPU_R2300_FPU) += r2300_fpu.o | ||
53 | obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o | ||
54 | |||
55 | obj-$(CONFIG_SMP) += smp.o | ||
56 | obj-$(CONFIG_SMP_UP) += smp-up.o | ||
57 | obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o bmips_5xxx_init.o | ||
58 | |||
59 | obj-$(CONFIG_MIPS_MT) += mips-mt.o | ||
60 | obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o | ||
61 | obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o | ||
62 | obj-$(CONFIG_MIPS_CMP) += smp-cmp.o | ||
63 | obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o | ||
64 | obj-$(CONFIG_MIPS_CPS_NS16550) += cps-vec-ns16550.o | ||
65 | obj-$(CONFIG_MIPS_SPRAM) += spram.o | ||
66 | |||
67 | obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o | ||
68 | obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o | ||
69 | obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o | ||
70 | obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o | ||
71 | obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o | ||
72 | obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o | ||
73 | |||
74 | obj-$(CONFIG_IRQ_CPU_RM7K) += irq-rm7000.o | ||
75 | obj-$(CONFIG_MIPS_MSC) += irq-msc01.o | ||
76 | obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o | ||
77 | obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o | ||
78 | |||
79 | obj-$(CONFIG_KPROBES) += kprobes.o | ||
80 | obj-$(CONFIG_32BIT) += scall32-o32.o | ||
81 | obj-$(CONFIG_64BIT) += scall64-n64.o | ||
82 | obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o | ||
83 | obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o | ||
84 | obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o | ||
85 | |||
86 | obj-$(CONFIG_KGDB) += kgdb.o | ||
87 | obj-$(CONFIG_PROC_FS) += proc.o | ||
88 | obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o | ||
89 | |||
90 | obj-$(CONFIG_CPU_R4X00_BUGS64) += r4k-bugs64.o | ||
91 | |||
92 | obj-$(CONFIG_I8253) += i8253.o | ||
93 | |||
94 | obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o | ||
95 | |||
96 | obj-$(CONFIG_RELOCATABLE) += relocate.o | ||
97 | |||
98 | obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o | ||
99 | obj-$(CONFIG_CRASH_DUMP) += crash_dump.o | ||
100 | obj-$(CONFIG_EARLY_PRINTK) += early_printk.o | ||
101 | obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o | ||
102 | obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o | ||
103 | obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o | ||
104 | |||
105 | CFLAGS_cpu-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi) | ||
106 | |||
107 | obj-$(CONFIG_PERF_EVENTS) += perf_event.o | ||
108 | obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o | ||
109 | |||
110 | obj-$(CONFIG_JUMP_LABEL) += jump_label.o | ||
111 | obj-$(CONFIG_UPROBES) += uprobes.o | ||
112 | |||
113 | obj-$(CONFIG_MIPS_CM) += mips-cm.o | ||
114 | obj-$(CONFIG_MIPS_CPC) += mips-cpc.o | ||
115 | |||
116 | obj-$(CONFIG_CPU_PM) += pm.o | ||
117 | obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o | ||
118 | |||
119 | CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS) | ||
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
new file mode 100644
index 000000000..aebfda811
--- /dev/null
+++ b/arch/mips/kernel/asm-offsets.c
@@ -0,0 +1,405 @@
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * asm-offsets.c: Calculate pt_regs and task_struct offsets. | ||
4 | * | ||
5 | * Copyright (C) 1996 David S. Miller | ||
6 | * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle | ||
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
8 | * | ||
9 | * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | ||
10 | * Copyright (C) 2000 MIPS Technologies, Inc. | ||
11 | */ | ||
12 | #include <linux/compat.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/kbuild.h> | ||
17 | #include <linux/suspend.h> | ||
18 | #include <asm/cpu-info.h> | ||
19 | #include <asm/pm.h> | ||
20 | #include <asm/ptrace.h> | ||
21 | #include <asm/processor.h> | ||
22 | #include <asm/smp-cps.h> | ||
23 | |||
24 | #include <linux/kvm_host.h> | ||
25 | |||
26 | void output_ptreg_defines(void) | ||
27 | { | ||
28 | COMMENT("MIPS pt_regs offsets."); | ||
29 | OFFSET(PT_R0, pt_regs, regs[0]); | ||
30 | OFFSET(PT_R1, pt_regs, regs[1]); | ||
31 | OFFSET(PT_R2, pt_regs, regs[2]); | ||
32 | OFFSET(PT_R3, pt_regs, regs[3]); | ||
33 | OFFSET(PT_R4, pt_regs, regs[4]); | ||
34 | OFFSET(PT_R5, pt_regs, regs[5]); | ||
35 | OFFSET(PT_R6, pt_regs, regs[6]); | ||
36 | OFFSET(PT_R7, pt_regs, regs[7]); | ||
37 | OFFSET(PT_R8, pt_regs, regs[8]); | ||
38 | OFFSET(PT_R9, pt_regs, regs[9]); | ||
39 | OFFSET(PT_R10, pt_regs, regs[10]); | ||
40 | OFFSET(PT_R11, pt_regs, regs[11]); | ||
41 | OFFSET(PT_R12, pt_regs, regs[12]); | ||
42 | OFFSET(PT_R13, pt_regs, regs[13]); | ||
43 | OFFSET(PT_R14, pt_regs, regs[14]); | ||
44 | OFFSET(PT_R15, pt_regs, regs[15]); | ||
45 | OFFSET(PT_R16, pt_regs, regs[16]); | ||
46 | OFFSET(PT_R17, pt_regs, regs[17]); | ||
47 | OFFSET(PT_R18, pt_regs, regs[18]); | ||
48 | OFFSET(PT_R19, pt_regs, regs[19]); | ||
49 | OFFSET(PT_R20, pt_regs, regs[20]); | ||
50 | OFFSET(PT_R21, pt_regs, regs[21]); | ||
51 | OFFSET(PT_R22, pt_regs, regs[22]); | ||
52 | OFFSET(PT_R23, pt_regs, regs[23]); | ||
53 | OFFSET(PT_R24, pt_regs, regs[24]); | ||
54 | OFFSET(PT_R25, pt_regs, regs[25]); | ||
55 | OFFSET(PT_R26, pt_regs, regs[26]); | ||
56 | OFFSET(PT_R27, pt_regs, regs[27]); | ||
57 | OFFSET(PT_R28, pt_regs, regs[28]); | ||
58 | OFFSET(PT_R29, pt_regs, regs[29]); | ||
59 | OFFSET(PT_R30, pt_regs, regs[30]); | ||
60 | OFFSET(PT_R31, pt_regs, regs[31]); | ||
61 | OFFSET(PT_LO, pt_regs, lo); | ||
62 | OFFSET(PT_HI, pt_regs, hi); | ||
63 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | ||
64 | OFFSET(PT_ACX, pt_regs, acx); | ||
65 | #endif | ||
66 | OFFSET(PT_EPC, pt_regs, cp0_epc); | ||
67 | OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr); | ||
68 | OFFSET(PT_STATUS, pt_regs, cp0_status); | ||
69 | OFFSET(PT_CAUSE, pt_regs, cp0_cause); | ||
70 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
71 | OFFSET(PT_MPL, pt_regs, mpl); | ||
72 | OFFSET(PT_MTP, pt_regs, mtp); | ||
73 | #endif /* CONFIG_CPU_CAVIUM_OCTEON */ | ||
74 | DEFINE(PT_SIZE, sizeof(struct pt_regs)); | ||
75 | BLANK(); | ||
76 | } | ||
77 | |||
78 | void output_task_defines(void) | ||
79 | { | ||
80 | COMMENT("MIPS task_struct offsets."); | ||
81 | OFFSET(TASK_STATE, task_struct, state); | ||
82 | OFFSET(TASK_THREAD_INFO, task_struct, stack); | ||
83 | OFFSET(TASK_FLAGS, task_struct, flags); | ||
84 | OFFSET(TASK_MM, task_struct, mm); | ||
85 | OFFSET(TASK_PID, task_struct, pid); | ||
86 | #if defined(CONFIG_STACKPROTECTOR) | ||
87 | OFFSET(TASK_STACK_CANARY, task_struct, stack_canary); | ||
88 | #endif | ||
89 | DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct)); | ||
90 | BLANK(); | ||
91 | } | ||
92 | |||
93 | void output_thread_info_defines(void) | ||
94 | { | ||
95 | COMMENT("MIPS thread_info offsets."); | ||
96 | OFFSET(TI_TASK, thread_info, task); | ||
97 | OFFSET(TI_FLAGS, thread_info, flags); | ||
98 | OFFSET(TI_TP_VALUE, thread_info, tp_value); | ||
99 | OFFSET(TI_CPU, thread_info, cpu); | ||
100 | OFFSET(TI_PRE_COUNT, thread_info, preempt_count); | ||
101 | OFFSET(TI_ADDR_LIMIT, thread_info, addr_limit); | ||
102 | OFFSET(TI_REGS, thread_info, regs); | ||
103 | DEFINE(_THREAD_SIZE, THREAD_SIZE); | ||
104 | DEFINE(_THREAD_MASK, THREAD_MASK); | ||
105 | DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE); | ||
106 | DEFINE(_IRQ_STACK_START, IRQ_STACK_START); | ||
107 | BLANK(); | ||
108 | } | ||
109 | |||
110 | void output_thread_defines(void) | ||
111 | { | ||
112 | COMMENT("MIPS specific thread_struct offsets."); | ||
113 | OFFSET(THREAD_REG16, task_struct, thread.reg16); | ||
114 | OFFSET(THREAD_REG17, task_struct, thread.reg17); | ||
115 | OFFSET(THREAD_REG18, task_struct, thread.reg18); | ||
116 | OFFSET(THREAD_REG19, task_struct, thread.reg19); | ||
117 | OFFSET(THREAD_REG20, task_struct, thread.reg20); | ||
118 | OFFSET(THREAD_REG21, task_struct, thread.reg21); | ||
119 | OFFSET(THREAD_REG22, task_struct, thread.reg22); | ||
120 | OFFSET(THREAD_REG23, task_struct, thread.reg23); | ||
121 | OFFSET(THREAD_REG29, task_struct, thread.reg29); | ||
122 | OFFSET(THREAD_REG30, task_struct, thread.reg30); | ||
123 | OFFSET(THREAD_REG31, task_struct, thread.reg31); | ||
124 | OFFSET(THREAD_STATUS, task_struct, | ||
125 | thread.cp0_status); | ||
126 | |||
127 | OFFSET(THREAD_BVADDR, task_struct, \ | ||
128 | thread.cp0_badvaddr); | ||
129 | OFFSET(THREAD_BUADDR, task_struct, \ | ||
130 | thread.cp0_baduaddr); | ||
131 | OFFSET(THREAD_ECODE, task_struct, \ | ||
132 | thread.error_code); | ||
133 | OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr); | ||
134 | BLANK(); | ||
135 | } | ||
136 | |||
137 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
138 | void output_thread_fpu_defines(void) | ||
139 | { | ||
140 | OFFSET(THREAD_FPU, task_struct, thread.fpu); | ||
141 | |||
142 | OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]); | ||
143 | OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]); | ||
144 | OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]); | ||
145 | OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]); | ||
146 | OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]); | ||
147 | OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]); | ||
148 | OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]); | ||
149 | OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]); | ||
150 | OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]); | ||
151 | OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]); | ||
152 | OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]); | ||
153 | OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]); | ||
154 | OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]); | ||
155 | OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]); | ||
156 | OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]); | ||
157 | OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]); | ||
158 | OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]); | ||
159 | OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]); | ||
160 | OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]); | ||
161 | OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]); | ||
162 | OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]); | ||
163 | OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]); | ||
164 | OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]); | ||
165 | OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]); | ||
166 | OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]); | ||
167 | OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]); | ||
168 | OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]); | ||
169 | OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]); | ||
170 | OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]); | ||
171 | OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]); | ||
172 | OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]); | ||
173 | OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]); | ||
174 | |||
175 | OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31); | ||
176 | OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr); | ||
177 | BLANK(); | ||
178 | } | ||
179 | #endif | ||
180 | |||
181 | void output_mm_defines(void) | ||
182 | { | ||
183 | COMMENT("Size of struct page"); | ||
184 | DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page)); | ||
185 | BLANK(); | ||
186 | COMMENT("Linux mm_struct offsets."); | ||
187 | OFFSET(MM_USERS, mm_struct, mm_users); | ||
188 | OFFSET(MM_PGD, mm_struct, pgd); | ||
189 | OFFSET(MM_CONTEXT, mm_struct, context); | ||
190 | BLANK(); | ||
191 | DEFINE(_PGD_T_SIZE, sizeof(pgd_t)); | ||
192 | DEFINE(_PMD_T_SIZE, sizeof(pmd_t)); | ||
193 | DEFINE(_PTE_T_SIZE, sizeof(pte_t)); | ||
194 | BLANK(); | ||
195 | DEFINE(_PGD_T_LOG2, PGD_T_LOG2); | ||
196 | #ifndef __PAGETABLE_PMD_FOLDED | ||
197 | DEFINE(_PMD_T_LOG2, PMD_T_LOG2); | ||
198 | #endif | ||
199 | DEFINE(_PTE_T_LOG2, PTE_T_LOG2); | ||
200 | BLANK(); | ||
201 | DEFINE(_PGD_ORDER, PGD_ORDER); | ||
202 | #ifndef __PAGETABLE_PMD_FOLDED | ||
203 | DEFINE(_PMD_ORDER, PMD_ORDER); | ||
204 | #endif | ||
205 | DEFINE(_PTE_ORDER, PTE_ORDER); | ||
206 | BLANK(); | ||
207 | DEFINE(_PMD_SHIFT, PMD_SHIFT); | ||
208 | DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT); | ||
209 | BLANK(); | ||
210 | DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD); | ||
211 | DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD); | ||
212 | DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE); | ||
213 | BLANK(); | ||
214 | DEFINE(_PAGE_SHIFT, PAGE_SHIFT); | ||
215 | DEFINE(_PAGE_SIZE, PAGE_SIZE); | ||
216 | BLANK(); | ||
217 | } | ||
218 | |||
219 | #ifdef CONFIG_32BIT | ||
220 | void output_sc_defines(void) | ||
221 | { | ||
222 | COMMENT("Linux sigcontext offsets."); | ||
223 | OFFSET(SC_REGS, sigcontext, sc_regs); | ||
224 | OFFSET(SC_FPREGS, sigcontext, sc_fpregs); | ||
225 | OFFSET(SC_ACX, sigcontext, sc_acx); | ||
226 | OFFSET(SC_MDHI, sigcontext, sc_mdhi); | ||
227 | OFFSET(SC_MDLO, sigcontext, sc_mdlo); | ||
228 | OFFSET(SC_PC, sigcontext, sc_pc); | ||
229 | OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); | ||
230 | OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir); | ||
231 | OFFSET(SC_HI1, sigcontext, sc_hi1); | ||
232 | OFFSET(SC_LO1, sigcontext, sc_lo1); | ||
233 | OFFSET(SC_HI2, sigcontext, sc_hi2); | ||
234 | OFFSET(SC_LO2, sigcontext, sc_lo2); | ||
235 | OFFSET(SC_HI3, sigcontext, sc_hi3); | ||
236 | OFFSET(SC_LO3, sigcontext, sc_lo3); | ||
237 | BLANK(); | ||
238 | } | ||
239 | #endif | ||
240 | |||
241 | #ifdef CONFIG_64BIT | ||
242 | void output_sc_defines(void) | ||
243 | { | ||
244 | COMMENT("Linux sigcontext offsets."); | ||
245 | OFFSET(SC_REGS, sigcontext, sc_regs); | ||
246 | OFFSET(SC_FPREGS, sigcontext, sc_fpregs); | ||
247 | OFFSET(SC_MDHI, sigcontext, sc_mdhi); | ||
248 | OFFSET(SC_MDLO, sigcontext, sc_mdlo); | ||
249 | OFFSET(SC_PC, sigcontext, sc_pc); | ||
250 | OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr); | ||
251 | BLANK(); | ||
252 | } | ||
253 | #endif | ||
254 | |||
255 | void output_signal_defined(void) | ||
256 | { | ||
257 | COMMENT("Linux signal numbers."); | ||
258 | DEFINE(_SIGHUP, SIGHUP); | ||
259 | DEFINE(_SIGINT, SIGINT); | ||
260 | DEFINE(_SIGQUIT, SIGQUIT); | ||
261 | DEFINE(_SIGILL, SIGILL); | ||
262 | DEFINE(_SIGTRAP, SIGTRAP); | ||
263 | DEFINE(_SIGIOT, SIGIOT); | ||
264 | DEFINE(_SIGABRT, SIGABRT); | ||
265 | DEFINE(_SIGEMT, SIGEMT); | ||
266 | DEFINE(_SIGFPE, SIGFPE); | ||
267 | DEFINE(_SIGKILL, SIGKILL); | ||
268 | DEFINE(_SIGBUS, SIGBUS); | ||
269 | DEFINE(_SIGSEGV, SIGSEGV); | ||
270 | DEFINE(_SIGSYS, SIGSYS); | ||
271 | DEFINE(_SIGPIPE, SIGPIPE); | ||
272 | DEFINE(_SIGALRM, SIGALRM); | ||
273 | DEFINE(_SIGTERM, SIGTERM); | ||
274 | DEFINE(_SIGUSR1, SIGUSR1); | ||
275 | DEFINE(_SIGUSR2, SIGUSR2); | ||
276 | DEFINE(_SIGCHLD, SIGCHLD); | ||
277 | DEFINE(_SIGPWR, SIGPWR); | ||
278 | DEFINE(_SIGWINCH, SIGWINCH); | ||
279 | DEFINE(_SIGURG, SIGURG); | ||
280 | DEFINE(_SIGIO, SIGIO); | ||
281 | DEFINE(_SIGSTOP, SIGSTOP); | ||
282 | DEFINE(_SIGTSTP, SIGTSTP); | ||
283 | DEFINE(_SIGCONT, SIGCONT); | ||
284 | DEFINE(_SIGTTIN, SIGTTIN); | ||
285 | DEFINE(_SIGTTOU, SIGTTOU); | ||
286 | DEFINE(_SIGVTALRM, SIGVTALRM); | ||
287 | DEFINE(_SIGPROF, SIGPROF); | ||
288 | DEFINE(_SIGXCPU, SIGXCPU); | ||
289 | DEFINE(_SIGXFSZ, SIGXFSZ); | ||
290 | BLANK(); | ||
291 | } | ||
292 | |||
293 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
294 | void output_octeon_cop2_state_defines(void) | ||
295 | { | ||
296 | COMMENT("Octeon specific octeon_cop2_state offsets."); | ||
297 | OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv); | ||
298 | OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length); | ||
299 | OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly); | ||
300 | OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat); | ||
301 | OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv); | ||
302 | OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key); | ||
303 | OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result); | ||
304 | OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0); | ||
305 | OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv); | ||
306 | OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key); | ||
307 | OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen); | ||
308 | OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result); | ||
309 | OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult); | ||
310 | OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly); | ||
311 | OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result); | ||
312 | OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw); | ||
313 | OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw); | ||
314 | OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3); | ||
315 | OFFSET(THREAD_CP2, task_struct, thread.cp2); | ||
316 | OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg); | ||
317 | BLANK(); | ||
318 | } | ||
319 | #endif | ||
320 | |||
321 | #ifdef CONFIG_HIBERNATION | ||
322 | void output_pbe_defines(void) | ||
323 | { | ||
324 | COMMENT(" Linux struct pbe offsets. "); | ||
325 | OFFSET(PBE_ADDRESS, pbe, address); | ||
326 | OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address); | ||
327 | OFFSET(PBE_NEXT, pbe, next); | ||
328 | DEFINE(PBE_SIZE, sizeof(struct pbe)); | ||
329 | BLANK(); | ||
330 | } | ||
331 | #endif | ||
332 | |||
333 | #ifdef CONFIG_CPU_PM | ||
334 | void output_pm_defines(void) | ||
335 | { | ||
336 | COMMENT(" PM offsets. "); | ||
337 | #ifdef CONFIG_EVA | ||
338 | OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]); | ||
339 | OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]); | ||
340 | OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]); | ||
341 | #endif | ||
342 | OFFSET(SSS_SP, mips_static_suspend_state, sp); | ||
343 | BLANK(); | ||
344 | } | ||
345 | #endif | ||
346 | |||
347 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
348 | void output_kvm_defines(void) | ||
349 | { | ||
350 | COMMENT(" KVM/MIPS Specific offsets. "); | ||
351 | |||
352 | OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]); | ||
353 | OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]); | ||
354 | OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]); | ||
355 | OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]); | ||
356 | OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]); | ||
357 | OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]); | ||
358 | OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]); | ||
359 | OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]); | ||
360 | OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]); | ||
361 | OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]); | ||
362 | OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]); | ||
363 | OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]); | ||
364 | OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]); | ||
365 | OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]); | ||
366 | OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]); | ||
367 | OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]); | ||
368 | OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]); | ||
369 | OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]); | ||
370 | OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]); | ||
371 | OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]); | ||
372 | OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]); | ||
373 | OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]); | ||
374 | OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]); | ||
375 | OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]); | ||
376 | OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]); | ||
377 | OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]); | ||
378 | OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]); | ||
379 | OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]); | ||
380 | OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]); | ||
381 | OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]); | ||
382 | OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]); | ||
383 | OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]); | ||
384 | |||
385 | OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31); | ||
386 | OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr); | ||
387 | BLANK(); | ||
388 | } | ||
389 | #endif | ||
390 | |||
391 | #ifdef CONFIG_MIPS_CPS | ||
392 | void output_cps_defines(void) | ||
393 | { | ||
394 | COMMENT(" MIPS CPS offsets. "); | ||
395 | |||
396 | OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask); | ||
397 | OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config); | ||
398 | DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config)); | ||
399 | |||
400 | OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc); | ||
401 | OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp); | ||
402 | OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp); | ||
403 | DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config)); | ||
404 | } | ||
405 | #endif | ||
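
The OFFSET()/DEFINE() calls above never run at boot: kbuild compiles this file to assembly and scrapes the emitted markers into the generated asm-offsets.h. A minimal sketch of that mechanism, loosely following the usual <linux/kbuild.h> definitions; the struct name `pt_regs_demo` and the symbol `PT_LO_DEMO` are illustrative only:

```c
/* Sketch of how kbuild turns OFFSET()/DEFINE() into asm-offsets.h.
 * pt_regs_demo and PT_LO_DEMO are hypothetical names for illustration. */
#include <stddef.h>

struct pt_regs_demo {
	unsigned long regs[32];
	unsigned long lo, hi;
};

/* Emit an "->SYM <value>" marker into the generated .s file; a sed
 * script in the kernel build rewrites it as "#define SYM <value>". */
#define DEFINE(sym, val) \
	asm volatile("\n.ascii \"->" #sym " %0\"" : : "i" (val))
#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

void output_demo_defines(void)
{
	OFFSET(PT_LO_DEMO, pt_regs_demo, lo); /* e.g. "#define PT_LO_DEMO 256" on 64-bit */
}
```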
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
new file mode 100644
index 000000000..c4441416e
--- /dev/null
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -0,0 +1,113 @@
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Support for n32 Linux/MIPS ELF binaries. | ||
4 | * Author: Ralf Baechle (ralf@linux-mips.org) | ||
5 | * | ||
6 | * Copyright (C) 1999, 2001 Ralf Baechle | ||
7 | * Copyright (C) 1999, 2001 Silicon Graphics, Inc. | ||
8 | * | ||
9 | * Heavily inspired by the 32-bit Sparc compat code which is | ||
10 | * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com) | ||
11 | * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
12 | */ | ||
13 | |||
14 | #define ELF_ARCH EM_MIPS | ||
15 | #define ELF_CLASS ELFCLASS32 | ||
16 | #ifdef __MIPSEB__ | ||
17 | #define ELF_DATA ELFDATA2MSB | ||
18 | #else /* __MIPSEL__ */ | ||
19 | #define ELF_DATA ELFDATA2LSB | ||
20 | #endif | ||
21 | |||
22 | /* ELF register definitions */ | ||
23 | #define ELF_NGREG 45 | ||
24 | #define ELF_NFPREG 33 | ||
25 | |||
26 | typedef unsigned long elf_greg_t; | ||
27 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
28 | |||
29 | typedef double elf_fpreg_t; | ||
30 | typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | ||
31 | |||
32 | /* | ||
33 | * This is used to ensure we don't load something for the wrong architecture. | ||
34 | */ | ||
35 | #define elf_check_arch elfn32_check_arch | ||
36 | |||
37 | #define TASK32_SIZE 0x7fff8000UL | ||
38 | #undef ELF_ET_DYN_BASE | ||
39 | #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) | ||
40 | |||
41 | #include <asm/processor.h> | ||
42 | #include <linux/elfcore.h> | ||
43 | #include <linux/compat.h> | ||
44 | #include <linux/math64.h> | ||
45 | |||
46 | #define elf_prstatus elf_prstatus32 | ||
47 | struct elf_prstatus32 | ||
48 | { | ||
49 | struct elf_siginfo pr_info; /* Info associated with signal */ | ||
50 | short pr_cursig; /* Current signal */ | ||
51 | unsigned int pr_sigpend; /* Set of pending signals */ | ||
52 | unsigned int pr_sighold; /* Set of held signals */ | ||
53 | pid_t pr_pid; | ||
54 | pid_t pr_ppid; | ||
55 | pid_t pr_pgrp; | ||
56 | pid_t pr_sid; | ||
57 | struct old_timeval32 pr_utime; /* User time */ | ||
58 | struct old_timeval32 pr_stime; /* System time */ | ||
59 | struct old_timeval32 pr_cutime;/* Cumulative user time */ | ||
60 | struct old_timeval32 pr_cstime;/* Cumulative system time */ | ||
61 | elf_gregset_t pr_reg; /* GP registers */ | ||
62 | int pr_fpvalid; /* True if math co-processor being used. */ | ||
63 | }; | ||
64 | |||
65 | #define elf_prpsinfo elf_prpsinfo32 | ||
66 | struct elf_prpsinfo32 | ||
67 | { | ||
68 | char pr_state; /* numeric process state */ | ||
69 | char pr_sname; /* char for pr_state */ | ||
70 | char pr_zomb; /* zombie */ | ||
71 | char pr_nice; /* nice val */ | ||
72 | unsigned int pr_flag; /* flags */ | ||
73 | __kernel_uid_t pr_uid; | ||
74 | __kernel_gid_t pr_gid; | ||
75 | pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; | ||
76 | /* Lots missing */ | ||
77 | char pr_fname[16]; /* filename of executable */ | ||
78 | char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ | ||
79 | }; | ||
80 | |||
81 | #define elf_caddr_t u32 | ||
82 | #define init_elf_binfmt init_elfn32_binfmt | ||
83 | |||
84 | #define jiffies_to_timeval jiffies_to_old_timeval32 | ||
85 | static __inline__ void | ||
86 | jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value) | ||
87 | { | ||
88 | /* | ||
89 | * Convert jiffies to nanoseconds and separate with | ||
90 | * one divide. | ||
91 | */ | ||
92 | u64 nsec = (u64)jiffies * TICK_NSEC; | ||
93 | u32 rem; | ||
94 | value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); | ||
95 | value->tv_usec = rem / NSEC_PER_USEC; | ||
96 | } | ||
97 | |||
98 | #define ELF_CORE_EFLAGS EF_MIPS_ABI2 | ||
99 | |||
100 | #undef TASK_SIZE | ||
101 | #define TASK_SIZE TASK_SIZE32 | ||
102 | |||
103 | #undef ns_to_kernel_old_timeval | ||
104 | #define ns_to_kernel_old_timeval ns_to_old_timeval32 | ||
105 | |||
106 | /* | ||
107 | * Some data types as stored in coredump. | ||
108 | */ | ||
109 | #define user_long_t compat_long_t | ||
110 | #define user_siginfo_t compat_siginfo_t | ||
111 | #define copy_siginfo_to_external copy_siginfo_to_external32 | ||
112 | |||
113 | #include "../../../fs/binfmt_elf.c" | ||
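
The jiffies_to_old_timeval32() helper above (and its twin in binfmt_elfo32.c) trades two per-field divisions for a single 64-bit divide. A standalone user-space sketch of the same arithmetic; the TICK_NSEC value here assumes HZ=100 and is illustrative:

```c
/* User-space sketch of the conversion above; TICK_NSEC assumes HZ=100. */
#include <stdint.h>
#include <stdio.h>

#define TICK_NSEC     10000000ULL	/* ns per jiffy at HZ=100 (assumption) */
#define NSEC_PER_SEC  1000000000ULL
#define NSEC_PER_USEC 1000ULL

int main(void)
{
	unsigned long jiffies = 12345;
	uint64_t nsec = (uint64_t)jiffies * TICK_NSEC;
	uint64_t sec = nsec / NSEC_PER_SEC;	/* div_u64_rem() fuses this */
	uint32_t rem = (uint32_t)(nsec % NSEC_PER_SEC);	/* ...and this */

	/* 12345 jiffies -> 123 sec, 450000 usec */
	printf("%llu sec, %u usec\n", (unsigned long long)sec,
	       (unsigned int)(rem / NSEC_PER_USEC));
	return 0;
}
```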
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
new file mode 100644
index 000000000..7b2a23f48
--- /dev/null
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -0,0 +1,116 @@
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Support for o32 Linux/MIPS ELF binaries. | ||
4 | * Author: Ralf Baechle (ralf@linux-mips.org) | ||
5 | * | ||
6 | * Copyright (C) 1999, 2001 Ralf Baechle | ||
7 | * Copyright (C) 1999, 2001 Silicon Graphics, Inc. | ||
8 | * | ||
9 | * Heavily inspired by the 32-bit Sparc compat code which is | ||
10 | * Copyright (C) 1995, 1996, 1997, 1998 David S. Miller (davem@redhat.com) | ||
11 | * Copyright (C) 1995, 1996, 1997, 1998 Jakub Jelinek (jj@ultra.linux.cz) | ||
12 | */ | ||
13 | |||
14 | #define ELF_ARCH EM_MIPS | ||
15 | #define ELF_CLASS ELFCLASS32 | ||
16 | #ifdef __MIPSEB__ | ||
17 | #define ELF_DATA ELFDATA2MSB | ||
18 | #else /* __MIPSEL__ */ | ||
19 | #define ELF_DATA ELFDATA2LSB | ||
20 | #endif | ||
21 | |||
22 | /* ELF register definitions */ | ||
23 | #define ELF_NGREG 45 | ||
24 | #define ELF_NFPREG 33 | ||
25 | |||
26 | typedef unsigned int elf_greg_t; | ||
27 | typedef elf_greg_t elf_gregset_t[ELF_NGREG]; | ||
28 | |||
29 | typedef double elf_fpreg_t; | ||
30 | typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG]; | ||
31 | |||
32 | /* | ||
33 | * This is used to ensure we don't load something for the wrong architecture. | ||
34 | */ | ||
35 | #define elf_check_arch elfo32_check_arch | ||
36 | |||
37 | #ifdef CONFIG_KVM_GUEST | ||
38 | #define TASK32_SIZE 0x3fff8000UL | ||
39 | #else | ||
40 | #define TASK32_SIZE 0x7fff8000UL | ||
41 | #endif | ||
42 | #undef ELF_ET_DYN_BASE | ||
43 | #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) | ||
44 | |||
45 | #include <asm/processor.h> | ||
46 | |||
47 | #include <linux/elfcore.h> | ||
48 | #include <linux/compat.h> | ||
49 | #include <linux/math64.h> | ||
50 | |||
51 | #define elf_prstatus elf_prstatus32 | ||
52 | struct elf_prstatus32 | ||
53 | { | ||
54 | struct elf_siginfo pr_info; /* Info associated with signal */ | ||
55 | short pr_cursig; /* Current signal */ | ||
56 | unsigned int pr_sigpend; /* Set of pending signals */ | ||
57 | unsigned int pr_sighold; /* Set of held signals */ | ||
58 | pid_t pr_pid; | ||
59 | pid_t pr_ppid; | ||
60 | pid_t pr_pgrp; | ||
61 | pid_t pr_sid; | ||
62 | struct old_timeval32 pr_utime; /* User time */ | ||
63 | struct old_timeval32 pr_stime; /* System time */ | ||
64 | struct old_timeval32 pr_cutime;/* Cumulative user time */ | ||
65 | struct old_timeval32 pr_cstime;/* Cumulative system time */ | ||
66 | elf_gregset_t pr_reg; /* GP registers */ | ||
67 | int pr_fpvalid; /* True if math co-processor being used. */ | ||
68 | }; | ||
69 | |||
70 | #define elf_prpsinfo elf_prpsinfo32 | ||
71 | struct elf_prpsinfo32 | ||
72 | { | ||
73 | char pr_state; /* numeric process state */ | ||
74 | char pr_sname; /* char for pr_state */ | ||
75 | char pr_zomb; /* zombie */ | ||
76 | char pr_nice; /* nice val */ | ||
77 | unsigned int pr_flag; /* flags */ | ||
78 | __kernel_uid_t pr_uid; | ||
79 | __kernel_gid_t pr_gid; | ||
80 | pid_t pr_pid, pr_ppid, pr_pgrp, pr_sid; | ||
81 | /* Lots missing */ | ||
82 | char pr_fname[16]; /* filename of executable */ | ||
83 | char pr_psargs[ELF_PRARGSZ]; /* initial part of arg list */ | ||
84 | }; | ||
85 | |||
86 | #define elf_caddr_t u32 | ||
87 | #define init_elf_binfmt init_elf32_binfmt | ||
88 | |||
89 | #define jiffies_to_timeval jiffies_to_old_timeval32 | ||
90 | static inline void | ||
91 | jiffies_to_old_timeval32(unsigned long jiffies, struct old_timeval32 *value) | ||
92 | { | ||
93 | /* | ||
94 | * Convert jiffies to nanoseconds and separate with | ||
95 | * one divide. | ||
96 | */ | ||
97 | u64 nsec = (u64)jiffies * TICK_NSEC; | ||
98 | u32 rem; | ||
99 | value->tv_sec = div_u64_rem(nsec, NSEC_PER_SEC, &rem); | ||
100 | value->tv_usec = rem / NSEC_PER_USEC; | ||
101 | } | ||
102 | |||
103 | #undef TASK_SIZE | ||
104 | #define TASK_SIZE TASK_SIZE32 | ||
105 | |||
106 | #undef ns_to_kernel_old_timeval | ||
107 | #define ns_to_kernel_old_timeval ns_to_old_timeval32 | ||
108 | |||
109 | /* | ||
110 | * Some data types as stored in coredump. | ||
111 | */ | ||
112 | #define user_long_t compat_long_t | ||
113 | #define user_siginfo_t compat_siginfo_t | ||
114 | #define copy_siginfo_to_external copy_siginfo_to_external32 | ||
115 | |||
116 | #include "../../../fs/binfmt_elf.c" | ||
diff --git a/arch/mips/kernel/bmips_5xxx_init.S b/arch/mips/kernel/bmips_5xxx_init.S
new file mode 100644
index 000000000..9e422d186
--- /dev/null
+++ b/arch/mips/kernel/bmips_5xxx_init.S
@@ -0,0 +1,747 @@
1 | |||
2 | /* | ||
3 | * This file is subject to the terms and conditions of the GNU General Public | ||
4 | * License. See the file "COPYING" in the main directory of this archive | ||
5 | * for more details. | ||
6 | * | ||
7 | * Copyright (C) 2011-2012 by Broadcom Corporation | ||
8 | * | ||
9 | * Init for bmips 5000. | ||
10 | * Used to init second core in dual core 5000's. | ||
11 | */ | ||
12 | |||
13 | #include <linux/init.h> | ||
14 | |||
15 | #include <asm/asm.h> | ||
16 | #include <asm/asmmacro.h> | ||
17 | #include <asm/cacheops.h> | ||
18 | #include <asm/regdef.h> | ||
19 | #include <asm/mipsregs.h> | ||
20 | #include <asm/stackframe.h> | ||
21 | #include <asm/addrspace.h> | ||
22 | #include <asm/hazards.h> | ||
23 | #include <asm/bmips.h> | ||
24 | |||
25 | #ifdef CONFIG_CPU_BMIPS5000 | ||
26 | |||
27 | |||
28 | #define cacheop(kva, size, linesize, op) \ | ||
29 | .set noreorder ; \ | ||
30 | addu t1, kva, size ; \ | ||
31 | subu t2, linesize, 1 ; \ | ||
32 | not t2 ; \ | ||
33 | and t0, kva, t2 ; \ | ||
34 | addiu t1, t1, -1 ; \ | ||
35 | and t1, t2 ; \ | ||
36 | 9: cache op, 0(t0) ; \ | ||
37 | bne t0, t1, 9b ; \ | ||
38 | addu t0, linesize ; \ | ||
39 | .set reorder ; | ||
40 | |||
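The loop bounds in the cacheop() macro above are easier to follow in C. A sketch of the same address math, assuming linesize is a power of two; the function and parameter names are made up for illustration:

```c
/* C rendering of cacheop(): align the start address and the last byte of
 * the range down to a line boundary, then issue one op per cache line.
 * Assumes linesize is a power of two; cacheop_range is a made-up name. */
static inline void cacheop_range(unsigned long kva, unsigned long size,
				 unsigned long linesize,
				 void (*op)(unsigned long line_addr))
{
	unsigned long mask = ~(linesize - 1);
	unsigned long addr = kva & mask;		/* first line (t0) */
	unsigned long last = (kva + size - 1) & mask;	/* last line (t1)  */

	for (;;) {
		op(addr);			/* "cache op, 0(t0)" */
		if (addr == last)
			break;
		addr += linesize;
	}
}
```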
41 | |||
42 | |||
43 | #define IS_SHIFT 22 | ||
44 | #define IL_SHIFT 19 | ||
45 | #define IA_SHIFT 16 | ||
46 | #define DS_SHIFT 13 | ||
47 | #define DL_SHIFT 10 | ||
48 | #define DA_SHIFT 7 | ||
49 | #define IS_MASK 7 | ||
50 | #define IL_MASK 7 | ||
51 | #define IA_MASK 7 | ||
52 | #define DS_MASK 7 | ||
53 | #define DL_MASK 7 | ||
54 | #define DA_MASK 7 | ||
55 | #define ICE_MASK 0x80000000 | ||
56 | #define DCE_MASK 0x40000000 | ||
57 | |||
58 | #define CP0_BRCM_CONFIG0 $22, 0 | ||
59 | #define CP0_BRCM_MODE $22, 1 | ||
60 | #define CP0_CONFIG_K0_MASK 7 | ||
61 | |||
62 | #define CP0_ICACHE_TAG_LO $28 | ||
63 | #define CP0_ICACHE_DATA_LO $28, 1 | ||
64 | #define CP0_DCACHE_TAG_LO $28, 2 | ||
65 | #define CP0_D_SEC_CACHE_DATA_LO $28, 3 | ||
66 | #define CP0_ICACHE_TAG_HI $29 | ||
67 | #define CP0_ICACHE_DATA_HI $29, 1 | ||
68 | #define CP0_DCACHE_TAG_HI $29, 2 | ||
69 | |||
70 | #define CP0_BRCM_MODE_Luc_MASK (1 << 11) | ||
71 | #define CP0_BRCM_CONFIG0_CWF_MASK (1 << 20) | ||
72 | #define CP0_BRCM_CONFIG0_TSE_MASK (1 << 19) | ||
73 | #define CP0_BRCM_MODE_SET_MASK (1 << 7) | ||
74 | #define CP0_BRCM_MODE_ClkRATIO_MASK (7 << 4) | ||
75 | #define CP0_BRCM_MODE_BrPRED_MASK (3 << 24) | ||
76 | #define CP0_BRCM_MODE_BrPRED_SHIFT 24 | ||
77 | #define CP0_BRCM_MODE_BrHIST_MASK (0x1f << 20) | ||
78 | #define CP0_BRCM_MODE_BrHIST_SHIFT 20 | ||
79 | |||
80 | /* ZSC L2 Cache Register Access Register Definitions */ | ||
81 | #define BRCM_ZSC_ALL_REGS_SELECT 0x7 << 24 | ||
82 | |||
83 | #define BRCM_ZSC_CONFIG_REG 0 << 3 | ||
84 | #define BRCM_ZSC_REQ_BUFFER_REG 2 << 3 | ||
85 | #define BRCM_ZSC_RBUS_ADDR_MAPPING_REG0 4 << 3 | ||
86 | #define BRCM_ZSC_RBUS_ADDR_MAPPING_REG1 6 << 3 | ||
87 | #define BRCM_ZSC_RBUS_ADDR_MAPPING_REG2 8 << 3 | ||
88 | |||
89 | #define BRCM_ZSC_SCB0_ADDR_MAPPING_REG0 0xa << 3 | ||
90 | #define BRCM_ZSC_SCB0_ADDR_MAPPING_REG1 0xc << 3 | ||
91 | |||
92 | #define BRCM_ZSC_SCB1_ADDR_MAPPING_REG0 0xe << 3 | ||
93 | #define BRCM_ZSC_SCB1_ADDR_MAPPING_REG1 0x10 << 3 | ||
94 | |||
95 | #define BRCM_ZSC_CONFIG_LMB1En 1 << (15) | ||
96 | #define BRCM_ZSC_CONFIG_LMB0En 1 << (14) | ||
97 | |||
98 | /* branch prediction values */ | ||
99 | |||
100 | #define BRCM_BrPRED_ALL_TAKEN (0x0) | ||
101 | #define BRCM_BrPRED_ALL_NOT_TAKEN (0x1) | ||
102 | #define BRCM_BrPRED_BHT_ENABLE (0x2) | ||
103 | #define BRCM_BrPRED_PREDICT_BACKWARD (0x3) | ||
104 | |||
105 | |||
106 | |||
107 | .align 2 | ||
108 | /* | ||
109 | * Function: size_i_cache | ||
110 | * Arguments: None | ||
111 | * Returns: v0 = i cache size, v1 = I cache line size | ||
112 | * Description: compute the I-cache size and I-cache line size | ||
113 | * Trashes: v0, v1, a0, t0 | ||
114 | * | ||
115 | * pseudo code: | ||
116 | * | ||
117 | */ | ||
118 | |||
119 | LEAF(size_i_cache) | ||
120 | .set noreorder | ||
121 | |||
122 | mfc0 a0, CP0_CONFIG, 1 | ||
123 | move t0, a0 | ||
124 | |||
125 | /* | ||
126 | * Determine sets per way: IS | ||
127 | * | ||
128 | * This field contains the number of sets (i.e., indices) per way of | ||
129 | * the instruction cache: | ||
130 | * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k | ||
131 | * vi) 0x5 - 0x7: Reserved. | ||
132 | */ | ||
133 | |||
134 | srl a0, a0, IS_SHIFT | ||
135 | and a0, a0, IS_MASK | ||
136 | |||
137 | /* sets per way = (64<<IS) */ | ||
138 | |||
139 | li v0, 0x40 | ||
140 | sllv v0, v0, a0 | ||
141 | |||
142 | /* | ||
143 | * Determine line size | ||
144 | * | ||
145 | * This field contains the line size of the instruction cache: | ||
146 | * i) 0x0: No I-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes, | ||
147 | * iv) 0x5: 64 bytes, v) the rest: Reserved. | ||
148 | */ | ||
149 | |||
150 | move a0, t0 | ||
151 | |||
152 | srl a0, a0, IL_SHIFT | ||
153 | and a0, a0, IL_MASK | ||
154 | |||
155 | beqz a0, no_i_cache | ||
156 | nop | ||
157 | |||
158 | /* line size = 2 ^ (IL+1) */ | ||
159 | |||
160 | addi a0, a0, 1 | ||
161 | li v1, 1 | ||
162 | sll v1, v1, a0 | ||
163 | |||
164 | /* v0 now has sets per way; multiply it by the line size | ||
165 | * to get the set size | ||
166 | */ | ||
167 | |||
168 | sll v0, v0, a0 | ||
169 | |||
170 | /* | ||
171 | * Determine set associativity | ||
172 | * | ||
173 | * This field contains the set associativity of the instruction cache. | ||
174 | * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3: | ||
175 | * 4-way, v) 0x4 - 0x7: Reserved. | ||
176 | */ | ||
177 | |||
178 | move a0, t0 | ||
179 | |||
180 | srl a0, a0, IA_SHIFT | ||
181 | and a0, a0, IA_MASK | ||
182 | addi a0, a0, 0x1 | ||
183 | |||
184 | /* v0 has the set size, multiply it by | ||
185 | * set associativity to get the cache size | ||
186 | */ | ||
187 | |||
188 | multu v0, a0 /*multu is interlocked, so no need to insert nops */ | ||
189 | mflo v0 | ||
190 | b 1f | ||
191 | nop | ||
192 | |||
193 | no_i_cache: | ||
194 | move v0, zero | ||
195 | move v1, zero | ||
196 | 1: | ||
197 | jr ra | ||
198 | nop | ||
199 | .set reorder | ||
200 | |||
201 | END(size_i_cache) | ||
202 | |||
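The same CP0 Config1 decode in C form may be easier to check against the field descriptions above (size_d_cache below is identical except it reads the DS/DL/DA fields). A sketch using the shift and mask values defined at the top of this file; the function name is illustrative:

```c
/* C sketch of size_i_cache: decode the CP0 Config1 I-cache fields. */
#include <stdint.h>

static unsigned int icache_size_from_config1(uint32_t cfg1,
					     unsigned int *linesize)
{
	unsigned int is = (cfg1 >> 22) & 7;  /* sets per way = 64 << IS */
	unsigned int il = (cfg1 >> 19) & 7;  /* line size = 2^(IL+1)    */
	unsigned int ia = (cfg1 >> 16) & 7;  /* ways = IA + 1           */

	if (il == 0) {			     /* IL == 0: no I-cache     */
		*linesize = 0;
		return 0;
	}
	*linesize = 1u << (il + 1);
	return (64u << is) * *linesize * (ia + 1);
}
```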
203 | /* | ||
204 | * Function: size_d_cache | ||
205 | * Arguments: None | ||
206 | * Returns: v0 = d cache size, v1 = d cache line size | ||
207 | * Description: compute the D-cache size and D-cache line size. | ||
208 | * Trashes: v0, v1, a0, t0 | ||
209 | * | ||
210 | */ | ||
211 | |||
212 | LEAF(size_d_cache) | ||
213 | .set noreorder | ||
214 | |||
215 | mfc0 a0, CP0_CONFIG, 1 | ||
216 | move t0, a0 | ||
217 | |||
218 | /* | ||
219 | * Determine sets per way: DS | ||
220 | * | ||
221 | * This field contains the number of sets (i.e., indices) per way of | ||
222 | * the data cache: | ||
223 | * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k | ||
224 | * vi) 0x5 - 0x7: Reserved. | ||
225 | */ | ||
226 | |||
227 | srl a0, a0, DS_SHIFT | ||
228 | and a0, a0, DS_MASK | ||
229 | |||
230 | /* sets per way = (64<<DS) */ | ||
231 | |||
232 | li v0, 0x40 | ||
233 | sllv v0, v0, a0 | ||
234 | |||
235 | /* | ||
236 | * Determine line size | ||
237 | * | ||
238 | * This field contains the line size of the data cache: | ||
239 | * i) 0x0: No D-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes, | ||
240 | * iv) 0x5: 64 bytes, v) the rest: Reserved. | ||
241 | */ | ||
242 | move a0, t0 | ||
243 | |||
244 | srl a0, a0, DL_SHIFT | ||
245 | and a0, a0, DL_MASK | ||
246 | |||
247 | beqz a0, no_d_cache | ||
248 | nop | ||
249 | |||
250 | /* line size = 2 ^ (DL+1) */ | ||
251 | |||
252 | addi a0, a0, 1 | ||
253 | li v1, 1 | ||
254 | sll v1, v1, a0 | ||
255 | |||
256 | /* v0 now has sets per way; multiply it by the line size | ||
257 | * to get the set size | ||
258 | */ | ||
259 | |||
260 | sll v0, v0, a0 | ||
261 | |||
262 | /* determine set associativity | ||
263 | * | ||
264 | * This field contains the set associativity of the data cache. | ||
265 | * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3: | ||
266 | * 4-way, v) 0x4 - 0x7: Reserved. | ||
267 | */ | ||
268 | |||
269 | move a0, t0 | ||
270 | |||
271 | srl a0, a0, DA_SHIFT | ||
272 | and a0, a0, DA_MASK | ||
273 | addi a0, a0, 0x1 | ||
274 | |||
275 | /* v0 has the set size, multiply it by | ||
276 | * set associativity to get the cache size | ||
277 | */ | ||
278 | |||
279 | multu v0, a0 /*multu is interlocked, so no need to insert nops */ | ||
280 | mflo v0 | ||
281 | |||
282 | b 1f | ||
283 | nop | ||
284 | |||
285 | no_d_cache: | ||
286 | move v0, zero | ||
287 | move v1, zero | ||
288 | 1: | ||
289 | jr ra | ||
290 | nop | ||
291 | .set reorder | ||
292 | |||
293 | END(size_d_cache) | ||
294 | |||
295 | |||
296 | /* | ||
297 | * Function: enable_ID | ||
298 | * Arguments: None | ||
299 | * Returns: None | ||
300 | * Description: Enable I and D caches, initialize I and D-caches, also set | ||
301 | * hardware delay for d-cache (TP0). | ||
302 | * Trashes: t0 | ||
303 | * | ||
304 | */ | ||
305 | .global enable_ID | ||
306 | .ent enable_ID | ||
307 | .set noreorder | ||
308 | enable_ID: | ||
309 | mfc0 t0, CP0_BRCM_CONFIG0 | ||
310 | or t0, t0, (ICE_MASK | DCE_MASK) | ||
311 | mtc0 t0, CP0_BRCM_CONFIG0 | ||
312 | jr ra | ||
313 | nop | ||
314 | |||
315 | .end enable_ID | ||
316 | .set reorder | ||
317 | |||
318 | |||
319 | /* | ||
320 | * Function: l1_init | ||
321 | * Arguments: None | ||
322 | * Returns: None | ||
323 | * Description: Enable I and D caches, and initialize I and D-caches | ||
324 | * Trashes: a0, v0, v1, t0, t1, t2, t8 | ||
325 | * | ||
326 | */ | ||
327 | .globl l1_init | ||
328 | .ent l1_init | ||
329 | .set noreorder | ||
330 | l1_init: | ||
331 | |||
332 | /* save return address */ | ||
333 | move t8, ra | ||
334 | |||
335 | |||
336 | /* initialize I and D cache Data and Tag registers. */ | ||
337 | mtc0 zero, CP0_ICACHE_TAG_LO | ||
338 | mtc0 zero, CP0_ICACHE_TAG_HI | ||
339 | mtc0 zero, CP0_ICACHE_DATA_LO | ||
340 | mtc0 zero, CP0_ICACHE_DATA_HI | ||
341 | mtc0 zero, CP0_DCACHE_TAG_LO | ||
342 | mtc0 zero, CP0_DCACHE_TAG_HI | ||
343 | |||
344 | /* Enable the caches before clearing: if the caches are disabled, | ||
345 | * the cache operations that clear them are ignored. | ||
346 | */ | ||
347 | |||
348 | jal enable_ID | ||
349 | nop | ||
350 | |||
351 | jal size_i_cache /* v0 = i-cache size, v1 = i-cache line size */ | ||
352 | nop | ||
353 | |||
354 | /* run uncached in kseg 1 */ | ||
355 | la k0, 1f | ||
356 | lui k1, 0x2000 | ||
357 | or k0, k1, k0 | ||
358 | jr k0 | ||
359 | nop | ||
360 | 1: | ||
361 | |||
362 | /* | ||
363 | * set K0 cache mode | ||
364 | */ | ||
365 | |||
366 | mfc0 t0, CP0_CONFIG | ||
367 | and t0, t0, ~CP0_CONFIG_K0_MASK | ||
368 | or t0, t0, 3 /* Write Back mode */ | ||
369 | mtc0 t0, CP0_CONFIG | ||
370 | |||
371 | /* | ||
372 | * Initialize instruction cache. | ||
373 | */ | ||
374 | |||
375 | li a0, KSEG0 | ||
376 | cacheop(a0, v0, v1, Index_Store_Tag_I) | ||
377 | |||
378 | /* | ||
379 | * Now we can run from I-$, kseg 0 | ||
380 | */ | ||
381 | la k0, 1f | ||
382 | lui k1, 0x2000 | ||
383 | or k0, k1, k0 | ||
384 | xor k0, k1, k0 | ||
385 | jr k0 | ||
386 | nop | ||
387 | 1: | ||
388 | /* | ||
389 | * Initialize data cache. | ||
390 | */ | ||
391 | |||
392 | jal size_d_cache /* v0 = d-cache size, v1 = d-cache line size */ | ||
393 | nop | ||
394 | |||
395 | |||
396 | li a0, KSEG0 | ||
397 | cacheop(a0, v0, v1, Index_Store_Tag_D) | ||
398 | |||
399 | jr t8 | ||
400 | nop | ||
401 | |||
402 | .end l1_init | ||
403 | .set reorder | ||
404 | |||
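The jumps in l1_init between cached and uncached execution rely on the fixed MIPS32 mapping of kseg0 (0x8000_0000, cached) and kseg1 (0xA000_0000, uncached), which address the same physical memory and differ only in bit 29. A minimal sketch of that address math; the helper names are made up:

```c
/* kseg0 <-> kseg1 translation behind the or/xor jumps in l1_init.
 * Helper names are illustrative; the constant is the standard MIPS32 layout. */
#include <stdint.h>

#define KSEG_BIT 0x20000000u	/* bit 29 selects uncached kseg1 */

static inline uint32_t to_kseg1(uint32_t kseg0_va) { return kseg0_va | KSEG_BIT; }
static inline uint32_t to_kseg0(uint32_t kseg1_va) { return kseg1_va & ~KSEG_BIT; }
```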
405 | |||
406 | /* | ||
407 | * Function: set_other_config | ||
408 | * Arguments: none | ||
409 | * Returns: None | ||
410 | * Description: initialize the remaining configuration to defaults. | ||
411 | * Trashes: t0, t1 | ||
412 | * | ||
413 | * pseudo code: | ||
414 | * | ||
415 | */ | ||
416 | LEAF(set_other_config) | ||
417 | .set noreorder | ||
418 | |||
419 | /* enable Bus error for I-fetch */ | ||
420 | mfc0 t0, CP0_CACHEERR, 0 | ||
421 | li t1, 0x4 | ||
422 | or t0, t1 | ||
423 | mtc0 t0, CP0_CACHEERR, 0 | ||
424 | |||
425 | /* enable Bus error for Load */ | ||
426 | mfc0 t0, CP0_CACHEERR, 1 | ||
427 | li t1, 0x4 | ||
428 | or t0, t1 | ||
429 | mtc0 t0, CP0_CACHEERR, 1 | ||
430 | |||
431 | /* enable Bus Error for Store */ | ||
432 | mfc0 t0, CP0_CACHEERR, 2 | ||
433 | li t1, 0x4 | ||
434 | or t0, t1 | ||
435 | mtc0 t0, CP0_CACHEERR, 2 | ||
436 | |||
437 | jr ra | ||
438 | nop | ||
439 | .set reorder | ||
440 | END(set_other_config) | ||
441 | |||
442 | /* | ||
443 | * Function: set_branch_pred | ||
444 | * Arguments: none | ||
445 | * Returns: None | ||
446 | * Description: | ||
447 | * Trashes: t0, t1 | ||
448 | * | ||
449 | * pseudo code: | ||
450 | * | ||
451 | */ | ||
452 | |||
453 | LEAF(set_branch_pred) | ||
454 | .set noreorder | ||
455 | mfc0 t0, CP0_BRCM_MODE | ||
456 | li t1, ~(CP0_BRCM_MODE_BrPRED_MASK | CP0_BRCM_MODE_BrHIST_MASK ) | ||
457 | and t0, t0, t1 | ||
458 | |||
459 | /* enable Branch prediction */ | ||
460 | li t1, BRCM_BrPRED_BHT_ENABLE | ||
461 | sll t1, CP0_BRCM_MODE_BrPRED_SHIFT | ||
462 | or t0, t0, t1 | ||
463 | |||
464 | /* set history count to 8 */ | ||
465 | li t1, 8 | ||
466 | sll t1, CP0_BRCM_MODE_BrHIST_SHIFT | ||
467 | or t0, t0, t1 | ||
468 | |||
469 | mtc0 t0, CP0_BRCM_MODE | ||
470 | jr ra | ||
471 | nop | ||
472 | .set reorder | ||
473 | END(set_branch_pred) | ||
474 | |||
475 | |||
476 | /* | ||
477 | * Function: set_luc | ||
478 | * Arguments: set link uncached. | ||
479 | * Returns: None | ||
480 | * Description: | ||
481 | * Trashes: t0, t1 | ||
482 | * | ||
483 | */ | ||
484 | LEAF(set_luc) | ||
485 | .set noreorder | ||
486 | mfc0 t0, CP0_BRCM_MODE | ||
487 | li t1, ~(CP0_BRCM_MODE_Luc_MASK) | ||
488 | and t0, t0, t1 | ||
489 | |||
490 | /* set Luc */ | ||
491 | ori t0, t0, CP0_BRCM_MODE_Luc_MASK | ||
492 | |||
493 | mtc0 t0, CP0_BRCM_MODE | ||
494 | jr ra | ||
495 | nop | ||
496 | .set reorder | ||
497 | END(set_luc) | ||
498 | |||
499 | /* | ||
500 | * Function: set_cwf_tse | ||
501 | * Arguments: set CWF and TSE bits | ||
502 | * Returns: None | ||
503 | * Description: | ||
504 | * Trashes: t0, t1 | ||
505 | * | ||
506 | */ | ||
507 | LEAF(set_cwf_tse) | ||
508 | .set noreorder | ||
509 | mfc0 t0, CP0_BRCM_CONFIG0 | ||
510 | li t1, (CP0_BRCM_CONFIG0_CWF_MASK | CP0_BRCM_CONFIG0_TSE_MASK) | ||
511 | or t0, t0, t1 | ||
512 | |||
513 | mtc0 t0, CP0_BRCM_CONFIG0 | ||
514 | jr ra | ||
515 | nop | ||
516 | .set reorder | ||
517 | END(set_cwf_tse) | ||
518 | |||
519 | /* | ||
520 | * Function: set_clock_ratio | ||
521 | * Arguments: set clock ratio specified by a0 | ||
522 | * Returns: None | ||
523 | * Description: | ||
524 | * Trashes: v0, v1, a0, a1 | ||
525 | * | ||
526 | * pseudo code: | ||
527 | * | ||
528 | */ | ||
529 | LEAF(set_clock_ratio) | ||
530 | .set noreorder | ||
531 | |||
532 | mfc0 t0, CP0_BRCM_MODE | ||
533 | li t1, ~(CP0_BRCM_MODE_SET_MASK | CP0_BRCM_MODE_ClkRATIO_MASK) | ||
534 | and t0, t0, t1 | ||
535 | li t1, CP0_BRCM_MODE_SET_MASK | ||
536 | or t0, t0, t1 | ||
537 | or t0, t0, a0 | ||
538 | mtc0 t0, CP0_BRCM_MODE | ||
539 | jr ra | ||
540 | nop | ||
541 | .set reorder | ||
542 | END(set_clock_ratio) | ||
543 | /* | ||
544 | * Function: set_zephyr | ||
545 | * Arguments: None | ||
546 | * Returns: None | ||
547 | * Description: Set any zephyr bits | ||
548 | * Trashes: t0 & t1 | ||
549 | * | ||
550 | */ | ||
551 | LEAF(set_zephyr) | ||
552 | .set noreorder | ||
553 | |||
554 | /* enable read/write of CP0 #22 sel. 8 */ | ||
555 | li t0, 0x5a455048 | ||
556 | .word 0x4088b00f /* mtc0 t0, $22, 15 */ | ||
557 | |||
558 | .word 0x4008b008 /* mfc0 t0, $22, 8 */ | ||
559 | li t1, 0x09008000 /* turn off pref, jtb */ | ||
560 | or t0, t0, t1 | ||
561 | .word 0x4088b008 /* mtc0 t0, $22, 8 */ | ||
562 | sync | ||
563 | |||
564 | /* disable read/write of CP0 #22 sel 8 */ | ||
565 | li t0, 0x0 | ||
566 | .word 0x4088b00f /* mtc0 t0, $22, 15 */ | ||
567 | |||
568 | |||
569 | jr ra | ||
570 | nop | ||
571 | .set reorder | ||
572 | |||
573 | END(set_zephyr) | ||
574 | |||
575 | |||
576 | /* | ||
577 | * Function: set_llmb | ||
578 | * Arguments: a0=0 disable llmb, a0=1 enables llmb | ||
579 | * Returns: None | ||
580 | * Description: | ||
581 | * Trashes: t0, t1, t2 | ||
582 | * | ||
583 | * pseudo code: | ||
584 | * | ||
585 | */ | ||
586 | LEAF(set_llmb) | ||
587 | .set noreorder | ||
588 | |||
589 | li t2, 0x90000000 | BRCM_ZSC_ALL_REGS_SELECT | BRCM_ZSC_CONFIG_REG | ||
590 | sync | ||
591 | cache 0x7, 0x0(t2) | ||
592 | sync | ||
593 | mfc0 t0, CP0_D_SEC_CACHE_DATA_LO | ||
594 | li t1, ~(BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En) | ||
595 | and t0, t0, t1 | ||
596 | |||
597 | beqz a0, svlmb | ||
598 | nop | ||
599 | |||
600 | enable_lmb: | ||
601 | li t1, (BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En) | ||
602 | or t0, t0, t1 | ||
603 | |||
604 | svlmb: | ||
605 | mtc0 t0, CP0_D_SEC_CACHE_DATA_LO | ||
606 | sync | ||
607 | cache 0xb, 0x0(t2) | ||
608 | sync | ||
609 | |||
610 | jr ra | ||
611 | nop | ||
612 | .set reorder | ||
613 | |||
614 | END(set_llmb) | ||
615 | /* | ||
616 | * Function: core_init | ||
617 | * Arguments: none | ||
618 | * Returns: None | ||
619 | * Description: initialize core related configuration | ||
620 | * Trashes: v0,v1,a0,a1,t8 | ||
621 | * | ||
622 | * pseudo code: | ||
623 | * | ||
624 | */ | ||
625 | .globl core_init | ||
626 | .ent core_init | ||
627 | .set noreorder | ||
628 | core_init: | ||
629 | move t8, ra | ||
630 | |||
631 | /* set Zephyr bits. */ | ||
632 | bal set_zephyr | ||
633 | nop | ||
634 | |||
635 | /* set low latency memory bus */ | ||
636 | li a0, 1 | ||
637 | bal set_llmb | ||
638 | nop | ||
639 | |||
640 | /* set branch prediction (TP0 only) */ | ||
641 | bal set_branch_pred | ||
642 | nop | ||
643 | |||
644 | /* set link uncached */ | ||
645 | bal set_luc | ||
646 | nop | ||
647 | |||
648 | /* set CWF and TSE */ | ||
649 | bal set_cwf_tse | ||
650 | nop | ||
651 | |||
652 | /* | ||
653 | * set the clock ratio by writing 1 to 'set' | ||
654 | * and 0 to ClkRatio (TP0 only) | ||
655 | */ | ||
656 | li a0, 0 | ||
657 | bal set_clock_ratio | ||
658 | nop | ||
659 | |||
660 | /* set other configuration to defaults */ | ||
661 | bal set_other_config | ||
662 | nop | ||
663 | |||
664 | move ra, t8 | ||
665 | jr ra | ||
666 | nop | ||
667 | |||
668 | .set reorder | ||
669 | .end core_init | ||
670 | |||
671 | /* | ||
672 | * Function: clear_jump_target_buffer | ||
673 | * Arguments: None | ||
674 | * Returns: None | ||
675 | * Description: | ||
676 | * Trashes: t0, t1, t2 | ||
677 | * | ||
678 | */ | ||
679 | #define RESET_CALL_RETURN_STACK_THIS_THREAD (0x06<<16) | ||
680 | #define RESET_JUMP_TARGET_BUFFER_THIS_THREAD (0x04<<16) | ||
681 | #define JTB_CS_CNTL_MASK (0xFF<<16) | ||
682 | |||
683 | .globl clear_jump_target_buffer | ||
684 | .ent clear_jump_target_buffer | ||
685 | .set noreorder | ||
686 | clear_jump_target_buffer: | ||
687 | |||
688 | mfc0 t0, $22, 2 | ||
689 | nop | ||
690 | nop | ||
691 | |||
692 | li t1, ~JTB_CS_CNTL_MASK | ||
693 | and t0, t0, t1 | ||
694 | li t2, RESET_CALL_RETURN_STACK_THIS_THREAD | ||
695 | or t0, t0, t2 | ||
696 | mtc0 t0, $22, 2 | ||
697 | nop | ||
698 | nop | ||
699 | |||
700 | and t0, t0, t1 | ||
701 | li t2, RESET_JUMP_TARGET_BUFFER_THIS_THREAD | ||
702 | or t0, t0, t2 | ||
703 | mtc0 t0, $22, 2 | ||
704 | nop | ||
705 | nop | ||
706 | jr ra | ||
707 | nop | ||
708 | |||
709 | .end clear_jump_target_buffer | ||
710 | .set reorder | ||
711 | /* | ||
712 | * Function: bmips_cache_init | ||
713 | * Arguments: None | ||
714 | * Returns: None | ||
715 | * Description: Enable I and D caches, and initialize I and D-caches | ||
716 | * Trashes: v0, v1, t0, t1, t2, t5, t7, t8 | ||
717 | * | ||
718 | */ | ||
719 | .globl bmips_5xxx_init | ||
720 | .ent bmips_5xxx_init | ||
721 | .set noreorder | ||
722 | bmips_5xxx_init: | ||
723 | |||
724 | /* save return address and A0 */ | ||
725 | move t7, ra | ||
726 | move t5, a0 | ||
727 | |||
728 | jal l1_init | ||
729 | nop | ||
730 | |||
731 | jal core_init | ||
732 | nop | ||
733 | |||
734 | jal clear_jump_target_buffer | ||
735 | nop | ||
736 | |||
737 | mtc0 zero, CP0_CAUSE | ||
738 | |||
739 | move a0, t5 | ||
740 | jr t7 | ||
741 | nop | ||
742 | |||
743 | .end bmips_5xxx_init | ||
744 | .set reorder | ||
745 | |||
746 | |||
747 | #endif | ||
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
new file mode 100644
index 000000000..921a5fa55
--- /dev/null
+++ b/arch/mips/kernel/bmips_vec.S
@@ -0,0 +1,322 @@
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com) | ||
7 | * | ||
8 | * Reset/NMI/re-entry vectors for BMIPS processors | ||
9 | */ | ||
10 | |||
11 | |||
12 | #include <asm/asm.h> | ||
13 | #include <asm/asmmacro.h> | ||
14 | #include <asm/cacheops.h> | ||
15 | #include <asm/cpu.h> | ||
16 | #include <asm/regdef.h> | ||
17 | #include <asm/mipsregs.h> | ||
18 | #include <asm/stackframe.h> | ||
19 | #include <asm/addrspace.h> | ||
20 | #include <asm/hazards.h> | ||
21 | #include <asm/bmips.h> | ||
22 | |||
23 | .macro BARRIER | ||
24 | .set mips32 | ||
25 | _ssnop | ||
26 | _ssnop | ||
27 | _ssnop | ||
28 | .set mips0 | ||
29 | .endm | ||
30 | |||
31 | /*********************************************************************** | ||
32 | * Alternate CPU1 startup vector for BMIPS4350 | ||
33 | * | ||
34 | * On some systems the bootloader has already started CPU1 and configured | ||
35 | * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is | ||
36 | * triggered by the SW1 interrupt. If that is the case we try to move | ||
37 | * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380. | ||
38 | ***********************************************************************/ | ||
39 | |||
40 | LEAF(bmips_smp_movevec) | ||
41 | la k0, 1f | ||
42 | li k1, CKSEG1 | ||
43 | or k0, k1 | ||
44 | jr k0 | ||
45 | |||
46 | 1: | ||
47 | /* clear IV, pending IPIs */ | ||
48 | mtc0 zero, CP0_CAUSE | ||
49 | |||
50 | /* re-enable IRQs to wait for SW1 */ | ||
51 | li k0, ST0_IE | ST0_BEV | STATUSF_IP1 | ||
52 | mtc0 k0, CP0_STATUS | ||
53 | |||
54 | /* set up CPU1 CBR; move BASE to 0xa000_0000 */ | ||
55 | li k0, 0xff400000 | ||
56 | mtc0 k0, $22, 6 | ||
57 | /* set up relocation vector address based on thread ID */ | ||
58 | mfc0 k1, $22, 3 | ||
59 | srl k1, 16 | ||
60 | andi k1, 0x8000 | ||
61 | or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0 | ||
62 | or k0, k1 | ||
63 | li k1, 0xa0080000 | ||
64 | sw k1, 0(k0) | ||
65 | |||
66 | /* wait here for SW1 interrupt from bmips_boot_secondary() */ | ||
67 | wait | ||
68 | |||
69 | la k0, bmips_reset_nmi_vec | ||
70 | li k1, CKSEG1 | ||
71 | or k0, k1 | ||
72 | jr k0 | ||
73 | END(bmips_smp_movevec) | ||
74 | |||
75 | /*********************************************************************** | ||
76 | * Reset/NMI vector | ||
77 | * For BMIPS processors that can relocate their exception vectors, this | ||
78 | * entire function gets copied to 0x8000_0000. | ||
79 | ***********************************************************************/ | ||
80 | |||
81 | NESTED(bmips_reset_nmi_vec, PT_SIZE, sp) | ||
82 | .set push | ||
83 | .set noat | ||
84 | .align 4 | ||
85 | |||
86 | #ifdef CONFIG_SMP | ||
87 | /* if the NMI bit is clear, assume this is a CPU1 reset instead */ | ||
88 | li k1, (1 << 19) | ||
89 | mfc0 k0, CP0_STATUS | ||
90 | and k0, k1 | ||
91 | beqz k0, soft_reset | ||
92 | |||
93 | #if defined(CONFIG_CPU_BMIPS5000) | ||
94 | mfc0 k0, CP0_PRID | ||
95 | li k1, PRID_IMP_BMIPS5000 | ||
96 | /* mask with PRID_IMP_BMIPS5000 to cover both variants */ | ||
97 | andi k0, PRID_IMP_BMIPS5000 | ||
98 | bne k0, k1, 1f | ||
99 | |||
100 | /* if we're not on core 0, this must be the SMP boot signal */ | ||
101 | li k1, (3 << 25) | ||
102 | mfc0 k0, $22 | ||
103 | and k0, k1 | ||
104 | bnez k0, bmips_smp_entry | ||
105 | 1: | ||
106 | #endif /* CONFIG_CPU_BMIPS5000 */ | ||
107 | #endif /* CONFIG_SMP */ | ||
108 | |||
109 | /* nope, it's just a regular NMI */ | ||
110 | SAVE_ALL | ||
111 | move a0, sp | ||
112 | |||
113 | /* clear EXL, ERL, BEV so that TLB refills still work */ | ||
114 | mfc0 k0, CP0_STATUS | ||
115 | li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE | ||
116 | or k0, k1 | ||
117 | xor k0, k1 | ||
118 | mtc0 k0, CP0_STATUS | ||
119 | BARRIER | ||
120 | |||
121 | /* jump to the NMI handler function */ | ||
122 | la k0, nmi_handler | ||
123 | jr k0 | ||
124 | |||
125 | RESTORE_ALL | ||
126 | .set arch=r4000 | ||
127 | eret | ||
128 | |||
129 | #ifdef CONFIG_SMP | ||
130 | soft_reset: | ||
131 | |||
132 | #if defined(CONFIG_CPU_BMIPS5000) | ||
133 | mfc0 k0, CP0_PRID | ||
134 | andi k0, 0xff00 | ||
135 | li k1, PRID_IMP_BMIPS5200 | ||
136 | bne k0, k1, bmips_smp_entry | ||
137 | |||
138 | /* if running on TP 1, jump to bmips_smp_entry */ | ||
139 | mfc0 k0, $22 | ||
140 | li k1, (1 << 24) | ||
141 | and k1, k0 | ||
142 | bnez k1, bmips_smp_entry | ||
143 | nop | ||
144 | |||
145 | /* | ||
146 | * Running on TP0; this cannot be core 0 (the boot core). | ||
147 | * Check for soft reset, which indicates a warm boot. | ||

148 | */ | ||
149 | mfc0 k0, $12 | ||
150 | li k1, (1 << 20) | ||
151 | and k0, k1 | ||
152 | beqz k0, bmips_smp_entry | ||
153 | |||
154 | /* | ||
155 | * Warm boot. | ||
156 | * Cache init is only done on TP0 | ||
157 | */ | ||
158 | la k0, bmips_5xxx_init | ||
159 | jalr k0 | ||
160 | nop | ||
161 | |||
162 | b bmips_smp_entry | ||
163 | nop | ||
164 | #endif | ||
165 | |||
166 | /*********************************************************************** | ||
167 | * CPU1 reset vector (used for the initial boot only) | ||
168 | * This is still part of bmips_reset_nmi_vec(). | ||
169 | ***********************************************************************/ | ||
170 | |||
171 | bmips_smp_entry: | ||
172 | |||
173 | /* set up CP0 STATUS; enable FPU */ | ||
174 | li k0, 0x30000000 | ||
175 | mtc0 k0, CP0_STATUS | ||
176 | BARRIER | ||
177 | |||
178 | /* set local CP0 CONFIG to make kseg0 cacheable, write-back */ | ||
179 | mfc0 k0, CP0_CONFIG | ||
180 | ori k0, 0x07 | ||
181 | xori k0, 0x04 | ||
182 | mtc0 k0, CP0_CONFIG | ||
183 | |||
184 | mfc0 k0, CP0_PRID | ||
185 | andi k0, 0xff00 | ||
186 | #if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380) | ||
187 | li k1, PRID_IMP_BMIPS43XX | ||
188 | bne k0, k1, 2f | ||
189 | |||
190 | /* initialize CPU1's local I-cache */ | ||
191 | li k0, 0x80000000 | ||
192 | li k1, 0x80010000 | ||
193 | mtc0 zero, $28 | ||
194 | mtc0 zero, $28, 1 | ||
195 | BARRIER | ||
196 | |||
197 | 1: cache Index_Store_Tag_I, 0(k0) | ||
198 | addiu k0, 16 | ||
199 | bne k0, k1, 1b | ||
200 | |||
201 | b 3f | ||
202 | 2: | ||
203 | #endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */ | ||
204 | #if defined(CONFIG_CPU_BMIPS5000) | ||
205 | /* mask with PRID_IMP_BMIPS5000 to cover both variants */ | ||
206 | li k1, PRID_IMP_BMIPS5000 | ||
207 | andi k0, PRID_IMP_BMIPS5000 | ||
208 | bne k0, k1, 3f | ||
209 | |||
210 | /* set exception vector base */ | ||
211 | la k0, ebase | ||
212 | lw k0, 0(k0) | ||
213 | mtc0 k0, $15, 1 | ||
214 | BARRIER | ||
215 | #endif /* CONFIG_CPU_BMIPS5000 */ | ||
216 | 3: | ||
217 | /* jump back to kseg0 in case we need to remap the kseg1 area */ | ||
218 | la k0, 1f | ||
219 | jr k0 | ||
220 | 1: | ||
221 | la k0, bmips_enable_xks01 | ||
222 | jalr k0 | ||
223 | |||
224 | /* use temporary stack to set up upper memory TLB */ | ||
225 | li sp, BMIPS_WARM_RESTART_VEC | ||
226 | la k0, plat_wired_tlb_setup | ||
227 | jalr k0 | ||
228 | |||
229 | /* switch to permanent stack and continue booting */ | ||
230 | |||
231 | .global bmips_secondary_reentry | ||
232 | bmips_secondary_reentry: | ||
233 | la k0, bmips_smp_boot_sp | ||
234 | lw sp, 0(k0) | ||
235 | la k0, bmips_smp_boot_gp | ||
236 | lw gp, 0(k0) | ||
237 | la k0, start_secondary | ||
238 | jr k0 | ||
239 | |||
240 | #endif /* CONFIG_SMP */ | ||
241 | |||
242 | .align 4 | ||
243 | .global bmips_reset_nmi_vec_end | ||
244 | bmips_reset_nmi_vec_end: | ||
245 | |||
246 | END(bmips_reset_nmi_vec) | ||
247 | |||
248 | .set pop | ||
249 | |||
250 | /*********************************************************************** | ||
251 | * CPU1 warm restart vector (used for second and subsequent boots). | ||
252 | * Also used for S2 standby recovery (PM). | ||
253 | * This entire function gets copied to (BMIPS_WARM_RESTART_VEC) | ||
254 | ***********************************************************************/ | ||
255 | |||
256 | LEAF(bmips_smp_int_vec) | ||
257 | |||
258 | .align 4 | ||
259 | mfc0 k0, CP0_STATUS | ||
260 | ori k0, 0x01 | ||
261 | xori k0, 0x01 | ||
262 | mtc0 k0, CP0_STATUS | ||
263 | eret | ||
264 | |||
265 | .align 4 | ||
266 | .global bmips_smp_int_vec_end | ||
267 | bmips_smp_int_vec_end: | ||
268 | |||
269 | END(bmips_smp_int_vec) | ||
270 | |||
271 | /*********************************************************************** | ||
272 | * XKS01 support | ||
273 | * Certain CPUs support extending kseg0 to 1024MB. | ||
274 | ***********************************************************************/ | ||
275 | |||
276 | LEAF(bmips_enable_xks01) | ||
277 | |||
278 | #if defined(CONFIG_XKS01) | ||
279 | mfc0 t0, CP0_PRID | ||
280 | andi t2, t0, 0xff00 | ||
281 | #if defined(CONFIG_CPU_BMIPS4380) | ||
282 | li t1, PRID_IMP_BMIPS43XX | ||
283 | bne t2, t1, 1f | ||
284 | |||
285 | andi t0, 0xff | ||
286 | addiu t1, t0, -PRID_REV_BMIPS4380_HI | ||
287 | bgtz t1, 2f | ||
288 | addiu t0, -PRID_REV_BMIPS4380_LO | ||
289 | bltz t0, 2f | ||
290 | |||
291 | mfc0 t0, $22, 3 | ||
292 | li t1, 0x1ff0 | ||
293 | li t2, (1 << 12) | (1 << 9) | ||
294 | or t0, t1 | ||
295 | xor t0, t1 | ||
296 | or t0, t2 | ||
297 | mtc0 t0, $22, 3 | ||
298 | BARRIER | ||
299 | b 2f | ||
300 | 1: | ||
301 | #endif /* CONFIG_CPU_BMIPS4380 */ | ||
302 | #if defined(CONFIG_CPU_BMIPS5000) | ||
303 | li t1, PRID_IMP_BMIPS5000 | ||
304 | /* mask with PRID_IMP_BMIPS5000 to cover both variants */ | ||
305 | andi t2, PRID_IMP_BMIPS5000 | ||
306 | bne t2, t1, 2f | ||
307 | |||
308 | mfc0 t0, $22, 5 | ||
309 | li t1, 0x01ff | ||
310 | li t2, (1 << 8) | (1 << 5) | ||
311 | or t0, t1 | ||
312 | xor t0, t1 | ||
313 | or t0, t2 | ||
314 | mtc0 t0, $22, 5 | ||
315 | BARRIER | ||
316 | #endif /* CONFIG_CPU_BMIPS5000 */ | ||
317 | 2: | ||
318 | #endif /* defined(CONFIG_XKS01) */ | ||
319 | |||
320 | jr ra | ||
321 | |||
322 | END(bmips_enable_xks01) | ||
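The or/xor/or triple used throughout this file (for CP0 Status and the $22 configuration registers) is a branchless clear-then-set: OR forces the mask bits on, XOR flips exactly those bits back off, and a final OR installs the new field. A self-checking sketch of the idiom, using the BMIPS4380 XKS01 masks from above:

#include <assert.h>
#include <stdint.h>

static uint32_t clear_then_set(uint32_t v, uint32_t clear, uint32_t set)
{
	v |= clear;	/* force mask bits to 1 */
	v ^= clear;	/* ...then flip them to 0 */
	return v | set;	/* install the new field */
}

int main(void)
{
	/* mirrors the $22 sel 3 update: clear 0x1ff0, set bits 12 and 9 */
	assert(clear_then_set(0xffffffff, 0x1ff0, (1 << 12) | (1 << 9)) ==
	       ((0xffffffff & ~0x1ff0u) | (1 << 12) | (1 << 9)));
	return 0;
}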
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c new file mode 100644 index 000000000..0216ff24c --- /dev/null +++ b/arch/mips/kernel/branch.c | |||
@@ -0,0 +1,908 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1996, 97, 2000, 2001 by Ralf Baechle | ||
7 | * Copyright (C) 2001 MIPS Technologies, Inc. | ||
8 | */ | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sched/signal.h> | ||
11 | #include <linux/signal.h> | ||
12 | #include <linux/export.h> | ||
13 | #include <asm/branch.h> | ||
14 | #include <asm/cpu.h> | ||
15 | #include <asm/cpu-features.h> | ||
16 | #include <asm/fpu.h> | ||
17 | #include <asm/fpu_emulator.h> | ||
18 | #include <asm/inst.h> | ||
19 | #include <asm/mips-r2-to-r6-emul.h> | ||
20 | #include <asm/ptrace.h> | ||
21 | #include <linux/uaccess.h> | ||
22 | |||
23 | #include "probes-common.h" | ||
24 | |||
25 | /* | ||
26 | * Calculate and return exception PC in case of branch delay slot | ||
27 | * for microMIPS and MIPS16e. It does not clear the ISA mode bit. | ||
28 | */ | ||
29 | int __isa_exception_epc(struct pt_regs *regs) | ||
30 | { | ||
31 | unsigned short inst; | ||
32 | long epc = regs->cp0_epc; | ||
33 | |||
34 | /* Calculate exception PC in branch delay slot. */ | ||
35 | if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) { | ||
36 | /* This should never happen because the delay slot was checked. */ | ||
37 | force_sig(SIGSEGV); | ||
38 | return epc; | ||
39 | } | ||
40 | if (cpu_has_mips16) { | ||
41 | union mips16e_instruction inst_mips16e; | ||
42 | |||
43 | inst_mips16e.full = inst; | ||
44 | if (inst_mips16e.ri.opcode == MIPS16e_jal_op) | ||
45 | epc += 4; | ||
46 | else | ||
47 | epc += 2; | ||
48 | } else if (mm_insn_16bit(inst)) | ||
49 | epc += 2; | ||
50 | else | ||
51 | epc += 4; | ||
52 | |||
53 | return epc; | ||
54 | } | ||
55 | |||
56 | /* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */ | ||
57 | static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7}; | ||
58 | |||
59 | int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn, | ||
60 | unsigned long *contpc) | ||
61 | { | ||
62 | union mips_instruction insn = (union mips_instruction)dec_insn.insn; | ||
63 | int __maybe_unused bc_false = 0; | ||
64 | |||
65 | if (!cpu_has_mmips) | ||
66 | return 0; | ||
67 | |||
68 | switch (insn.mm_i_format.opcode) { | ||
69 | case mm_pool32a_op: | ||
70 | if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) == | ||
71 | mm_pool32axf_op) { | ||
72 | switch (insn.mm_i_format.simmediate >> | ||
73 | MM_POOL32A_MINOR_SHIFT) { | ||
74 | case mm_jalr_op: | ||
75 | case mm_jalrhb_op: | ||
76 | case mm_jalrs_op: | ||
77 | case mm_jalrshb_op: | ||
78 | if (insn.mm_i_format.rt != 0) /* Not mm_jr */ | ||
79 | regs->regs[insn.mm_i_format.rt] = | ||
80 | regs->cp0_epc + | ||
81 | dec_insn.pc_inc + | ||
82 | dec_insn.next_pc_inc; | ||
83 | *contpc = regs->regs[insn.mm_i_format.rs]; | ||
84 | return 1; | ||
85 | } | ||
86 | } | ||
87 | break; | ||
88 | case mm_pool32i_op: | ||
89 | switch (insn.mm_i_format.rt) { | ||
90 | case mm_bltzals_op: | ||
91 | case mm_bltzal_op: | ||
92 | regs->regs[31] = regs->cp0_epc + | ||
93 | dec_insn.pc_inc + | ||
94 | dec_insn.next_pc_inc; | ||
95 | fallthrough; | ||
96 | case mm_bltz_op: | ||
97 | if ((long)regs->regs[insn.mm_i_format.rs] < 0) | ||
98 | *contpc = regs->cp0_epc + | ||
99 | dec_insn.pc_inc + | ||
100 | (insn.mm_i_format.simmediate << 1); | ||
101 | else | ||
102 | *contpc = regs->cp0_epc + | ||
103 | dec_insn.pc_inc + | ||
104 | dec_insn.next_pc_inc; | ||
105 | return 1; | ||
106 | case mm_bgezals_op: | ||
107 | case mm_bgezal_op: | ||
108 | regs->regs[31] = regs->cp0_epc + | ||
109 | dec_insn.pc_inc + | ||
110 | dec_insn.next_pc_inc; | ||
111 | fallthrough; | ||
112 | case mm_bgez_op: | ||
113 | if ((long)regs->regs[insn.mm_i_format.rs] >= 0) | ||
114 | *contpc = regs->cp0_epc + | ||
115 | dec_insn.pc_inc + | ||
116 | (insn.mm_i_format.simmediate << 1); | ||
117 | else | ||
118 | *contpc = regs->cp0_epc + | ||
119 | dec_insn.pc_inc + | ||
120 | dec_insn.next_pc_inc; | ||
121 | return 1; | ||
122 | case mm_blez_op: | ||
123 | if ((long)regs->regs[insn.mm_i_format.rs] <= 0) | ||
124 | *contpc = regs->cp0_epc + | ||
125 | dec_insn.pc_inc + | ||
126 | (insn.mm_i_format.simmediate << 1); | ||
127 | else | ||
128 | *contpc = regs->cp0_epc + | ||
129 | dec_insn.pc_inc + | ||
130 | dec_insn.next_pc_inc; | ||
131 | return 1; | ||
132 | case mm_bgtz_op: | ||
133 | if ((long)regs->regs[insn.mm_i_format.rs] <= 0) | ||
134 | *contpc = regs->cp0_epc + | ||
135 | dec_insn.pc_inc + | ||
136 | (insn.mm_i_format.simmediate << 1); | ||
137 | else | ||
138 | *contpc = regs->cp0_epc + | ||
139 | dec_insn.pc_inc + | ||
140 | dec_insn.next_pc_inc; | ||
141 | return 1; | ||
142 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
143 | case mm_bc2f_op: | ||
144 | case mm_bc1f_op: { | ||
145 | unsigned int fcr31; | ||
146 | unsigned int bit; | ||
147 | |||
148 | bc_false = 1; | ||
149 | fallthrough; | ||
150 | case mm_bc2t_op: | ||
151 | case mm_bc1t_op: | ||
152 | preempt_disable(); | ||
153 | if (is_fpu_owner()) | ||
154 | fcr31 = read_32bit_cp1_register(CP1_STATUS); | ||
155 | else | ||
156 | fcr31 = current->thread.fpu.fcr31; | ||
157 | preempt_enable(); | ||
158 | |||
159 | if (bc_false) | ||
160 | fcr31 = ~fcr31; | ||
161 | |||
162 | bit = (insn.mm_i_format.rs >> 2); | ||
163 | bit += (bit != 0); | ||
164 | bit += 23; | ||
165 | if (fcr31 & (1 << bit)) | ||
166 | *contpc = regs->cp0_epc + | ||
167 | dec_insn.pc_inc + | ||
168 | (insn.mm_i_format.simmediate << 1); | ||
169 | else | ||
170 | *contpc = regs->cp0_epc + | ||
171 | dec_insn.pc_inc + dec_insn.next_pc_inc; | ||
172 | return 1; | ||
173 | } | ||
174 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
175 | } | ||
176 | break; | ||
177 | case mm_pool16c_op: | ||
178 | switch (insn.mm_i_format.rt) { | ||
179 | case mm_jalr16_op: | ||
180 | case mm_jalrs16_op: | ||
181 | regs->regs[31] = regs->cp0_epc + | ||
182 | dec_insn.pc_inc + dec_insn.next_pc_inc; | ||
183 | fallthrough; | ||
184 | case mm_jr16_op: | ||
185 | *contpc = regs->regs[insn.mm_i_format.rs]; | ||
186 | return 1; | ||
187 | } | ||
188 | break; | ||
189 | case mm_beqz16_op: | ||
190 | if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0) | ||
191 | *contpc = regs->cp0_epc + | ||
192 | dec_insn.pc_inc + | ||
193 | (insn.mm_b1_format.simmediate << 1); | ||
194 | else | ||
195 | *contpc = regs->cp0_epc + | ||
196 | dec_insn.pc_inc + dec_insn.next_pc_inc; | ||
197 | return 1; | ||
198 | case mm_bnez16_op: | ||
199 | if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0) | ||
200 | *contpc = regs->cp0_epc + | ||
201 | dec_insn.pc_inc + | ||
202 | (insn.mm_b1_format.simmediate << 1); | ||
203 | else | ||
204 | *contpc = regs->cp0_epc + | ||
205 | dec_insn.pc_inc + dec_insn.next_pc_inc; | ||
206 | return 1; | ||
207 | case mm_b16_op: | ||
208 | *contpc = regs->cp0_epc + dec_insn.pc_inc + | ||
209 | (insn.mm_b0_format.simmediate << 1); | ||
210 | return 1; | ||
211 | case mm_beq32_op: | ||
212 | if (regs->regs[insn.mm_i_format.rs] == | ||
213 | regs->regs[insn.mm_i_format.rt]) | ||
214 | *contpc = regs->cp0_epc + | ||
215 | dec_insn.pc_inc + | ||
216 | (insn.mm_i_format.simmediate << 1); | ||
217 | else | ||
218 | *contpc = regs->cp0_epc + | ||
219 | dec_insn.pc_inc + | ||
220 | dec_insn.next_pc_inc; | ||
221 | return 1; | ||
222 | case mm_bne32_op: | ||
223 | if (regs->regs[insn.mm_i_format.rs] != | ||
224 | regs->regs[insn.mm_i_format.rt]) | ||
225 | *contpc = regs->cp0_epc + | ||
226 | dec_insn.pc_inc + | ||
227 | (insn.mm_i_format.simmediate << 1); | ||
228 | else | ||
229 | *contpc = regs->cp0_epc + | ||
230 | dec_insn.pc_inc + dec_insn.next_pc_inc; | ||
231 | return 1; | ||
232 | case mm_jalx32_op: | ||
233 | regs->regs[31] = regs->cp0_epc + | ||
234 | dec_insn.pc_inc + dec_insn.next_pc_inc; | ||
235 | *contpc = regs->cp0_epc + dec_insn.pc_inc; | ||
236 | *contpc >>= 28; | ||
237 | *contpc <<= 28; | ||
238 | *contpc |= (insn.j_format.target << 2); | ||
239 | return 1; | ||
240 | case mm_jals32_op: | ||
241 | case mm_jal32_op: | ||
242 | regs->regs[31] = regs->cp0_epc + | ||
243 | dec_insn.pc_inc + dec_insn.next_pc_inc; | ||
244 | fallthrough; | ||
245 | case mm_j32_op: | ||
246 | *contpc = regs->cp0_epc + dec_insn.pc_inc; | ||
247 | *contpc >>= 27; | ||
248 | *contpc <<= 27; | ||
249 | *contpc |= (insn.j_format.target << 1); | ||
250 | set_isa16_mode(*contpc); | ||
251 | return 1; | ||
252 | } | ||
253 | return 0; | ||
254 | } | ||
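The condition-code arithmetic above (bit = cc; bit += (bit != 0); bit += 23) maps FP condition codes onto FCSR bits: FCC0 sits at bit 23 and FCC1-FCC7 at bits 25-31, skipping the FS bit at 24. A quick demonstration of the mapping:

#include <stdio.h>

static unsigned int fcc_bit(unsigned int cc)
{
	unsigned int bit = cc;

	bit += (bit != 0);	/* skip FCSR.FS at bit 24 for cc > 0 */
	return bit + 23;
}

int main(void)
{
	for (unsigned int cc = 0; cc < 8; cc++)
		printf("cc%u -> FCSR bit %u\n", cc, fcc_bit(cc));
	/* prints 23, then 25 through 31 */
	return 0;
}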
255 | |||
256 | /* | ||
257 | * Compute return address and emulate branch in microMIPS mode after an | ||
258 | * exception only. It does not handle compact branches/jumps and cannot | ||
259 | * be used in interrupt context. (Compact branches/jumps do not cause | ||
260 | * exceptions.) | ||
261 | */ | ||
262 | int __microMIPS_compute_return_epc(struct pt_regs *regs) | ||
263 | { | ||
264 | u16 __user *pc16; | ||
265 | u16 halfword; | ||
266 | unsigned int word; | ||
267 | unsigned long contpc; | ||
268 | struct mm_decoded_insn mminsn = { 0 }; | ||
269 | |||
270 | mminsn.micro_mips_mode = 1; | ||
271 | |||
272 | /* This load never faults. */ | ||
273 | pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); | ||
274 | __get_user(halfword, pc16); | ||
275 | pc16++; | ||
276 | contpc = regs->cp0_epc + 2; | ||
277 | word = ((unsigned int)halfword << 16); | ||
278 | mminsn.pc_inc = 2; | ||
279 | |||
280 | if (!mm_insn_16bit(halfword)) { | ||
281 | __get_user(halfword, pc16); | ||
282 | pc16++; | ||
283 | contpc = regs->cp0_epc + 4; | ||
284 | mminsn.pc_inc = 4; | ||
285 | word |= halfword; | ||
286 | } | ||
287 | mminsn.insn = word; | ||
288 | |||
289 | if (get_user(halfword, pc16)) | ||
290 | goto sigsegv; | ||
291 | mminsn.next_pc_inc = 2; | ||
292 | word = ((unsigned int)halfword << 16); | ||
293 | |||
294 | if (!mm_insn_16bit(halfword)) { | ||
295 | pc16++; | ||
296 | if (get_user(halfword, pc16)) | ||
297 | goto sigsegv; | ||
298 | mminsn.next_pc_inc = 4; | ||
299 | word |= halfword; | ||
300 | } | ||
301 | mminsn.next_insn = word; | ||
302 | |||
303 | mm_isBranchInstr(regs, mminsn, &contpc); | ||
304 | |||
305 | regs->cp0_epc = contpc; | ||
306 | |||
307 | return 0; | ||
308 | |||
309 | sigsegv: | ||
310 | force_sig(SIGSEGV); | ||
311 | return -EFAULT; | ||
312 | } | ||
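For reference, the reassembly done above: microMIPS code is fetched in 16-bit parcels, and a 32-bit instruction carries its first parcel in the upper half of the word. A trivial sketch with made-up parcel values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t first = 0xf400, second = 0x1234;	/* hypothetical parcels */
	uint32_t word = ((uint32_t)first << 16) | second;

	printf("reassembled instruction word: %#010x\n", (unsigned)word);
	return 0;
}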
313 | |||
314 | /* | ||
315 | * Compute return address and emulate branch in MIPS16e mode after an | ||
316 | * exception only. It does not handle compact branches/jumps and cannot | ||
317 | * be used in interrupt context. (Compact branches/jumps do not cause | ||
318 | * exceptions.) | ||
319 | */ | ||
320 | int __MIPS16e_compute_return_epc(struct pt_regs *regs) | ||
321 | { | ||
322 | u16 __user *addr; | ||
323 | union mips16e_instruction inst; | ||
324 | u16 inst2; | ||
325 | u32 fullinst; | ||
326 | long epc; | ||
327 | |||
328 | epc = regs->cp0_epc; | ||
329 | |||
330 | /* Read the instruction. */ | ||
331 | addr = (u16 __user *)msk_isa16_mode(epc); | ||
332 | if (__get_user(inst.full, addr)) { | ||
333 | force_sig(SIGSEGV); | ||
334 | return -EFAULT; | ||
335 | } | ||
336 | |||
337 | switch (inst.ri.opcode) { | ||
338 | case MIPS16e_extend_op: | ||
339 | regs->cp0_epc += 4; | ||
340 | return 0; | ||
341 | |||
342 | /* | ||
343 | * JAL and JALX in MIPS16e mode | ||
344 | */ | ||
345 | case MIPS16e_jal_op: | ||
346 | addr += 1; | ||
347 | if (__get_user(inst2, addr)) { | ||
348 | force_sig(SIGSEGV); | ||
349 | return -EFAULT; | ||
350 | } | ||
351 | fullinst = ((unsigned)inst.full << 16) | inst2; | ||
352 | regs->regs[31] = epc + 6; | ||
353 | epc += 4; | ||
354 | epc >>= 28; | ||
355 | epc <<= 28; | ||
356 | /* | ||
357 | * JAL:5 X:1 TARGET[20:16]:5 TARGET[25:21]:5 TARGET[15:0]:16 | ||
358 | * | ||
359 | * i.e. instruction bits [25:21] hold TARGET[20:16], bits [20:16] | ||
360 | * hold TARGET[25:21], and the second halfword holds TARGET[15:0]. | ||
361 | */ | ||
362 | epc |= | ||
363 | ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) | | ||
364 | ((fullinst & 0x1f0000) << 7); | ||
365 | if (!inst.jal.x) | ||
366 | set_isa16_mode(epc); /* Set ISA mode bit. */ | ||
367 | regs->cp0_epc = epc; | ||
368 | return 0; | ||
369 | |||
370 | /* | ||
371 | * J(AL)R(C) | ||
372 | */ | ||
373 | case MIPS16e_rr_op: | ||
374 | if (inst.rr.func == MIPS16e_jr_func) { | ||
375 | |||
376 | if (inst.rr.ra) | ||
377 | regs->cp0_epc = regs->regs[31]; | ||
378 | else | ||
379 | regs->cp0_epc = | ||
380 | regs->regs[reg16to32[inst.rr.rx]]; | ||
381 | |||
382 | if (inst.rr.l) { | ||
383 | if (inst.rr.nd) | ||
384 | regs->regs[31] = epc + 2; | ||
385 | else | ||
386 | regs->regs[31] = epc + 4; | ||
387 | } | ||
388 | return 0; | ||
389 | } | ||
390 | break; | ||
391 | } | ||
392 | |||
393 | /* | ||
394 | * All other cases have no branch delay slot and are 16-bits. | ||
395 | * Branches do not cause an exception. | ||
396 | */ | ||
397 | regs->cp0_epc += 2; | ||
398 | |||
399 | return 0; | ||
400 | } | ||
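The JAL decoding above can be checked end to end: synthesise an encoding with the scrambled field placement, then apply the kernel's mask-and-shift sequence and confirm it recovers target << 2. The 26-bit target below is arbitrary, and the opcode/X bits are ignored for the purpose of the check:

#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t target = 0x1234567;	/* arbitrary 26-bit jump target */
	uint32_t fullinst =
		(((target >> 16) & 0x1f) << 21) |	/* TARGET[20:16] */
		(((target >> 21) & 0x1f) << 16) |	/* TARGET[25:21] */
		(target & 0xffff);			/* TARGET[15:0]  */
	uint32_t addr = ((fullinst & 0xffff) << 2) |
			((fullinst & 0x3e00000) >> 3) |
			((fullinst & 0x1f0000) << 7);

	assert(addr == (target << 2));	/* low 28 bits of the new EPC */
	return 0;
}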
401 | |||
402 | /** | ||
403 | * __compute_return_epc_for_insn - Compute the return address and, | ||
404 | * if required, emulate the branch. | ||
405 | * | ||
406 | * @regs: Pointer to pt_regs | ||
407 | * @insn: branch instruction to decode | ||
408 | * Return: -EFAULT on error and forces SIGILL, and on success | ||
409 | * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after | ||
410 | * evaluating the branch. | ||
411 | * | ||
412 | * MIPS R6 Compact branches and forbidden slots: | ||
413 | * Compact branches do not throw exceptions because they do | ||
414 | * not have delay slots. The forbidden slot instruction ($PC+4) | ||
415 | * is only executed if the branch was not taken. Otherwise the | ||
416 | * forbidden slot is skipped entirely. This means that if we | ||
417 | * are here because of a MIPS R6 compact branch instruction, | ||
418 | * the exception must have come from the forbidden slot; the | ||
419 | * branch was therefore not taken, and the EPC can safely be | ||
420 | * set to EPC + 8. | ||
421 | */ | ||
422 | int __compute_return_epc_for_insn(struct pt_regs *regs, | ||
423 | union mips_instruction insn) | ||
424 | { | ||
425 | long epc = regs->cp0_epc; | ||
426 | unsigned int dspcontrol; | ||
427 | int ret = 0; | ||
428 | |||
429 | switch (insn.i_format.opcode) { | ||
430 | /* | ||
431 | * jr and jalr are in r_format format. | ||
432 | */ | ||
433 | case spec_op: | ||
434 | switch (insn.r_format.func) { | ||
435 | case jalr_op: | ||
436 | regs->regs[insn.r_format.rd] = epc + 8; | ||
437 | fallthrough; | ||
438 | case jr_op: | ||
439 | if (NO_R6EMU && insn.r_format.func == jr_op) | ||
440 | goto sigill_r2r6; | ||
441 | regs->cp0_epc = regs->regs[insn.r_format.rs]; | ||
442 | break; | ||
443 | } | ||
444 | break; | ||
445 | |||
446 | /* | ||
447 | * This group contains: | ||
448 | * bltz_op, bgez_op, bltzl_op, bgezl_op, | ||
449 | * bltzal_op, bgezal_op, bltzall_op, bgezall_op. | ||
450 | */ | ||
451 | case bcond_op: | ||
452 | switch (insn.i_format.rt) { | ||
453 | case bltzl_op: | ||
454 | if (NO_R6EMU) | ||
455 | goto sigill_r2r6; | ||
456 | fallthrough; | ||
457 | case bltz_op: | ||
458 | if ((long)regs->regs[insn.i_format.rs] < 0) { | ||
459 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
460 | if (insn.i_format.rt == bltzl_op) | ||
461 | ret = BRANCH_LIKELY_TAKEN; | ||
462 | } else | ||
463 | epc += 8; | ||
464 | regs->cp0_epc = epc; | ||
465 | break; | ||
466 | |||
467 | case bgezl_op: | ||
468 | if (NO_R6EMU) | ||
469 | goto sigill_r2r6; | ||
470 | fallthrough; | ||
471 | case bgez_op: | ||
472 | if ((long)regs->regs[insn.i_format.rs] >= 0) { | ||
473 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
474 | if (insn.i_format.rt == bgezl_op) | ||
475 | ret = BRANCH_LIKELY_TAKEN; | ||
476 | } else | ||
477 | epc += 8; | ||
478 | regs->cp0_epc = epc; | ||
479 | break; | ||
480 | |||
481 | case bltzal_op: | ||
482 | case bltzall_op: | ||
483 | if (NO_R6EMU && (insn.i_format.rs || | ||
484 | insn.i_format.rt == bltzall_op)) | ||
485 | goto sigill_r2r6; | ||
486 | regs->regs[31] = epc + 8; | ||
487 | /* | ||
488 | * OK we are here either because we hit a NAL | ||
489 | * instruction or because we are emulating an | ||
490 | * old bltzal{,l} one. Let's figure out what the | ||
491 | * case really is. | ||
492 | */ | ||
493 | if (!insn.i_format.rs) { | ||
494 | /* | ||
495 | * NAL or BLTZAL with rs == 0 | ||
496 | * Doesn't matter if we are R6 or not. The | ||
497 | * result is the same | ||
498 | */ | ||
499 | regs->cp0_epc += 4 + | ||
500 | (insn.i_format.simmediate << 2); | ||
501 | break; | ||
502 | } | ||
503 | /* Now do the real thing for non-R6 BLTZAL{,L} */ | ||
504 | if ((long)regs->regs[insn.i_format.rs] < 0) { | ||
505 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
506 | if (insn.i_format.rt == bltzall_op) | ||
507 | ret = BRANCH_LIKELY_TAKEN; | ||
508 | } else | ||
509 | epc += 8; | ||
510 | regs->cp0_epc = epc; | ||
511 | break; | ||
512 | |||
513 | case bgezal_op: | ||
514 | case bgezall_op: | ||
515 | if (NO_R6EMU && (insn.i_format.rs || | ||
516 | insn.i_format.rt == bgezall_op)) | ||
517 | goto sigill_r2r6; | ||
518 | regs->regs[31] = epc + 8; | ||
519 | /* | ||
520 | * OK we are here either because we hit a BAL | ||
521 | * instruction or because we are emulating an | ||
522 | * old bgezal{,l} one. Let's figure out what the | ||
523 | * case really is. | ||
524 | */ | ||
525 | if (!insn.i_format.rs) { | ||
526 | /* | ||
527 | * BAL or BGEZAL with rs == 0 | ||
528 | * Doesn't matter if we are R6 or not. The | ||
529 | * result is the same | ||
530 | */ | ||
531 | regs->cp0_epc += 4 + | ||
532 | (insn.i_format.simmediate << 2); | ||
533 | break; | ||
534 | } | ||
535 | /* Now do the real thing for non-R6 BGEZAL{,L} */ | ||
536 | if ((long)regs->regs[insn.i_format.rs] >= 0) { | ||
537 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
538 | if (insn.i_format.rt == bgezall_op) | ||
539 | ret = BRANCH_LIKELY_TAKEN; | ||
540 | } else | ||
541 | epc += 8; | ||
542 | regs->cp0_epc = epc; | ||
543 | break; | ||
544 | |||
545 | case bposge32_op: | ||
546 | if (!cpu_has_dsp) | ||
547 | goto sigill_dsp; | ||
548 | |||
549 | dspcontrol = rddsp(0x01); | ||
550 | |||
551 | if (dspcontrol >= 32) { | ||
552 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
553 | } else | ||
554 | epc += 8; | ||
555 | regs->cp0_epc = epc; | ||
556 | break; | ||
557 | } | ||
558 | break; | ||
559 | |||
560 | /* | ||
561 | * These are unconditional and in j_format. | ||
562 | */ | ||
563 | case jalx_op: | ||
564 | case jal_op: | ||
565 | regs->regs[31] = regs->cp0_epc + 8; | ||
566 | fallthrough; | ||
567 | case j_op: | ||
568 | epc += 4; | ||
569 | epc >>= 28; | ||
570 | epc <<= 28; | ||
571 | epc |= (insn.j_format.target << 2); | ||
572 | regs->cp0_epc = epc; | ||
573 | if (insn.i_format.opcode == jalx_op) | ||
574 | set_isa16_mode(regs->cp0_epc); | ||
575 | break; | ||
576 | |||
577 | /* | ||
578 | * These are conditional and in i_format. | ||
579 | */ | ||
580 | case beql_op: | ||
581 | if (NO_R6EMU) | ||
582 | goto sigill_r2r6; | ||
583 | fallthrough; | ||
584 | case beq_op: | ||
585 | if (regs->regs[insn.i_format.rs] == | ||
586 | regs->regs[insn.i_format.rt]) { | ||
587 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
588 | if (insn.i_format.opcode == beql_op) | ||
589 | ret = BRANCH_LIKELY_TAKEN; | ||
590 | } else | ||
591 | epc += 8; | ||
592 | regs->cp0_epc = epc; | ||
593 | break; | ||
594 | |||
595 | case bnel_op: | ||
596 | if (NO_R6EMU) | ||
597 | goto sigill_r2r6; | ||
598 | fallthrough; | ||
599 | case bne_op: | ||
600 | if (regs->regs[insn.i_format.rs] != | ||
601 | regs->regs[insn.i_format.rt]) { | ||
602 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
603 | if (insn.i_format.opcode == bnel_op) | ||
604 | ret = BRANCH_LIKELY_TAKEN; | ||
605 | } else | ||
606 | epc += 8; | ||
607 | regs->cp0_epc = epc; | ||
608 | break; | ||
609 | |||
610 | case blezl_op: /* not really i_format */ | ||
611 | if (!insn.i_format.rt && NO_R6EMU) | ||
612 | goto sigill_r2r6; | ||
613 | fallthrough; | ||
614 | case blez_op: | ||
615 | /* | ||
616 | * Compact branches for R6 for the | ||
617 | * blez and blezl opcodes. | ||
618 | * BLEZ | rs = 0 | rt != 0 == BLEZALC | ||
619 | * BLEZ | rs = rt != 0 == BGEZALC | ||
620 | * BLEZ | rs != 0 | rt != 0 == BGEUC | ||
621 | * BLEZL | rs = 0 | rt != 0 == BLEZC | ||
622 | * BLEZL | rs = rt != 0 == BGEZC | ||
623 | * BLEZL | rs != 0 | rt != 0 == BGEC | ||
624 | * | ||
625 | * For real BLEZ{,L}, rt is always 0. | ||
626 | */ | ||
627 | |||
628 | if (cpu_has_mips_r6 && insn.i_format.rt) { | ||
629 | if ((insn.i_format.opcode == blez_op) && | ||
630 | ((!insn.i_format.rs && insn.i_format.rt) || | ||
631 | (insn.i_format.rs == insn.i_format.rt))) | ||
632 | regs->regs[31] = epc + 4; | ||
633 | regs->cp0_epc += 8; | ||
634 | break; | ||
635 | } | ||
636 | /* rt field assumed to be zero */ | ||
637 | if ((long)regs->regs[insn.i_format.rs] <= 0) { | ||
638 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
639 | if (insn.i_format.opcode == blezl_op) | ||
640 | ret = BRANCH_LIKELY_TAKEN; | ||
641 | } else | ||
642 | epc += 8; | ||
643 | regs->cp0_epc = epc; | ||
644 | break; | ||
645 | |||
646 | case bgtzl_op: | ||
647 | if (!insn.i_format.rt && NO_R6EMU) | ||
648 | goto sigill_r2r6; | ||
649 | fallthrough; | ||
650 | case bgtz_op: | ||
651 | /* | ||
652 | * Compact branches for R6 for the | ||
653 | * bgtz and bgtzl opcodes. | ||
654 | * BGTZ | rs = 0 | rt != 0 == BGTZALC | ||
655 | * BGTZ | rs = rt != 0 == BLTZALC | ||
656 | * BGTZ | rs != 0 | rt != 0 == BLTUC | ||
657 | * BGTZL | rs = 0 | rt != 0 == BGTZC | ||
658 | * BGTZL | rs = rt != 0 == BLTZC | ||
659 | * BGTZL | rs != 0 | rt != 0 == BLTC | ||
660 | * | ||
661 | * *ZALC variant for BGTZ && rt != 0 | ||
662 | * For real BGTZ{,L}, rt is always 0. | ||
663 | */ | ||
664 | if (cpu_has_mips_r6 && insn.i_format.rt) { | ||
665 | if ((insn.i_format.opcode == bgtz_op) && | ||
666 | ((!insn.i_format.rs && insn.i_format.rt) || | ||
667 | (insn.i_format.rs == insn.i_format.rt))) | ||
668 | regs->regs[31] = epc + 4; | ||
669 | regs->cp0_epc += 8; | ||
670 | break; | ||
671 | } | ||
672 | |||
673 | /* rt field assumed to be zero */ | ||
674 | if ((long)regs->regs[insn.i_format.rs] > 0) { | ||
675 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
676 | if (insn.i_format.opcode == bgtzl_op) | ||
677 | ret = BRANCH_LIKELY_TAKEN; | ||
678 | } else | ||
679 | epc += 8; | ||
680 | regs->cp0_epc = epc; | ||
681 | break; | ||
682 | |||
683 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
684 | /* | ||
685 | * And now the FPA/cp1 branch instructions. | ||
686 | */ | ||
687 | case cop1_op: { | ||
688 | unsigned int bit, fcr31, reg; | ||
689 | |||
690 | if (cpu_has_mips_r6 && | ||
691 | ((insn.i_format.rs == bc1eqz_op) || | ||
692 | (insn.i_format.rs == bc1nez_op))) { | ||
693 | if (!init_fp_ctx(current)) | ||
694 | lose_fpu(1); | ||
695 | reg = insn.i_format.rt; | ||
696 | bit = get_fpr32(¤t->thread.fpu.fpr[reg], 0) & 0x1; | ||
697 | if (insn.i_format.rs == bc1eqz_op) | ||
698 | bit = !bit; | ||
699 | own_fpu(1); | ||
700 | if (bit) | ||
701 | epc = epc + 4 + | ||
702 | (insn.i_format.simmediate << 2); | ||
703 | else | ||
704 | epc += 8; | ||
705 | regs->cp0_epc = epc; | ||
706 | |||
707 | break; | ||
708 | } else { | ||
709 | |||
710 | preempt_disable(); | ||
711 | if (is_fpu_owner()) | ||
712 | fcr31 = read_32bit_cp1_register(CP1_STATUS); | ||
713 | else | ||
714 | fcr31 = current->thread.fpu.fcr31; | ||
715 | preempt_enable(); | ||
716 | |||
717 | bit = (insn.i_format.rt >> 2); | ||
718 | bit += (bit != 0); | ||
719 | bit += 23; | ||
720 | switch (insn.i_format.rt & 3) { | ||
721 | case 0: /* bc1f */ | ||
722 | case 2: /* bc1fl */ | ||
723 | if (~fcr31 & (1 << bit)) { | ||
724 | epc = epc + 4 + | ||
725 | (insn.i_format.simmediate << 2); | ||
726 | if (insn.i_format.rt == 2) | ||
727 | ret = BRANCH_LIKELY_TAKEN; | ||
728 | } else | ||
729 | epc += 8; | ||
730 | regs->cp0_epc = epc; | ||
731 | break; | ||
732 | |||
733 | case 1: /* bc1t */ | ||
734 | case 3: /* bc1tl */ | ||
735 | if (fcr31 & (1 << bit)) { | ||
736 | epc = epc + 4 + | ||
737 | (insn.i_format.simmediate << 2); | ||
738 | if (insn.i_format.rt == 3) | ||
739 | ret = BRANCH_LIKELY_TAKEN; | ||
740 | } else | ||
741 | epc += 8; | ||
742 | regs->cp0_epc = epc; | ||
743 | break; | ||
744 | } | ||
745 | break; | ||
746 | } | ||
747 | } | ||
748 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
749 | |||
750 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
751 | case lwc2_op: /* This is bbit0 on Octeon */ | ||
752 | if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) | ||
753 | == 0) | ||
754 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
755 | else | ||
756 | epc += 8; | ||
757 | regs->cp0_epc = epc; | ||
758 | break; | ||
759 | case ldc2_op: /* This is bbit032 on Octeon */ | ||
760 | if ((regs->regs[insn.i_format.rs] & | ||
761 | (1ull<<(insn.i_format.rt+32))) == 0) | ||
762 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
763 | else | ||
764 | epc += 8; | ||
765 | regs->cp0_epc = epc; | ||
766 | break; | ||
767 | case swc2_op: /* This is bbit1 on Octeon */ | ||
768 | if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt)) | ||
769 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
770 | else | ||
771 | epc += 8; | ||
772 | regs->cp0_epc = epc; | ||
773 | break; | ||
774 | case sdc2_op: /* This is bbit132 on Octeon */ | ||
775 | if (regs->regs[insn.i_format.rs] & | ||
776 | (1ull<<(insn.i_format.rt+32))) | ||
777 | epc = epc + 4 + (insn.i_format.simmediate << 2); | ||
778 | else | ||
779 | epc += 8; | ||
780 | regs->cp0_epc = epc; | ||
781 | break; | ||
782 | #else | ||
783 | case bc6_op: | ||
784 | /* Only valid for MIPS R6 */ | ||
785 | if (!cpu_has_mips_r6) | ||
786 | goto sigill_r6; | ||
787 | regs->cp0_epc += 8; | ||
788 | break; | ||
789 | case balc6_op: | ||
790 | if (!cpu_has_mips_r6) | ||
791 | goto sigill_r6; | ||
792 | /* Compact branch: BALC */ | ||
793 | regs->regs[31] = epc + 4; | ||
794 | epc += 4 + (insn.i_format.simmediate << 2); | ||
795 | regs->cp0_epc = epc; | ||
796 | break; | ||
797 | case pop66_op: | ||
798 | if (!cpu_has_mips_r6) | ||
799 | goto sigill_r6; | ||
800 | /* Compact branch: BEQZC || JIC */ | ||
801 | regs->cp0_epc += 8; | ||
802 | break; | ||
803 | case pop76_op: | ||
804 | if (!cpu_has_mips_r6) | ||
805 | goto sigill_r6; | ||
806 | /* Compact branch: BNEZC || JIALC */ | ||
807 | if (!insn.i_format.rs) { | ||
808 | /* JIALC: set $31/ra */ | ||
809 | regs->regs[31] = epc + 4; | ||
810 | } | ||
811 | regs->cp0_epc += 8; | ||
812 | break; | ||
813 | #endif | ||
814 | case pop10_op: | ||
815 | case pop30_op: | ||
816 | /* Only valid for MIPS R6 */ | ||
817 | if (!cpu_has_mips_r6) | ||
818 | goto sigill_r6; | ||
819 | /* | ||
820 | * Compact branches: | ||
821 | * bovc, beqc, beqzalc, bnvc, bnec, bnezlac | ||
822 | */ | ||
823 | if (insn.i_format.rt && !insn.i_format.rs) | ||
824 | regs->regs[31] = epc + 4; | ||
825 | regs->cp0_epc += 8; | ||
826 | break; | ||
827 | } | ||
828 | |||
829 | return ret; | ||
830 | |||
831 | sigill_dsp: | ||
832 | pr_debug("%s: DSP branch but not DSP ASE - sending SIGILL.\n", | ||
833 | current->comm); | ||
834 | force_sig(SIGILL); | ||
835 | return -EFAULT; | ||
836 | sigill_r2r6: | ||
837 | pr_debug("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n", | ||
838 | current->comm); | ||
839 | force_sig(SIGILL); | ||
840 | return -EFAULT; | ||
841 | sigill_r6: | ||
842 | pr_debug("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n", | ||
843 | current->comm); | ||
844 | force_sig(SIGILL); | ||
845 | return -EFAULT; | ||
846 | } | ||
847 | EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn); | ||
848 | |||
849 | int __compute_return_epc(struct pt_regs *regs) | ||
850 | { | ||
851 | unsigned int __user *addr; | ||
852 | long epc; | ||
853 | union mips_instruction insn; | ||
854 | |||
855 | epc = regs->cp0_epc; | ||
856 | if (epc & 3) | ||
857 | goto unaligned; | ||
858 | |||
859 | /* | ||
860 | * Read the instruction | ||
861 | */ | ||
862 | addr = (unsigned int __user *) epc; | ||
863 | if (__get_user(insn.word, addr)) { | ||
864 | force_sig(SIGSEGV); | ||
865 | return -EFAULT; | ||
866 | } | ||
867 | |||
868 | return __compute_return_epc_for_insn(regs, insn); | ||
869 | |||
870 | unaligned: | ||
871 | printk("%s: unaligned epc - sending SIGBUS.\n", current->comm); | ||
872 | force_sig(SIGBUS); | ||
873 | return -EFAULT; | ||
874 | } | ||
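The branch arithmetic repeated throughout this file, in one place: a taken branch targets EPC + 4 + (simmediate << 2), i.e. relative to the delay slot, while a not-taken branch resumes at EPC + 8, past the delay slot. Sample values below are invented:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t epc = 0x80001000;	/* hypothetical faulting branch */
	int16_t simm = -4;		/* 16-bit signed word offset */

	printf("taken:     %#x\n", (unsigned)(epc + 4 + (int32_t)simm * 4));
	printf("not taken: %#x\n", (unsigned)(epc + 8));
	/* prints 0x80000ff4 and 0x80001008 */
	return 0;
}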
875 | |||
876 | #if (defined CONFIG_KPROBES) || (defined CONFIG_UPROBES) | ||
877 | |||
878 | int __insn_is_compact_branch(union mips_instruction insn) | ||
879 | { | ||
880 | if (!cpu_has_mips_r6) | ||
881 | return 0; | ||
882 | |||
883 | switch (insn.i_format.opcode) { | ||
884 | case blezl_op: | ||
885 | case bgtzl_op: | ||
886 | case blez_op: | ||
887 | case bgtz_op: | ||
888 | /* | ||
889 | * blez[l] and bgtz[l] opcodes with non-zero rt | ||
890 | * are MIPS R6 compact branches | ||
891 | */ | ||
892 | if (insn.i_format.rt) | ||
893 | return 1; | ||
894 | break; | ||
895 | case bc6_op: | ||
896 | case balc6_op: | ||
897 | case pop10_op: | ||
898 | case pop30_op: | ||
899 | case pop66_op: | ||
900 | case pop76_op: | ||
901 | return 1; | ||
902 | } | ||
903 | |||
904 | return 0; | ||
905 | } | ||
906 | EXPORT_SYMBOL_GPL(__insn_is_compact_branch); | ||
907 | |||
908 | #endif /* CONFIG_KPROBES || CONFIG_UPROBES */ | ||
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c new file mode 100644 index 000000000..529dab855 --- /dev/null +++ b/arch/mips/kernel/cacheinfo.c | |||
@@ -0,0 +1,100 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * MIPS cacheinfo support | ||
4 | */ | ||
5 | #include <linux/cacheinfo.h> | ||
6 | |||
7 | /* Populates leaf and increments to next leaf */ | ||
8 | #define populate_cache(cache, leaf, c_level, c_type) \ | ||
9 | do { \ | ||
10 | leaf->type = c_type; \ | ||
11 | leaf->level = c_level; \ | ||
12 | leaf->coherency_line_size = c->cache.linesz; \ | ||
13 | leaf->number_of_sets = c->cache.sets; \ | ||
14 | leaf->ways_of_associativity = c->cache.ways; \ | ||
15 | leaf->size = c->cache.linesz * c->cache.sets * \ | ||
16 | c->cache.ways; \ | ||
17 | leaf++; \ | ||
18 | } while (0) | ||
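The size field filled in by populate_cache() is simply line size times sets times ways; for instance, hypothetical 32-byte lines, 256 sets and 4 ways give a 32 KiB cache:

#include <stdio.h>

int main(void)
{
	unsigned int linesz = 32, sets = 256, ways = 4;	/* example geometry */

	printf("cache size: %u KiB\n", linesz * sets * ways / 1024);	/* 32 */
	return 0;
}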
19 | |||
20 | int init_cache_level(unsigned int cpu) | ||
21 | { | ||
22 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
23 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); | ||
24 | int levels = 0, leaves = 0; | ||
25 | |||
26 | /* | ||
27 | * If Dcache is not set, we assume the cache structures | ||
28 | * are not properly initialized. | ||
29 | */ | ||
30 | if (c->dcache.waysize) | ||
31 | levels += 1; | ||
32 | else | ||
33 | return -ENOENT; | ||
34 | |||
35 | |||
36 | leaves += (c->icache.waysize) ? 2 : 1; | ||
37 | |||
38 | if (c->scache.waysize) { | ||
39 | levels++; | ||
40 | leaves++; | ||
41 | } | ||
42 | |||
43 | if (c->tcache.waysize) { | ||
44 | levels++; | ||
45 | leaves++; | ||
46 | } | ||
47 | |||
48 | this_cpu_ci->num_levels = levels; | ||
49 | this_cpu_ci->num_leaves = leaves; | ||
50 | return 0; | ||
51 | } | ||
52 | |||
53 | static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map) | ||
54 | { | ||
55 | int cpu1; | ||
56 | |||
57 | for_each_possible_cpu(cpu1) | ||
58 | if (cpus_are_siblings(cpu, cpu1)) | ||
59 | cpumask_set_cpu(cpu1, cpu_map); | ||
60 | } | ||
61 | |||
62 | static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map) | ||
63 | { | ||
64 | int cpu1; | ||
65 | int cluster = cpu_cluster(&cpu_data[cpu]); | ||
66 | |||
67 | for_each_possible_cpu(cpu1) | ||
68 | if (cpu_cluster(&cpu_data[cpu1]) == cluster) | ||
69 | cpumask_set_cpu(cpu1, cpu_map); | ||
70 | } | ||
71 | |||
72 | int populate_cache_leaves(unsigned int cpu) | ||
73 | { | ||
74 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
75 | struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu); | ||
76 | struct cacheinfo *this_leaf = this_cpu_ci->info_list; | ||
77 | |||
78 | if (c->icache.waysize) { | ||
79 | /* L1 caches are per core */ | ||
80 | fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); | ||
81 | populate_cache(dcache, this_leaf, 1, CACHE_TYPE_DATA); | ||
82 | fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map); | ||
83 | populate_cache(icache, this_leaf, 1, CACHE_TYPE_INST); | ||
84 | } else { | ||
85 | populate_cache(dcache, this_leaf, 1, CACHE_TYPE_UNIFIED); | ||
86 | } | ||
87 | |||
88 | if (c->scache.waysize) { | ||
89 | /* L2 cache is per cluster */ | ||
90 | fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map); | ||
91 | populate_cache(scache, this_leaf, 2, CACHE_TYPE_UNIFIED); | ||
92 | } | ||
93 | |||
94 | if (c->tcache.waysize) | ||
95 | populate_cache(tcache, this_leaf, 3, CACHE_TYPE_UNIFIED); | ||
96 | |||
97 | this_cpu_ci->cpu_map_populated = true; | ||
98 | |||
99 | return 0; | ||
100 | } | ||
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c new file mode 100644 index 000000000..d39a2963b --- /dev/null +++ b/arch/mips/kernel/cevt-bcm1480.c | |||
@@ -0,0 +1,138 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2000,2001,2004 Broadcom Corporation | ||
4 | */ | ||
5 | #include <linux/clockchips.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | #include <linux/percpu.h> | ||
8 | #include <linux/smp.h> | ||
9 | #include <linux/irq.h> | ||
10 | |||
11 | #include <asm/addrspace.h> | ||
12 | #include <asm/io.h> | ||
13 | #include <asm/time.h> | ||
14 | |||
15 | #include <asm/sibyte/bcm1480_regs.h> | ||
16 | #include <asm/sibyte/sb1250_regs.h> | ||
17 | #include <asm/sibyte/bcm1480_int.h> | ||
18 | #include <asm/sibyte/bcm1480_scd.h> | ||
19 | |||
20 | #include <asm/sibyte/sb1250.h> | ||
21 | |||
22 | #define IMR_IP2_VAL K_BCM1480_INT_MAP_I0 | ||
23 | #define IMR_IP3_VAL K_BCM1480_INT_MAP_I1 | ||
24 | #define IMR_IP4_VAL K_BCM1480_INT_MAP_I2 | ||
25 | |||
26 | /* | ||
27 | * The general purpose timer ticks at 1MHz independently of | ||
28 | * the rest of the system. | ||
29 | */ | ||
30 | |||
31 | static int sibyte_set_periodic(struct clock_event_device *evt) | ||
32 | { | ||
33 | unsigned int cpu = smp_processor_id(); | ||
34 | void __iomem *cfg, *init; | ||
35 | |||
36 | cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); | ||
37 | init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); | ||
38 | |||
39 | __raw_writeq(0, cfg); | ||
40 | __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init); | ||
41 | __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg); | ||
42 | return 0; | ||
43 | } | ||
44 | |||
45 | static int sibyte_shutdown(struct clock_event_device *evt) | ||
46 | { | ||
47 | unsigned int cpu = smp_processor_id(); | ||
48 | void __iomem *cfg; | ||
49 | |||
50 | cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); | ||
51 | |||
52 | /* Stop the timer until we actually program a shot */ | ||
53 | __raw_writeq(0, cfg); | ||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd) | ||
58 | { | ||
59 | unsigned int cpu = smp_processor_id(); | ||
60 | void __iomem *cfg, *init; | ||
61 | |||
62 | cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); | ||
63 | init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); | ||
64 | |||
65 | __raw_writeq(0, cfg); | ||
66 | __raw_writeq(delta - 1, init); | ||
67 | __raw_writeq(M_SCD_TIMER_ENABLE, cfg); | ||
68 | |||
69 | return 0; | ||
70 | } | ||
71 | |||
72 | static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) | ||
73 | { | ||
74 | unsigned int cpu = smp_processor_id(); | ||
75 | struct clock_event_device *cd = dev_id; | ||
76 | void __iomem *cfg; | ||
77 | unsigned long tmode; | ||
78 | |||
79 | if (clockevent_state_periodic(cd)) | ||
80 | tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS; | ||
81 | else | ||
82 | tmode = 0; | ||
83 | |||
84 | /* ACK interrupt */ | ||
85 | cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); | ||
86 | ____raw_writeq(tmode, cfg); | ||
87 | |||
88 | cd->event_handler(cd); | ||
89 | |||
90 | return IRQ_HANDLED; | ||
91 | } | ||
92 | |||
93 | static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent); | ||
94 | static DEFINE_PER_CPU(char [18], sibyte_hpt_name); | ||
95 | |||
96 | void sb1480_clockevent_init(void) | ||
97 | { | ||
98 | unsigned int cpu = smp_processor_id(); | ||
99 | unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu; | ||
100 | struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); | ||
101 | unsigned char *name = per_cpu(sibyte_hpt_name, cpu); | ||
102 | unsigned long flags = IRQF_PERCPU | IRQF_TIMER; | ||
103 | |||
104 | BUG_ON(cpu > 3); /* Only have 4 general purpose timers */ | ||
105 | |||
106 | sprintf(name, "bcm1480-counter-%d", cpu); | ||
107 | cd->name = name; | ||
108 | cd->features = CLOCK_EVT_FEAT_PERIODIC | | ||
109 | CLOCK_EVT_FEAT_ONESHOT; | ||
110 | clockevent_set_clock(cd, V_SCD_TIMER_FREQ); | ||
111 | cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd); | ||
112 | cd->max_delta_ticks = 0x7fffff; | ||
113 | cd->min_delta_ns = clockevent_delta2ns(2, cd); | ||
114 | cd->min_delta_ticks = 2; | ||
115 | cd->rating = 200; | ||
116 | cd->irq = irq; | ||
117 | cd->cpumask = cpumask_of(cpu); | ||
118 | cd->set_next_event = sibyte_next_event; | ||
119 | cd->set_state_shutdown = sibyte_shutdown; | ||
120 | cd->set_state_periodic = sibyte_set_periodic; | ||
121 | cd->set_state_oneshot = sibyte_shutdown; | ||
122 | clockevents_register_device(cd); | ||
123 | |||
124 | bcm1480_mask_irq(cpu, irq); | ||
125 | |||
126 | /* | ||
127 | * Map the timer interrupt to IP[4] of this cpu | ||
128 | */ | ||
129 | __raw_writeq(IMR_IP4_VAL, | ||
130 | IOADDR(A_BCM1480_IMR_REGISTER(cpu, | ||
131 | R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3))); | ||
132 | |||
133 | bcm1480_unmask_irq(cpu, irq); | ||
134 | |||
135 | irq_set_affinity(irq, cpumask_of(cpu)); | ||
136 | if (request_irq(irq, sibyte_counter_handler, flags, name, cd)) | ||
137 | pr_err("Failed to request irq %d (%s)\n", irq, name); | ||
138 | } | ||
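A sanity check on the 0x7fffff maximum delta programmed above: the timer runs at a fixed 1 MHz (V_SCD_TIMER_FREQ), so the longest one-shot interval is about 8.4 seconds; clockevent_delta2ns() performs a scaled version of this conversion:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t freq = 1000000;	/* 1 MHz general purpose timer */
	uint64_t max_ticks = 0x7fffff;

	printf("max one-shot: ~%llu ns\n",
	       (unsigned long long)(max_ticks * 1000000000ull / freq));
	return 0;
}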
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c new file mode 100644 index 000000000..9a47fbcd4 --- /dev/null +++ b/arch/mips/kernel/cevt-ds1287.c | |||
@@ -0,0 +1,121 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * DS1287 clockevent driver | ||
4 | * | ||
5 | * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> | ||
6 | */ | ||
7 | #include <linux/clockchips.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/mc146818rtc.h> | ||
11 | #include <linux/irq.h> | ||
12 | |||
13 | #include <asm/time.h> | ||
14 | |||
15 | int ds1287_timer_state(void) | ||
16 | { | ||
17 | return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0; | ||
18 | } | ||
19 | |||
20 | int ds1287_set_base_clock(unsigned int hz) | ||
21 | { | ||
22 | u8 rate; | ||
23 | |||
24 | switch (hz) { | ||
25 | case 128: | ||
26 | rate = 0x9; | ||
27 | break; | ||
28 | case 256: | ||
29 | rate = 0x8; | ||
30 | break; | ||
31 | case 1024: | ||
32 | rate = 0x6; | ||
33 | break; | ||
34 | default: | ||
35 | return -EINVAL; | ||
36 | } | ||
37 | |||
38 | CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A); | ||
39 | |||
40 | return 0; | ||
41 | } | ||
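The rate codes chosen above follow the MC146818 convention: with the 32.768 kHz time base selected by RTC_REF_CLCK_32KHZ, the periodic interrupt frequency is 32768 >> (rate - 1) Hz, which is how 0x9, 0x8 and 0x6 map to 128, 256 and 1024 Hz:

#include <stdio.h>

int main(void)
{
	unsigned char rates[] = { 0x6, 0x8, 0x9 };

	for (unsigned int i = 0; i < sizeof(rates); i++)
		printf("rate %#x -> %u Hz\n",
		       rates[i], 32768u >> (rates[i] - 1));
	return 0;
}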
42 | |||
43 | static int ds1287_set_next_event(unsigned long delta, | ||
44 | struct clock_event_device *evt) | ||
45 | { | ||
46 | return -EINVAL; | ||
47 | } | ||
48 | |||
49 | static int ds1287_shutdown(struct clock_event_device *evt) | ||
50 | { | ||
51 | u8 val; | ||
52 | |||
53 | spin_lock(&rtc_lock); | ||
54 | |||
55 | val = CMOS_READ(RTC_REG_B); | ||
56 | val &= ~RTC_PIE; | ||
57 | CMOS_WRITE(val, RTC_REG_B); | ||
58 | |||
59 | spin_unlock(&rtc_lock); | ||
60 | return 0; | ||
61 | } | ||
62 | |||
63 | static int ds1287_set_periodic(struct clock_event_device *evt) | ||
64 | { | ||
65 | u8 val; | ||
66 | |||
67 | spin_lock(&rtc_lock); | ||
68 | |||
69 | val = CMOS_READ(RTC_REG_B); | ||
70 | val |= RTC_PIE; | ||
71 | CMOS_WRITE(val, RTC_REG_B); | ||
72 | |||
73 | spin_unlock(&rtc_lock); | ||
74 | return 0; | ||
75 | } | ||
76 | |||
77 | static void ds1287_event_handler(struct clock_event_device *dev) | ||
78 | { | ||
79 | } | ||
80 | |||
81 | static struct clock_event_device ds1287_clockevent = { | ||
82 | .name = "ds1287", | ||
83 | .features = CLOCK_EVT_FEAT_PERIODIC, | ||
84 | .set_next_event = ds1287_set_next_event, | ||
85 | .set_state_shutdown = ds1287_shutdown, | ||
86 | .set_state_periodic = ds1287_set_periodic, | ||
87 | .tick_resume = ds1287_shutdown, | ||
88 | .event_handler = ds1287_event_handler, | ||
89 | }; | ||
90 | |||
91 | static irqreturn_t ds1287_interrupt(int irq, void *dev_id) | ||
92 | { | ||
93 | struct clock_event_device *cd = &ds1287_clockevent; | ||
94 | |||
95 | /* Ack the RTC interrupt. */ | ||
96 | CMOS_READ(RTC_REG_C); | ||
97 | |||
98 | cd->event_handler(cd); | ||
99 | |||
100 | return IRQ_HANDLED; | ||
101 | } | ||
102 | |||
103 | int __init ds1287_clockevent_init(int irq) | ||
104 | { | ||
105 | unsigned long flags = IRQF_PERCPU | IRQF_TIMER; | ||
106 | struct clock_event_device *cd; | ||
107 | |||
108 | cd = &ds1287_clockevent; | ||
109 | cd->rating = 100; | ||
110 | cd->irq = irq; | ||
111 | clockevent_set_clock(cd, 32768); | ||
112 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
113 | cd->max_delta_ticks = 0x7fffffff; | ||
114 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | ||
115 | cd->min_delta_ticks = 0x300; | ||
116 | cd->cpumask = cpumask_of(0); | ||
117 | |||
118 | clockevents_register_device(&ds1287_clockevent); | ||
119 | |||
120 | return request_irq(irq, ds1287_interrupt, flags, "ds1287", NULL); | ||
121 | } | ||
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c new file mode 100644 index 000000000..5b132e8c5 --- /dev/null +++ b/arch/mips/kernel/cevt-gt641xx.c | |||
@@ -0,0 +1,146 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * GT641xx clockevent routines. | ||
4 | * | ||
5 | * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> | ||
6 | */ | ||
7 | #include <linux/clockchips.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/irq.h> | ||
12 | |||
13 | #include <asm/gt64120.h> | ||
14 | #include <asm/time.h> | ||
15 | |||
16 | static DEFINE_RAW_SPINLOCK(gt641xx_timer_lock); | ||
17 | static unsigned int gt641xx_base_clock; | ||
18 | |||
19 | void gt641xx_set_base_clock(unsigned int clock) | ||
20 | { | ||
21 | gt641xx_base_clock = clock; | ||
22 | } | ||
23 | |||
24 | int gt641xx_timer0_state(void) | ||
25 | { | ||
26 | if (GT_READ(GT_TC0_OFS)) | ||
27 | return 0; | ||
28 | |||
29 | GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ); | ||
30 | GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK); | ||
31 | |||
32 | return 1; | ||
33 | } | ||
34 | |||
35 | static int gt641xx_timer0_set_next_event(unsigned long delta, | ||
36 | struct clock_event_device *evt) | ||
37 | { | ||
38 | u32 ctrl; | ||
39 | |||
40 | raw_spin_lock(>641xx_timer_lock); | ||
41 | |||
42 | ctrl = GT_READ(GT_TC_CONTROL_OFS); | ||
43 | ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK); | ||
44 | ctrl |= GT_TC_CONTROL_ENTC0_MSK; | ||
45 | |||
46 | GT_WRITE(GT_TC0_OFS, delta); | ||
47 | GT_WRITE(GT_TC_CONTROL_OFS, ctrl); | ||
48 | |||
49 | raw_spin_unlock(>641xx_timer_lock); | ||
50 | |||
51 | return 0; | ||
52 | } | ||
53 | |||
54 | static int gt641xx_timer0_shutdown(struct clock_event_device *evt) | ||
55 | { | ||
56 | u32 ctrl; | ||
57 | |||
58 | raw_spin_lock(>641xx_timer_lock); | ||
59 | |||
60 | ctrl = GT_READ(GT_TC_CONTROL_OFS); | ||
61 | ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK); | ||
62 | GT_WRITE(GT_TC_CONTROL_OFS, ctrl); | ||
63 | |||
64 | raw_spin_unlock(>641xx_timer_lock); | ||
65 | return 0; | ||
66 | } | ||
67 | |||
68 | static int gt641xx_timer0_set_oneshot(struct clock_event_device *evt) | ||
69 | { | ||
70 | u32 ctrl; | ||
71 | |||
72 | raw_spin_lock(>641xx_timer_lock); | ||
73 | |||
74 | ctrl = GT_READ(GT_TC_CONTROL_OFS); | ||
75 | ctrl &= ~GT_TC_CONTROL_SELTC0_MSK; | ||
76 | ctrl |= GT_TC_CONTROL_ENTC0_MSK; | ||
77 | GT_WRITE(GT_TC_CONTROL_OFS, ctrl); | ||
78 | |||
79 | raw_spin_unlock(>641xx_timer_lock); | ||
80 | return 0; | ||
81 | } | ||
82 | |||
83 | static int gt641xx_timer0_set_periodic(struct clock_event_device *evt) | ||
84 | { | ||
85 | u32 ctrl; | ||
86 | |||
87 | raw_spin_lock(>641xx_timer_lock); | ||
88 | |||
89 | ctrl = GT_READ(GT_TC_CONTROL_OFS); | ||
90 | ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK; | ||
91 | GT_WRITE(GT_TC_CONTROL_OFS, ctrl); | ||
92 | |||
93 | raw_spin_unlock(>641xx_timer_lock); | ||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static void gt641xx_timer0_event_handler(struct clock_event_device *dev) | ||
98 | { | ||
99 | } | ||
100 | |||
101 | static struct clock_event_device gt641xx_timer0_clockevent = { | ||
102 | .name = "gt641xx-timer0", | ||
103 | .features = CLOCK_EVT_FEAT_PERIODIC | | ||
104 | CLOCK_EVT_FEAT_ONESHOT, | ||
105 | .irq = GT641XX_TIMER0_IRQ, | ||
106 | .set_next_event = gt641xx_timer0_set_next_event, | ||
107 | .set_state_shutdown = gt641xx_timer0_shutdown, | ||
108 | .set_state_periodic = gt641xx_timer0_set_periodic, | ||
109 | .set_state_oneshot = gt641xx_timer0_set_oneshot, | ||
110 | .tick_resume = gt641xx_timer0_shutdown, | ||
111 | .event_handler = gt641xx_timer0_event_handler, | ||
112 | }; | ||
113 | |||
114 | static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id) | ||
115 | { | ||
116 | struct clock_event_device *cd = >641xx_timer0_clockevent; | ||
117 | |||
118 | cd->event_handler(cd); | ||
119 | |||
120 | return IRQ_HANDLED; | ||
121 | } | ||
122 | |||
123 | static int __init gt641xx_timer0_clockevent_init(void) | ||
124 | { | ||
125 | struct clock_event_device *cd; | ||
126 | |||
127 | if (!gt641xx_base_clock) | ||
128 | return 0; | ||
129 | |||
130 | GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ); | ||
131 | |||
132 | cd = >641xx_timer0_clockevent; | ||
133 | cd->rating = 200 + gt641xx_base_clock / 10000000; | ||
134 | clockevent_set_clock(cd, gt641xx_base_clock); | ||
135 | cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd); | ||
136 | cd->max_delta_ticks = 0x7fffffff; | ||
137 | cd->min_delta_ns = clockevent_delta2ns(0x300, cd); | ||
138 | cd->min_delta_ticks = 0x300; | ||
139 | cd->cpumask = cpumask_of(0); | ||
140 | |||
141 | clockevents_register_device(>641xx_timer0_clockevent); | ||
142 | |||
143 | return request_irq(GT641XX_TIMER0_IRQ, gt641xx_timer0_interrupt, | ||
144 | IRQF_PERCPU | IRQF_TIMER, "gt641xx_timer0", NULL); | ||
145 | } | ||
146 | arch_initcall(gt641xx_timer0_clockevent_init); | ||
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c new file mode 100644 index 000000000..995ad9e69 --- /dev/null +++ b/arch/mips/kernel/cevt-r4k.c | |||
@@ -0,0 +1,345 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2007 MIPS Technologies, Inc. | ||
7 | * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> | ||
8 | */ | ||
9 | #include <linux/clockchips.h> | ||
10 | #include <linux/interrupt.h> | ||
11 | #include <linux/cpufreq.h> | ||
12 | #include <linux/percpu.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/irq.h> | ||
15 | |||
16 | #include <asm/time.h> | ||
17 | #include <asm/cevt-r4k.h> | ||
18 | |||
19 | static int mips_next_event(unsigned long delta, | ||
20 | struct clock_event_device *evt) | ||
21 | { | ||
22 | unsigned int cnt; | ||
23 | int res; | ||
24 | |||
25 | cnt = read_c0_count(); | ||
26 | cnt += delta; | ||
27 | write_c0_compare(cnt); | ||
28 | res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0; | ||
29 | return res; | ||
30 | } | ||
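The -ETIME test in mips_next_event() relies on the signed-difference idiom staying correct across CP0_Count wraparound: if Count has already reached or passed the Compare value just written, the event was missed and the caller must retry. A sketch:

#include <stdint.h>
#include <stdio.h>

static int missed(uint32_t count_now, uint32_t compare)
{
	/* signed difference is wraparound-safe for deltas < 2^31 */
	return (int32_t)(count_now - compare) >= 0;
}

int main(void)
{
	printf("%d\n", missed(0x00000001, 0xfffffff0));	/* 1: wrapped past */
	printf("%d\n", missed(0x10, 0x20));		/* 0: still ahead */
	return 0;
}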
31 | |||
32 | /** | ||
33 | * calculate_min_delta() - Calculate a good minimum delta for mips_next_event(). | ||
34 | * | ||
35 | * Running under virtualisation can introduce overhead into mips_next_event() in | ||
36 | * the form of hypervisor emulation of CP0_Count/CP0_Compare registers, | ||
37 | * potentially with an unnatural frequency, which makes a fixed min_delta_ns | ||
38 | * value inappropriate as it may be too small. | ||
39 | * | ||
40 | * It can also introduce occasional latency from the guest being descheduled. | ||
41 | * | ||
42 | * This function calculates a good minimum delta based roughly on the 75th | ||
43 | * percentile of the time taken to do the mips_next_event() sequence, in order | ||
44 | * to handle potentially higher overhead while also eliminating outliers due to | ||
45 | * unpredictable hypervisor latency (which can be handled by retries). | ||
46 | * | ||
47 | * Return: An appropriate minimum delta for the clock event device. | ||
48 | */ | ||
49 | static unsigned int calculate_min_delta(void) | ||
50 | { | ||
51 | unsigned int cnt, i, j, k, l; | ||
52 | unsigned int buf1[4], buf2[3]; | ||
53 | unsigned int min_delta; | ||
54 | |||
55 | /* | ||
56 | * Calculate the median of 5 75th percentiles of 5 samples of how long | ||
57 | * it takes to set CP0_Compare = CP0_Count + delta. | ||
58 | */ | ||
59 | for (i = 0; i < 5; ++i) { | ||
60 | for (j = 0; j < 5; ++j) { | ||
61 | /* | ||
62 | * This is like the code in mips_next_event(), and | ||
63 | * directly measures the borderline "safe" delta. | ||
64 | */ | ||
65 | cnt = read_c0_count(); | ||
66 | write_c0_compare(cnt); | ||
67 | cnt = read_c0_count() - cnt; | ||
68 | |||
69 | /* Sorted insert into buf1 */ | ||
70 | for (k = 0; k < j; ++k) { | ||
71 | if (cnt < buf1[k]) { | ||
72 | l = min_t(unsigned int, | ||
73 | j, ARRAY_SIZE(buf1) - 1); | ||
74 | for (; l > k; --l) | ||
75 | buf1[l] = buf1[l - 1]; | ||
76 | break; | ||
77 | } | ||
78 | } | ||
79 | if (k < ARRAY_SIZE(buf1)) | ||
80 | buf1[k] = cnt; | ||
81 | } | ||
82 | |||
83 | /* Sorted insert of 75th percentile into buf2 */ | ||
84 | for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) { | ||
85 | if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) { | ||
86 | l = min_t(unsigned int, | ||
87 | i, ARRAY_SIZE(buf2) - 1); | ||
88 | for (; l > k; --l) | ||
89 | buf2[l] = buf2[l - 1]; | ||
90 | break; | ||
91 | } | ||
92 | } | ||
93 | if (k < ARRAY_SIZE(buf2)) | ||
94 | buf2[k] = buf1[ARRAY_SIZE(buf1) - 1]; | ||
95 | } | ||
96 | |||
97 | /* Use 2 * median of 75th percentiles */ | ||
98 | min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2; | ||
99 | |||
100 | /* Don't go too low */ | ||
101 | if (min_delta < 0x300) | ||
102 | min_delta = 0x300; | ||
103 | |||
104 | pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n", | ||
105 | __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta); | ||
106 | return min_delta; | ||
107 | } | ||
108 | |||
109 | DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device); | ||
110 | int cp0_timer_irq_installed; | ||
111 | |||
112 | /* | ||
113 | * Possibly handle a performance counter interrupt. | ||
114 | * Return true if the timer interrupt should not be checked | ||
115 | */ | ||
116 | static inline int handle_perf_irq(int r2) | ||
117 | { | ||
118 | /* | ||
119 | * The performance counter overflow interrupt may be shared with the | ||
120 | * timer interrupt (cp0_perfcount_irq < 0). If it is and a | ||
121 | * performance counter has overflowed (perf_irq() == IRQ_HANDLED) | ||
122 | * and we can't reliably determine if a counter interrupt has also | ||
123 | * happened (!r2) then don't check for a timer interrupt. | ||
124 | */ | ||
125 | return (cp0_perfcount_irq < 0) && | ||
126 | perf_irq() == IRQ_HANDLED && | ||
127 | !r2; | ||
128 | } | ||
129 | |||
130 | irqreturn_t c0_compare_interrupt(int irq, void *dev_id) | ||
131 | { | ||
132 | const int r2 = cpu_has_mips_r2_r6; | ||
133 | struct clock_event_device *cd; | ||
134 | int cpu = smp_processor_id(); | ||
135 | |||
136 | /* | ||
137 | * Suckage alert: | ||
138 | * Before R2 of the architecture there was no way to see if a | ||
139 | * performance counter interrupt was pending, so we have to run | ||
140 | * the performance counter interrupt handler anyway. | ||
141 | */ | ||
142 | if (handle_perf_irq(r2)) | ||
143 | return IRQ_HANDLED; | ||
144 | |||
145 | /* | ||
146 | * The same applies to performance counter interrupts. But with the | ||
147 | * above we now know that the reason we got here must be a timer | ||
148 | * interrupt. Being the paranoiacs we are we check anyway. | ||
149 | */ | ||
150 | if (!r2 || (read_c0_cause() & CAUSEF_TI)) { | ||
151 | /* Clear Count/Compare Interrupt */ | ||
152 | write_c0_compare(read_c0_compare()); | ||
153 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
154 | cd->event_handler(cd); | ||
155 | |||
156 | return IRQ_HANDLED; | ||
157 | } | ||
158 | |||
159 | return IRQ_NONE; | ||
160 | } | ||
161 | |||
162 | struct irqaction c0_compare_irqaction = { | ||
163 | .handler = c0_compare_interrupt, | ||
164 | /* | ||
165 | * IRQF_SHARED: The timer interrupt may be shared with other interrupts | ||
166 | * such as perf counter and FDC interrupts. | ||
167 | */ | ||
168 | .flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED, | ||
169 | .name = "timer", | ||
170 | }; | ||
171 | |||
172 | |||
173 | void mips_event_handler(struct clock_event_device *dev) | ||
174 | { | ||
175 | } | ||
176 | |||
177 | /* | ||
178 | * FIXME: This doesn't hold for the relocated E9000 compare interrupt. | ||
179 | */ | ||
180 | static int c0_compare_int_pending(void) | ||
181 | { | ||
182 | /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */ | ||
183 | return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP); | ||
184 | } | ||
185 | |||
186 | /* | ||
187 | * Compare interrupt can be routed and latched outside the core, | ||
188 | * so wait up to worst case number of cycle counter ticks for timer interrupt | ||
189 | * changes to propagate to the cause register. | ||
190 | */ | ||
191 | #define COMPARE_INT_SEEN_TICKS 50 | ||
192 | |||
193 | int c0_compare_int_usable(void) | ||
194 | { | ||
195 | unsigned int delta; | ||
196 | unsigned int cnt; | ||
197 | |||
198 | #ifdef CONFIG_KVM_GUEST | ||
199 | return 1; | ||
200 | #endif | ||
201 | |||
202 | /* | ||
203 | * IP7 already pending? Try to clear it by acking the timer. | ||
204 | */ | ||
205 | if (c0_compare_int_pending()) { | ||
206 | cnt = read_c0_count(); | ||
207 | write_c0_compare(cnt); | ||
208 | back_to_back_c0_hazard(); | ||
209 | while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS)) | ||
210 | if (!c0_compare_int_pending()) | ||
211 | break; | ||
212 | if (c0_compare_int_pending()) | ||
213 | return 0; | ||
214 | } | ||
215 | |||
216 | for (delta = 0x10; delta <= 0x400000; delta <<= 1) { | ||
217 | cnt = read_c0_count(); | ||
218 | cnt += delta; | ||
219 | write_c0_compare(cnt); | ||
220 | back_to_back_c0_hazard(); | ||
221 | if ((int)(read_c0_count() - cnt) < 0) | ||
222 | break; | ||
223 | /* increase delta if the timer had already expired */ | ||
224 | } | ||
225 | |||
226 | while ((int)(read_c0_count() - cnt) <= 0) | ||
227 | ; /* Wait for expiry */ | ||
228 | |||
229 | while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS)) | ||
230 | if (c0_compare_int_pending()) | ||
231 | break; | ||
232 | if (!c0_compare_int_pending()) | ||
233 | return 0; | ||
234 | cnt = read_c0_count(); | ||
235 | write_c0_compare(cnt); | ||
236 | back_to_back_c0_hazard(); | ||
237 | while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS)) | ||
238 | if (!c0_compare_int_pending()) | ||
239 | break; | ||
240 | if (c0_compare_int_pending()) | ||
241 | return 0; | ||
242 | |||
243 | /* | ||
244 | * Feels like a real count / compare timer. | ||
245 | */ | ||
246 | return 1; | ||
247 | } | ||
248 | |||
249 | unsigned int __weak get_c0_compare_int(void) | ||
250 | { | ||
251 | return MIPS_CPU_IRQ_BASE + cp0_compare_irq; | ||
252 | } | ||
253 | |||
254 | #ifdef CONFIG_CPU_FREQ | ||
255 | |||
256 | static unsigned long mips_ref_freq; | ||
257 | |||
258 | static int r4k_cpufreq_callback(struct notifier_block *nb, | ||
259 | unsigned long val, void *data) | ||
260 | { | ||
261 | struct cpufreq_freqs *freq = data; | ||
262 | struct clock_event_device *cd; | ||
263 | unsigned long rate; | ||
264 | int cpu; | ||
265 | |||
266 | if (!mips_ref_freq) | ||
267 | mips_ref_freq = freq->old; | ||
268 | |||
269 | if (val == CPUFREQ_POSTCHANGE) { | ||
270 | rate = cpufreq_scale(mips_hpt_frequency, mips_ref_freq, | ||
271 | freq->new); | ||
272 | |||
273 | for_each_cpu(cpu, freq->policy->cpus) { | ||
274 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
275 | |||
276 | clockevents_update_freq(cd, rate); | ||
277 | } | ||
278 | } | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | static struct notifier_block r4k_cpufreq_notifier = { | ||
284 | .notifier_call = r4k_cpufreq_callback, | ||
285 | }; | ||
286 | |||
287 | static int __init r4k_register_cpufreq_notifier(void) | ||
288 | { | ||
289 | return cpufreq_register_notifier(&r4k_cpufreq_notifier, | ||
290 | CPUFREQ_TRANSITION_NOTIFIER); | ||
291 | |||
292 | } | ||
293 | core_initcall(r4k_register_cpufreq_notifier); | ||
294 | |||
295 | #endif /* CONFIG_CPU_FREQ */ | ||
296 | |||
297 | int r4k_clockevent_init(void) | ||
298 | { | ||
299 | unsigned long flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED; | ||
300 | unsigned int cpu = smp_processor_id(); | ||
301 | struct clock_event_device *cd; | ||
302 | unsigned int irq, min_delta; | ||
303 | |||
304 | if (!cpu_has_counter || !mips_hpt_frequency) | ||
305 | return -ENXIO; | ||
306 | |||
307 | if (!c0_compare_int_usable()) | ||
308 | return -ENXIO; | ||
309 | |||
310 | /* | ||
311 | * With vectored interrupts things are getting platform specific. | ||
312 | * get_c0_compare_int is a hook to allow a platform to return the | ||
313 | * interrupt number of its liking. | ||
314 | */ | ||
315 | irq = get_c0_compare_int(); | ||
316 | |||
317 | cd = &per_cpu(mips_clockevent_device, cpu); | ||
318 | |||
319 | cd->name = "MIPS"; | ||
320 | cd->features = CLOCK_EVT_FEAT_ONESHOT | | ||
321 | CLOCK_EVT_FEAT_C3STOP | | ||
322 | CLOCK_EVT_FEAT_PERCPU; | ||
323 | |||
324 | min_delta = calculate_min_delta(); | ||
325 | |||
326 | cd->rating = 300; | ||
327 | cd->irq = irq; | ||
328 | cd->cpumask = cpumask_of(cpu); | ||
329 | cd->set_next_event = mips_next_event; | ||
330 | cd->event_handler = mips_event_handler; | ||
331 | |||
332 | clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff); | ||
333 | |||
334 | if (cp0_timer_irq_installed) | ||
335 | return 0; | ||
336 | |||
337 | cp0_timer_irq_installed = 1; | ||
338 | |||
339 | if (request_irq(irq, c0_compare_interrupt, flags, "timer", | ||
340 | c0_compare_interrupt)) | ||
341 | pr_err("Failed to request irq %d (timer)\n", irq); | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | |||
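
The `(int)(read_c0_count() - cnt) >= 0` check in mips_next_event() above is the essential detail: CP0_Count keeps running while CP0_Compare is being written, so the only way to detect a deadline missed during programming is a signed difference, which stays correct across 32-bit wraparound. A minimal user-space sketch of that check follows; sim_count, program_timer and the latency values are hypothetical stand-ins for CP0_Count/CP0_Compare and hardware timing, not kernel interfaces.

#include <stdio.h>
#include <stdint.h>

static uint32_t sim_count;			/* stand-in for CP0_Count */

static int program_timer(uint32_t delta, uint32_t latency)
{
	uint32_t cnt = sim_count;		/* read_c0_count() */

	cnt += delta;				/* new deadline */
	sim_count += latency;			/* counter runs while Compare is written */
	/* write_c0_compare(cnt) would land here */

	/* signed difference stays correct across 32-bit wraparound */
	return ((int32_t)(sim_count - cnt) >= 0) ? -1 /* -ETIME */ : 0;
}

int main(void)
{
	printf("%d\n", program_timer(0x300, 0x10));	/* 0: armed in time */
	printf("%d\n", program_timer(0x300, 0x400));	/* -1: deadline already missed */
	return 0;
}
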
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c new file mode 100644 index 000000000..0451273fa --- /dev/null +++ b/arch/mips/kernel/cevt-sb1250.c | |||
@@ -0,0 +1,138 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2000, 2001 Broadcom Corporation | ||
4 | */ | ||
5 | #include <linux/clockchips.h> | ||
6 | #include <linux/interrupt.h> | ||
7 | #include <linux/irq.h> | ||
8 | #include <linux/percpu.h> | ||
9 | #include <linux/smp.h> | ||
10 | |||
11 | #include <asm/addrspace.h> | ||
12 | #include <asm/io.h> | ||
13 | #include <asm/time.h> | ||
14 | |||
15 | #include <asm/sibyte/sb1250.h> | ||
16 | #include <asm/sibyte/sb1250_regs.h> | ||
17 | #include <asm/sibyte/sb1250_int.h> | ||
18 | #include <asm/sibyte/sb1250_scd.h> | ||
19 | |||
20 | #define IMR_IP2_VAL K_INT_MAP_I0 | ||
21 | #define IMR_IP3_VAL K_INT_MAP_I1 | ||
22 | #define IMR_IP4_VAL K_INT_MAP_I2 | ||
23 | |||
24 | /* | ||
25 | * The general purpose timer ticks at 1 MHz independently of | ||
26 | * the rest of the system. | ||
27 | */ | ||
28 | |||
29 | static int sibyte_shutdown(struct clock_event_device *evt) | ||
30 | { | ||
31 | void __iomem *cfg; | ||
32 | |||
33 | cfg = IOADDR(A_SCD_TIMER_REGISTER(smp_processor_id(), R_SCD_TIMER_CFG)); | ||
34 | |||
35 | /* Stop the timer until we actually program a shot */ | ||
36 | __raw_writeq(0, cfg); | ||
37 | |||
38 | return 0; | ||
39 | } | ||
40 | |||
41 | static int sibyte_set_periodic(struct clock_event_device *evt) | ||
42 | { | ||
43 | unsigned int cpu = smp_processor_id(); | ||
44 | void __iomem *cfg, *init; | ||
45 | |||
46 | cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); | ||
47 | init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); | ||
48 | |||
49 | __raw_writeq(0, cfg); | ||
50 | __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init); | ||
51 | __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg); | ||
52 | |||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd) | ||
57 | { | ||
58 | unsigned int cpu = smp_processor_id(); | ||
59 | void __iomem *cfg, *init; | ||
60 | |||
61 | cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); | ||
62 | init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT)); | ||
63 | |||
64 | __raw_writeq(0, cfg); | ||
65 | __raw_writeq(delta - 1, init); | ||
66 | __raw_writeq(M_SCD_TIMER_ENABLE, cfg); | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | |||
71 | static irqreturn_t sibyte_counter_handler(int irq, void *dev_id) | ||
72 | { | ||
73 | unsigned int cpu = smp_processor_id(); | ||
74 | struct clock_event_device *cd = dev_id; | ||
75 | void __iomem *cfg; | ||
76 | unsigned long tmode; | ||
77 | |||
78 | if (clockevent_state_periodic(cd)) | ||
79 | tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS; | ||
80 | else | ||
81 | tmode = 0; | ||
82 | |||
83 | /* ACK interrupt */ | ||
84 | cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG)); | ||
85 | ____raw_writeq(tmode, cfg); | ||
86 | |||
87 | cd->event_handler(cd); | ||
88 | |||
89 | return IRQ_HANDLED; | ||
90 | } | ||
91 | |||
92 | static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent); | ||
93 | static DEFINE_PER_CPU(char [18], sibyte_hpt_name); | ||
94 | |||
95 | void sb1250_clockevent_init(void) | ||
96 | { | ||
97 | unsigned int cpu = smp_processor_id(); | ||
98 | unsigned int irq = K_INT_TIMER_0 + cpu; | ||
99 | struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu); | ||
100 | unsigned char *name = per_cpu(sibyte_hpt_name, cpu); | ||
101 | unsigned long flags = IRQF_PERCPU | IRQF_TIMER; | ||
102 | |||
103 | /* We only have 4 general purpose timers, and we use the last one as the hpt */ | ||
104 | BUG_ON(cpu > 2); | ||
105 | |||
106 | sprintf(name, "sb1250-counter-%d", cpu); | ||
107 | cd->name = name; | ||
108 | cd->features = CLOCK_EVT_FEAT_PERIODIC | | ||
109 | CLOCK_EVT_FEAT_ONESHOT; | ||
110 | clockevent_set_clock(cd, V_SCD_TIMER_FREQ); | ||
111 | cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd); | ||
112 | cd->max_delta_ticks = 0x7fffff; | ||
113 | cd->min_delta_ns = clockevent_delta2ns(2, cd); | ||
114 | cd->min_delta_ticks = 2; | ||
115 | cd->rating = 200; | ||
116 | cd->irq = irq; | ||
117 | cd->cpumask = cpumask_of(cpu); | ||
118 | cd->set_next_event = sibyte_next_event; | ||
119 | cd->set_state_shutdown = sibyte_shutdown; | ||
120 | cd->set_state_periodic = sibyte_set_periodic; | ||
121 | cd->set_state_oneshot = sibyte_shutdown; | ||
122 | clockevents_register_device(cd); | ||
123 | |||
124 | sb1250_mask_irq(cpu, irq); | ||
125 | |||
126 | /* | ||
127 | * Map the timer interrupt to IP[4] of this cpu | ||
128 | */ | ||
129 | __raw_writeq(IMR_IP4_VAL, | ||
130 | IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) + | ||
131 | (irq << 3))); | ||
132 | |||
133 | sb1250_unmask_irq(cpu, irq); | ||
134 | |||
135 | irq_set_affinity(irq, cpumask_of(cpu)); | ||
136 | if (request_irq(irq, sibyte_counter_handler, flags, name, cd)) | ||
137 | pr_err("Failed to request irq %d (%s)\n", irq, name); | ||
138 | } | ||
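
sibyte_set_periodic() above derives its reload value as (V_SCD_TIMER_FREQ / HZ) - 1, evidently because the SCD timer counts init+1 ticks of its 1 MHz clock per period (hence the "- 1"). A stand-alone sketch of that arithmetic, assuming V_SCD_TIMER_FREQ is 1000000 and an illustrative HZ of 250:

#include <stdio.h>

#define V_SCD_TIMER_FREQ 1000000	/* 1 MHz, per the comment in the file */
#define HZ 250				/* illustrative kernel tick rate */

int main(void)
{
	/* the timer fires every init+1 ticks, hence the "- 1" */
	unsigned long init = (V_SCD_TIMER_FREQ / HZ) - 1;

	printf("reload=%lu -> period=%lu us, %d interrupts/s\n",
	       init, init + 1, HZ);
	return 0;
}
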
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c new file mode 100644 index 000000000..5709469c2 --- /dev/null +++ b/arch/mips/kernel/cevt-txx9.c | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Based on linux/arch/mips/kernel/cevt-r4k.c, | ||
7 | * linux/arch/mips/jmr3927/rbhma3100/setup.c | ||
8 | * | ||
9 | * Copyright 2001 MontaVista Software Inc. | ||
10 | * Copyright (C) 2000-2001 Toshiba Corporation | ||
11 | * Copyright (C) 2007 MIPS Technologies, Inc. | ||
12 | * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org> | ||
13 | */ | ||
14 | #include <linux/init.h> | ||
15 | #include <linux/interrupt.h> | ||
16 | #include <linux/irq.h> | ||
17 | #include <linux/sched_clock.h> | ||
18 | #include <asm/time.h> | ||
19 | #include <asm/txx9tmr.h> | ||
20 | |||
21 | #define TCR_BASE (TXx9_TMTCR_CCDE | TXx9_TMTCR_CRE | TXx9_TMTCR_TMODE_ITVL) | ||
22 | #define TIMER_CCD 0 /* 1/2 */ | ||
23 | #define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD)) | ||
24 | |||
25 | struct txx9_clocksource { | ||
26 | struct clocksource cs; | ||
27 | struct txx9_tmr_reg __iomem *tmrptr; | ||
28 | }; | ||
29 | |||
30 | static u64 txx9_cs_read(struct clocksource *cs) | ||
31 | { | ||
32 | struct txx9_clocksource *txx9_cs = | ||
33 | container_of(cs, struct txx9_clocksource, cs); | ||
34 | return __raw_readl(&txx9_cs->tmrptr->trr); | ||
35 | } | ||
36 | |||
37 | /* Use 1 bit smaller width to use full bits in that width */ | ||
38 | #define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1) | ||
39 | |||
40 | static struct txx9_clocksource txx9_clocksource = { | ||
41 | .cs = { | ||
42 | .name = "TXx9", | ||
43 | .rating = 200, | ||
44 | .read = txx9_cs_read, | ||
45 | .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS), | ||
46 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
47 | }, | ||
48 | }; | ||
49 | |||
50 | static u64 notrace txx9_read_sched_clock(void) | ||
51 | { | ||
52 | return __raw_readl(&txx9_clocksource.tmrptr->trr); | ||
53 | } | ||
54 | |||
55 | void __init txx9_clocksource_init(unsigned long baseaddr, | ||
56 | unsigned int imbusclk) | ||
57 | { | ||
58 | struct txx9_tmr_reg __iomem *tmrptr; | ||
59 | |||
60 | clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk)); | ||
61 | |||
62 | tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); | ||
63 | __raw_writel(TCR_BASE, &tmrptr->tcr); | ||
64 | __raw_writel(0, &tmrptr->tisr); | ||
65 | __raw_writel(TIMER_CCD, &tmrptr->ccdr); | ||
66 | __raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr); | ||
67 | __raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra); | ||
68 | __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr); | ||
69 | txx9_clocksource.tmrptr = tmrptr; | ||
70 | |||
71 | sched_clock_register(txx9_read_sched_clock, TXX9_CLOCKSOURCE_BITS, | ||
72 | TIMER_CLK(imbusclk)); | ||
73 | } | ||
74 | |||
75 | struct txx9_clock_event_device { | ||
76 | struct clock_event_device cd; | ||
77 | struct txx9_tmr_reg __iomem *tmrptr; | ||
78 | }; | ||
79 | |||
80 | static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr) | ||
81 | { | ||
82 | /* stop and reset counter */ | ||
83 | __raw_writel(TCR_BASE, &tmrptr->tcr); | ||
84 | /* clear pending interrupt */ | ||
85 | __raw_writel(0, &tmrptr->tisr); | ||
86 | } | ||
87 | |||
88 | static int txx9tmr_set_state_periodic(struct clock_event_device *evt) | ||
89 | { | ||
90 | struct txx9_clock_event_device *txx9_cd = | ||
91 | container_of(evt, struct txx9_clock_event_device, cd); | ||
92 | struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; | ||
93 | |||
94 | txx9tmr_stop_and_clear(tmrptr); | ||
95 | |||
96 | __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr); | ||
97 | /* start timer */ | ||
98 | __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift, | ||
99 | &tmrptr->cpra); | ||
100 | __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | static int txx9tmr_set_state_oneshot(struct clock_event_device *evt) | ||
105 | { | ||
106 | struct txx9_clock_event_device *txx9_cd = | ||
107 | container_of(evt, struct txx9_clock_event_device, cd); | ||
108 | struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; | ||
109 | |||
110 | txx9tmr_stop_and_clear(tmrptr); | ||
111 | __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr); | ||
112 | return 0; | ||
113 | } | ||
114 | |||
115 | static int txx9tmr_set_state_shutdown(struct clock_event_device *evt) | ||
116 | { | ||
117 | struct txx9_clock_event_device *txx9_cd = | ||
118 | container_of(evt, struct txx9_clock_event_device, cd); | ||
119 | struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; | ||
120 | |||
121 | txx9tmr_stop_and_clear(tmrptr); | ||
122 | __raw_writel(0, &tmrptr->itmr); | ||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | static int txx9tmr_tick_resume(struct clock_event_device *evt) | ||
127 | { | ||
128 | struct txx9_clock_event_device *txx9_cd = | ||
129 | container_of(evt, struct txx9_clock_event_device, cd); | ||
130 | struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; | ||
131 | |||
132 | txx9tmr_stop_and_clear(tmrptr); | ||
133 | __raw_writel(TIMER_CCD, &tmrptr->ccdr); | ||
134 | __raw_writel(0, &tmrptr->itmr); | ||
135 | return 0; | ||
136 | } | ||
137 | |||
138 | static int txx9tmr_set_next_event(unsigned long delta, | ||
139 | struct clock_event_device *evt) | ||
140 | { | ||
141 | struct txx9_clock_event_device *txx9_cd = | ||
142 | container_of(evt, struct txx9_clock_event_device, cd); | ||
143 | struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; | ||
144 | |||
145 | txx9tmr_stop_and_clear(tmrptr); | ||
146 | /* start timer */ | ||
147 | __raw_writel(delta, &tmrptr->cpra); | ||
148 | __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr); | ||
149 | return 0; | ||
150 | } | ||
151 | |||
152 | static struct txx9_clock_event_device txx9_clock_event_device = { | ||
153 | .cd = { | ||
154 | .name = "TXx9", | ||
155 | .features = CLOCK_EVT_FEAT_PERIODIC | | ||
156 | CLOCK_EVT_FEAT_ONESHOT, | ||
157 | .rating = 200, | ||
158 | .set_state_shutdown = txx9tmr_set_state_shutdown, | ||
159 | .set_state_periodic = txx9tmr_set_state_periodic, | ||
160 | .set_state_oneshot = txx9tmr_set_state_oneshot, | ||
161 | .tick_resume = txx9tmr_tick_resume, | ||
162 | .set_next_event = txx9tmr_set_next_event, | ||
163 | }, | ||
164 | }; | ||
165 | |||
166 | static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id) | ||
167 | { | ||
168 | struct txx9_clock_event_device *txx9_cd = dev_id; | ||
169 | struct clock_event_device *cd = &txx9_cd->cd; | ||
170 | struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr; | ||
171 | |||
172 | __raw_writel(0, &tmrptr->tisr); /* ack interrupt */ | ||
173 | cd->event_handler(cd); | ||
174 | return IRQ_HANDLED; | ||
175 | } | ||
176 | |||
177 | void __init txx9_clockevent_init(unsigned long baseaddr, int irq, | ||
178 | unsigned int imbusclk) | ||
179 | { | ||
180 | struct clock_event_device *cd = &txx9_clock_event_device.cd; | ||
181 | struct txx9_tmr_reg __iomem *tmrptr; | ||
182 | |||
183 | tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); | ||
184 | txx9tmr_stop_and_clear(tmrptr); | ||
185 | __raw_writel(TIMER_CCD, &tmrptr->ccdr); | ||
186 | __raw_writel(0, &tmrptr->itmr); | ||
187 | txx9_clock_event_device.tmrptr = tmrptr; | ||
188 | |||
189 | clockevent_set_clock(cd, TIMER_CLK(imbusclk)); | ||
190 | cd->max_delta_ns = | ||
191 | clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd); | ||
192 | cd->max_delta_ticks = 0xffffffff >> (32 - TXX9_TIMER_BITS); | ||
193 | cd->min_delta_ns = clockevent_delta2ns(0xf, cd); | ||
194 | cd->min_delta_ticks = 0xf; | ||
195 | cd->irq = irq; | ||
196 | cd->cpumask = cpumask_of(0); | ||
197 | clockevents_register_device(cd); | ||
198 | if (request_irq(irq, txx9tmr_interrupt, IRQF_PERCPU | IRQF_TIMER, | ||
199 | "txx9tmr", &txx9_clock_event_device)) | ||
200 | pr_err("Failed to request irq %d (txx9tmr)\n", irq); | ||
201 | printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n", | ||
202 | baseaddr, irq); | ||
203 | } | ||
204 | |||
205 | void __init txx9_tmr_init(unsigned long baseaddr) | ||
206 | { | ||
207 | struct txx9_tmr_reg __iomem *tmrptr; | ||
208 | |||
209 | tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg)); | ||
210 | /* Start once to make CounterResetEnable effective */ | ||
211 | __raw_writel(TXx9_TMTCR_CRE | TXx9_TMTCR_TCE, &tmrptr->tcr); | ||
212 | /* Stop and reset the counter */ | ||
213 | __raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr); | ||
214 | __raw_writel(0, &tmrptr->tisr); | ||
215 | __raw_writel(0xffffffff, &tmrptr->cpra); | ||
216 | __raw_writel(0, &tmrptr->itmr); | ||
217 | __raw_writel(0, &tmrptr->ccdr); | ||
218 | __raw_writel(0, &tmrptr->pgmr); | ||
219 | iounmap(tmrptr); | ||
220 | } | ||
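
txx9tmr_set_state_periodic() above converts one kernel tick into timer ticks with ((NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift, the usual clockevent fixed-point conversion in which mult and shift satisfy ticks = ns * mult >> shift. The sketch below reproduces that arithmetic in isolation; the 40 MHz rate and shift of 32 are assumed example values, not read from TXx9 hardware.

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL
#define HZ 100

int main(void)
{
	uint64_t rate = 40000000;	/* assumed 40 MHz timer clock */
	uint32_t shift = 32;		/* assumed; the kernel derives this at init */
	/* mult chosen so that ns * mult >> shift == ns * rate / 1e9 */
	uint32_t mult = (uint32_t)((rate << shift) / NSEC_PER_SEC);

	/* the expression from txx9tmr_set_state_periodic() */
	uint64_t cpra = ((NSEC_PER_SEC / HZ) * mult) >> shift;

	printf("mult=%u cpra=%llu (expect ~%llu ticks per 10 ms)\n",
	       mult, (unsigned long long)cpra,
	       (unsigned long long)(rate / HZ));
	return 0;
}
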
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c new file mode 100644 index 000000000..89107deb0 --- /dev/null +++ b/arch/mips/kernel/cmpxchg.c | |||
@@ -0,0 +1,104 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2017 Imagination Technologies | ||
4 | * Author: Paul Burton <paul.burton@mips.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/bitops.h> | ||
8 | #include <asm/cmpxchg.h> | ||
9 | |||
10 | unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size) | ||
11 | { | ||
12 | u32 old32, new32, load32, mask; | ||
13 | volatile u32 *ptr32; | ||
14 | unsigned int shift; | ||
15 | |||
16 | /* Check that ptr is naturally aligned */ | ||
17 | WARN_ON((unsigned long)ptr & (size - 1)); | ||
18 | |||
19 | /* Mask value to the correct size. */ | ||
20 | mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); | ||
21 | val &= mask; | ||
22 | |||
23 | /* | ||
24 | * Calculate a shift & mask that correspond to the value we wish to | ||
25 | * exchange within the naturally aligned 4 byte integer that includes | ||
26 | * it. | ||
27 | */ | ||
28 | shift = (unsigned long)ptr & 0x3; | ||
29 | if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) | ||
30 | shift ^= sizeof(u32) - size; | ||
31 | shift *= BITS_PER_BYTE; | ||
32 | mask <<= shift; | ||
33 | |||
34 | /* | ||
35 | * Calculate a pointer to the naturally aligned 4 byte integer that | ||
36 | * includes our byte of interest, and load its value. | ||
37 | */ | ||
38 | ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); | ||
39 | load32 = *ptr32; | ||
40 | |||
41 | do { | ||
42 | old32 = load32; | ||
43 | new32 = (load32 & ~mask) | (val << shift); | ||
44 | load32 = cmpxchg(ptr32, old32, new32); | ||
45 | } while (load32 != old32); | ||
46 | |||
47 | return (load32 & mask) >> shift; | ||
48 | } | ||
49 | |||
50 | unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old, | ||
51 | unsigned long new, unsigned int size) | ||
52 | { | ||
53 | u32 mask, old32, new32, load32, load; | ||
54 | volatile u32 *ptr32; | ||
55 | unsigned int shift; | ||
56 | |||
57 | /* Check that ptr is naturally aligned */ | ||
58 | WARN_ON((unsigned long)ptr & (size - 1)); | ||
59 | |||
60 | /* Mask inputs to the correct size. */ | ||
61 | mask = GENMASK((size * BITS_PER_BYTE) - 1, 0); | ||
62 | old &= mask; | ||
63 | new &= mask; | ||
64 | |||
65 | /* | ||
66 | * Calculate a shift & mask that correspond to the value we wish to | ||
67 | * compare & exchange within the naturally aligned 4 byte integer | ||
68 | * that includes it. | ||
69 | */ | ||
70 | shift = (unsigned long)ptr & 0x3; | ||
71 | if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) | ||
72 | shift ^= sizeof(u32) - size; | ||
73 | shift *= BITS_PER_BYTE; | ||
74 | mask <<= shift; | ||
75 | |||
76 | /* | ||
77 | * Calculate a pointer to the naturally aligned 4 byte integer that | ||
78 | * includes our byte of interest, and load its value. | ||
79 | */ | ||
80 | ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3); | ||
81 | load32 = *ptr32; | ||
82 | |||
83 | while (true) { | ||
84 | /* | ||
85 | * Ensure the byte we want to exchange matches the expected | ||
86 | * old value, and if not then bail. | ||
87 | */ | ||
88 | load = (load32 & mask) >> shift; | ||
89 | if (load != old) | ||
90 | return load; | ||
91 | |||
92 | /* | ||
93 | * Calculate the old & new values of the naturally aligned | ||
94 | * 4 byte integer that include the byte we want to exchange. | ||
95 | * Attempt to exchange the old value for the new value, and | ||
96 | * return if we succeed. | ||
97 | */ | ||
98 | old32 = (load32 & ~mask) | (old << shift); | ||
99 | new32 = (load32 & ~mask) | (new << shift); | ||
100 | load32 = cmpxchg(ptr32, old32, new32); | ||
101 | if (load32 == old32) | ||
102 | return old; | ||
103 | } | ||
104 | } | ||
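
__cmpxchg_small() above emulates a sub-word compare-and-exchange by retrying a full-width cmpxchg() on the aligned 32-bit word that contains the target. Below is a user-space sketch of the same shift/mask loop built on the GCC/Clang __atomic builtins instead of the kernel's cmpxchg(); it assumes a little-endian layout (the kernel version also handles big-endian via the shift ^= sizeof(u32) - size adjustment).

#include <stdio.h>
#include <stdint.h>

static uint8_t cmpxchg_u8(volatile uint8_t *ptr, uint8_t old, uint8_t new)
{
	/* aligned 32-bit word that contains the target byte */
	volatile uint32_t *ptr32 =
		(volatile uint32_t *)((uintptr_t)ptr & ~(uintptr_t)0x3);
	unsigned int shift = ((uintptr_t)ptr & 0x3) * 8;	/* little-endian */
	uint32_t mask = 0xffu << shift;
	uint32_t load32 = *ptr32;

	for (;;) {
		uint8_t cur = (load32 & mask) >> shift;
		if (cur != old)
			return cur;	/* mismatch: bail, reporting the current value */

		uint32_t old32 = (load32 & ~mask) | ((uint32_t)old << shift);
		uint32_t new32 = (load32 & ~mask) | ((uint32_t)new << shift);

		/* on failure the builtin refreshes old32 with the current word */
		if (__atomic_compare_exchange_n(ptr32, &old32, new32, 0,
						__ATOMIC_SEQ_CST,
						__ATOMIC_SEQ_CST))
			return old;
		load32 = old32;
	}
}

int main(void)
{
	static uint32_t word = 0x44332211;	/* bytes 11 22 33 44 on LE */
	volatile uint8_t *b = (volatile uint8_t *)&word + 1;

	printf("returned %#x, word now %#x\n",
	       (unsigned int)cmpxchg_u8(b, 0x22, 0xaa), (unsigned int)word);
	return 0;
}
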
diff --git a/arch/mips/kernel/cps-vec-ns16550.S b/arch/mips/kernel/cps-vec-ns16550.S new file mode 100644 index 000000000..30725e1df --- /dev/null +++ b/arch/mips/kernel/cps-vec-ns16550.S | |||
@@ -0,0 +1,212 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
2 | /* | ||
3 | * Copyright (C) 2015 Imagination Technologies | ||
4 | * Author: Paul Burton <paul.burton@mips.com> | ||
5 | */ | ||
6 | |||
7 | #include <asm/addrspace.h> | ||
8 | #include <asm/asm.h> | ||
9 | #include <asm/asm-offsets.h> | ||
10 | #include <asm/mipsregs.h> | ||
11 | #include <asm/regdef.h> | ||
12 | #include <linux/serial_reg.h> | ||
13 | |||
14 | #define UART_TX_OFS (UART_TX << CONFIG_MIPS_CPS_NS16550_SHIFT) | ||
15 | #define UART_LSR_OFS (UART_LSR << CONFIG_MIPS_CPS_NS16550_SHIFT) | ||
16 | |||
17 | #if CONFIG_MIPS_CPS_NS16550_WIDTH == 1 | ||
18 | # define UART_L lb | ||
19 | # define UART_S sb | ||
20 | #elif CONFIG_MIPS_CPS_NS16550_WIDTH == 2 | ||
21 | # define UART_L lh | ||
22 | # define UART_S sh | ||
23 | #elif CONFIG_MIPS_CPS_NS16550_WIDTH == 4 | ||
24 | # define UART_L lw | ||
25 | # define UART_S sw | ||
26 | #else | ||
27 | # define UART_L lb | ||
28 | # define UART_S sb | ||
29 | #endif | ||
30 | |||
31 | /** | ||
32 | * _mips_cps_putc() - write a character to the UART | ||
33 | * @a0: ASCII character to write | ||
34 | * @t9: UART base address | ||
35 | */ | ||
36 | LEAF(_mips_cps_putc) | ||
37 | 1: UART_L t0, UART_LSR_OFS(t9) | ||
38 | andi t0, t0, UART_LSR_TEMT | ||
39 | beqz t0, 1b | ||
40 | UART_S a0, UART_TX_OFS(t9) | ||
41 | jr ra | ||
42 | END(_mips_cps_putc) | ||
43 | |||
44 | /** | ||
45 | * _mips_cps_puts() - write a string to the UART | ||
46 | * @a0: pointer to NULL-terminated ASCII string | ||
47 | * @t9: UART base address | ||
48 | * | ||
49 | * Write a null-terminated ASCII string to the UART. | ||
50 | */ | ||
51 | NESTED(_mips_cps_puts, 0, ra) | ||
52 | move s7, ra | ||
53 | move s6, a0 | ||
54 | |||
55 | 1: lb a0, 0(s6) | ||
56 | beqz a0, 2f | ||
57 | jal _mips_cps_putc | ||
58 | PTR_ADDIU s6, s6, 1 | ||
59 | b 1b | ||
60 | |||
61 | 2: jr s7 | ||
62 | END(_mips_cps_puts) | ||
63 | |||
64 | /** | ||
65 | * _mips_cps_putx4 - write a 4b hex value to the UART | ||
66 | * @a0: the 4b value to write to the UART | ||
67 | * @t9: UART base address | ||
68 | * | ||
69 | * Write a single hexadecimal character to the UART. | ||
70 | */ | ||
71 | NESTED(_mips_cps_putx4, 0, ra) | ||
72 | andi a0, a0, 0xf | ||
73 | li t0, '0' | ||
74 | blt a0, 10, 1f | ||
75 | li t0, 'a' | ||
76 | addiu a0, a0, -10 | ||
77 | 1: addu a0, a0, t0 | ||
78 | b _mips_cps_putc | ||
79 | END(_mips_cps_putx4) | ||
80 | |||
81 | /** | ||
82 | * _mips_cps_putx8 - write an 8b hex value to the UART | ||
83 | * @a0: the 8b value to write to the UART | ||
84 | * @t9: UART base address | ||
85 | * | ||
86 | * Write an 8 bit value (ie. 2 hexadecimal characters) to the UART. | ||
87 | */ | ||
88 | NESTED(_mips_cps_putx8, 0, ra) | ||
89 | move s3, ra | ||
90 | move s2, a0 | ||
91 | srl a0, a0, 4 | ||
92 | jal _mips_cps_putx4 | ||
93 | move a0, s2 | ||
94 | move ra, s3 | ||
95 | b _mips_cps_putx4 | ||
96 | END(_mips_cps_putx8) | ||
97 | |||
98 | /** | ||
99 | * _mips_cps_putx16 - write a 16b hex value to the UART | ||
100 | * @a0: the 16b value to write to the UART | ||
101 | * @t9: UART base address | ||
102 | * | ||
103 | * Write a 16 bit value (ie. 4 hexadecimal characters) to the UART. | ||
104 | */ | ||
105 | NESTED(_mips_cps_putx16, 0, ra) | ||
106 | move s5, ra | ||
107 | move s4, a0 | ||
108 | srl a0, a0, 8 | ||
109 | jal _mips_cps_putx8 | ||
110 | move a0, s4 | ||
111 | move ra, s5 | ||
112 | b _mips_cps_putx8 | ||
113 | END(_mips_cps_putx16) | ||
114 | |||
115 | /** | ||
116 | * _mips_cps_putx32 - write a 32b hex value to the UART | ||
117 | * @a0: the 32b value to write to the UART | ||
118 | * @t9: UART base address | ||
119 | * | ||
120 | * Write a 32 bit value (ie. 8 hexadecimal characters) to the UART. | ||
121 | */ | ||
122 | NESTED(_mips_cps_putx32, 0, ra) | ||
123 | move s7, ra | ||
124 | move s6, a0 | ||
125 | srl a0, a0, 16 | ||
126 | jal _mips_cps_putx16 | ||
127 | move a0, s6 | ||
128 | move ra, s7 | ||
129 | b _mips_cps_putx16 | ||
130 | END(_mips_cps_putx32) | ||
131 | |||
132 | #ifdef CONFIG_64BIT | ||
133 | |||
134 | /** | ||
135 | * _mips_cps_putx64 - write a 64b hex value to the UART | ||
136 | * @a0: the 64b value to write to the UART | ||
137 | * @t9: UART base address | ||
138 | * | ||
139 | * Write a 64 bit value (ie. 16 hexadecimal characters) to the UART. | ||
140 | */ | ||
141 | NESTED(_mips_cps_putx64, 0, ra) | ||
142 | move sp, ra | ||
143 | move s8, a0 | ||
144 | dsrl32 a0, a0, 0 | ||
145 | jal _mips_cps_putx32 | ||
146 | move a0, s8 | ||
147 | move ra, sp | ||
148 | b _mips_cps_putx32 | ||
149 | END(_mips_cps_putx64) | ||
150 | |||
151 | #define _mips_cps_putxlong _mips_cps_putx64 | ||
152 | |||
153 | #else /* !CONFIG_64BIT */ | ||
154 | |||
155 | #define _mips_cps_putxlong _mips_cps_putx32 | ||
156 | |||
157 | #endif /* !CONFIG_64BIT */ | ||
158 | |||
159 | /** | ||
160 | * mips_cps_bev_dump() - dump relevant exception state to UART | ||
161 | * @a0: pointer to NULL-terminated ASCII string naming the exception | ||
162 | * | ||
163 | * Write information that may be useful in debugging an exception to the | ||
164 | * UART configured by CONFIG_MIPS_CPS_NS16550_*. As this BEV exception | ||
165 | * will only be run if something goes horribly wrong very early during | ||
166 | * the bringup of a core and it is very likely to be unsafe to perform | ||
167 | * memory accesses at that point (cache state indeterminate, EVA may not | ||
168 | * be configured, coherence may be disabled) let alone have a stack, | ||
169 | * this is all written in assembly using only registers & unmapped | ||
170 | * uncached access to the UART registers. | ||
171 | */ | ||
172 | LEAF(mips_cps_bev_dump) | ||
173 | move s0, ra | ||
174 | move s1, a0 | ||
175 | |||
176 | li t9, CKSEG1ADDR(CONFIG_MIPS_CPS_NS16550_BASE) | ||
177 | |||
178 | PTR_LA a0, str_newline | ||
179 | jal _mips_cps_puts | ||
180 | PTR_LA a0, str_bev | ||
181 | jal _mips_cps_puts | ||
182 | move a0, s1 | ||
183 | jal _mips_cps_puts | ||
184 | PTR_LA a0, str_newline | ||
185 | jal _mips_cps_puts | ||
186 | PTR_LA a0, str_newline | ||
187 | jal _mips_cps_puts | ||
188 | |||
189 | #define DUMP_COP0_REG(reg, name, sz, _mfc0) \ | ||
190 | PTR_LA a0, 8f; \ | ||
191 | jal _mips_cps_puts; \ | ||
192 | _mfc0 a0, reg; \ | ||
193 | jal _mips_cps_putx##sz; \ | ||
194 | PTR_LA a0, str_newline; \ | ||
195 | jal _mips_cps_puts; \ | ||
196 | TEXT(name) | ||
197 | |||
198 | DUMP_COP0_REG(CP0_CAUSE, "Cause: 0x", 32, mfc0) | ||
199 | DUMP_COP0_REG(CP0_STATUS, "Status: 0x", 32, mfc0) | ||
200 | DUMP_COP0_REG(CP0_EBASE, "EBase: 0x", long, MFC0) | ||
201 | DUMP_COP0_REG(CP0_BADVADDR, "BadVAddr: 0x", long, MFC0) | ||
202 | DUMP_COP0_REG(CP0_BADINSTR, "BadInstr: 0x", 32, mfc0) | ||
203 | |||
204 | PTR_LA a0, str_newline | ||
205 | jal _mips_cps_puts | ||
206 | jr s0 | ||
207 | END(mips_cps_bev_dump) | ||
208 | |||
209 | .pushsection .data | ||
210 | str_bev: .asciiz "BEV Exception: " | ||
211 | str_newline: .asciiz "\r\n" | ||
212 | .popsection | ||
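
_mips_cps_putc above is the classic polled ns16550 transmit loop: spin on the line status register until the transmitter is completely empty (LSR.TEMT), then store the character to the TX holding register. A C rendering of the same technique follows; the base address and register shift are hypothetical stand-ins for the CONFIG_MIPS_CPS_NS16550_* settings, and the code is only meaningful on a target with the UART actually mapped there.

#include <stdint.h>

#define UART_BASE	((volatile uint8_t *)(uintptr_t)0xb80003f8) /* assumed CKSEG1 address */
#define REG_SHIFT	0		/* stands in for CONFIG_MIPS_CPS_NS16550_SHIFT */
#define UART_TX		(0 << REG_SHIFT)	/* transmit holding register */
#define UART_LSR	(5 << REG_SHIFT)	/* line status register */
#define UART_LSR_TEMT	0x40			/* transmitter empty */

static void cps_putc(char c)
{
	/* spin until the transmitter is idle, then write, as in _mips_cps_putc */
	while (!(UART_BASE[UART_LSR] & UART_LSR_TEMT))
		;
	UART_BASE[UART_TX] = (uint8_t)c;
}

static void cps_puts(const char *s)
{
	while (*s)
		cps_putc(*s++);
}

int main(void)
{
	cps_puts("BEV Exception: example\r\n");	/* only works with the UART mapped */
	return 0;
}
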
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S new file mode 100644 index 000000000..4db7ff055 --- /dev/null +++ b/arch/mips/kernel/cps-vec.S | |||
@@ -0,0 +1,630 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
2 | /* | ||
3 | * Copyright (C) 2013 Imagination Technologies | ||
4 | * Author: Paul Burton <paul.burton@mips.com> | ||
5 | */ | ||
6 | |||
7 | #include <asm/addrspace.h> | ||
8 | #include <asm/asm.h> | ||
9 | #include <asm/asm-offsets.h> | ||
10 | #include <asm/asmmacro.h> | ||
11 | #include <asm/cacheops.h> | ||
12 | #include <asm/eva.h> | ||
13 | #include <asm/mipsregs.h> | ||
14 | #include <asm/mipsmtregs.h> | ||
15 | #include <asm/pm.h> | ||
16 | |||
17 | #define GCR_CPC_BASE_OFS 0x0088 | ||
18 | #define GCR_CL_COHERENCE_OFS 0x2008 | ||
19 | #define GCR_CL_ID_OFS 0x2028 | ||
20 | |||
21 | #define CPC_CL_VC_STOP_OFS 0x2020 | ||
22 | #define CPC_CL_VC_RUN_OFS 0x2028 | ||
23 | |||
24 | .extern mips_cm_base | ||
25 | |||
26 | .set noreorder | ||
27 | |||
28 | #ifdef CONFIG_64BIT | ||
29 | # define STATUS_BITDEPS ST0_KX | ||
30 | #else | ||
31 | # define STATUS_BITDEPS 0 | ||
32 | #endif | ||
33 | |||
34 | #ifdef CONFIG_MIPS_CPS_NS16550 | ||
35 | |||
36 | #define DUMP_EXCEP(name) \ | ||
37 | PTR_LA a0, 8f; \ | ||
38 | jal mips_cps_bev_dump; \ | ||
39 | nop; \ | ||
40 | TEXT(name) | ||
41 | |||
42 | #else /* !CONFIG_MIPS_CPS_NS16550 */ | ||
43 | |||
44 | #define DUMP_EXCEP(name) | ||
45 | |||
46 | #endif /* !CONFIG_MIPS_CPS_NS16550 */ | ||
47 | |||
48 | /* | ||
49 | * Set dest to non-zero if the core supports the MT ASE, else zero. If | ||
50 | * MT is not supported then branch to nomt. | ||
51 | */ | ||
52 | .macro has_mt dest, nomt | ||
53 | mfc0 \dest, CP0_CONFIG, 1 | ||
54 | bgez \dest, \nomt | ||
55 | mfc0 \dest, CP0_CONFIG, 2 | ||
56 | bgez \dest, \nomt | ||
57 | mfc0 \dest, CP0_CONFIG, 3 | ||
58 | andi \dest, \dest, MIPS_CONF3_MT | ||
59 | beqz \dest, \nomt | ||
60 | nop | ||
61 | .endm | ||
62 | |||
63 | /* | ||
64 | * Set dest to non-zero if the core supports MIPSr6 multithreading | ||
65 | * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then | ||
66 | * branch to nomt. | ||
67 | */ | ||
68 | .macro has_vp dest, nomt | ||
69 | mfc0 \dest, CP0_CONFIG, 1 | ||
70 | bgez \dest, \nomt | ||
71 | mfc0 \dest, CP0_CONFIG, 2 | ||
72 | bgez \dest, \nomt | ||
73 | mfc0 \dest, CP0_CONFIG, 3 | ||
74 | bgez \dest, \nomt | ||
75 | mfc0 \dest, CP0_CONFIG, 4 | ||
76 | bgez \dest, \nomt | ||
77 | mfc0 \dest, CP0_CONFIG, 5 | ||
78 | andi \dest, \dest, MIPS_CONF5_VP | ||
79 | beqz \dest, \nomt | ||
80 | nop | ||
81 | .endm | ||
82 | |||
83 | /* Calculate an uncached address for the CM GCRs */ | ||
84 | .macro cmgcrb dest | ||
85 | .set push | ||
86 | .set noat | ||
87 | MFC0 $1, CP0_CMGCRBASE | ||
88 | PTR_SLL $1, $1, 4 | ||
89 | PTR_LI \dest, UNCAC_BASE | ||
90 | PTR_ADDU \dest, \dest, $1 | ||
91 | .set pop | ||
92 | .endm | ||
93 | |||
94 | .section .text.cps-vec | ||
95 | .balign 0x1000 | ||
96 | |||
97 | LEAF(mips_cps_core_entry) | ||
98 | /* | ||
99 | * These first 4 bytes will be patched by cps_smp_setup to load the | ||
100 | * CCA to use into register s0. | ||
101 | */ | ||
102 | .word 0 | ||
103 | |||
104 | /* Check whether we're here due to an NMI */ | ||
105 | mfc0 k0, CP0_STATUS | ||
106 | and k0, k0, ST0_NMI | ||
107 | beqz k0, not_nmi | ||
108 | nop | ||
109 | |||
110 | /* This is an NMI */ | ||
111 | PTR_LA k0, nmi_handler | ||
112 | jr k0 | ||
113 | nop | ||
114 | |||
115 | not_nmi: | ||
116 | /* Setup Cause */ | ||
117 | li t0, CAUSEF_IV | ||
118 | mtc0 t0, CP0_CAUSE | ||
119 | |||
120 | /* Setup Status */ | ||
121 | li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS | ||
122 | mtc0 t0, CP0_STATUS | ||
123 | |||
124 | /* Skip cache & coherence setup if we're already coherent */ | ||
125 | cmgcrb v1 | ||
126 | lw s7, GCR_CL_COHERENCE_OFS(v1) | ||
127 | bnez s7, 1f | ||
128 | nop | ||
129 | |||
130 | /* Initialize the L1 caches */ | ||
131 | jal mips_cps_cache_init | ||
132 | nop | ||
133 | |||
134 | /* Enter the coherent domain */ | ||
135 | li t0, 0xff | ||
136 | sw t0, GCR_CL_COHERENCE_OFS(v1) | ||
137 | ehb | ||
138 | |||
139 | /* Set Kseg0 CCA to that in s0 */ | ||
140 | 1: mfc0 t0, CP0_CONFIG | ||
141 | ori t0, 0x7 | ||
142 | xori t0, 0x7 | ||
143 | or t0, t0, s0 | ||
144 | mtc0 t0, CP0_CONFIG | ||
145 | ehb | ||
146 | |||
147 | /* Jump to kseg0 */ | ||
148 | PTR_LA t0, 1f | ||
149 | jr t0 | ||
150 | nop | ||
151 | |||
152 | /* | ||
153 | * We're up, cached & coherent. Perform any EVA initialization necessary | ||
154 | * before we access memory. | ||
155 | */ | ||
156 | 1: eva_init | ||
157 | |||
158 | /* Retrieve boot configuration pointers */ | ||
159 | jal mips_cps_get_bootcfg | ||
160 | nop | ||
161 | |||
162 | /* Skip core-level init if we started up coherent */ | ||
163 | bnez s7, 1f | ||
164 | nop | ||
165 | |||
166 | /* Perform any further required core-level initialisation */ | ||
167 | jal mips_cps_core_init | ||
168 | nop | ||
169 | |||
170 | /* | ||
171 | * Boot any other VPEs within this core that should be online, and | ||
172 | * deactivate this VPE if it should be offline. | ||
173 | */ | ||
174 | move a1, t9 | ||
175 | jal mips_cps_boot_vpes | ||
176 | move a0, v0 | ||
177 | |||
178 | /* Off we go! */ | ||
179 | 1: PTR_L t1, VPEBOOTCFG_PC(v1) | ||
180 | PTR_L gp, VPEBOOTCFG_GP(v1) | ||
181 | PTR_L sp, VPEBOOTCFG_SP(v1) | ||
182 | jr t1 | ||
183 | nop | ||
184 | END(mips_cps_core_entry) | ||
185 | |||
186 | .org 0x200 | ||
187 | LEAF(excep_tlbfill) | ||
188 | DUMP_EXCEP("TLB Fill") | ||
189 | b . | ||
190 | nop | ||
191 | END(excep_tlbfill) | ||
192 | |||
193 | .org 0x280 | ||
194 | LEAF(excep_xtlbfill) | ||
195 | DUMP_EXCEP("XTLB Fill") | ||
196 | b . | ||
197 | nop | ||
198 | END(excep_xtlbfill) | ||
199 | |||
200 | .org 0x300 | ||
201 | LEAF(excep_cache) | ||
202 | DUMP_EXCEP("Cache") | ||
203 | b . | ||
204 | nop | ||
205 | END(excep_cache) | ||
206 | |||
207 | .org 0x380 | ||
208 | LEAF(excep_genex) | ||
209 | DUMP_EXCEP("General") | ||
210 | b . | ||
211 | nop | ||
212 | END(excep_genex) | ||
213 | |||
214 | .org 0x400 | ||
215 | LEAF(excep_intex) | ||
216 | DUMP_EXCEP("Interrupt") | ||
217 | b . | ||
218 | nop | ||
219 | END(excep_intex) | ||
220 | |||
221 | .org 0x480 | ||
222 | LEAF(excep_ejtag) | ||
223 | PTR_LA k0, ejtag_debug_handler | ||
224 | jr k0 | ||
225 | nop | ||
226 | END(excep_ejtag) | ||
227 | |||
228 | LEAF(mips_cps_core_init) | ||
229 | #ifdef CONFIG_MIPS_MT_SMP | ||
230 | /* Check that the core implements the MT ASE */ | ||
231 | has_mt t0, 3f | ||
232 | |||
233 | .set push | ||
234 | .set MIPS_ISA_LEVEL_RAW | ||
235 | .set mt | ||
236 | |||
237 | /* Only allow 1 TC per VPE to execute... */ | ||
238 | dmt | ||
239 | |||
240 | /* ...and for the moment only 1 VPE */ | ||
241 | dvpe | ||
242 | PTR_LA t1, 1f | ||
243 | jr.hb t1 | ||
244 | nop | ||
245 | |||
246 | /* Enter VPE configuration state */ | ||
247 | 1: mfc0 t0, CP0_MVPCONTROL | ||
248 | ori t0, t0, MVPCONTROL_VPC | ||
249 | mtc0 t0, CP0_MVPCONTROL | ||
250 | |||
251 | /* Retrieve the number of VPEs within the core */ | ||
252 | mfc0 t0, CP0_MVPCONF0 | ||
253 | srl t0, t0, MVPCONF0_PVPE_SHIFT | ||
254 | andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT) | ||
255 | addiu ta3, t0, 1 | ||
256 | |||
257 | /* If there's only 1, we're done */ | ||
258 | beqz t0, 2f | ||
259 | nop | ||
260 | |||
261 | /* Loop through each VPE within this core */ | ||
262 | li ta1, 1 | ||
263 | |||
264 | 1: /* Operate on the appropriate TC */ | ||
265 | mtc0 ta1, CP0_VPECONTROL | ||
266 | ehb | ||
267 | |||
268 | /* Bind TC to VPE (1:1 TC:VPE mapping) */ | ||
269 | mttc0 ta1, CP0_TCBIND | ||
270 | |||
271 | /* Set exclusive TC, non-active, master */ | ||
272 | li t0, VPECONF0_MVP | ||
273 | sll t1, ta1, VPECONF0_XTC_SHIFT | ||
274 | or t0, t0, t1 | ||
275 | mttc0 t0, CP0_VPECONF0 | ||
276 | |||
277 | /* Set TC non-active, non-allocatable */ | ||
278 | mttc0 zero, CP0_TCSTATUS | ||
279 | |||
280 | /* Set TC halted */ | ||
281 | li t0, TCHALT_H | ||
282 | mttc0 t0, CP0_TCHALT | ||
283 | |||
284 | /* Next VPE */ | ||
285 | addiu ta1, ta1, 1 | ||
286 | slt t0, ta1, ta3 | ||
287 | bnez t0, 1b | ||
288 | nop | ||
289 | |||
290 | /* Leave VPE configuration state */ | ||
291 | 2: mfc0 t0, CP0_MVPCONTROL | ||
292 | xori t0, t0, MVPCONTROL_VPC | ||
293 | mtc0 t0, CP0_MVPCONTROL | ||
294 | |||
295 | 3: .set pop | ||
296 | #endif | ||
297 | jr ra | ||
298 | nop | ||
299 | END(mips_cps_core_init) | ||
300 | |||
301 | /** | ||
302 | * mips_cps_get_bootcfg() - retrieve boot configuration pointers | ||
303 | * | ||
304 | * Returns: pointer to struct core_boot_config in v0, pointer to | ||
305 | * struct vpe_boot_config in v1, VPE ID in t9 | ||
306 | */ | ||
307 | LEAF(mips_cps_get_bootcfg) | ||
308 | /* Calculate a pointer to this core's struct core_boot_config */ | ||
309 | cmgcrb t0 | ||
310 | lw t0, GCR_CL_ID_OFS(t0) | ||
311 | li t1, COREBOOTCFG_SIZE | ||
312 | mul t0, t0, t1 | ||
313 | PTR_LA t1, mips_cps_core_bootcfg | ||
314 | PTR_L t1, 0(t1) | ||
315 | PTR_ADDU v0, t0, t1 | ||
316 | |||
317 | /* Calculate this VPE's ID. If the core doesn't support MT use 0 */ | ||
318 | li t9, 0 | ||
319 | #if defined(CONFIG_CPU_MIPSR6) | ||
320 | has_vp ta2, 1f | ||
321 | |||
322 | /* | ||
323 | * Assume non-contiguous numbering. Perhaps some day we'll need | ||
324 | * to handle contiguous VP numbering, but no such systems yet | ||
325 | * exist. | ||
326 | */ | ||
327 | mfc0 t9, CP0_GLOBALNUMBER | ||
328 | andi t9, t9, MIPS_GLOBALNUMBER_VP | ||
329 | #elif defined(CONFIG_MIPS_MT_SMP) | ||
330 | has_mt ta2, 1f | ||
331 | |||
332 | /* Find the number of VPEs present in the core */ | ||
333 | mfc0 t1, CP0_MVPCONF0 | ||
334 | srl t1, t1, MVPCONF0_PVPE_SHIFT | ||
335 | andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT | ||
336 | addiu t1, t1, 1 | ||
337 | |||
338 | /* Calculate a mask for the VPE ID from EBase.CPUNum */ | ||
339 | clz t1, t1 | ||
340 | li t2, 31 | ||
341 | subu t1, t2, t1 | ||
342 | li t2, 1 | ||
343 | sll t1, t2, t1 | ||
344 | addiu t1, t1, -1 | ||
345 | |||
346 | /* Retrieve the VPE ID from EBase.CPUNum */ | ||
347 | mfc0 t9, $15, 1 | ||
348 | and t9, t9, t1 | ||
349 | #endif | ||
350 | |||
351 | 1: /* Calculate a pointer to this VPE's struct vpe_boot_config */ | ||
352 | li t1, VPEBOOTCFG_SIZE | ||
353 | mul v1, t9, t1 | ||
354 | PTR_L ta3, COREBOOTCFG_VPECONFIG(v0) | ||
355 | PTR_ADDU v1, v1, ta3 | ||
356 | |||
357 | jr ra | ||
358 | nop | ||
359 | END(mips_cps_get_bootcfg) | ||
360 | |||
361 | LEAF(mips_cps_boot_vpes) | ||
362 | lw ta2, COREBOOTCFG_VPEMASK(a0) | ||
363 | PTR_L ta3, COREBOOTCFG_VPECONFIG(a0) | ||
364 | |||
365 | #if defined(CONFIG_CPU_MIPSR6) | ||
366 | |||
367 | has_vp t0, 5f | ||
368 | |||
369 | /* Find base address of CPC */ | ||
370 | cmgcrb t3 | ||
371 | PTR_L t1, GCR_CPC_BASE_OFS(t3) | ||
372 | PTR_LI t2, ~0x7fff | ||
373 | and t1, t1, t2 | ||
374 | PTR_LI t2, UNCAC_BASE | ||
375 | PTR_ADD t1, t1, t2 | ||
376 | |||
377 | /* Start any other VPs that ought to be running */ | ||
378 | PTR_S ta2, CPC_CL_VC_RUN_OFS(t1) | ||
379 | |||
380 | /* Ensure this VP stops running if it shouldn't be */ | ||
381 | not ta2 | ||
382 | PTR_S ta2, CPC_CL_VC_STOP_OFS(t1) | ||
383 | ehb | ||
384 | |||
385 | #elif defined(CONFIG_MIPS_MT) | ||
386 | |||
387 | /* If the core doesn't support MT then return */ | ||
388 | has_mt t0, 5f | ||
389 | |||
390 | /* Enter VPE configuration state */ | ||
391 | .set push | ||
392 | .set MIPS_ISA_LEVEL_RAW | ||
393 | .set mt | ||
394 | dvpe | ||
395 | .set pop | ||
396 | |||
397 | PTR_LA t1, 1f | ||
398 | jr.hb t1 | ||
399 | nop | ||
400 | 1: mfc0 t1, CP0_MVPCONTROL | ||
401 | ori t1, t1, MVPCONTROL_VPC | ||
402 | mtc0 t1, CP0_MVPCONTROL | ||
403 | ehb | ||
404 | |||
405 | /* Loop through each VPE */ | ||
406 | move t8, ta2 | ||
407 | li ta1, 0 | ||
408 | |||
409 | /* Check whether the VPE should be running. If not, skip it */ | ||
410 | 1: andi t0, ta2, 1 | ||
411 | beqz t0, 2f | ||
412 | nop | ||
413 | |||
414 | /* Operate on the appropriate TC */ | ||
415 | mfc0 t0, CP0_VPECONTROL | ||
416 | ori t0, t0, VPECONTROL_TARGTC | ||
417 | xori t0, t0, VPECONTROL_TARGTC | ||
418 | or t0, t0, ta1 | ||
419 | mtc0 t0, CP0_VPECONTROL | ||
420 | ehb | ||
421 | |||
422 | .set push | ||
423 | .set MIPS_ISA_LEVEL_RAW | ||
424 | .set mt | ||
425 | |||
426 | /* Skip the VPE if its TC is not halted */ | ||
427 | mftc0 t0, CP0_TCHALT | ||
428 | beqz t0, 2f | ||
429 | nop | ||
430 | |||
431 | /* Calculate a pointer to the VPE's struct vpe_boot_config */ | ||
432 | li t0, VPEBOOTCFG_SIZE | ||
433 | mul t0, t0, ta1 | ||
434 | addu t0, t0, ta3 | ||
435 | |||
436 | /* Set the TC restart PC */ | ||
437 | lw t1, VPEBOOTCFG_PC(t0) | ||
438 | mttc0 t1, CP0_TCRESTART | ||
439 | |||
440 | /* Set the TC stack pointer */ | ||
441 | lw t1, VPEBOOTCFG_SP(t0) | ||
442 | mttgpr t1, sp | ||
443 | |||
444 | /* Set the TC global pointer */ | ||
445 | lw t1, VPEBOOTCFG_GP(t0) | ||
446 | mttgpr t1, gp | ||
447 | |||
448 | /* Copy config from this VPE */ | ||
449 | mfc0 t0, CP0_CONFIG | ||
450 | mttc0 t0, CP0_CONFIG | ||
451 | |||
452 | /* | ||
453 | * Copy the EVA config from this VPE if the CPU supports it. | ||
454 | * CONFIG3 must exist to be running MT startup - just read it. | ||
455 | */ | ||
456 | mfc0 t0, CP0_CONFIG, 3 | ||
457 | and t0, t0, MIPS_CONF3_SC | ||
458 | beqz t0, 3f | ||
459 | nop | ||
460 | mfc0 t0, CP0_SEGCTL0 | ||
461 | mttc0 t0, CP0_SEGCTL0 | ||
462 | mfc0 t0, CP0_SEGCTL1 | ||
463 | mttc0 t0, CP0_SEGCTL1 | ||
464 | mfc0 t0, CP0_SEGCTL2 | ||
465 | mttc0 t0, CP0_SEGCTL2 | ||
466 | 3: | ||
467 | /* Ensure no software interrupts are pending */ | ||
468 | mttc0 zero, CP0_CAUSE | ||
469 | mttc0 zero, CP0_STATUS | ||
470 | |||
471 | /* Set TC active, not interrupt exempt */ | ||
472 | mftc0 t0, CP0_TCSTATUS | ||
473 | li t1, ~TCSTATUS_IXMT | ||
474 | and t0, t0, t1 | ||
475 | ori t0, t0, TCSTATUS_A | ||
476 | mttc0 t0, CP0_TCSTATUS | ||
477 | |||
478 | /* Clear the TC halt bit */ | ||
479 | mttc0 zero, CP0_TCHALT | ||
480 | |||
481 | /* Set VPE active */ | ||
482 | mftc0 t0, CP0_VPECONF0 | ||
483 | ori t0, t0, VPECONF0_VPA | ||
484 | mttc0 t0, CP0_VPECONF0 | ||
485 | |||
486 | /* Next VPE */ | ||
487 | 2: srl ta2, ta2, 1 | ||
488 | addiu ta1, ta1, 1 | ||
489 | bnez ta2, 1b | ||
490 | nop | ||
491 | |||
492 | /* Leave VPE configuration state */ | ||
493 | mfc0 t1, CP0_MVPCONTROL | ||
494 | xori t1, t1, MVPCONTROL_VPC | ||
495 | mtc0 t1, CP0_MVPCONTROL | ||
496 | ehb | ||
497 | evpe | ||
498 | |||
499 | .set pop | ||
500 | |||
501 | /* Check whether this VPE is meant to be running */ | ||
502 | li t0, 1 | ||
503 | sll t0, t0, a1 | ||
504 | and t0, t0, t8 | ||
505 | bnez t0, 2f | ||
506 | nop | ||
507 | |||
508 | /* This VPE should be offline, halt the TC */ | ||
509 | li t0, TCHALT_H | ||
510 | mtc0 t0, CP0_TCHALT | ||
511 | PTR_LA t0, 1f | ||
512 | 1: jr.hb t0 | ||
513 | nop | ||
514 | |||
515 | 2: | ||
516 | |||
517 | #endif /* CONFIG_MIPS_MT */ | ||
518 | |||
519 | /* Return */ | ||
520 | 5: jr ra | ||
521 | nop | ||
522 | END(mips_cps_boot_vpes) | ||
523 | |||
524 | LEAF(mips_cps_cache_init) | ||
525 | /* | ||
526 | * Clear the bits used to index the caches. Note that the architecture | ||
527 | * dictates that writing to any of TagLo or TagHi selects 0 or 2 should | ||
528 | * be valid for all MIPS32 CPUs, even those for which said writes are | ||
529 | * unnecessary. | ||
530 | */ | ||
531 | mtc0 zero, CP0_TAGLO, 0 | ||
532 | mtc0 zero, CP0_TAGHI, 0 | ||
533 | mtc0 zero, CP0_TAGLO, 2 | ||
534 | mtc0 zero, CP0_TAGHI, 2 | ||
535 | ehb | ||
536 | |||
537 | /* Primary cache configuration is indicated by Config1 */ | ||
538 | mfc0 v0, CP0_CONFIG, 1 | ||
539 | |||
540 | /* Detect I-cache line size */ | ||
541 | _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ | ||
542 | beqz t0, icache_done | ||
543 | li t1, 2 | ||
544 | sllv t0, t1, t0 | ||
545 | |||
546 | /* Detect I-cache size */ | ||
547 | _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ | ||
548 | xori t2, t1, 0x7 | ||
549 | beqz t2, 1f | ||
550 | li t3, 32 | ||
551 | addiu t1, t1, 1 | ||
552 | sllv t1, t3, t1 | ||
553 | 1: /* At this point t1 == I-cache sets per way */ | ||
554 | _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ | ||
555 | addiu t2, t2, 1 | ||
556 | mul t1, t1, t0 | ||
557 | mul t1, t1, t2 | ||
558 | |||
559 | li a0, CKSEG0 | ||
560 | PTR_ADD a1, a0, t1 | ||
561 | 1: cache Index_Store_Tag_I, 0(a0) | ||
562 | PTR_ADD a0, a0, t0 | ||
563 | bne a0, a1, 1b | ||
564 | nop | ||
565 | icache_done: | ||
566 | |||
567 | /* Detect D-cache line size */ | ||
568 | _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ | ||
569 | beqz t0, dcache_done | ||
570 | li t1, 2 | ||
571 | sllv t0, t1, t0 | ||
572 | |||
573 | /* Detect D-cache size */ | ||
574 | _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ | ||
575 | xori t2, t1, 0x7 | ||
576 | beqz t2, 1f | ||
577 | li t3, 32 | ||
578 | addiu t1, t1, 1 | ||
579 | sllv t1, t3, t1 | ||
580 | 1: /* At this point t1 == D-cache sets per way */ | ||
581 | _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ | ||
582 | addiu t2, t2, 1 | ||
583 | mul t1, t1, t0 | ||
584 | mul t1, t1, t2 | ||
585 | |||
586 | li a0, CKSEG0 | ||
587 | PTR_ADDU a1, a0, t1 | ||
588 | PTR_SUBU a1, a1, t0 | ||
589 | 1: cache Index_Store_Tag_D, 0(a0) | ||
590 | bne a0, a1, 1b | ||
591 | PTR_ADD a0, a0, t0 | ||
592 | dcache_done: | ||
593 | |||
594 | jr ra | ||
595 | nop | ||
596 | END(mips_cps_cache_init) | ||
597 | |||
598 | #if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM) | ||
599 | |||
600 | /* Calculate a pointer to this CPU's struct mips_static_suspend_state */ | ||
601 | .macro psstate dest | ||
602 | .set push | ||
603 | .set noat | ||
604 | lw $1, TI_CPU(gp) | ||
605 | sll $1, $1, LONGLOG | ||
606 | PTR_LA \dest, __per_cpu_offset | ||
607 | addu $1, $1, \dest | ||
608 | lw $1, 0($1) | ||
609 | PTR_LA \dest, cps_cpu_state | ||
610 | addu \dest, \dest, $1 | ||
611 | .set pop | ||
612 | .endm | ||
613 | |||
614 | LEAF(mips_cps_pm_save) | ||
615 | /* Save CPU state */ | ||
616 | SUSPEND_SAVE_REGS | ||
617 | psstate t1 | ||
618 | SUSPEND_SAVE_STATIC | ||
619 | jr v0 | ||
620 | nop | ||
621 | END(mips_cps_pm_save) | ||
622 | |||
623 | LEAF(mips_cps_pm_restore) | ||
624 | /* Restore CPU state */ | ||
625 | psstate t1 | ||
626 | RESUME_RESTORE_STATIC | ||
627 | RESUME_RESTORE_REGS_RETURN | ||
628 | END(mips_cps_pm_restore) | ||
629 | |||
630 | #endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */ | ||
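
mips_cps_cache_init above sizes each L1 cache purely from Config1: the line size is 2 << IL bytes (IL == 0 meaning no cache), sets per way follow the 64 << IS encoding (with IS == 7 denoting the special 32-set case), and associativity is IA + 1; the total footprint, used as the loop bound for the Index_Store_Tag operations, is the product of the three. A C model of that decode, with field offsets from the MIPS32 Config1 layout; the config1 value in main() is a made-up example, not read from hardware.

#include <stdio.h>
#include <stdint.h>

struct cache_geom {
	unsigned int linesize;	/* bytes; 0 = cache absent */
	unsigned int sets;	/* sets per way */
	unsigned int ways;
	unsigned int total;	/* bytes */
};

static struct cache_geom decode(uint32_t l, uint32_t s, uint32_t a)
{
	struct cache_geom g = { 0 };

	if (!l)
		return g;			/* L field of 0: no cache */
	g.linesize = 2u << l;			/* e.g. IL=4 -> 32-byte lines */
	g.sets = (s == 7) ? 32 : (64u << s);	/* S=7 is the 32-set special case */
	g.ways = a + 1;
	g.total = g.linesize * g.sets * g.ways;
	return g;
}

int main(void)
{
	/* made-up example: IS=2, IL=4, IA=3 */
	uint32_t config1 = (2u << 22) | (4u << 19) | (3u << 16);
	uint32_t is = (config1 >> 22) & 0x7;	/* MIPS_CONF1_IS */
	uint32_t il = (config1 >> 19) & 0x7;	/* MIPS_CONF1_IL */
	uint32_t ia = (config1 >> 16) & 0x7;	/* MIPS_CONF1_IA */
	struct cache_geom ic = decode(il, is, ia);

	printf("I-cache: %u sets x %u ways x %u-byte lines = %u bytes\n",
	       ic.sets, ic.ways, ic.linesize, ic.total);
	return 0;
}
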
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c new file mode 100644 index 000000000..d12020191 --- /dev/null +++ b/arch/mips/kernel/cpu-probe.c | |||
@@ -0,0 +1,2173 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Processor capabilities determination functions. | ||
4 | * | ||
5 | * Copyright (C) xxxx the Anonymous | ||
6 | * Copyright (C) 1994 - 2006 Ralf Baechle | ||
7 | * Copyright (C) 2003, 2004 Maciej W. Rozycki | ||
8 | * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. | ||
9 | */ | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/ptrace.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/stddef.h> | ||
15 | #include <linux/export.h> | ||
16 | |||
17 | #include <asm/bugs.h> | ||
18 | #include <asm/cpu.h> | ||
19 | #include <asm/cpu-features.h> | ||
20 | #include <asm/cpu-type.h> | ||
21 | #include <asm/fpu.h> | ||
22 | #include <asm/mipsregs.h> | ||
23 | #include <asm/mipsmtregs.h> | ||
24 | #include <asm/msa.h> | ||
25 | #include <asm/watch.h> | ||
26 | #include <asm/elf.h> | ||
27 | #include <asm/pgtable-bits.h> | ||
28 | #include <asm/spram.h> | ||
29 | #include <linux/uaccess.h> | ||
30 | |||
31 | #include "fpu-probe.h" | ||
32 | |||
33 | #include <asm/mach-loongson64/cpucfg-emul.h> | ||
34 | |||
35 | /* Hardware capabilities */ | ||
36 | unsigned int elf_hwcap __read_mostly; | ||
37 | EXPORT_SYMBOL_GPL(elf_hwcap); | ||
38 | |||
39 | static inline unsigned long cpu_get_msa_id(void) | ||
40 | { | ||
41 | unsigned long status, msa_id; | ||
42 | |||
43 | status = read_c0_status(); | ||
44 | __enable_fpu(FPU_64BIT); | ||
45 | enable_msa(); | ||
46 | msa_id = read_msa_ir(); | ||
47 | disable_msa(); | ||
48 | write_c0_status(status); | ||
49 | return msa_id; | ||
50 | } | ||
51 | |||
52 | static int mips_dsp_disabled; | ||
53 | |||
54 | static int __init dsp_disable(char *s) | ||
55 | { | ||
56 | cpu_data[0].ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P); | ||
57 | mips_dsp_disabled = 1; | ||
58 | |||
59 | return 1; | ||
60 | } | ||
61 | |||
62 | __setup("nodsp", dsp_disable); | ||
63 | |||
64 | static int mips_htw_disabled; | ||
65 | |||
66 | static int __init htw_disable(char *s) | ||
67 | { | ||
68 | mips_htw_disabled = 1; | ||
69 | cpu_data[0].options &= ~MIPS_CPU_HTW; | ||
70 | write_c0_pwctl(read_c0_pwctl() & | ||
71 | ~(1 << MIPS_PWCTL_PWEN_SHIFT)); | ||
72 | |||
73 | return 1; | ||
74 | } | ||
75 | |||
76 | __setup("nohtw", htw_disable); | ||
77 | |||
78 | static int mips_ftlb_disabled; | ||
79 | static int mips_has_ftlb_configured; | ||
80 | |||
81 | enum ftlb_flags { | ||
82 | FTLB_EN = 1 << 0, | ||
83 | FTLB_SET_PROB = 1 << 1, | ||
84 | }; | ||
85 | |||
86 | static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags); | ||
87 | |||
88 | static int __init ftlb_disable(char *s) | ||
89 | { | ||
90 | unsigned int config4, mmuextdef; | ||
91 | |||
92 | /* | ||
93 | * If the core hasn't done any FTLB configuration, there is nothing | ||
94 | * for us to do here. | ||
95 | */ | ||
96 | if (!mips_has_ftlb_configured) | ||
97 | return 1; | ||
98 | |||
99 | /* Disable it in the boot cpu */ | ||
100 | if (set_ftlb_enable(&cpu_data[0], 0)) { | ||
101 | pr_warn("Can't turn FTLB off\n"); | ||
102 | return 1; | ||
103 | } | ||
104 | |||
105 | config4 = read_c0_config4(); | ||
106 | |||
107 | /* Check that FTLB has been disabled */ | ||
108 | mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; | ||
109 | /* MMUSIZEEXT == VTLB ON, FTLB OFF */ | ||
110 | if (mmuextdef == MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT) { | ||
111 | /* This should never happen */ | ||
112 | pr_warn("FTLB could not be disabled!\n"); | ||
113 | return 1; | ||
114 | } | ||
115 | |||
116 | mips_ftlb_disabled = 1; | ||
117 | mips_has_ftlb_configured = 0; | ||
118 | |||
119 | /* | ||
120 | * noftlb is mainly used for debug purposes so print | ||
121 | * an informative message instead of using pr_debug() | ||
122 | */ | ||
123 | pr_info("FTLB has been disabled\n"); | ||
124 | |||
125 | /* | ||
126 | * Some of these bits are duplicated in decode_config4(). | ||
127 | * MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT is the only possible case | ||
128 | * once FTLB has been disabled so undo what decode_config4 did. | ||
129 | */ | ||
130 | cpu_data[0].tlbsize -= cpu_data[0].tlbsizeftlbways * | ||
131 | cpu_data[0].tlbsizeftlbsets; | ||
132 | cpu_data[0].tlbsizeftlbsets = 0; | ||
133 | cpu_data[0].tlbsizeftlbways = 0; | ||
134 | |||
135 | return 1; | ||
136 | } | ||
137 | |||
138 | __setup("noftlb", ftlb_disable); | ||
139 | |||
140 | /* | ||
141 |  * Check if the CPU has per-TC performance counters | ||
142 | */ | ||
143 | static inline void cpu_set_mt_per_tc_perf(struct cpuinfo_mips *c) | ||
144 | { | ||
145 | if (read_c0_config7() & MTI_CONF7_PTC) | ||
146 | c->options |= MIPS_CPU_MT_PER_TC_PERF_COUNTERS; | ||
147 | } | ||
148 | |||
149 | static inline void check_errata(void) | ||
150 | { | ||
151 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
152 | |||
153 | switch (current_cpu_type()) { | ||
154 | case CPU_34K: | ||
155 | /* | ||
156 | * Erratum "RPS May Cause Incorrect Instruction Execution" | ||
157 | 		 * This code only handles VPE0; any SMP/RTOS code | ||
158 | 		 * making use of VPE1 is responsible for that VPE. | ||
159 | */ | ||
160 | if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) | ||
161 | write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS); | ||
162 | break; | ||
163 | default: | ||
164 | break; | ||
165 | } | ||
166 | } | ||
167 | |||
168 | void __init check_bugs32(void) | ||
169 | { | ||
170 | check_errata(); | ||
171 | } | ||
172 | |||
173 | /* | ||
174 |  * Probe whether the CPU has a config register by toggling the | ||
175 |  * alternate cache bit and seeing whether it matters. | ||
176 | * It's used by cpu_probe to distinguish between R3000A and R3081. | ||
177 | */ | ||
178 | static inline int cpu_has_confreg(void) | ||
179 | { | ||
180 | #ifdef CONFIG_CPU_R3000 | ||
181 | extern unsigned long r3k_cache_size(unsigned long); | ||
182 | unsigned long size1, size2; | ||
183 | unsigned long cfg = read_c0_conf(); | ||
184 | |||
185 | size1 = r3k_cache_size(ST0_ISC); | ||
186 | write_c0_conf(cfg ^ R30XX_CONF_AC); | ||
187 | size2 = r3k_cache_size(ST0_ISC); | ||
188 | write_c0_conf(cfg); | ||
189 | return size1 != size2; | ||
190 | #else | ||
191 | return 0; | ||
192 | #endif | ||
193 | } | ||
194 | |||
195 | static inline void set_elf_platform(int cpu, const char *plat) | ||
196 | { | ||
197 | if (cpu == 0) | ||
198 | __elf_platform = plat; | ||
199 | } | ||
200 | |||
201 | static inline void set_elf_base_platform(const char *plat) | ||
202 | { | ||
203 | if (__elf_base_platform == NULL) { | ||
204 | __elf_base_platform = plat; | ||
205 | } | ||
206 | } | ||
207 | |||
208 | static inline void cpu_probe_vmbits(struct cpuinfo_mips *c) | ||
209 | { | ||
210 | #ifdef __NEED_VMBITS_PROBE | ||
211 | write_c0_entryhi(0x3fffffffffffe000ULL); | ||
212 | back_to_back_c0_hazard(); | ||
213 | c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL); | ||
214 | #endif | ||
215 | } | ||
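/*
 * Worked example for the probe above: on a core implementing 48 virtual
 * address bits, the written value 0x3fffffffffffe000 reads back as
 * 0x0000ffffffffe000, and fls64() of that value is 48, so c->vmbits == 48.
 */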
216 | |||
217 | static void set_isa(struct cpuinfo_mips *c, unsigned int isa) | ||
218 | { | ||
219 | switch (isa) { | ||
220 | case MIPS_CPU_ISA_M64R5: | ||
221 | c->isa_level |= MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5; | ||
222 | set_elf_base_platform("mips64r5"); | ||
223 | fallthrough; | ||
224 | case MIPS_CPU_ISA_M64R2: | ||
225 | c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2; | ||
226 | set_elf_base_platform("mips64r2"); | ||
227 | fallthrough; | ||
228 | case MIPS_CPU_ISA_M64R1: | ||
229 | c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1; | ||
230 | set_elf_base_platform("mips64"); | ||
231 | fallthrough; | ||
232 | case MIPS_CPU_ISA_V: | ||
233 | c->isa_level |= MIPS_CPU_ISA_V; | ||
234 | set_elf_base_platform("mips5"); | ||
235 | fallthrough; | ||
236 | case MIPS_CPU_ISA_IV: | ||
237 | c->isa_level |= MIPS_CPU_ISA_IV; | ||
238 | set_elf_base_platform("mips4"); | ||
239 | fallthrough; | ||
240 | case MIPS_CPU_ISA_III: | ||
241 | c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III; | ||
242 | set_elf_base_platform("mips3"); | ||
243 | break; | ||
244 | |||
245 | /* R6 incompatible with everything else */ | ||
246 | case MIPS_CPU_ISA_M64R6: | ||
247 | c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6; | ||
248 | set_elf_base_platform("mips64r6"); | ||
249 | fallthrough; | ||
250 | case MIPS_CPU_ISA_M32R6: | ||
251 | c->isa_level |= MIPS_CPU_ISA_M32R6; | ||
252 | set_elf_base_platform("mips32r6"); | ||
253 | /* Break here so we don't add incompatible ISAs */ | ||
254 | break; | ||
255 | case MIPS_CPU_ISA_M32R5: | ||
256 | c->isa_level |= MIPS_CPU_ISA_M32R5; | ||
257 | set_elf_base_platform("mips32r5"); | ||
258 | fallthrough; | ||
259 | case MIPS_CPU_ISA_M32R2: | ||
260 | c->isa_level |= MIPS_CPU_ISA_M32R2; | ||
261 | set_elf_base_platform("mips32r2"); | ||
262 | fallthrough; | ||
263 | case MIPS_CPU_ISA_M32R1: | ||
264 | c->isa_level |= MIPS_CPU_ISA_M32R1; | ||
265 | set_elf_base_platform("mips32"); | ||
266 | fallthrough; | ||
267 | case MIPS_CPU_ISA_II: | ||
268 | c->isa_level |= MIPS_CPU_ISA_II; | ||
269 | set_elf_base_platform("mips2"); | ||
270 | break; | ||
271 | } | ||
272 | } | ||
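/*
 * Example of the fallthrough accumulation above: set_isa(c, MIPS_CPU_ISA_M64R2)
 * leaves MIPS_CPU_ISA_M32R2 | M64R2 | M32R1 | M64R1 | V | IV | II | III all
 * set in isa_level, i.e. a mips64r2 CPU is reported as implementing every
 * earlier pre-R6 ISA as well, while the R6 cases deliberately break early.
 */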
273 | |||
274 | static char unknown_isa[] = KERN_ERR \ | ||
275 | "Unsupported ISA type, c0.config0: %d."; | ||
276 | |||
277 | static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c) | ||
278 | { | ||
279 | |||
280 | unsigned int probability = c->tlbsize / c->tlbsizevtlb; | ||
281 | |||
282 | /* | ||
283 | * 0 = All TLBWR instructions go to FTLB | ||
284 | 	 * 1 = 15:1: For every 16 TLBWR instructions, 15 go to the | ||
285 | * FTLB and 1 goes to the VTLB. | ||
286 | * 2 = 7:1: As above with 7:1 ratio. | ||
287 | * 3 = 3:1: As above with 3:1 ratio. | ||
288 | * | ||
289 | * Use the linear midpoint as the probability threshold. | ||
290 | */ | ||
291 | if (probability >= 12) | ||
292 | return 1; | ||
293 | else if (probability >= 6) | ||
294 | return 2; | ||
295 | else | ||
296 | /* | ||
297 | * So FTLB is less than 4 times bigger than VTLB. | ||
298 | * A 3:1 ratio can still be useful though. | ||
299 | */ | ||
300 | return 3; | ||
301 | } | ||
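/*
 * Worked example for the encoding above, with hypothetical sizes: a
 * 64-entry VTLB plus a 512-entry FTLB gives tlbsize = 576, so
 * probability = 576 / 64 = 9, selecting encoding 2, i.e. a 7:1 TLBWR
 * split in favour of the FTLB.
 */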
302 | |||
303 | static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags) | ||
304 | { | ||
305 | unsigned int config; | ||
306 | |||
307 | /* It's implementation dependent how the FTLB can be enabled */ | ||
308 | switch (c->cputype) { | ||
309 | case CPU_PROAPTIV: | ||
310 | case CPU_P5600: | ||
311 | case CPU_P6600: | ||
312 | /* proAptiv & related cores use Config6 to enable the FTLB */ | ||
313 | config = read_c0_config6(); | ||
314 | |||
315 | if (flags & FTLB_EN) | ||
316 | config |= MTI_CONF6_FTLBEN; | ||
317 | else | ||
318 | config &= ~MTI_CONF6_FTLBEN; | ||
319 | |||
320 | if (flags & FTLB_SET_PROB) { | ||
321 | config &= ~(3 << MTI_CONF6_FTLBP_SHIFT); | ||
322 | config |= calculate_ftlb_probability(c) | ||
323 | << MTI_CONF6_FTLBP_SHIFT; | ||
324 | } | ||
325 | |||
326 | write_c0_config6(config); | ||
327 | back_to_back_c0_hazard(); | ||
328 | break; | ||
329 | case CPU_I6400: | ||
330 | case CPU_I6500: | ||
331 | /* There's no way to disable the FTLB */ | ||
332 | if (!(flags & FTLB_EN)) | ||
333 | return 1; | ||
334 | return 0; | ||
335 | case CPU_LOONGSON64: | ||
336 | /* Flush ITLB, DTLB, VTLB and FTLB */ | ||
337 | write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB | | ||
338 | LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB); | ||
339 | /* Loongson-3 cores use Config6 to enable the FTLB */ | ||
340 | config = read_c0_config6(); | ||
341 | if (flags & FTLB_EN) | ||
342 | /* Enable FTLB */ | ||
343 | write_c0_config6(config & ~LOONGSON_CONF6_FTLBDIS); | ||
344 | else | ||
345 | /* Disable FTLB */ | ||
346 | write_c0_config6(config | LOONGSON_CONF6_FTLBDIS); | ||
347 | break; | ||
348 | default: | ||
349 | return 1; | ||
350 | } | ||
351 | |||
352 | return 0; | ||
353 | } | ||
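/*
 * Field update pattern used in the proAptiv branch above: clear the two
 * FTLBP bits, then OR in the new encoding. For example, assuming
 * MTI_CONF6_FTLBP_SHIFT == 16 as in the MTI Config6 layout, a
 * calculate_ftlb_probability() result of 2 sets Config6[17:16] = 0b10.
 */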
354 | |||
355 | static int mm_config(struct cpuinfo_mips *c) | ||
356 | { | ||
357 | unsigned int config0, update, mm; | ||
358 | |||
359 | config0 = read_c0_config(); | ||
360 | mm = config0 & MIPS_CONF_MM; | ||
361 | |||
362 | /* | ||
363 | * It's implementation dependent what type of write-merge is supported | ||
364 | 	 * and whether it can be enabled/disabled. If it is settable, allow | ||
365 | 	 * merging by default. Some platforms might not support | ||
366 | 	 * write-through caching; in that case just ignore the | ||
367 | 	 * CP0.Config.MM bit field value. | ||
368 | */ | ||
369 | switch (c->cputype) { | ||
370 | case CPU_24K: | ||
371 | case CPU_34K: | ||
372 | case CPU_74K: | ||
373 | case CPU_P5600: | ||
374 | case CPU_P6600: | ||
375 | c->options |= MIPS_CPU_MM_FULL; | ||
376 | update = MIPS_CONF_MM_FULL; | ||
377 | break; | ||
378 | case CPU_1004K: | ||
379 | case CPU_1074K: | ||
380 | case CPU_INTERAPTIV: | ||
381 | case CPU_PROAPTIV: | ||
382 | mm = 0; | ||
383 | fallthrough; | ||
384 | default: | ||
385 | update = 0; | ||
386 | break; | ||
387 | } | ||
388 | |||
389 | if (update) { | ||
390 | config0 = (config0 & ~MIPS_CONF_MM) | update; | ||
391 | write_c0_config(config0); | ||
392 | } else if (mm == MIPS_CONF_MM_SYSAD) { | ||
393 | c->options |= MIPS_CPU_MM_SYSAD; | ||
394 | } else if (mm == MIPS_CONF_MM_FULL) { | ||
395 | c->options |= MIPS_CPU_MM_FULL; | ||
396 | } | ||
397 | |||
398 | return 0; | ||
399 | } | ||
400 | |||
401 | static inline unsigned int decode_config0(struct cpuinfo_mips *c) | ||
402 | { | ||
403 | unsigned int config0; | ||
404 | int isa, mt; | ||
405 | |||
406 | config0 = read_c0_config(); | ||
407 | |||
408 | /* | ||
409 | * Look for Standard TLB or Dual VTLB and FTLB | ||
410 | */ | ||
411 | mt = config0 & MIPS_CONF_MT; | ||
412 | if (mt == MIPS_CONF_MT_TLB) | ||
413 | c->options |= MIPS_CPU_TLB; | ||
414 | else if (mt == MIPS_CONF_MT_FTLB) | ||
415 | c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB; | ||
416 | |||
417 | isa = (config0 & MIPS_CONF_AT) >> 13; | ||
418 | switch (isa) { | ||
419 | case 0: | ||
420 | switch ((config0 & MIPS_CONF_AR) >> 10) { | ||
421 | case 0: | ||
422 | set_isa(c, MIPS_CPU_ISA_M32R1); | ||
423 | break; | ||
424 | case 1: | ||
425 | set_isa(c, MIPS_CPU_ISA_M32R2); | ||
426 | break; | ||
427 | case 2: | ||
428 | set_isa(c, MIPS_CPU_ISA_M32R6); | ||
429 | break; | ||
430 | default: | ||
431 | goto unknown; | ||
432 | } | ||
433 | break; | ||
434 | case 2: | ||
435 | switch ((config0 & MIPS_CONF_AR) >> 10) { | ||
436 | case 0: | ||
437 | set_isa(c, MIPS_CPU_ISA_M64R1); | ||
438 | break; | ||
439 | case 1: | ||
440 | set_isa(c, MIPS_CPU_ISA_M64R2); | ||
441 | break; | ||
442 | case 2: | ||
443 | set_isa(c, MIPS_CPU_ISA_M64R6); | ||
444 | break; | ||
445 | default: | ||
446 | goto unknown; | ||
447 | } | ||
448 | break; | ||
449 | default: | ||
450 | goto unknown; | ||
451 | } | ||
452 | |||
453 | return config0 & MIPS_CONF_M; | ||
454 | |||
455 | unknown: | ||
456 | panic(unknown_isa, config0); | ||
457 | } | ||
458 | |||
459 | static inline unsigned int decode_config1(struct cpuinfo_mips *c) | ||
460 | { | ||
461 | unsigned int config1; | ||
462 | |||
463 | config1 = read_c0_config1(); | ||
464 | |||
465 | if (config1 & MIPS_CONF1_MD) | ||
466 | c->ases |= MIPS_ASE_MDMX; | ||
467 | if (config1 & MIPS_CONF1_PC) | ||
468 | c->options |= MIPS_CPU_PERF; | ||
469 | if (config1 & MIPS_CONF1_WR) | ||
470 | c->options |= MIPS_CPU_WATCH; | ||
471 | if (config1 & MIPS_CONF1_CA) | ||
472 | c->ases |= MIPS_ASE_MIPS16; | ||
473 | if (config1 & MIPS_CONF1_EP) | ||
474 | c->options |= MIPS_CPU_EJTAG; | ||
475 | if (config1 & MIPS_CONF1_FP) { | ||
476 | c->options |= MIPS_CPU_FPU; | ||
477 | c->options |= MIPS_CPU_32FPR; | ||
478 | } | ||
479 | if (cpu_has_tlb) { | ||
480 | c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1; | ||
481 | c->tlbsizevtlb = c->tlbsize; | ||
482 | c->tlbsizeftlbsets = 0; | ||
483 | } | ||
484 | |||
485 | return config1 & MIPS_CONF_M; | ||
486 | } | ||
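/*
 * The MMUSize field decoded above (Config1[30:25]) encodes "entries - 1",
 * so a raw field value of 47 yields c->tlbsize == 48, the classic R4K
 * JTLB size.
 */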
487 | |||
488 | static inline unsigned int decode_config2(struct cpuinfo_mips *c) | ||
489 | { | ||
490 | unsigned int config2; | ||
491 | |||
492 | config2 = read_c0_config2(); | ||
493 | |||
494 | if (config2 & MIPS_CONF2_SL) | ||
495 | c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT; | ||
496 | |||
497 | return config2 & MIPS_CONF_M; | ||
498 | } | ||
499 | |||
500 | static inline unsigned int decode_config3(struct cpuinfo_mips *c) | ||
501 | { | ||
502 | unsigned int config3; | ||
503 | |||
504 | config3 = read_c0_config3(); | ||
505 | |||
506 | if (config3 & MIPS_CONF3_SM) { | ||
507 | c->ases |= MIPS_ASE_SMARTMIPS; | ||
508 | c->options |= MIPS_CPU_RIXI | MIPS_CPU_CTXTC; | ||
509 | } | ||
510 | if (config3 & MIPS_CONF3_RXI) | ||
511 | c->options |= MIPS_CPU_RIXI; | ||
512 | if (config3 & MIPS_CONF3_CTXTC) | ||
513 | c->options |= MIPS_CPU_CTXTC; | ||
514 | if (config3 & MIPS_CONF3_DSP) | ||
515 | c->ases |= MIPS_ASE_DSP; | ||
516 | if (config3 & MIPS_CONF3_DSP2P) { | ||
517 | c->ases |= MIPS_ASE_DSP2P; | ||
518 | if (cpu_has_mips_r6) | ||
519 | c->ases |= MIPS_ASE_DSP3; | ||
520 | } | ||
521 | if (config3 & MIPS_CONF3_VINT) | ||
522 | c->options |= MIPS_CPU_VINT; | ||
523 | if (config3 & MIPS_CONF3_VEIC) | ||
524 | c->options |= MIPS_CPU_VEIC; | ||
525 | if (config3 & MIPS_CONF3_LPA) | ||
526 | c->options |= MIPS_CPU_LPA; | ||
527 | if (config3 & MIPS_CONF3_MT) | ||
528 | c->ases |= MIPS_ASE_MIPSMT; | ||
529 | if (config3 & MIPS_CONF3_ULRI) | ||
530 | c->options |= MIPS_CPU_ULRI; | ||
531 | if (config3 & MIPS_CONF3_ISA) | ||
532 | c->options |= MIPS_CPU_MICROMIPS; | ||
533 | if (config3 & MIPS_CONF3_VZ) | ||
534 | c->ases |= MIPS_ASE_VZ; | ||
535 | if (config3 & MIPS_CONF3_SC) | ||
536 | c->options |= MIPS_CPU_SEGMENTS; | ||
537 | if (config3 & MIPS_CONF3_BI) | ||
538 | c->options |= MIPS_CPU_BADINSTR; | ||
539 | if (config3 & MIPS_CONF3_BP) | ||
540 | c->options |= MIPS_CPU_BADINSTRP; | ||
541 | if (config3 & MIPS_CONF3_MSA) | ||
542 | c->ases |= MIPS_ASE_MSA; | ||
543 | if (config3 & MIPS_CONF3_PW) { | ||
544 | c->htw_seq = 0; | ||
545 | c->options |= MIPS_CPU_HTW; | ||
546 | } | ||
547 | if (config3 & MIPS_CONF3_CDMM) | ||
548 | c->options |= MIPS_CPU_CDMM; | ||
549 | if (config3 & MIPS_CONF3_SP) | ||
550 | c->options |= MIPS_CPU_SP; | ||
551 | |||
552 | return config3 & MIPS_CONF_M; | ||
553 | } | ||
554 | |||
555 | static inline unsigned int decode_config4(struct cpuinfo_mips *c) | ||
556 | { | ||
557 | unsigned int config4; | ||
558 | unsigned int newcf4; | ||
559 | unsigned int mmuextdef; | ||
560 | unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE; | ||
561 | unsigned long asid_mask; | ||
562 | |||
563 | config4 = read_c0_config4(); | ||
564 | |||
565 | if (cpu_has_tlb) { | ||
566 | if (((config4 & MIPS_CONF4_IE) >> 29) == 2) | ||
567 | c->options |= MIPS_CPU_TLBINV; | ||
568 | |||
569 | /* | ||
570 | * R6 has dropped the MMUExtDef field from config4. | ||
571 | * On R6 the fields always describe the FTLB, and only if it is | ||
572 | * present according to Config.MT. | ||
573 | */ | ||
574 | if (!cpu_has_mips_r6) | ||
575 | mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF; | ||
576 | else if (cpu_has_ftlb) | ||
577 | mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT; | ||
578 | else | ||
579 | mmuextdef = 0; | ||
580 | |||
581 | switch (mmuextdef) { | ||
582 | case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT: | ||
583 | c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40; | ||
584 | c->tlbsizevtlb = c->tlbsize; | ||
585 | break; | ||
586 | case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT: | ||
587 | c->tlbsizevtlb += | ||
588 | ((config4 & MIPS_CONF4_VTLBSIZEEXT) >> | ||
589 | MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40; | ||
590 | c->tlbsize = c->tlbsizevtlb; | ||
591 | ftlb_page = MIPS_CONF4_VFTLBPAGESIZE; | ||
592 | fallthrough; | ||
593 | case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT: | ||
594 | if (mips_ftlb_disabled) | ||
595 | break; | ||
596 | newcf4 = (config4 & ~ftlb_page) | | ||
597 | (page_size_ftlb(mmuextdef) << | ||
598 | MIPS_CONF4_FTLBPAGESIZE_SHIFT); | ||
599 | write_c0_config4(newcf4); | ||
600 | back_to_back_c0_hazard(); | ||
601 | config4 = read_c0_config4(); | ||
602 | if (config4 != newcf4) { | ||
603 | pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n", | ||
604 | PAGE_SIZE, config4); | ||
605 | /* Switch FTLB off */ | ||
606 | set_ftlb_enable(c, 0); | ||
607 | mips_ftlb_disabled = 1; | ||
608 | break; | ||
609 | } | ||
610 | c->tlbsizeftlbsets = 1 << | ||
611 | ((config4 & MIPS_CONF4_FTLBSETS) >> | ||
612 | MIPS_CONF4_FTLBSETS_SHIFT); | ||
613 | c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >> | ||
614 | MIPS_CONF4_FTLBWAYS_SHIFT) + 2; | ||
615 | c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets; | ||
616 | mips_has_ftlb_configured = 1; | ||
617 | break; | ||
618 | } | ||
619 | } | ||
620 | |||
621 | c->kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST) | ||
622 | >> MIPS_CONF4_KSCREXIST_SHIFT; | ||
623 | |||
624 | asid_mask = MIPS_ENTRYHI_ASID; | ||
625 | if (config4 & MIPS_CONF4_AE) | ||
626 | asid_mask |= MIPS_ENTRYHI_ASIDX; | ||
627 | set_cpu_asid_mask(c, asid_mask); | ||
628 | |||
629 | /* | ||
630 | * Warn if the computed ASID mask doesn't match the mask the kernel | ||
631 | * is built for. This may indicate either a serious problem or an | ||
632 | * easy optimisation opportunity, but either way should be addressed. | ||
633 | */ | ||
634 | WARN_ON(asid_mask != cpu_asid_mask(c)); | ||
635 | |||
636 | return config4 & MIPS_CONF_M; | ||
637 | } | ||
638 | |||
639 | static inline unsigned int decode_config5(struct cpuinfo_mips *c) | ||
640 | { | ||
641 | unsigned int config5, max_mmid_width; | ||
642 | unsigned long asid_mask; | ||
643 | |||
644 | config5 = read_c0_config5(); | ||
645 | config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE); | ||
646 | |||
647 | if (cpu_has_mips_r6) { | ||
648 | if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid) | ||
649 | config5 |= MIPS_CONF5_MI; | ||
650 | else | ||
651 | config5 &= ~MIPS_CONF5_MI; | ||
652 | } | ||
653 | |||
654 | write_c0_config5(config5); | ||
655 | |||
656 | if (config5 & MIPS_CONF5_EVA) | ||
657 | c->options |= MIPS_CPU_EVA; | ||
658 | if (config5 & MIPS_CONF5_MRP) | ||
659 | c->options |= MIPS_CPU_MAAR; | ||
660 | if (config5 & MIPS_CONF5_LLB) | ||
661 | c->options |= MIPS_CPU_RW_LLB; | ||
662 | if (config5 & MIPS_CONF5_MVH) | ||
663 | c->options |= MIPS_CPU_MVH; | ||
664 | if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP)) | ||
665 | c->options |= MIPS_CPU_VP; | ||
666 | if (config5 & MIPS_CONF5_CA2) | ||
667 | c->ases |= MIPS_ASE_MIPS16E2; | ||
668 | |||
669 | if (config5 & MIPS_CONF5_CRCP) | ||
670 | elf_hwcap |= HWCAP_MIPS_CRC32; | ||
671 | |||
672 | if (cpu_has_mips_r6) { | ||
673 | /* Ensure the write to config5 above takes effect */ | ||
674 | back_to_back_c0_hazard(); | ||
675 | |||
676 | /* Check whether we successfully enabled MMID support */ | ||
677 | config5 = read_c0_config5(); | ||
678 | if (config5 & MIPS_CONF5_MI) | ||
679 | c->options |= MIPS_CPU_MMID; | ||
680 | |||
681 | /* | ||
682 | * Warn if we've hardcoded cpu_has_mmid to a value unsuitable | ||
683 | * for the CPU we're running on, or if CPUs in an SMP system | ||
684 | * have inconsistent MMID support. | ||
685 | */ | ||
686 | WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI)); | ||
687 | |||
688 | if (cpu_has_mmid) { | ||
689 | write_c0_memorymapid(~0ul); | ||
690 | back_to_back_c0_hazard(); | ||
691 | asid_mask = read_c0_memorymapid(); | ||
692 | |||
693 | /* | ||
694 | * We maintain a bitmap to track MMID allocation, and | ||
695 | * need a sensible upper bound on the size of that | ||
696 | * bitmap. The initial CPU with MMID support (I6500) | ||
697 | * supports 16 bit MMIDs, which gives us an 8KiB | ||
698 | * bitmap. The architecture recommends that hardware | ||
699 | * support 32 bit MMIDs, which would give us a 512MiB | ||
700 | * bitmap - that's too big in most cases. | ||
701 | * | ||
702 | * Cap MMID width at 16 bits for now & we can revisit | ||
703 | * this if & when hardware supports anything wider. | ||
704 | */ | ||
705 | max_mmid_width = 16; | ||
706 | if (asid_mask > GENMASK(max_mmid_width - 1, 0)) { | ||
707 | pr_info("Capping MMID width at %d bits", | ||
708 | max_mmid_width); | ||
709 | asid_mask = GENMASK(max_mmid_width - 1, 0); | ||
710 | } | ||
711 | |||
712 | set_cpu_asid_mask(c, asid_mask); | ||
713 | } | ||
714 | } | ||
715 | |||
716 | return config5 & MIPS_CONF_M; | ||
717 | } | ||
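/*
 * A minimal sketch of the bitmap arithmetic behind the MMID cap above;
 * mmid_bitmap_bytes() is hypothetical, not kernel API. A W-bit MMID space
 * has 2^W IDs and needs one tracking bit each:
 * W = 16 -> 65536 bits = 8 KiB, W = 32 -> 2^32 bits = 512 MiB.
 */
static inline unsigned long long mmid_bitmap_bytes(unsigned int width)
{
	return (1ULL << width) / 8;	/* one bit per possible MMID */
}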
718 | |||
719 | static void decode_configs(struct cpuinfo_mips *c) | ||
720 | { | ||
721 | int ok; | ||
722 | |||
723 | /* MIPS32 or MIPS64 compliant CPU. */ | ||
724 | c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER | | ||
725 | MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK; | ||
726 | |||
727 | c->scache.flags = MIPS_CACHE_NOT_PRESENT; | ||
728 | |||
729 | /* Enable FTLB if present and not disabled */ | ||
730 | set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN); | ||
731 | |||
732 | ok = decode_config0(c); /* Read Config registers. */ | ||
733 | BUG_ON(!ok); /* Arch spec violation! */ | ||
734 | if (ok) | ||
735 | ok = decode_config1(c); | ||
736 | if (ok) | ||
737 | ok = decode_config2(c); | ||
738 | if (ok) | ||
739 | ok = decode_config3(c); | ||
740 | if (ok) | ||
741 | ok = decode_config4(c); | ||
742 | if (ok) | ||
743 | ok = decode_config5(c); | ||
744 | |||
745 | /* Probe the EBase.WG bit */ | ||
746 | if (cpu_has_mips_r2_r6) { | ||
747 | u64 ebase; | ||
748 | unsigned int status; | ||
749 | |||
750 | /* {read,write}_c0_ebase_64() may be UNDEFINED prior to r6 */ | ||
751 | ebase = cpu_has_mips64r6 ? read_c0_ebase_64() | ||
752 | : (s32)read_c0_ebase(); | ||
753 | if (ebase & MIPS_EBASE_WG) { | ||
754 | /* WG bit already set, we can avoid the clumsy probe */ | ||
755 | c->options |= MIPS_CPU_EBASE_WG; | ||
756 | } else { | ||
757 | 			/* It's UNDEFINED to change EBase while BEV=0 */ | ||
758 | status = read_c0_status(); | ||
759 | write_c0_status(status | ST0_BEV); | ||
760 | irq_enable_hazard(); | ||
761 | /* | ||
762 | * On pre-r6 cores, this may well clobber the upper bits | ||
763 | * of EBase. This is hard to avoid without potentially | ||
764 | * hitting UNDEFINED dm*c0 behaviour if EBase is 32-bit. | ||
765 | */ | ||
766 | if (cpu_has_mips64r6) | ||
767 | write_c0_ebase_64(ebase | MIPS_EBASE_WG); | ||
768 | else | ||
769 | write_c0_ebase(ebase | MIPS_EBASE_WG); | ||
770 | back_to_back_c0_hazard(); | ||
771 | /* Restore BEV */ | ||
772 | write_c0_status(status); | ||
773 | if (read_c0_ebase() & MIPS_EBASE_WG) { | ||
774 | c->options |= MIPS_CPU_EBASE_WG; | ||
775 | write_c0_ebase(ebase); | ||
776 | } | ||
777 | } | ||
778 | } | ||
779 | |||
780 | /* configure the FTLB write probability */ | ||
781 | set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB); | ||
782 | |||
783 | mips_probe_watch_registers(c); | ||
784 | |||
785 | #ifndef CONFIG_MIPS_CPS | ||
786 | if (cpu_has_mips_r2_r6) { | ||
787 | unsigned int core; | ||
788 | |||
789 | core = get_ebase_cpunum(); | ||
790 | if (cpu_has_mipsmt) | ||
791 | core >>= fls(core_nvpes()) - 1; | ||
792 | cpu_set_core(c, core); | ||
793 | } | ||
794 | #endif | ||
795 | } | ||
796 | |||
797 | /* | ||
798 | * Probe for certain guest capabilities by writing config bits and reading back. | ||
799 | * Finally write back the original value. | ||
800 | */ | ||
801 | #define probe_gc0_config(name, maxconf, bits) \ | ||
802 | do { \ | ||
803 | unsigned int tmp; \ | ||
804 | tmp = read_gc0_##name(); \ | ||
805 | write_gc0_##name(tmp | (bits)); \ | ||
806 | back_to_back_c0_hazard(); \ | ||
807 | maxconf = read_gc0_##name(); \ | ||
808 | write_gc0_##name(tmp); \ | ||
809 | } while (0) | ||
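/*
 * For illustration, probe_gc0_config(config2, config2, MIPS_CONF_M) as used
 * below expands to roughly:
 *
 *	tmp = read_gc0_config2();
 *	write_gc0_config2(tmp | MIPS_CONF_M);
 *	back_to_back_c0_hazard();
 *	config2 = read_gc0_config2();
 *	write_gc0_config2(tmp);
 *
 * i.e. set the candidate bits, read back which of them stuck, then restore
 * the original register value.
 */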
810 | |||
811 | /* | ||
812 | * Probe for dynamic guest capabilities by changing certain config bits and | ||
813 | * reading back to see if they change. Finally write back the original value. | ||
814 | */ | ||
815 | #define probe_gc0_config_dyn(name, maxconf, dynconf, bits) \ | ||
816 | do { \ | ||
817 | maxconf = read_gc0_##name(); \ | ||
818 | write_gc0_##name(maxconf ^ (bits)); \ | ||
819 | back_to_back_c0_hazard(); \ | ||
820 | dynconf = maxconf ^ read_gc0_##name(); \ | ||
821 | write_gc0_##name(maxconf); \ | ||
822 | maxconf |= dynconf; \ | ||
823 | } while (0) | ||
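/*
 * The dynamic variant relies on XOR: after writing maxconf ^ bits, any bit
 * that reads back changed must be writable, so dynconf has a 1 exactly for
 * the "dynamic" (modifiable) fields; maxconf |= dynconf then records every
 * capability bit that is either currently set or settable. In
 * decode_guest_config1() below, a guest Config1.FP bit that sticks when
 * flipped therefore sets MIPS_CPU_FPU in both guest.options and
 * guest.options_dyn.
 */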
824 | |||
825 | static inline unsigned int decode_guest_config0(struct cpuinfo_mips *c) | ||
826 | { | ||
827 | unsigned int config0; | ||
828 | |||
829 | probe_gc0_config(config, config0, MIPS_CONF_M); | ||
830 | |||
831 | if (config0 & MIPS_CONF_M) | ||
832 | c->guest.conf |= BIT(1); | ||
833 | return config0 & MIPS_CONF_M; | ||
834 | } | ||
835 | |||
836 | static inline unsigned int decode_guest_config1(struct cpuinfo_mips *c) | ||
837 | { | ||
838 | unsigned int config1, config1_dyn; | ||
839 | |||
840 | probe_gc0_config_dyn(config1, config1, config1_dyn, | ||
841 | MIPS_CONF_M | MIPS_CONF1_PC | MIPS_CONF1_WR | | ||
842 | MIPS_CONF1_FP); | ||
843 | |||
844 | if (config1 & MIPS_CONF1_FP) | ||
845 | c->guest.options |= MIPS_CPU_FPU; | ||
846 | if (config1_dyn & MIPS_CONF1_FP) | ||
847 | c->guest.options_dyn |= MIPS_CPU_FPU; | ||
848 | |||
849 | if (config1 & MIPS_CONF1_WR) | ||
850 | c->guest.options |= MIPS_CPU_WATCH; | ||
851 | if (config1_dyn & MIPS_CONF1_WR) | ||
852 | c->guest.options_dyn |= MIPS_CPU_WATCH; | ||
853 | |||
854 | if (config1 & MIPS_CONF1_PC) | ||
855 | c->guest.options |= MIPS_CPU_PERF; | ||
856 | if (config1_dyn & MIPS_CONF1_PC) | ||
857 | c->guest.options_dyn |= MIPS_CPU_PERF; | ||
858 | |||
859 | if (config1 & MIPS_CONF_M) | ||
860 | c->guest.conf |= BIT(2); | ||
861 | return config1 & MIPS_CONF_M; | ||
862 | } | ||
863 | |||
864 | static inline unsigned int decode_guest_config2(struct cpuinfo_mips *c) | ||
865 | { | ||
866 | unsigned int config2; | ||
867 | |||
868 | probe_gc0_config(config2, config2, MIPS_CONF_M); | ||
869 | |||
870 | if (config2 & MIPS_CONF_M) | ||
871 | c->guest.conf |= BIT(3); | ||
872 | return config2 & MIPS_CONF_M; | ||
873 | } | ||
874 | |||
875 | static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c) | ||
876 | { | ||
877 | unsigned int config3, config3_dyn; | ||
878 | |||
879 | probe_gc0_config_dyn(config3, config3, config3_dyn, | ||
880 | MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_ULRI | | ||
881 | MIPS_CONF3_CTXTC); | ||
882 | |||
883 | if (config3 & MIPS_CONF3_CTXTC) | ||
884 | c->guest.options |= MIPS_CPU_CTXTC; | ||
885 | if (config3_dyn & MIPS_CONF3_CTXTC) | ||
886 | c->guest.options_dyn |= MIPS_CPU_CTXTC; | ||
887 | |||
888 | if (config3 & MIPS_CONF3_PW) | ||
889 | c->guest.options |= MIPS_CPU_HTW; | ||
890 | |||
891 | if (config3 & MIPS_CONF3_ULRI) | ||
892 | c->guest.options |= MIPS_CPU_ULRI; | ||
893 | |||
894 | if (config3 & MIPS_CONF3_SC) | ||
895 | c->guest.options |= MIPS_CPU_SEGMENTS; | ||
896 | |||
897 | if (config3 & MIPS_CONF3_BI) | ||
898 | c->guest.options |= MIPS_CPU_BADINSTR; | ||
899 | if (config3 & MIPS_CONF3_BP) | ||
900 | c->guest.options |= MIPS_CPU_BADINSTRP; | ||
901 | |||
902 | if (config3 & MIPS_CONF3_MSA) | ||
903 | c->guest.ases |= MIPS_ASE_MSA; | ||
904 | if (config3_dyn & MIPS_CONF3_MSA) | ||
905 | c->guest.ases_dyn |= MIPS_ASE_MSA; | ||
906 | |||
907 | if (config3 & MIPS_CONF_M) | ||
908 | c->guest.conf |= BIT(4); | ||
909 | return config3 & MIPS_CONF_M; | ||
910 | } | ||
911 | |||
912 | static inline unsigned int decode_guest_config4(struct cpuinfo_mips *c) | ||
913 | { | ||
914 | unsigned int config4; | ||
915 | |||
916 | probe_gc0_config(config4, config4, | ||
917 | MIPS_CONF_M | MIPS_CONF4_KSCREXIST); | ||
918 | |||
919 | c->guest.kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST) | ||
920 | >> MIPS_CONF4_KSCREXIST_SHIFT; | ||
921 | |||
922 | if (config4 & MIPS_CONF_M) | ||
923 | c->guest.conf |= BIT(5); | ||
924 | return config4 & MIPS_CONF_M; | ||
925 | } | ||
926 | |||
927 | static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c) | ||
928 | { | ||
929 | unsigned int config5, config5_dyn; | ||
930 | |||
931 | probe_gc0_config_dyn(config5, config5, config5_dyn, | ||
932 | MIPS_CONF_M | MIPS_CONF5_MVH | MIPS_CONF5_MRP); | ||
933 | |||
934 | if (config5 & MIPS_CONF5_MRP) | ||
935 | c->guest.options |= MIPS_CPU_MAAR; | ||
936 | if (config5_dyn & MIPS_CONF5_MRP) | ||
937 | c->guest.options_dyn |= MIPS_CPU_MAAR; | ||
938 | |||
939 | if (config5 & MIPS_CONF5_LLB) | ||
940 | c->guest.options |= MIPS_CPU_RW_LLB; | ||
941 | |||
942 | if (config5 & MIPS_CONF5_MVH) | ||
943 | c->guest.options |= MIPS_CPU_MVH; | ||
944 | |||
945 | if (config5 & MIPS_CONF_M) | ||
946 | c->guest.conf |= BIT(6); | ||
947 | return config5 & MIPS_CONF_M; | ||
948 | } | ||
949 | |||
950 | static inline void decode_guest_configs(struct cpuinfo_mips *c) | ||
951 | { | ||
952 | unsigned int ok; | ||
953 | |||
954 | ok = decode_guest_config0(c); | ||
955 | if (ok) | ||
956 | ok = decode_guest_config1(c); | ||
957 | if (ok) | ||
958 | ok = decode_guest_config2(c); | ||
959 | if (ok) | ||
960 | ok = decode_guest_config3(c); | ||
961 | if (ok) | ||
962 | ok = decode_guest_config4(c); | ||
963 | if (ok) | ||
964 | decode_guest_config5(c); | ||
965 | } | ||
966 | |||
967 | static inline void cpu_probe_guestctl0(struct cpuinfo_mips *c) | ||
968 | { | ||
969 | unsigned int guestctl0, temp; | ||
970 | |||
971 | guestctl0 = read_c0_guestctl0(); | ||
972 | |||
973 | if (guestctl0 & MIPS_GCTL0_G0E) | ||
974 | c->options |= MIPS_CPU_GUESTCTL0EXT; | ||
975 | if (guestctl0 & MIPS_GCTL0_G1) | ||
976 | c->options |= MIPS_CPU_GUESTCTL1; | ||
977 | if (guestctl0 & MIPS_GCTL0_G2) | ||
978 | c->options |= MIPS_CPU_GUESTCTL2; | ||
979 | if (!(guestctl0 & MIPS_GCTL0_RAD)) { | ||
980 | c->options |= MIPS_CPU_GUESTID; | ||
981 | |||
982 | /* | ||
983 | * Probe for Direct Root to Guest (DRG). Set GuestCtl1.RID = 0 | ||
984 | * first, otherwise all data accesses will be fully virtualised | ||
985 | 		 * as if they were performed in guest mode. | ||
986 | */ | ||
987 | write_c0_guestctl1(0); | ||
988 | tlbw_use_hazard(); | ||
989 | |||
990 | write_c0_guestctl0(guestctl0 | MIPS_GCTL0_DRG); | ||
991 | back_to_back_c0_hazard(); | ||
992 | temp = read_c0_guestctl0(); | ||
993 | |||
994 | if (temp & MIPS_GCTL0_DRG) { | ||
995 | write_c0_guestctl0(guestctl0); | ||
996 | c->options |= MIPS_CPU_DRG; | ||
997 | } | ||
998 | } | ||
999 | } | ||
1000 | |||
1001 | static inline void cpu_probe_guestctl1(struct cpuinfo_mips *c) | ||
1002 | { | ||
1003 | if (cpu_has_guestid) { | ||
1004 | /* determine the number of bits of GuestID available */ | ||
1005 | write_c0_guestctl1(MIPS_GCTL1_ID); | ||
1006 | back_to_back_c0_hazard(); | ||
1007 | c->guestid_mask = (read_c0_guestctl1() & MIPS_GCTL1_ID) | ||
1008 | >> MIPS_GCTL1_ID_SHIFT; | ||
1009 | write_c0_guestctl1(0); | ||
1010 | } | ||
1011 | } | ||
1012 | |||
1013 | static inline void cpu_probe_gtoffset(struct cpuinfo_mips *c) | ||
1014 | { | ||
1015 | /* determine the number of bits of GTOffset available */ | ||
1016 | write_c0_gtoffset(0xffffffff); | ||
1017 | back_to_back_c0_hazard(); | ||
1018 | c->gtoffset_mask = read_c0_gtoffset(); | ||
1019 | write_c0_gtoffset(0); | ||
1020 | } | ||
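/*
 * Writing all-ones and reading back, as above, is the generic way to probe
 * how wide a read/write CP0 field is: implemented bits read back as 1, so
 * a full 32-bit GTOffset yields gtoffset_mask == 0xffffffff, while a
 * narrower implementation reads back correspondingly fewer set bits.
 */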
1021 | |||
1022 | static inline void cpu_probe_vz(struct cpuinfo_mips *c) | ||
1023 | { | ||
1024 | cpu_probe_guestctl0(c); | ||
1025 | if (cpu_has_guestctl1) | ||
1026 | cpu_probe_guestctl1(c); | ||
1027 | |||
1028 | cpu_probe_gtoffset(c); | ||
1029 | |||
1030 | decode_guest_configs(c); | ||
1031 | } | ||
1032 | |||
1033 | #define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \ | ||
1034 | | MIPS_CPU_COUNTER) | ||
1035 | |||
1036 | static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu) | ||
1037 | { | ||
1038 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1039 | case PRID_IMP_R2000: | ||
1040 | c->cputype = CPU_R2000; | ||
1041 | __cpu_name[cpu] = "R2000"; | ||
1042 | c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; | ||
1043 | c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | | ||
1044 | MIPS_CPU_NOFPUEX; | ||
1045 | if (__cpu_has_fpu()) | ||
1046 | c->options |= MIPS_CPU_FPU; | ||
1047 | c->tlbsize = 64; | ||
1048 | break; | ||
1049 | case PRID_IMP_R3000: | ||
1050 | if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) { | ||
1051 | if (cpu_has_confreg()) { | ||
1052 | c->cputype = CPU_R3081E; | ||
1053 | __cpu_name[cpu] = "R3081"; | ||
1054 | } else { | ||
1055 | c->cputype = CPU_R3000A; | ||
1056 | __cpu_name[cpu] = "R3000A"; | ||
1057 | } | ||
1058 | } else { | ||
1059 | c->cputype = CPU_R3000; | ||
1060 | __cpu_name[cpu] = "R3000"; | ||
1061 | } | ||
1062 | c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; | ||
1063 | c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | | ||
1064 | MIPS_CPU_NOFPUEX; | ||
1065 | if (__cpu_has_fpu()) | ||
1066 | c->options |= MIPS_CPU_FPU; | ||
1067 | c->tlbsize = 64; | ||
1068 | break; | ||
1069 | case PRID_IMP_R4000: | ||
1070 | if (read_c0_config() & CONF_SC) { | ||
1071 | if ((c->processor_id & PRID_REV_MASK) >= | ||
1072 | PRID_REV_R4400) { | ||
1073 | c->cputype = CPU_R4400PC; | ||
1074 | __cpu_name[cpu] = "R4400PC"; | ||
1075 | } else { | ||
1076 | c->cputype = CPU_R4000PC; | ||
1077 | __cpu_name[cpu] = "R4000PC"; | ||
1078 | } | ||
1079 | } else { | ||
1080 | int cca = read_c0_config() & CONF_CM_CMASK; | ||
1081 | int mc; | ||
1082 | |||
1083 | /* | ||
1084 | * SC and MC versions can't be reliably told apart, | ||
1085 | * but only the latter support coherent caching | ||
1086 | * modes so assume the firmware has set the KSEG0 | ||
1087 | * coherency attribute reasonably (if uncached, we | ||
1088 | * assume SC). | ||
1089 | */ | ||
1090 | switch (cca) { | ||
1091 | case CONF_CM_CACHABLE_CE: | ||
1092 | case CONF_CM_CACHABLE_COW: | ||
1093 | case CONF_CM_CACHABLE_CUW: | ||
1094 | mc = 1; | ||
1095 | break; | ||
1096 | default: | ||
1097 | mc = 0; | ||
1098 | break; | ||
1099 | } | ||
1100 | if ((c->processor_id & PRID_REV_MASK) >= | ||
1101 | PRID_REV_R4400) { | ||
1102 | c->cputype = mc ? CPU_R4400MC : CPU_R4400SC; | ||
1103 | __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC"; | ||
1104 | } else { | ||
1105 | c->cputype = mc ? CPU_R4000MC : CPU_R4000SC; | ||
1106 | __cpu_name[cpu] = mc ? "R4000MC" : "R4000SC"; | ||
1107 | } | ||
1108 | } | ||
1109 | |||
1110 | set_isa(c, MIPS_CPU_ISA_III); | ||
1111 | c->fpu_msk31 |= FPU_CSR_CONDX; | ||
1112 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1113 | MIPS_CPU_WATCH | MIPS_CPU_VCE | | ||
1114 | MIPS_CPU_LLSC; | ||
1115 | c->tlbsize = 48; | ||
1116 | break; | ||
1117 | case PRID_IMP_VR41XX: | ||
1118 | set_isa(c, MIPS_CPU_ISA_III); | ||
1119 | c->fpu_msk31 |= FPU_CSR_CONDX; | ||
1120 | c->options = R4K_OPTS; | ||
1121 | c->tlbsize = 32; | ||
1122 | switch (c->processor_id & 0xf0) { | ||
1123 | case PRID_REV_VR4111: | ||
1124 | c->cputype = CPU_VR4111; | ||
1125 | __cpu_name[cpu] = "NEC VR4111"; | ||
1126 | break; | ||
1127 | case PRID_REV_VR4121: | ||
1128 | c->cputype = CPU_VR4121; | ||
1129 | __cpu_name[cpu] = "NEC VR4121"; | ||
1130 | break; | ||
1131 | case PRID_REV_VR4122: | ||
1132 | if ((c->processor_id & 0xf) < 0x3) { | ||
1133 | c->cputype = CPU_VR4122; | ||
1134 | __cpu_name[cpu] = "NEC VR4122"; | ||
1135 | } else { | ||
1136 | c->cputype = CPU_VR4181A; | ||
1137 | __cpu_name[cpu] = "NEC VR4181A"; | ||
1138 | } | ||
1139 | break; | ||
1140 | case PRID_REV_VR4130: | ||
1141 | if ((c->processor_id & 0xf) < 0x4) { | ||
1142 | c->cputype = CPU_VR4131; | ||
1143 | __cpu_name[cpu] = "NEC VR4131"; | ||
1144 | } else { | ||
1145 | c->cputype = CPU_VR4133; | ||
1146 | c->options |= MIPS_CPU_LLSC; | ||
1147 | __cpu_name[cpu] = "NEC VR4133"; | ||
1148 | } | ||
1149 | break; | ||
1150 | default: | ||
1151 | printk(KERN_INFO "Unexpected CPU of NEC VR4100 series\n"); | ||
1152 | c->cputype = CPU_VR41XX; | ||
1153 | __cpu_name[cpu] = "NEC Vr41xx"; | ||
1154 | break; | ||
1155 | } | ||
1156 | break; | ||
1157 | case PRID_IMP_R4600: | ||
1158 | c->cputype = CPU_R4600; | ||
1159 | __cpu_name[cpu] = "R4600"; | ||
1160 | set_isa(c, MIPS_CPU_ISA_III); | ||
1161 | c->fpu_msk31 |= FPU_CSR_CONDX; | ||
1162 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1163 | MIPS_CPU_LLSC; | ||
1164 | c->tlbsize = 48; | ||
1165 | break; | ||
1166 | #if 0 | ||
1167 | case PRID_IMP_R4650: | ||
1168 | /* | ||
1169 | * This processor doesn't have an MMU, so it's not | ||
1170 | * "real easy" to run Linux on it. It is left purely | ||
1171 | * for documentation. Commented out because it shares | ||
1172 | 	 * its c0_prid number with the TX3900. | ||
1173 | */ | ||
1174 | c->cputype = CPU_R4650; | ||
1175 | __cpu_name[cpu] = "R4650"; | ||
1176 | set_isa(c, MIPS_CPU_ISA_III); | ||
1177 | c->fpu_msk31 |= FPU_CSR_CONDX; | ||
1178 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC; | ||
1179 | c->tlbsize = 48; | ||
1180 | break; | ||
1181 | #endif | ||
1182 | case PRID_IMP_TX39: | ||
1183 | c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS; | ||
1184 | c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; | ||
1185 | |||
1186 | if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { | ||
1187 | c->cputype = CPU_TX3927; | ||
1188 | __cpu_name[cpu] = "TX3927"; | ||
1189 | c->tlbsize = 64; | ||
1190 | } else { | ||
1191 | switch (c->processor_id & PRID_REV_MASK) { | ||
1192 | case PRID_REV_TX3912: | ||
1193 | c->cputype = CPU_TX3912; | ||
1194 | __cpu_name[cpu] = "TX3912"; | ||
1195 | c->tlbsize = 32; | ||
1196 | break; | ||
1197 | case PRID_REV_TX3922: | ||
1198 | c->cputype = CPU_TX3922; | ||
1199 | __cpu_name[cpu] = "TX3922"; | ||
1200 | c->tlbsize = 64; | ||
1201 | break; | ||
1202 | } | ||
1203 | } | ||
1204 | break; | ||
1205 | case PRID_IMP_R4700: | ||
1206 | c->cputype = CPU_R4700; | ||
1207 | __cpu_name[cpu] = "R4700"; | ||
1208 | set_isa(c, MIPS_CPU_ISA_III); | ||
1209 | c->fpu_msk31 |= FPU_CSR_CONDX; | ||
1210 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1211 | MIPS_CPU_LLSC; | ||
1212 | c->tlbsize = 48; | ||
1213 | break; | ||
1214 | case PRID_IMP_TX49: | ||
1215 | c->cputype = CPU_TX49XX; | ||
1216 | __cpu_name[cpu] = "R49XX"; | ||
1217 | set_isa(c, MIPS_CPU_ISA_III); | ||
1218 | c->fpu_msk31 |= FPU_CSR_CONDX; | ||
1219 | c->options = R4K_OPTS | MIPS_CPU_LLSC; | ||
1220 | if (!(c->processor_id & 0x08)) | ||
1221 | c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR; | ||
1222 | c->tlbsize = 48; | ||
1223 | break; | ||
1224 | case PRID_IMP_R5000: | ||
1225 | c->cputype = CPU_R5000; | ||
1226 | __cpu_name[cpu] = "R5000"; | ||
1227 | set_isa(c, MIPS_CPU_ISA_IV); | ||
1228 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1229 | MIPS_CPU_LLSC; | ||
1230 | c->tlbsize = 48; | ||
1231 | break; | ||
1232 | case PRID_IMP_R5500: | ||
1233 | c->cputype = CPU_R5500; | ||
1234 | __cpu_name[cpu] = "R5500"; | ||
1235 | set_isa(c, MIPS_CPU_ISA_IV); | ||
1236 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1237 | MIPS_CPU_WATCH | MIPS_CPU_LLSC; | ||
1238 | c->tlbsize = 48; | ||
1239 | break; | ||
1240 | case PRID_IMP_NEVADA: | ||
1241 | c->cputype = CPU_NEVADA; | ||
1242 | __cpu_name[cpu] = "Nevada"; | ||
1243 | set_isa(c, MIPS_CPU_ISA_IV); | ||
1244 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1245 | MIPS_CPU_DIVEC | MIPS_CPU_LLSC; | ||
1246 | c->tlbsize = 48; | ||
1247 | break; | ||
1248 | case PRID_IMP_RM7000: | ||
1249 | c->cputype = CPU_RM7000; | ||
1250 | __cpu_name[cpu] = "RM7000"; | ||
1251 | set_isa(c, MIPS_CPU_ISA_IV); | ||
1252 | c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1253 | MIPS_CPU_LLSC; | ||
1254 | /* | ||
1255 | * Undocumented RM7000: Bit 29 in the info register of | ||
1256 | * the RM7000 v2.0 indicates if the TLB has 48 or 64 | ||
1257 | * entries. | ||
1258 | * | ||
1259 | * 29 1 => 64 entry JTLB | ||
1260 | * 0 => 48 entry JTLB | ||
1261 | */ | ||
1262 | c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48; | ||
1263 | break; | ||
1264 | case PRID_IMP_R10000: | ||
1265 | c->cputype = CPU_R10000; | ||
1266 | __cpu_name[cpu] = "R10000"; | ||
1267 | set_isa(c, MIPS_CPU_ISA_IV); | ||
1268 | c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | | ||
1269 | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1270 | MIPS_CPU_COUNTER | MIPS_CPU_WATCH | | ||
1271 | MIPS_CPU_LLSC; | ||
1272 | c->tlbsize = 64; | ||
1273 | break; | ||
1274 | case PRID_IMP_R12000: | ||
1275 | c->cputype = CPU_R12000; | ||
1276 | __cpu_name[cpu] = "R12000"; | ||
1277 | set_isa(c, MIPS_CPU_ISA_IV); | ||
1278 | c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | | ||
1279 | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1280 | MIPS_CPU_COUNTER | MIPS_CPU_WATCH | | ||
1281 | MIPS_CPU_LLSC; | ||
1282 | c->tlbsize = 64; | ||
1283 | write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST); | ||
1284 | break; | ||
1285 | case PRID_IMP_R14000: | ||
1286 | if (((c->processor_id >> 4) & 0x0f) > 2) { | ||
1287 | c->cputype = CPU_R16000; | ||
1288 | __cpu_name[cpu] = "R16000"; | ||
1289 | } else { | ||
1290 | c->cputype = CPU_R14000; | ||
1291 | __cpu_name[cpu] = "R14000"; | ||
1292 | } | ||
1293 | set_isa(c, MIPS_CPU_ISA_IV); | ||
1294 | c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | | ||
1295 | MIPS_CPU_FPU | MIPS_CPU_32FPR | | ||
1296 | MIPS_CPU_COUNTER | MIPS_CPU_WATCH | | ||
1297 | MIPS_CPU_LLSC; | ||
1298 | c->tlbsize = 64; | ||
1299 | write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST); | ||
1300 | break; | ||
1301 | case PRID_IMP_LOONGSON_64C: /* Loongson-2/3 */ | ||
1302 | switch (c->processor_id & PRID_REV_MASK) { | ||
1303 | case PRID_REV_LOONGSON2E: | ||
1304 | c->cputype = CPU_LOONGSON2EF; | ||
1305 | __cpu_name[cpu] = "ICT Loongson-2"; | ||
1306 | set_elf_platform(cpu, "loongson2e"); | ||
1307 | set_isa(c, MIPS_CPU_ISA_III); | ||
1308 | c->fpu_msk31 |= FPU_CSR_CONDX; | ||
1309 | break; | ||
1310 | case PRID_REV_LOONGSON2F: | ||
1311 | c->cputype = CPU_LOONGSON2EF; | ||
1312 | __cpu_name[cpu] = "ICT Loongson-2"; | ||
1313 | set_elf_platform(cpu, "loongson2f"); | ||
1314 | set_isa(c, MIPS_CPU_ISA_III); | ||
1315 | c->fpu_msk31 |= FPU_CSR_CONDX; | ||
1316 | break; | ||
1317 | case PRID_REV_LOONGSON3A_R1: | ||
1318 | c->cputype = CPU_LOONGSON64; | ||
1319 | __cpu_name[cpu] = "ICT Loongson-3"; | ||
1320 | set_elf_platform(cpu, "loongson3a"); | ||
1321 | set_isa(c, MIPS_CPU_ISA_M64R1); | ||
1322 | c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | | ||
1323 | MIPS_ASE_LOONGSON_EXT); | ||
1324 | break; | ||
1325 | case PRID_REV_LOONGSON3B_R1: | ||
1326 | case PRID_REV_LOONGSON3B_R2: | ||
1327 | c->cputype = CPU_LOONGSON64; | ||
1328 | __cpu_name[cpu] = "ICT Loongson-3"; | ||
1329 | set_elf_platform(cpu, "loongson3b"); | ||
1330 | set_isa(c, MIPS_CPU_ISA_M64R1); | ||
1331 | c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | | ||
1332 | MIPS_ASE_LOONGSON_EXT); | ||
1333 | break; | ||
1334 | } | ||
1335 | |||
1336 | c->options = R4K_OPTS | | ||
1337 | MIPS_CPU_FPU | MIPS_CPU_LLSC | | ||
1338 | MIPS_CPU_32FPR; | ||
1339 | c->tlbsize = 64; | ||
1340 | set_cpu_asid_mask(c, MIPS_ENTRYHI_ASID); | ||
1341 | c->writecombine = _CACHE_UNCACHED_ACCELERATED; | ||
1342 | break; | ||
1343 | case PRID_IMP_LOONGSON_32: /* Loongson-1 */ | ||
1344 | decode_configs(c); | ||
1345 | |||
1346 | c->cputype = CPU_LOONGSON32; | ||
1347 | |||
1348 | switch (c->processor_id & PRID_REV_MASK) { | ||
1349 | case PRID_REV_LOONGSON1B: | ||
1350 | __cpu_name[cpu] = "Loongson 1B"; | ||
1351 | break; | ||
1352 | } | ||
1353 | |||
1354 | break; | ||
1355 | } | ||
1356 | } | ||
1357 | |||
1358 | static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu) | ||
1359 | { | ||
1360 | c->writecombine = _CACHE_UNCACHED_ACCELERATED; | ||
1361 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1362 | case PRID_IMP_QEMU_GENERIC: | ||
1363 | c->writecombine = _CACHE_UNCACHED; | ||
1364 | c->cputype = CPU_QEMU_GENERIC; | ||
1365 | __cpu_name[cpu] = "MIPS GENERIC QEMU"; | ||
1366 | break; | ||
1367 | case PRID_IMP_4KC: | ||
1368 | c->cputype = CPU_4KC; | ||
1369 | c->writecombine = _CACHE_UNCACHED; | ||
1370 | __cpu_name[cpu] = "MIPS 4Kc"; | ||
1371 | break; | ||
1372 | case PRID_IMP_4KEC: | ||
1373 | case PRID_IMP_4KECR2: | ||
1374 | c->cputype = CPU_4KEC; | ||
1375 | c->writecombine = _CACHE_UNCACHED; | ||
1376 | __cpu_name[cpu] = "MIPS 4KEc"; | ||
1377 | break; | ||
1378 | case PRID_IMP_4KSC: | ||
1379 | case PRID_IMP_4KSD: | ||
1380 | c->cputype = CPU_4KSC; | ||
1381 | c->writecombine = _CACHE_UNCACHED; | ||
1382 | __cpu_name[cpu] = "MIPS 4KSc"; | ||
1383 | break; | ||
1384 | case PRID_IMP_5KC: | ||
1385 | c->cputype = CPU_5KC; | ||
1386 | c->writecombine = _CACHE_UNCACHED; | ||
1387 | __cpu_name[cpu] = "MIPS 5Kc"; | ||
1388 | break; | ||
1389 | case PRID_IMP_5KE: | ||
1390 | c->cputype = CPU_5KE; | ||
1391 | c->writecombine = _CACHE_UNCACHED; | ||
1392 | __cpu_name[cpu] = "MIPS 5KE"; | ||
1393 | break; | ||
1394 | case PRID_IMP_20KC: | ||
1395 | c->cputype = CPU_20KC; | ||
1396 | c->writecombine = _CACHE_UNCACHED; | ||
1397 | __cpu_name[cpu] = "MIPS 20Kc"; | ||
1398 | break; | ||
1399 | case PRID_IMP_24K: | ||
1400 | c->cputype = CPU_24K; | ||
1401 | c->writecombine = _CACHE_UNCACHED; | ||
1402 | __cpu_name[cpu] = "MIPS 24Kc"; | ||
1403 | break; | ||
1404 | case PRID_IMP_24KE: | ||
1405 | c->cputype = CPU_24K; | ||
1406 | c->writecombine = _CACHE_UNCACHED; | ||
1407 | __cpu_name[cpu] = "MIPS 24KEc"; | ||
1408 | break; | ||
1409 | case PRID_IMP_25KF: | ||
1410 | c->cputype = CPU_25KF; | ||
1411 | c->writecombine = _CACHE_UNCACHED; | ||
1412 | __cpu_name[cpu] = "MIPS 25Kc"; | ||
1413 | break; | ||
1414 | case PRID_IMP_34K: | ||
1415 | c->cputype = CPU_34K; | ||
1416 | c->writecombine = _CACHE_UNCACHED; | ||
1417 | __cpu_name[cpu] = "MIPS 34Kc"; | ||
1418 | cpu_set_mt_per_tc_perf(c); | ||
1419 | break; | ||
1420 | case PRID_IMP_74K: | ||
1421 | c->cputype = CPU_74K; | ||
1422 | c->writecombine = _CACHE_UNCACHED; | ||
1423 | __cpu_name[cpu] = "MIPS 74Kc"; | ||
1424 | break; | ||
1425 | case PRID_IMP_M14KC: | ||
1426 | c->cputype = CPU_M14KC; | ||
1427 | c->writecombine = _CACHE_UNCACHED; | ||
1428 | __cpu_name[cpu] = "MIPS M14Kc"; | ||
1429 | break; | ||
1430 | case PRID_IMP_M14KEC: | ||
1431 | c->cputype = CPU_M14KEC; | ||
1432 | c->writecombine = _CACHE_UNCACHED; | ||
1433 | __cpu_name[cpu] = "MIPS M14KEc"; | ||
1434 | break; | ||
1435 | case PRID_IMP_1004K: | ||
1436 | c->cputype = CPU_1004K; | ||
1437 | c->writecombine = _CACHE_UNCACHED; | ||
1438 | __cpu_name[cpu] = "MIPS 1004Kc"; | ||
1439 | cpu_set_mt_per_tc_perf(c); | ||
1440 | break; | ||
1441 | case PRID_IMP_1074K: | ||
1442 | c->cputype = CPU_1074K; | ||
1443 | c->writecombine = _CACHE_UNCACHED; | ||
1444 | __cpu_name[cpu] = "MIPS 1074Kc"; | ||
1445 | break; | ||
1446 | case PRID_IMP_INTERAPTIV_UP: | ||
1447 | c->cputype = CPU_INTERAPTIV; | ||
1448 | __cpu_name[cpu] = "MIPS interAptiv"; | ||
1449 | cpu_set_mt_per_tc_perf(c); | ||
1450 | break; | ||
1451 | case PRID_IMP_INTERAPTIV_MP: | ||
1452 | c->cputype = CPU_INTERAPTIV; | ||
1453 | __cpu_name[cpu] = "MIPS interAptiv (multi)"; | ||
1454 | cpu_set_mt_per_tc_perf(c); | ||
1455 | break; | ||
1456 | case PRID_IMP_PROAPTIV_UP: | ||
1457 | c->cputype = CPU_PROAPTIV; | ||
1458 | __cpu_name[cpu] = "MIPS proAptiv"; | ||
1459 | break; | ||
1460 | case PRID_IMP_PROAPTIV_MP: | ||
1461 | c->cputype = CPU_PROAPTIV; | ||
1462 | __cpu_name[cpu] = "MIPS proAptiv (multi)"; | ||
1463 | break; | ||
1464 | case PRID_IMP_P5600: | ||
1465 | c->cputype = CPU_P5600; | ||
1466 | __cpu_name[cpu] = "MIPS P5600"; | ||
1467 | break; | ||
1468 | case PRID_IMP_P6600: | ||
1469 | c->cputype = CPU_P6600; | ||
1470 | __cpu_name[cpu] = "MIPS P6600"; | ||
1471 | break; | ||
1472 | case PRID_IMP_I6400: | ||
1473 | c->cputype = CPU_I6400; | ||
1474 | __cpu_name[cpu] = "MIPS I6400"; | ||
1475 | break; | ||
1476 | case PRID_IMP_I6500: | ||
1477 | c->cputype = CPU_I6500; | ||
1478 | __cpu_name[cpu] = "MIPS I6500"; | ||
1479 | break; | ||
1480 | case PRID_IMP_M5150: | ||
1481 | c->cputype = CPU_M5150; | ||
1482 | __cpu_name[cpu] = "MIPS M5150"; | ||
1483 | break; | ||
1484 | case PRID_IMP_M6250: | ||
1485 | c->cputype = CPU_M6250; | ||
1486 | __cpu_name[cpu] = "MIPS M6250"; | ||
1487 | break; | ||
1488 | } | ||
1489 | |||
1490 | decode_configs(c); | ||
1491 | |||
1492 | spram_config(); | ||
1493 | |||
1494 | mm_config(c); | ||
1495 | |||
1496 | switch (__get_cpu_type(c->cputype)) { | ||
1497 | case CPU_M5150: | ||
1498 | case CPU_P5600: | ||
1499 | set_isa(c, MIPS_CPU_ISA_M32R5); | ||
1500 | break; | ||
1501 | case CPU_I6500: | ||
1502 | c->options |= MIPS_CPU_SHARED_FTLB_ENTRIES; | ||
1503 | fallthrough; | ||
1504 | case CPU_I6400: | ||
1505 | c->options |= MIPS_CPU_SHARED_FTLB_RAM; | ||
1506 | fallthrough; | ||
1507 | default: | ||
1508 | break; | ||
1509 | } | ||
1510 | |||
1511 | /* Recent MIPS cores use the implementation-dependent ExcCode 16 for | ||
1512 | * cache/FTLB parity exceptions. | ||
1513 | */ | ||
1514 | switch (__get_cpu_type(c->cputype)) { | ||
1515 | case CPU_PROAPTIV: | ||
1516 | case CPU_P5600: | ||
1517 | case CPU_P6600: | ||
1518 | case CPU_I6400: | ||
1519 | case CPU_I6500: | ||
1520 | c->options |= MIPS_CPU_FTLBPAREX; | ||
1521 | break; | ||
1522 | } | ||
1523 | } | ||
1524 | |||
1525 | static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu) | ||
1526 | { | ||
1527 | decode_configs(c); | ||
1528 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1529 | case PRID_IMP_AU1_REV1: | ||
1530 | case PRID_IMP_AU1_REV2: | ||
1531 | c->cputype = CPU_ALCHEMY; | ||
1532 | switch ((c->processor_id >> 24) & 0xff) { | ||
1533 | case 0: | ||
1534 | __cpu_name[cpu] = "Au1000"; | ||
1535 | break; | ||
1536 | case 1: | ||
1537 | __cpu_name[cpu] = "Au1500"; | ||
1538 | break; | ||
1539 | case 2: | ||
1540 | __cpu_name[cpu] = "Au1100"; | ||
1541 | break; | ||
1542 | case 3: | ||
1543 | __cpu_name[cpu] = "Au1550"; | ||
1544 | break; | ||
1545 | case 4: | ||
1546 | __cpu_name[cpu] = "Au1200"; | ||
1547 | if ((c->processor_id & PRID_REV_MASK) == 2) | ||
1548 | __cpu_name[cpu] = "Au1250"; | ||
1549 | break; | ||
1550 | case 5: | ||
1551 | __cpu_name[cpu] = "Au1210"; | ||
1552 | break; | ||
1553 | default: | ||
1554 | __cpu_name[cpu] = "Au1xxx"; | ||
1555 | break; | ||
1556 | } | ||
1557 | break; | ||
1558 | } | ||
1559 | } | ||
1560 | |||
1561 | static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu) | ||
1562 | { | ||
1563 | decode_configs(c); | ||
1564 | |||
1565 | c->writecombine = _CACHE_UNCACHED_ACCELERATED; | ||
1566 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1567 | case PRID_IMP_SB1: | ||
1568 | c->cputype = CPU_SB1; | ||
1569 | __cpu_name[cpu] = "SiByte SB1"; | ||
1570 | /* FPU in pass1 is known to have issues. */ | ||
1571 | if ((c->processor_id & PRID_REV_MASK) < 0x02) | ||
1572 | c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR); | ||
1573 | break; | ||
1574 | case PRID_IMP_SB1A: | ||
1575 | c->cputype = CPU_SB1A; | ||
1576 | __cpu_name[cpu] = "SiByte SB1A"; | ||
1577 | break; | ||
1578 | } | ||
1579 | } | ||
1580 | |||
1581 | static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu) | ||
1582 | { | ||
1583 | decode_configs(c); | ||
1584 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1585 | case PRID_IMP_SR71000: | ||
1586 | c->cputype = CPU_SR71000; | ||
1587 | __cpu_name[cpu] = "Sandcraft SR71000"; | ||
1588 | c->scache.ways = 8; | ||
1589 | c->tlbsize = 64; | ||
1590 | break; | ||
1591 | } | ||
1592 | } | ||
1593 | |||
1594 | static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu) | ||
1595 | { | ||
1596 | decode_configs(c); | ||
1597 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1598 | case PRID_IMP_PR4450: | ||
1599 | c->cputype = CPU_PR4450; | ||
1600 | __cpu_name[cpu] = "Philips PR4450"; | ||
1601 | set_isa(c, MIPS_CPU_ISA_M32R1); | ||
1602 | break; | ||
1603 | } | ||
1604 | } | ||
1605 | |||
1606 | static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu) | ||
1607 | { | ||
1608 | decode_configs(c); | ||
1609 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1610 | case PRID_IMP_BMIPS32_REV4: | ||
1611 | case PRID_IMP_BMIPS32_REV8: | ||
1612 | c->cputype = CPU_BMIPS32; | ||
1613 | __cpu_name[cpu] = "Broadcom BMIPS32"; | ||
1614 | set_elf_platform(cpu, "bmips32"); | ||
1615 | break; | ||
1616 | case PRID_IMP_BMIPS3300: | ||
1617 | case PRID_IMP_BMIPS3300_ALT: | ||
1618 | case PRID_IMP_BMIPS3300_BUG: | ||
1619 | c->cputype = CPU_BMIPS3300; | ||
1620 | __cpu_name[cpu] = "Broadcom BMIPS3300"; | ||
1621 | set_elf_platform(cpu, "bmips3300"); | ||
1622 | break; | ||
1623 | case PRID_IMP_BMIPS43XX: { | ||
1624 | int rev = c->processor_id & PRID_REV_MASK; | ||
1625 | |||
1626 | if (rev >= PRID_REV_BMIPS4380_LO && | ||
1627 | rev <= PRID_REV_BMIPS4380_HI) { | ||
1628 | c->cputype = CPU_BMIPS4380; | ||
1629 | __cpu_name[cpu] = "Broadcom BMIPS4380"; | ||
1630 | set_elf_platform(cpu, "bmips4380"); | ||
1631 | c->options |= MIPS_CPU_RIXI; | ||
1632 | } else { | ||
1633 | c->cputype = CPU_BMIPS4350; | ||
1634 | __cpu_name[cpu] = "Broadcom BMIPS4350"; | ||
1635 | set_elf_platform(cpu, "bmips4350"); | ||
1636 | } | ||
1637 | break; | ||
1638 | } | ||
1639 | case PRID_IMP_BMIPS5000: | ||
1640 | case PRID_IMP_BMIPS5200: | ||
1641 | c->cputype = CPU_BMIPS5000; | ||
1642 | if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_BMIPS5200) | ||
1643 | __cpu_name[cpu] = "Broadcom BMIPS5200"; | ||
1644 | else | ||
1645 | __cpu_name[cpu] = "Broadcom BMIPS5000"; | ||
1646 | set_elf_platform(cpu, "bmips5000"); | ||
1647 | c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI; | ||
1648 | break; | ||
1649 | } | ||
1650 | } | ||
1651 | |||
1652 | static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu) | ||
1653 | { | ||
1654 | decode_configs(c); | ||
1655 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1656 | case PRID_IMP_CAVIUM_CN38XX: | ||
1657 | case PRID_IMP_CAVIUM_CN31XX: | ||
1658 | case PRID_IMP_CAVIUM_CN30XX: | ||
1659 | c->cputype = CPU_CAVIUM_OCTEON; | ||
1660 | __cpu_name[cpu] = "Cavium Octeon"; | ||
1661 | goto platform; | ||
1662 | case PRID_IMP_CAVIUM_CN58XX: | ||
1663 | case PRID_IMP_CAVIUM_CN56XX: | ||
1664 | case PRID_IMP_CAVIUM_CN50XX: | ||
1665 | case PRID_IMP_CAVIUM_CN52XX: | ||
1666 | c->cputype = CPU_CAVIUM_OCTEON_PLUS; | ||
1667 | __cpu_name[cpu] = "Cavium Octeon+"; | ||
1668 | platform: | ||
1669 | set_elf_platform(cpu, "octeon"); | ||
1670 | break; | ||
1671 | case PRID_IMP_CAVIUM_CN61XX: | ||
1672 | case PRID_IMP_CAVIUM_CN63XX: | ||
1673 | case PRID_IMP_CAVIUM_CN66XX: | ||
1674 | case PRID_IMP_CAVIUM_CN68XX: | ||
1675 | case PRID_IMP_CAVIUM_CNF71XX: | ||
1676 | c->cputype = CPU_CAVIUM_OCTEON2; | ||
1677 | __cpu_name[cpu] = "Cavium Octeon II"; | ||
1678 | set_elf_platform(cpu, "octeon2"); | ||
1679 | break; | ||
1680 | case PRID_IMP_CAVIUM_CN70XX: | ||
1681 | case PRID_IMP_CAVIUM_CN73XX: | ||
1682 | case PRID_IMP_CAVIUM_CNF75XX: | ||
1683 | case PRID_IMP_CAVIUM_CN78XX: | ||
1684 | c->cputype = CPU_CAVIUM_OCTEON3; | ||
1685 | __cpu_name[cpu] = "Cavium Octeon III"; | ||
1686 | set_elf_platform(cpu, "octeon3"); | ||
1687 | break; | ||
1688 | default: | ||
1689 | printk(KERN_INFO "Unknown Octeon chip!\n"); | ||
1690 | c->cputype = CPU_UNKNOWN; | ||
1691 | break; | ||
1692 | } | ||
1693 | } | ||
1694 | |||
1695 | #ifdef CONFIG_CPU_LOONGSON64 | ||
1696 | #include <loongson_regs.h> | ||
1697 | |||
1698 | static inline void decode_cpucfg(struct cpuinfo_mips *c) | ||
1699 | { | ||
1700 | u32 cfg1 = read_cpucfg(LOONGSON_CFG1); | ||
1701 | u32 cfg2 = read_cpucfg(LOONGSON_CFG2); | ||
1702 | u32 cfg3 = read_cpucfg(LOONGSON_CFG3); | ||
1703 | |||
1704 | if (cfg1 & LOONGSON_CFG1_MMI) | ||
1705 | c->ases |= MIPS_ASE_LOONGSON_MMI; | ||
1706 | |||
1707 | if (cfg2 & LOONGSON_CFG2_LEXT1) | ||
1708 | c->ases |= MIPS_ASE_LOONGSON_EXT; | ||
1709 | |||
1710 | if (cfg2 & LOONGSON_CFG2_LEXT2) | ||
1711 | c->ases |= MIPS_ASE_LOONGSON_EXT2; | ||
1712 | |||
1713 | if (cfg2 & LOONGSON_CFG2_LSPW) { | ||
1714 | c->options |= MIPS_CPU_LDPTE; | ||
1715 | c->guest.options |= MIPS_CPU_LDPTE; | ||
1716 | } | ||
1717 | |||
1718 | if (cfg3 & LOONGSON_CFG3_LCAMP) | ||
1719 | c->ases |= MIPS_ASE_LOONGSON_CAM; | ||
1720 | } | ||
1721 | |||
1722 | static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) | ||
1723 | { | ||
1724 | /* All Loongson processors covered here define ExcCode 16 as GSExc. */ | ||
1725 | c->options |= MIPS_CPU_GSEXCEX; | ||
1726 | |||
1727 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1728 | case PRID_IMP_LOONGSON_64R: /* Loongson-64 Reduced */ | ||
1729 | switch (c->processor_id & PRID_REV_MASK) { | ||
1730 | case PRID_REV_LOONGSON2K_R1_0: | ||
1731 | case PRID_REV_LOONGSON2K_R1_1: | ||
1732 | case PRID_REV_LOONGSON2K_R1_2: | ||
1733 | case PRID_REV_LOONGSON2K_R1_3: | ||
1734 | c->cputype = CPU_LOONGSON64; | ||
1735 | __cpu_name[cpu] = "Loongson-2K"; | ||
1736 | set_elf_platform(cpu, "gs264e"); | ||
1737 | set_isa(c, MIPS_CPU_ISA_M64R2); | ||
1738 | break; | ||
1739 | } | ||
1740 | c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT | | ||
1741 | MIPS_ASE_LOONGSON_EXT2); | ||
1742 | break; | ||
1743 | case PRID_IMP_LOONGSON_64C: /* Loongson-3 Classic */ | ||
1744 | switch (c->processor_id & PRID_REV_MASK) { | ||
1745 | case PRID_REV_LOONGSON3A_R2_0: | ||
1746 | case PRID_REV_LOONGSON3A_R2_1: | ||
1747 | c->cputype = CPU_LOONGSON64; | ||
1748 | __cpu_name[cpu] = "ICT Loongson-3"; | ||
1749 | set_elf_platform(cpu, "loongson3a"); | ||
1750 | set_isa(c, MIPS_CPU_ISA_M64R2); | ||
1751 | break; | ||
1752 | case PRID_REV_LOONGSON3A_R3_0: | ||
1753 | case PRID_REV_LOONGSON3A_R3_1: | ||
1754 | c->cputype = CPU_LOONGSON64; | ||
1755 | __cpu_name[cpu] = "ICT Loongson-3"; | ||
1756 | set_elf_platform(cpu, "loongson3a"); | ||
1757 | set_isa(c, MIPS_CPU_ISA_M64R2); | ||
1758 | break; | ||
1759 | } | ||
1760 | /* | ||
1761 | * Loongson-3 Classic did not implement the MIPS standard TLBINV, | ||
1762 | * but it did implement TLBINVF and EHINV. Since we currently use | ||
1763 | * only these two features, enable MIPS_CPU_TLBINV as well. | ||
1764 | * | ||
1765 | * Also, some early Loongson-3A2000 parts had the wrong TLB type in | ||
1766 | * their Config register; we correct it here. | ||
1767 | */ | ||
1768 | c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE; | ||
1769 | c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM | | ||
1770 | MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2); | ||
1771 | c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */ | ||
1772 | break; | ||
1773 | case PRID_IMP_LOONGSON_64G: | ||
1774 | c->cputype = CPU_LOONGSON64; | ||
1775 | __cpu_name[cpu] = "ICT Loongson-3"; | ||
1776 | set_elf_platform(cpu, "loongson3a"); | ||
1777 | set_isa(c, MIPS_CPU_ISA_M64R2); | ||
1778 | decode_cpucfg(c); | ||
1779 | break; | ||
1780 | default: | ||
1781 | panic("Unknown Loongson Processor ID!"); | ||
1782 | break; | ||
1783 | } | ||
1784 | |||
1785 | decode_configs(c); | ||
1786 | } | ||
1787 | #else | ||
1788 | static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { } | ||
1789 | #endif | ||
1790 | |||
1791 | static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu) | ||
1792 | { | ||
1793 | decode_configs(c); | ||
1794 | |||
1795 | /* | ||
1796 | * XBurst lacks a config2 register, so the config3 decode was skipped | ||
1797 | * in decode_configs(). | ||
1798 | */ | ||
1799 | decode_config3(c); | ||
1800 | |||
1801 | /* XBurst does not implement the CP0 counter. */ | ||
1802 | c->options &= ~MIPS_CPU_COUNTER; | ||
1803 | BUG_ON(__builtin_constant_p(cpu_has_counter) && cpu_has_counter); | ||
1804 | |||
1805 | /* XBurst has a virtually tagged icache */ | ||
1806 | c->icache.flags |= MIPS_CACHE_VTAG; | ||
1807 | |||
1808 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1809 | |||
1810 | /* XBurst®1 with MXU1.0/MXU1.1 SIMD ISA */ | ||
1811 | case PRID_IMP_XBURST_REV1: | ||
1812 | |||
1813 | /* | ||
1814 | * The XBurst core by default attempts to avoid branch target | ||
1815 | * buffer lookups by detecting & special casing loops. This | ||
1816 | * feature causes BogoMIPS and lpj to be calculated incorrectly. | ||
1817 | * Set cp0 config7 bit 4 to disable this feature. | ||
1818 | */ | ||
1819 | set_c0_config7(MIPS_CONF7_BTB_LOOP_EN); | ||
1820 | |||
1821 | switch (c->processor_id & PRID_COMP_MASK) { | ||
1822 | |||
1823 | /* | ||
1824 | * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0 report | ||
1825 | * themselves as MIPS32r2 compatible in their config0 register, | ||
1826 | * but they don't actually support this ISA. | ||
1827 | */ | ||
1828 | case PRID_COMP_INGENIC_D0: | ||
1829 | c->isa_level &= ~MIPS_CPU_ISA_M32R2; | ||
1830 | |||
1831 | /* FPU is not properly detected on JZ4760(B). */ | ||
1832 | if (c->processor_id == 0x2ed0024f) | ||
1833 | c->options |= MIPS_CPU_FPU; | ||
1834 | |||
1835 | fallthrough; | ||
1836 | |||
1837 | /* | ||
1838 | * The config0 register in XBurst CPUs with a processor ID of | ||
1839 | * PRID_COMP_INGENIC_D0 or PRID_COMP_INGENIC_D1 has an abandoned | ||
1840 | * huge page TLB mode. This mode is not compatible with the MIPS | ||
1841 | * standard; it causes a TLB miss and an infinite loop (line 21 of | ||
1842 | * tlb-funcs.S) when starting the init process. | ||
1843 | * After chip reset the default is HPTLB mode, so write 0xa9000000 | ||
1844 | * to CP0 register 5 sel 4 to switch back to VTLB mode and prevent | ||
1845 | * getting stuck. | ||
1846 | */ | ||
1847 | case PRID_COMP_INGENIC_D1: | ||
1848 | write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS); | ||
1849 | break; | ||
1850 | |||
1851 | default: | ||
1852 | break; | ||
1853 | } | ||
1854 | fallthrough; | ||
1855 | |||
1856 | /* XBurst®1 with MXU2.0 SIMD ISA */ | ||
1857 | case PRID_IMP_XBURST_REV2: | ||
1858 | /* Ingenic uses the WA bit to achieve write-combine memory writes */ | ||
1859 | c->writecombine = _CACHE_CACHABLE_WA; | ||
1860 | c->cputype = CPU_XBURST; | ||
1861 | __cpu_name[cpu] = "Ingenic XBurst"; | ||
1862 | break; | ||
1863 | |||
1864 | /* XBurst®2 with MXU2.1 SIMD ISA */ | ||
1865 | case PRID_IMP_XBURST2: | ||
1866 | c->cputype = CPU_XBURST; | ||
1867 | __cpu_name[cpu] = "Ingenic XBurst II"; | ||
1868 | break; | ||
1869 | |||
1870 | default: | ||
1871 | panic("Unknown Ingenic Processor ID!"); | ||
1872 | break; | ||
1873 | } | ||
1874 | } | ||
1875 | |||
1876 | static inline void cpu_probe_netlogic(struct cpuinfo_mips *c, int cpu) | ||
1877 | { | ||
1878 | decode_configs(c); | ||
1879 | |||
1880 | if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_NETLOGIC_AU13XX) { | ||
1881 | c->cputype = CPU_ALCHEMY; | ||
1882 | __cpu_name[cpu] = "Au1300"; | ||
1883 | /* the rest of this function does not apply to Alchemy */ | ||
1884 | return; | ||
1885 | } | ||
1886 | |||
1887 | c->options = (MIPS_CPU_TLB | | ||
1888 | MIPS_CPU_4KEX | | ||
1889 | MIPS_CPU_COUNTER | | ||
1890 | MIPS_CPU_DIVEC | | ||
1891 | MIPS_CPU_WATCH | | ||
1892 | MIPS_CPU_EJTAG | | ||
1893 | MIPS_CPU_LLSC); | ||
1894 | |||
1895 | switch (c->processor_id & PRID_IMP_MASK) { | ||
1896 | case PRID_IMP_NETLOGIC_XLP2XX: | ||
1897 | case PRID_IMP_NETLOGIC_XLP9XX: | ||
1898 | case PRID_IMP_NETLOGIC_XLP5XX: | ||
1899 | c->cputype = CPU_XLP; | ||
1900 | __cpu_name[cpu] = "Broadcom XLPII"; | ||
1901 | break; | ||
1902 | |||
1903 | case PRID_IMP_NETLOGIC_XLP8XX: | ||
1904 | case PRID_IMP_NETLOGIC_XLP3XX: | ||
1905 | c->cputype = CPU_XLP; | ||
1906 | __cpu_name[cpu] = "Netlogic XLP"; | ||
1907 | break; | ||
1908 | |||
1909 | case PRID_IMP_NETLOGIC_XLR732: | ||
1910 | case PRID_IMP_NETLOGIC_XLR716: | ||
1911 | case PRID_IMP_NETLOGIC_XLR532: | ||
1912 | case PRID_IMP_NETLOGIC_XLR308: | ||
1913 | case PRID_IMP_NETLOGIC_XLR532C: | ||
1914 | case PRID_IMP_NETLOGIC_XLR516C: | ||
1915 | case PRID_IMP_NETLOGIC_XLR508C: | ||
1916 | case PRID_IMP_NETLOGIC_XLR308C: | ||
1917 | c->cputype = CPU_XLR; | ||
1918 | __cpu_name[cpu] = "Netlogic XLR"; | ||
1919 | break; | ||
1920 | |||
1921 | case PRID_IMP_NETLOGIC_XLS608: | ||
1922 | case PRID_IMP_NETLOGIC_XLS408: | ||
1923 | case PRID_IMP_NETLOGIC_XLS404: | ||
1924 | case PRID_IMP_NETLOGIC_XLS208: | ||
1925 | case PRID_IMP_NETLOGIC_XLS204: | ||
1926 | case PRID_IMP_NETLOGIC_XLS108: | ||
1927 | case PRID_IMP_NETLOGIC_XLS104: | ||
1928 | case PRID_IMP_NETLOGIC_XLS616B: | ||
1929 | case PRID_IMP_NETLOGIC_XLS608B: | ||
1930 | case PRID_IMP_NETLOGIC_XLS416B: | ||
1931 | case PRID_IMP_NETLOGIC_XLS412B: | ||
1932 | case PRID_IMP_NETLOGIC_XLS408B: | ||
1933 | case PRID_IMP_NETLOGIC_XLS404B: | ||
1934 | c->cputype = CPU_XLR; | ||
1935 | __cpu_name[cpu] = "Netlogic XLS"; | ||
1936 | break; | ||
1937 | |||
1938 | default: | ||
1939 | pr_info("Unknown Netlogic chip id [%02x]!\n", | ||
1940 | c->processor_id); | ||
1941 | c->cputype = CPU_XLR; | ||
1942 | break; | ||
1943 | } | ||
1944 | |||
1945 | if (c->cputype == CPU_XLP) { | ||
1946 | set_isa(c, MIPS_CPU_ISA_M64R2); | ||
1947 | c->options |= (MIPS_CPU_FPU | MIPS_CPU_ULRI | MIPS_CPU_MCHECK); | ||
1948 | /* This will be updated again after all threads are woken up */ | ||
1949 | c->tlbsize = ((read_c0_config6() >> 16) & 0xffff) + 1; | ||
1950 | } else { | ||
1951 | set_isa(c, MIPS_CPU_ISA_M64R1); | ||
1952 | c->tlbsize = ((read_c0_config1() >> 25) & 0x3f) + 1; | ||
1953 | } | ||
1954 | c->kscratch_mask = 0xf; | ||
1955 | } | ||
1956 | |||
1957 | #ifdef CONFIG_64BIT | ||
1958 | /* For use by uaccess.h */ | ||
1959 | u64 __ua_limit; | ||
1960 | EXPORT_SYMBOL(__ua_limit); | ||
1961 | #endif | ||
1962 | |||
1963 | const char *__cpu_name[NR_CPUS]; | ||
1964 | const char *__elf_platform; | ||
1965 | const char *__elf_base_platform; | ||
1966 | |||
1967 | void cpu_probe(void) | ||
1968 | { | ||
1969 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
1970 | unsigned int cpu = smp_processor_id(); | ||
1971 | |||
1972 | /* | ||
1973 | * Set a default elf platform; the cpu probe may later | ||
1974 | * overwrite it with a more precise value. | ||
1975 | */ | ||
1976 | set_elf_platform(cpu, "mips"); | ||
1977 | |||
1978 | c->processor_id = PRID_IMP_UNKNOWN; | ||
1979 | c->fpu_id = FPIR_IMP_NONE; | ||
1980 | c->cputype = CPU_UNKNOWN; | ||
1981 | c->writecombine = _CACHE_UNCACHED; | ||
1982 | |||
1983 | c->fpu_csr31 = FPU_CSR_RN; | ||
1984 | c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008; | ||
1985 | |||
1986 | c->processor_id = read_c0_prid(); | ||
1987 | switch (c->processor_id & PRID_COMP_MASK) { | ||
1988 | case PRID_COMP_LEGACY: | ||
1989 | cpu_probe_legacy(c, cpu); | ||
1990 | break; | ||
1991 | case PRID_COMP_MIPS: | ||
1992 | cpu_probe_mips(c, cpu); | ||
1993 | break; | ||
1994 | case PRID_COMP_ALCHEMY: | ||
1995 | cpu_probe_alchemy(c, cpu); | ||
1996 | break; | ||
1997 | case PRID_COMP_SIBYTE: | ||
1998 | cpu_probe_sibyte(c, cpu); | ||
1999 | break; | ||
2000 | case PRID_COMP_BROADCOM: | ||
2001 | cpu_probe_broadcom(c, cpu); | ||
2002 | break; | ||
2003 | case PRID_COMP_SANDCRAFT: | ||
2004 | cpu_probe_sandcraft(c, cpu); | ||
2005 | break; | ||
2006 | case PRID_COMP_NXP: | ||
2007 | cpu_probe_nxp(c, cpu); | ||
2008 | break; | ||
2009 | case PRID_COMP_CAVIUM: | ||
2010 | cpu_probe_cavium(c, cpu); | ||
2011 | break; | ||
2012 | case PRID_COMP_LOONGSON: | ||
2013 | cpu_probe_loongson(c, cpu); | ||
2014 | break; | ||
2015 | case PRID_COMP_INGENIC_13: | ||
2016 | case PRID_COMP_INGENIC_D0: | ||
2017 | case PRID_COMP_INGENIC_D1: | ||
2018 | case PRID_COMP_INGENIC_E1: | ||
2019 | cpu_probe_ingenic(c, cpu); | ||
2020 | break; | ||
2021 | case PRID_COMP_NETLOGIC: | ||
2022 | cpu_probe_netlogic(c, cpu); | ||
2023 | break; | ||
2024 | } | ||
2025 | |||
2026 | BUG_ON(!__cpu_name[cpu]); | ||
2027 | BUG_ON(c->cputype == CPU_UNKNOWN); | ||
2028 | |||
2029 | /* | ||
2030 | * Platform code can force the cpu type to optimize code | ||
2031 | * generation. In that case make sure the cpu type is set up | ||
2032 | * correctly by hand, otherwise it could trigger some nasty bugs. | ||
2033 | */ | ||
2034 | BUG_ON(current_cpu_type() != c->cputype); | ||
2035 | |||
2036 | if (cpu_has_rixi) { | ||
2037 | /* Enable the RIXI exceptions */ | ||
2038 | set_c0_pagegrain(PG_IEC); | ||
2039 | back_to_back_c0_hazard(); | ||
2040 | /* Verify the IEC bit is set */ | ||
2041 | if (read_c0_pagegrain() & PG_IEC) | ||
2042 | c->options |= MIPS_CPU_RIXIEX; | ||
2043 | } | ||
2044 | |||
2045 | if (mips_fpu_disabled) | ||
2046 | c->options &= ~MIPS_CPU_FPU; | ||
2047 | |||
2048 | if (mips_dsp_disabled) | ||
2049 | c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P); | ||
2050 | |||
2051 | if (mips_htw_disabled) { | ||
2052 | c->options &= ~MIPS_CPU_HTW; | ||
2053 | write_c0_pwctl(read_c0_pwctl() & | ||
2054 | ~(1 << MIPS_PWCTL_PWEN_SHIFT)); | ||
2055 | } | ||
2056 | |||
2057 | if (c->options & MIPS_CPU_FPU) | ||
2058 | cpu_set_fpu_opts(c); | ||
2059 | else | ||
2060 | cpu_set_nofpu_opts(c); | ||
2061 | |||
2062 | if (cpu_has_mips_r2_r6) { | ||
2063 | c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1; | ||
2064 | /* R2 has Performance Counter Interrupt indicator */ | ||
2065 | c->options |= MIPS_CPU_PCI; | ||
2066 | } | ||
2067 | else | ||
2068 | c->srsets = 1; | ||
2069 | |||
2070 | if (cpu_has_mips_r6) | ||
2071 | elf_hwcap |= HWCAP_MIPS_R6; | ||
2072 | |||
2073 | if (cpu_has_msa) { | ||
2074 | c->msa_id = cpu_get_msa_id(); | ||
2075 | WARN(c->msa_id & MSA_IR_WRPF, | ||
2076 | "Vector register partitioning unimplemented!"); | ||
2077 | elf_hwcap |= HWCAP_MIPS_MSA; | ||
2078 | } | ||
2079 | |||
2080 | if (cpu_has_mips16) | ||
2081 | elf_hwcap |= HWCAP_MIPS_MIPS16; | ||
2082 | |||
2083 | if (cpu_has_mdmx) | ||
2084 | elf_hwcap |= HWCAP_MIPS_MDMX; | ||
2085 | |||
2086 | if (cpu_has_mips3d) | ||
2087 | elf_hwcap |= HWCAP_MIPS_MIPS3D; | ||
2088 | |||
2089 | if (cpu_has_smartmips) | ||
2090 | elf_hwcap |= HWCAP_MIPS_SMARTMIPS; | ||
2091 | |||
2092 | if (cpu_has_dsp) | ||
2093 | elf_hwcap |= HWCAP_MIPS_DSP; | ||
2094 | |||
2095 | if (cpu_has_dsp2) | ||
2096 | elf_hwcap |= HWCAP_MIPS_DSP2; | ||
2097 | |||
2098 | if (cpu_has_dsp3) | ||
2099 | elf_hwcap |= HWCAP_MIPS_DSP3; | ||
2100 | |||
2101 | if (cpu_has_mips16e2) | ||
2102 | elf_hwcap |= HWCAP_MIPS_MIPS16E2; | ||
2103 | |||
2104 | if (cpu_has_loongson_mmi) | ||
2105 | elf_hwcap |= HWCAP_LOONGSON_MMI; | ||
2106 | |||
2107 | if (cpu_has_loongson_ext) | ||
2108 | elf_hwcap |= HWCAP_LOONGSON_EXT; | ||
2109 | |||
2110 | if (cpu_has_loongson_ext2) | ||
2111 | elf_hwcap |= HWCAP_LOONGSON_EXT2; | ||
2112 | |||
2113 | if (cpu_has_vz) | ||
2114 | cpu_probe_vz(c); | ||
2115 | |||
2116 | cpu_probe_vmbits(c); | ||
2117 | |||
2118 | /* Synthesize CPUCFG data if running on Loongson processors; | ||
2119 | * no-op otherwise. | ||
2120 | * | ||
2121 | * This looks at previously probed features, so keep this at the bottom. | ||
2122 | */ | ||
2123 | loongson3_cpucfg_synthesize_data(c); | ||
2124 | |||
2125 | #ifdef CONFIG_64BIT | ||
2126 | if (cpu == 0) | ||
2127 | __ua_limit = ~((1ull << cpu_vmbits) - 1); | ||
2128 | #endif | ||
2129 | } | ||
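As a worked example of the __ua_limit computation at the end of cpu_probe(): with an assumed cpu_vmbits of 40, (1ull << 40) - 1 covers the low 40 address bits, so its complement 0xffffff0000000000 flags any address with one of the high 24 bits set as out of range for user access. A standalone check:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	unsigned int cpu_vmbits = 40;	/* assumed; probed at boot on real hardware */
	uint64_t ua_limit = ~((1ull << cpu_vmbits) - 1);

	printf("__ua_limit = %#018llx\n", (unsigned long long)ua_limit);
	/* prints 0xffffff0000000000 */
	return 0;
}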
2130 | |||
2131 | void cpu_report(void) | ||
2132 | { | ||
2133 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
2134 | |||
2135 | pr_info("CPU%d revision is: %08x (%s)\n", | ||
2136 | smp_processor_id(), c->processor_id, cpu_name_string()); | ||
2137 | if (c->options & MIPS_CPU_FPU) | ||
2138 | pr_info("FPU revision is: %08x\n", c->fpu_id); | ||
2139 | if (cpu_has_msa) | ||
2140 | pr_info("MSA revision is: %08x\n", c->msa_id); | ||
2141 | } | ||
2142 | |||
2143 | void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster) | ||
2144 | { | ||
2145 | /* Ensure the cluster number fits in the field */ | ||
2146 | WARN_ON(cluster > (MIPS_GLOBALNUMBER_CLUSTER >> | ||
2147 | MIPS_GLOBALNUMBER_CLUSTER_SHF)); | ||
2148 | |||
2149 | cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CLUSTER; | ||
2150 | cpuinfo->globalnumber |= cluster << MIPS_GLOBALNUMBER_CLUSTER_SHF; | ||
2151 | } | ||
2152 | |||
2153 | void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core) | ||
2154 | { | ||
2155 | /* Ensure the core number fits in the field */ | ||
2156 | WARN_ON(core > (MIPS_GLOBALNUMBER_CORE >> MIPS_GLOBALNUMBER_CORE_SHF)); | ||
2157 | |||
2158 | cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CORE; | ||
2159 | cpuinfo->globalnumber |= core << MIPS_GLOBALNUMBER_CORE_SHF; | ||
2160 | } | ||
2161 | |||
2162 | void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe) | ||
2163 | { | ||
2164 | /* Ensure the VP(E) ID fits in the field */ | ||
2165 | WARN_ON(vpe > (MIPS_GLOBALNUMBER_VP >> MIPS_GLOBALNUMBER_VP_SHF)); | ||
2166 | |||
2167 | /* Ensure we're not using VP(E)s without support */ | ||
2168 | WARN_ON(vpe && !IS_ENABLED(CONFIG_MIPS_MT_SMP) && | ||
2169 | !IS_ENABLED(CONFIG_CPU_MIPSR6)); | ||
2170 | |||
2171 | cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP; | ||
2172 | cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF; | ||
2173 | } | ||
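cpu_set_cluster(), cpu_set_core() and cpu_set_vpe_id() all perform the same read-modify-write on the packed globalnumber field: clear the field with its mask, then OR in the shifted value. A self-contained sketch of that packing with illustrative masks and shifts (the real ones are the MIPS_GLOBALNUMBER_* constants in asm/mipsregs.h):

#include <stdint.h>
#include <stdio.h>

/* Illustrative layout only -- not the authoritative field positions. */
#define GN_CLUSTER	0x003f0000
#define GN_CLUSTER_SHF	16
#define GN_CORE		0x0000ff00
#define GN_CORE_SHF	8
#define GN_VP		0x000000ff
#define GN_VP_SHF	0

static uint32_t set_field(uint32_t gn, uint32_t mask, int shf, uint32_t val)
{
	gn &= ~mask;		/* clear the old field */
	gn |= val << shf;	/* insert the new value */
	return gn;
}

int main(void)
{
	uint32_t gn = 0;

	gn = set_field(gn, GN_CLUSTER, GN_CLUSTER_SHF, 1);
	gn = set_field(gn, GN_CORE, GN_CORE_SHF, 3);
	gn = set_field(gn, GN_VP, GN_VP_SHF, 2);
	printf("globalnumber = %#x\n", gn);	/* prints 0x10302 */
	return 0;
}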
diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c new file mode 100644 index 000000000..abdbbe8c5 --- /dev/null +++ b/arch/mips/kernel/cpu-r3k-probe.c | |||
@@ -0,0 +1,171 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Processor capabilities determination functions. | ||
4 | * | ||
5 | * Copyright (C) xxxx the Anonymous | ||
6 | * Copyright (C) 1994 - 2006 Ralf Baechle | ||
7 | * Copyright (C) 2003, 2004 Maciej W. Rozycki | ||
8 | * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. | ||
9 | */ | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/ptrace.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/stddef.h> | ||
15 | #include <linux/export.h> | ||
16 | |||
17 | #include <asm/bugs.h> | ||
18 | #include <asm/cpu.h> | ||
19 | #include <asm/cpu-features.h> | ||
20 | #include <asm/cpu-type.h> | ||
21 | #include <asm/fpu.h> | ||
22 | #include <asm/mipsregs.h> | ||
23 | #include <asm/elf.h> | ||
24 | |||
25 | #include "fpu-probe.h" | ||
26 | |||
27 | /* Hardware capabilities */ | ||
28 | unsigned int elf_hwcap __read_mostly; | ||
29 | EXPORT_SYMBOL_GPL(elf_hwcap); | ||
30 | |||
31 | void __init check_bugs32(void) | ||
32 | { | ||
33 | |||
34 | } | ||
35 | |||
36 | /* | ||
37 | * Probe whether the cpu has a config register by toggling the | ||
38 | * alternate cache bit and checking whether it matters. | ||
39 | * It's used by cpu_probe to distinguish between R3000A and R3081. | ||
40 | */ | ||
41 | static inline int cpu_has_confreg(void) | ||
42 | { | ||
43 | #ifdef CONFIG_CPU_R3000 | ||
44 | extern unsigned long r3k_cache_size(unsigned long); | ||
45 | unsigned long size1, size2; | ||
46 | unsigned long cfg = read_c0_conf(); | ||
47 | |||
48 | size1 = r3k_cache_size(ST0_ISC); | ||
49 | write_c0_conf(cfg ^ R30XX_CONF_AC); | ||
50 | size2 = r3k_cache_size(ST0_ISC); | ||
51 | write_c0_conf(cfg); | ||
52 | return size1 != size2; | ||
53 | #else | ||
54 | return 0; | ||
55 | #endif | ||
56 | } | ||
57 | |||
58 | static inline void set_elf_platform(int cpu, const char *plat) | ||
59 | { | ||
60 | if (cpu == 0) | ||
61 | __elf_platform = plat; | ||
62 | } | ||
63 | |||
64 | const char *__cpu_name[NR_CPUS]; | ||
65 | const char *__elf_platform; | ||
66 | const char *__elf_base_platform; | ||
67 | |||
68 | void cpu_probe(void) | ||
69 | { | ||
70 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
71 | unsigned int cpu = smp_processor_id(); | ||
72 | |||
73 | /* | ||
74 | * Set a default elf platform; the cpu probe may later | ||
75 | * overwrite it with a more precise value. | ||
76 | */ | ||
77 | set_elf_platform(cpu, "mips"); | ||
78 | |||
79 | c->processor_id = PRID_IMP_UNKNOWN; | ||
80 | c->fpu_id = FPIR_IMP_NONE; | ||
81 | c->cputype = CPU_UNKNOWN; | ||
82 | c->writecombine = _CACHE_UNCACHED; | ||
83 | |||
84 | c->fpu_csr31 = FPU_CSR_RN; | ||
85 | c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | | ||
86 | FPU_CSR_CONDX | FPU_CSR_FS; | ||
87 | |||
88 | c->srsets = 1; | ||
89 | |||
90 | c->processor_id = read_c0_prid(); | ||
91 | switch (c->processor_id & (PRID_COMP_MASK | PRID_IMP_MASK)) { | ||
92 | case PRID_COMP_LEGACY | PRID_IMP_R2000: | ||
93 | c->cputype = CPU_R2000; | ||
94 | __cpu_name[cpu] = "R2000"; | ||
95 | c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | | ||
96 | MIPS_CPU_NOFPUEX; | ||
97 | if (__cpu_has_fpu()) | ||
98 | c->options |= MIPS_CPU_FPU; | ||
99 | c->tlbsize = 64; | ||
100 | break; | ||
101 | case PRID_COMP_LEGACY | PRID_IMP_R3000: | ||
102 | if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) { | ||
103 | if (cpu_has_confreg()) { | ||
104 | c->cputype = CPU_R3081E; | ||
105 | __cpu_name[cpu] = "R3081"; | ||
106 | } else { | ||
107 | c->cputype = CPU_R3000A; | ||
108 | __cpu_name[cpu] = "R3000A"; | ||
109 | } | ||
110 | } else { | ||
111 | c->cputype = CPU_R3000; | ||
112 | __cpu_name[cpu] = "R3000"; | ||
113 | } | ||
114 | c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | | ||
115 | MIPS_CPU_NOFPUEX; | ||
116 | if (__cpu_has_fpu()) | ||
117 | c->options |= MIPS_CPU_FPU; | ||
118 | c->tlbsize = 64; | ||
119 | break; | ||
120 | case PRID_COMP_LEGACY | PRID_IMP_TX39: | ||
121 | c->options = MIPS_CPU_TLB | MIPS_CPU_TX39_CACHE; | ||
122 | |||
123 | if ((c->processor_id & 0xf0) == (PRID_REV_TX3927 & 0xf0)) { | ||
124 | c->cputype = CPU_TX3927; | ||
125 | __cpu_name[cpu] = "TX3927"; | ||
126 | c->tlbsize = 64; | ||
127 | } else { | ||
128 | switch (c->processor_id & PRID_REV_MASK) { | ||
129 | case PRID_REV_TX3912: | ||
130 | c->cputype = CPU_TX3912; | ||
131 | __cpu_name[cpu] = "TX3912"; | ||
132 | c->tlbsize = 32; | ||
133 | break; | ||
134 | case PRID_REV_TX3922: | ||
135 | c->cputype = CPU_TX3922; | ||
136 | __cpu_name[cpu] = "TX3922"; | ||
137 | c->tlbsize = 64; | ||
138 | break; | ||
139 | } | ||
140 | } | ||
141 | break; | ||
142 | } | ||
143 | |||
144 | BUG_ON(!__cpu_name[cpu]); | ||
145 | BUG_ON(c->cputype == CPU_UNKNOWN); | ||
146 | |||
147 | /* | ||
148 | * Platform code can force the cpu type to optimize code | ||
149 | * generation. In that case make sure the cpu type is set up | ||
150 | * correctly by hand, otherwise it could trigger some nasty bugs. | ||
151 | */ | ||
152 | BUG_ON(current_cpu_type() != c->cputype); | ||
153 | |||
154 | if (mips_fpu_disabled) | ||
155 | c->options &= ~MIPS_CPU_FPU; | ||
156 | |||
157 | if (c->options & MIPS_CPU_FPU) | ||
158 | cpu_set_fpu_opts(c); | ||
159 | else | ||
160 | cpu_set_nofpu_opts(c); | ||
161 | } | ||
162 | |||
163 | void cpu_report(void) | ||
164 | { | ||
165 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
166 | |||
167 | pr_info("CPU%d revision is: %08x (%s)\n", | ||
168 | smp_processor_id(), c->processor_id, cpu_name_string()); | ||
169 | if (c->options & MIPS_CPU_FPU) | ||
170 | pr_info("FPU revision is: %08x\n", c->fpu_id); | ||
171 | } | ||
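cpu_probe() in this file dispatches on the company and implementation fields of the PRId register, read via read_c0_prid(). A sketch of how such a value decomposes, using the conventional company/implementation/revision byte split (the authoritative masks are the PRID_*_MASK constants in asm/cpu.h):

#include <stdint.h>
#include <stdio.h>

/* Conventional PRId field layout; treat these values as illustrative. */
#define COMP_MASK	0x00ff0000	/* company ID */
#define IMP_MASK	0x0000ff00	/* implementation (processor) ID */
#define REV_MASK	0x000000ff	/* revision */

int main(void)
{
	uint32_t prid = 0x00000230;	/* assumed example PRId value */

	printf("company:        %#x\n", (prid & COMP_MASK) >> 16);
	printf("implementation: %#x\n", (prid & IMP_MASK) >> 8);
	printf("revision:       %#x\n", prid & REV_MASK);
	return 0;
}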
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c new file mode 100644 index 000000000..81845ba04 --- /dev/null +++ b/arch/mips/kernel/crash.c | |||
@@ -0,0 +1,103 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include <linux/kernel.h> | ||
3 | #include <linux/smp.h> | ||
4 | #include <linux/reboot.h> | ||
5 | #include <linux/kexec.h> | ||
6 | #include <linux/memblock.h> | ||
7 | #include <linux/crash_dump.h> | ||
8 | #include <linux/delay.h> | ||
9 | #include <linux/irq.h> | ||
10 | #include <linux/types.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/sched/task_stack.h> | ||
13 | |||
14 | /* This keeps track of which cpu is crashing. */ | ||
15 | static int crashing_cpu = -1; | ||
16 | static cpumask_t cpus_in_crash = CPU_MASK_NONE; | ||
17 | |||
18 | #ifdef CONFIG_SMP | ||
19 | static void crash_shutdown_secondary(void *passed_regs) | ||
20 | { | ||
21 | struct pt_regs *regs = passed_regs; | ||
22 | int cpu = smp_processor_id(); | ||
23 | |||
24 | /* | ||
25 | * If we are passed registers, use those. Otherwise get the | ||
26 | * regs from the last interrupt, which should be correct, as | ||
27 | * we are in an interrupt. But if the regs are not there, | ||
28 | * pull them from the top of the stack. They are probably | ||
29 | * wrong, but we need something to keep from crashing again. | ||
30 | */ | ||
31 | if (!regs) | ||
32 | regs = get_irq_regs(); | ||
33 | if (!regs) | ||
34 | regs = task_pt_regs(current); | ||
35 | |||
36 | if (!cpu_online(cpu)) | ||
37 | return; | ||
38 | |||
39 | /* We won't be sent IPIs any more. */ | ||
40 | set_cpu_online(cpu, false); | ||
41 | |||
42 | local_irq_disable(); | ||
43 | if (!cpumask_test_cpu(cpu, &cpus_in_crash)) | ||
44 | crash_save_cpu(regs, cpu); | ||
45 | cpumask_set_cpu(cpu, &cpus_in_crash); | ||
46 | |||
47 | while (!atomic_read(&kexec_ready_to_reboot)) | ||
48 | cpu_relax(); | ||
49 | |||
50 | kexec_reboot(); | ||
51 | |||
52 | /* NOTREACHED */ | ||
53 | } | ||
54 | |||
55 | static void crash_kexec_prepare_cpus(void) | ||
56 | { | ||
57 | static int cpus_stopped; | ||
58 | unsigned int msecs; | ||
59 | unsigned int ncpus; | ||
60 | |||
61 | if (cpus_stopped) | ||
62 | return; | ||
63 | |||
64 | ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */ | ||
65 | |||
66 | smp_call_function(crash_shutdown_secondary, NULL, 0); | ||
67 | smp_wmb(); | ||
68 | |||
69 | /* | ||
70 | * The crash CPU sends an IPI and waits for other CPUs to | ||
71 | * respond. Allow a delay of at least 10 seconds for them. | ||
72 | */ | ||
73 | pr_emerg("Sending IPI to other cpus...\n"); | ||
74 | msecs = 10000; | ||
75 | while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) { | ||
76 | cpu_relax(); | ||
77 | mdelay(1); | ||
78 | } | ||
79 | |||
80 | cpus_stopped = 1; | ||
81 | } | ||
82 | |||
83 | /* Override the weak function in kernel/panic.c */ | ||
84 | void crash_smp_send_stop(void) | ||
85 | { | ||
86 | if (_crash_smp_send_stop) | ||
87 | _crash_smp_send_stop(); | ||
88 | |||
89 | crash_kexec_prepare_cpus(); | ||
90 | } | ||
91 | |||
92 | #else /* !defined(CONFIG_SMP) */ | ||
93 | static void crash_kexec_prepare_cpus(void) {} | ||
94 | #endif /* !defined(CONFIG_SMP) */ | ||
95 | |||
96 | void default_machine_crash_shutdown(struct pt_regs *regs) | ||
97 | { | ||
98 | local_irq_disable(); | ||
99 | crashing_cpu = smp_processor_id(); | ||
100 | crash_save_cpu(regs, crashing_cpu); | ||
101 | crash_kexec_prepare_cpus(); | ||
102 | cpumask_set_cpu(crashing_cpu, &cpus_in_crash); | ||
103 | } | ||
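crash_kexec_prepare_cpus() bounds its wait for the secondary CPUs with a plain millisecond countdown instead of relying on timer infrastructure, which may already be unusable during a crash. The same pattern in isolation, as a user-space sketch with a stand-in condition (all_cpus_in_crash() is hypothetical; the kernel tests cpumask_weight(&cpus_in_crash) and delays with mdelay(1)):

#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Stand-in for "have all other CPUs checked in?" */
static bool all_cpus_in_crash(void)
{
	static int calls;
	return ++calls > 5;	/* pretend they arrive after ~5 ms */
}

int main(void)
{
	unsigned int msecs = 10000;	/* at least 10 seconds, as in the kernel */

	while (!all_cpus_in_crash() && --msecs > 0)
		usleep(1000);	/* the kernel uses mdelay(1) */

	printf(msecs ? "all CPUs stopped\n" : "timed out\n");
	return 0;
}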
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c new file mode 100644 index 000000000..01b2bd95b --- /dev/null +++ b/arch/mips/kernel/crash_dump.c | |||
@@ -0,0 +1,67 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include <linux/highmem.h> | ||
3 | #include <linux/memblock.h> | ||
4 | #include <linux/crash_dump.h> | ||
5 | #include <linux/uaccess.h> | ||
6 | #include <linux/slab.h> | ||
7 | |||
8 | static void *kdump_buf_page; | ||
9 | |||
10 | /** | ||
11 | * copy_oldmem_page - copy one page from "oldmem" | ||
12 | * @pfn: page frame number to be copied | ||
13 | * @buf: target memory address for the copy; this can be in kernel address | ||
14 | * space or user address space (see @userbuf) | ||
15 | * @csize: number of bytes to copy | ||
16 | * @offset: offset in bytes into the page (based on pfn) to begin the copy | ||
17 | * @userbuf: if set, @buf is in user address space, use copy_to_user(), | ||
18 | * otherwise @buf is in kernel address space, use memcpy(). | ||
19 | * | ||
20 | * Copy a page from "oldmem". For this page, there is no pte mapped | ||
21 | * in the current kernel. | ||
22 | * | ||
23 | * Calling copy_to_user() in atomic context is not desirable, so the | ||
24 | * data is first copied to a pre-allocated kernel page and then copied | ||
25 | * to user space in non-atomic context. | ||
26 | */ | ||
27 | ssize_t copy_oldmem_page(unsigned long pfn, char *buf, | ||
28 | size_t csize, unsigned long offset, int userbuf) | ||
29 | { | ||
30 | void *vaddr; | ||
31 | |||
32 | if (!csize) | ||
33 | return 0; | ||
34 | |||
35 | vaddr = kmap_atomic_pfn(pfn); | ||
36 | |||
37 | if (!userbuf) { | ||
38 | memcpy(buf, (vaddr + offset), csize); | ||
39 | kunmap_atomic(vaddr); | ||
40 | } else { | ||
41 | if (!kdump_buf_page) { | ||
42 | pr_warn("Kdump: Kdump buffer page not allocated\n"); | ||
43 | |||
44 | return -EFAULT; | ||
45 | } | ||
46 | copy_page(kdump_buf_page, vaddr); | ||
47 | kunmap_atomic(vaddr); | ||
48 | if (copy_to_user(buf, (kdump_buf_page + offset), csize)) | ||
49 | return -EFAULT; | ||
50 | } | ||
51 | |||
52 | return csize; | ||
53 | } | ||
54 | |||
55 | static int __init kdump_buf_page_init(void) | ||
56 | { | ||
57 | int ret = 0; | ||
58 | |||
59 | kdump_buf_page = kmalloc(PAGE_SIZE, GFP_KERNEL); | ||
60 | if (!kdump_buf_page) { | ||
61 | pr_warn("Kdump: Failed to allocate kdump buffer page\n"); | ||
62 | ret = -ENOMEM; | ||
63 | } | ||
64 | |||
65 | return ret; | ||
66 | } | ||
67 | arch_initcall(kdump_buf_page_init); | ||
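copy_oldmem_page() is consumed page-at-a-time by the vmcore code. A hedged, kernel-style sketch of such a caller, splitting an arbitrary physical range on page boundaries (read_oldmem() is a hypothetical helper, not a function in this tree; the real consumer is fs/proc/vmcore.c):

/*
 * Hypothetical helper: copy `count` bytes of old-kernel memory starting
 * at physical address `paddr` into a kernel buffer, one page at a time.
 */
static ssize_t read_oldmem(char *buf, size_t count, unsigned long paddr)
{
	size_t done = 0;

	while (done < count) {
		unsigned long pfn = paddr >> PAGE_SHIFT;
		unsigned long offset = paddr & (PAGE_SIZE - 1);
		size_t csize = min_t(size_t, count - done, PAGE_SIZE - offset);
		ssize_t ret;

		/* userbuf == 0: the destination is a kernel buffer */
		ret = copy_oldmem_page(pfn, buf + done, csize, offset, 0);
		if (ret < 0)
			return ret;
		done += ret;
		paddr += ret;
	}
	return done;
}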
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c new file mode 100644 index 000000000..6c18a138f --- /dev/null +++ b/arch/mips/kernel/csrc-bcm1480.c | |||
@@ -0,0 +1,48 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2000,2001,2004 Broadcom Corporation | ||
4 | */ | ||
5 | #include <linux/clocksource.h> | ||
6 | #include <linux/sched_clock.h> | ||
7 | |||
8 | #include <asm/addrspace.h> | ||
9 | #include <asm/io.h> | ||
10 | #include <asm/time.h> | ||
11 | |||
12 | #include <asm/sibyte/bcm1480_regs.h> | ||
13 | #include <asm/sibyte/sb1250_regs.h> | ||
14 | #include <asm/sibyte/bcm1480_int.h> | ||
15 | #include <asm/sibyte/bcm1480_scd.h> | ||
16 | |||
17 | #include <asm/sibyte/sb1250.h> | ||
18 | |||
19 | static u64 bcm1480_hpt_read(struct clocksource *cs) | ||
20 | { | ||
21 | return (u64) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); | ||
22 | } | ||
23 | |||
24 | struct clocksource bcm1480_clocksource = { | ||
25 | .name = "zbbus-cycles", | ||
26 | .rating = 200, | ||
27 | .read = bcm1480_hpt_read, | ||
28 | .mask = CLOCKSOURCE_MASK(64), | ||
29 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
30 | }; | ||
31 | |||
32 | static u64 notrace sb1480_read_sched_clock(void) | ||
33 | { | ||
34 | return __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT)); | ||
35 | } | ||
36 | |||
37 | void __init sb1480_clocksource_init(void) | ||
38 | { | ||
39 | struct clocksource *cs = &bcm1480_clocksource; | ||
40 | unsigned int plldiv; | ||
41 | unsigned long zbbus; | ||
42 | |||
43 | plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG))); | ||
44 | zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000); | ||
45 | clocksource_register_hz(cs, zbbus); | ||
46 | |||
47 | sched_clock_register(sb1480_read_sched_clock, 64, zbbus); | ||
48 | } | ||
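The ZBbus rate computed in sb1480_clocksource_init() is effectively plldiv x 25 MHz, written in halves: the even part contributes (plldiv >> 1) x 50 MHz and an odd divider adds another 25 MHz. For an assumed plldiv of 5 that gives 2 x 50 MHz + 25 MHz = 125 MHz:

#include <stdio.h>

int main(void)
{
	unsigned int plldiv = 5;	/* assumed example divider */
	unsigned long zbbus = ((plldiv >> 1) * 50000000) +
			      ((plldiv & 1) * 25000000);

	printf("zbbus = %lu Hz\n", zbbus);	/* prints 125000000 */
	return 0;
}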
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c new file mode 100644 index 000000000..bad740ad3 --- /dev/null +++ b/arch/mips/kernel/csrc-ioasic.c | |||
@@ -0,0 +1,65 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * DEC I/O ASIC's counter clocksource | ||
4 | * | ||
5 | * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org> | ||
6 | */ | ||
7 | #include <linux/clocksource.h> | ||
8 | #include <linux/sched_clock.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <asm/ds1287.h> | ||
12 | #include <asm/time.h> | ||
13 | #include <asm/dec/ioasic.h> | ||
14 | #include <asm/dec/ioasic_addrs.h> | ||
15 | |||
16 | static u64 dec_ioasic_hpt_read(struct clocksource *cs) | ||
17 | { | ||
18 | return ioasic_read(IO_REG_FCTR); | ||
19 | } | ||
20 | |||
21 | static struct clocksource clocksource_dec = { | ||
22 | .name = "dec-ioasic", | ||
23 | .read = dec_ioasic_hpt_read, | ||
24 | .mask = CLOCKSOURCE_MASK(32), | ||
25 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
26 | }; | ||
27 | |||
28 | static u64 notrace dec_ioasic_read_sched_clock(void) | ||
29 | { | ||
30 | return ioasic_read(IO_REG_FCTR); | ||
31 | } | ||
32 | |||
33 | int __init dec_ioasic_clocksource_init(void) | ||
34 | { | ||
35 | unsigned int freq; | ||
36 | u32 start, end; | ||
37 | int i = HZ / 8; | ||
38 | |||
39 | ds1287_timer_state(); | ||
40 | while (!ds1287_timer_state()) | ||
41 | ; | ||
42 | |||
43 | start = dec_ioasic_hpt_read(&clocksource_dec); | ||
44 | |||
45 | while (i--) | ||
46 | while (!ds1287_timer_state()) | ||
47 | ; | ||
48 | |||
49 | end = dec_ioasic_hpt_read(&clocksource_dec); | ||
50 | |||
51 | freq = (end - start) * 8; | ||
52 | |||
53 | /* An early revision of the I/O ASIC didn't have the counter. */ | ||
54 | if (!freq) | ||
55 | return -ENXIO; | ||
56 | |||
57 | pr_info("I/O ASIC clock frequency %uHz\n", freq); | ||
58 | |||
59 | clocksource_dec.rating = 200 + freq / 10000000; | ||
60 | clocksource_register_hz(&clocksource_dec, freq); | ||
61 | |||
62 | sched_clock_register(dec_ioasic_read_sched_clock, 32, freq); | ||
63 | |||
64 | return 0; | ||
65 | } | ||
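The calibration in dec_ioasic_clocksource_init() samples the free-running counter, waits HZ/8 DS1287 periods (one eighth of a second, since the DS1287 ticks HZ times per second), samples again, and scales the delta by 8 to get counts per second. The arithmetic in isolation, with assumed sample values:

#include <stdio.h>

int main(void)
{
	/* Assumed counter samples taken 1/8 second apart (HZ/8 DS1287 ticks). */
	unsigned int start = 123456;
	unsigned int end = start + 3125000;
	unsigned int freq = (end - start) * 8;

	printf("counter frequency = %u Hz\n", freq);	/* prints 25000000 */
	return 0;
}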
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c new file mode 100644 index 000000000..edc4afc08 --- /dev/null +++ b/arch/mips/kernel/csrc-r4k.c | |||
@@ -0,0 +1,130 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2007 by Ralf Baechle | ||
7 | */ | ||
8 | #include <linux/clocksource.h> | ||
9 | #include <linux/cpufreq.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/sched_clock.h> | ||
12 | |||
13 | #include <asm/time.h> | ||
14 | |||
15 | static u64 c0_hpt_read(struct clocksource *cs) | ||
16 | { | ||
17 | return read_c0_count(); | ||
18 | } | ||
19 | |||
20 | static struct clocksource clocksource_mips = { | ||
21 | .name = "MIPS", | ||
22 | .read = c0_hpt_read, | ||
23 | .mask = CLOCKSOURCE_MASK(32), | ||
24 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
25 | }; | ||
26 | |||
27 | static u64 __maybe_unused notrace r4k_read_sched_clock(void) | ||
28 | { | ||
29 | return read_c0_count(); | ||
30 | } | ||
31 | |||
32 | static inline unsigned int rdhwr_count(void) | ||
33 | { | ||
34 | unsigned int count; | ||
35 | |||
36 | __asm__ __volatile__( | ||
37 | " .set push\n" | ||
38 | " .set mips32r2\n" | ||
39 | " rdhwr %0, $2\n" | ||
40 | " .set pop\n" | ||
41 | : "=r" (count)); | ||
42 | |||
43 | return count; | ||
44 | } | ||
45 | |||
46 | static bool rdhwr_count_usable(void) | ||
47 | { | ||
48 | unsigned int prev, curr, i; | ||
49 | |||
50 | /* | ||
51 | * Older QEMUs have a broken implementation of RDHWR for the CP0 count | ||
52 | * which always returns a constant value. Try to identify this and don't | ||
53 | * use it in the VDSO if it is broken. This workaround can be removed | ||
54 | * once the fix has been in QEMU stable for a reasonable amount of time. | ||
55 | */ | ||
56 | for (i = 0, prev = rdhwr_count(); i < 100; i++) { | ||
57 | curr = rdhwr_count(); | ||
58 | |||
59 | if (curr != prev) | ||
60 | return true; | ||
61 | |||
62 | prev = curr; | ||
63 | } | ||
64 | |||
65 | pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n"); | ||
66 | return false; | ||
67 | } | ||
68 | |||
69 | #ifdef CONFIG_CPU_FREQ | ||
70 | |||
71 | static bool __read_mostly r4k_clock_unstable; | ||
72 | |||
73 | static void r4k_clocksource_unstable(char *reason) | ||
74 | { | ||
75 | if (r4k_clock_unstable) | ||
76 | return; | ||
77 | |||
78 | r4k_clock_unstable = true; | ||
79 | |||
80 | pr_info("R4K timer is unstable due to %s\n", reason); | ||
81 | |||
82 | clocksource_mark_unstable(&clocksource_mips); | ||
83 | } | ||
84 | |||
85 | static int r4k_cpufreq_callback(struct notifier_block *nb, | ||
86 | unsigned long val, void *data) | ||
87 | { | ||
88 | if (val == CPUFREQ_POSTCHANGE) | ||
89 | r4k_clocksource_unstable("CPU frequency change"); | ||
90 | |||
91 | return 0; | ||
92 | } | ||
93 | |||
94 | static struct notifier_block r4k_cpufreq_notifier = { | ||
95 | .notifier_call = r4k_cpufreq_callback, | ||
96 | }; | ||
97 | |||
98 | static int __init r4k_register_cpufreq_notifier(void) | ||
99 | { | ||
100 | return cpufreq_register_notifier(&r4k_cpufreq_notifier, | ||
101 | CPUFREQ_TRANSITION_NOTIFIER); | ||
102 | |||
103 | } | ||
104 | core_initcall(r4k_register_cpufreq_notifier); | ||
105 | |||
106 | #endif /* CONFIG_CPU_FREQ */ | ||
107 | |||
108 | int __init init_r4k_clocksource(void) | ||
109 | { | ||
110 | if (!cpu_has_counter || !mips_hpt_frequency) | ||
111 | return -ENXIO; | ||
112 | |||
113 | /* Calculate a somewhat reasonable rating value */ | ||
114 | clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000; | ||
115 | |||
116 | /* | ||
117 | * R2 onwards makes the count accessible to user mode so it can be used | ||
118 | * by the VDSO (HWREna is configured by configure_hwrena()). | ||
119 | */ | ||
120 | if (cpu_has_mips_r2_r6 && rdhwr_count_usable()) | ||
121 | clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K; | ||
122 | |||
123 | clocksource_register_hz(&clocksource_mips, mips_hpt_frequency); | ||
124 | |||
125 | #ifndef CONFIG_CPU_FREQ | ||
126 | sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency); | ||
127 | #endif | ||
128 | |||
129 | return 0; | ||
130 | } | ||
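The rating formula in init_r4k_clocksource() grants one point per 10 MHz on top of a 200 base, so a 100 MHz CP0 counter rates 210 and a 1 GHz counter rates 300. A quick check with an assumed frequency:

#include <stdio.h>

int main(void)
{
	unsigned int mips_hpt_frequency = 100000000;	/* assumed 100 MHz */

	printf("rating = %u\n", 200 + mips_hpt_frequency / 10000000);
	/* prints 210 */
	return 0;
}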
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c new file mode 100644 index 000000000..fa2fa3e10 --- /dev/null +++ b/arch/mips/kernel/csrc-sb1250.c | |||
@@ -0,0 +1,71 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2000, 2001 Broadcom Corporation | ||
4 | */ | ||
5 | #include <linux/clocksource.h> | ||
6 | #include <linux/sched_clock.h> | ||
7 | |||
8 | #include <asm/addrspace.h> | ||
9 | #include <asm/io.h> | ||
10 | #include <asm/time.h> | ||
11 | |||
12 | #include <asm/sibyte/sb1250.h> | ||
13 | #include <asm/sibyte/sb1250_regs.h> | ||
14 | #include <asm/sibyte/sb1250_int.h> | ||
15 | #include <asm/sibyte/sb1250_scd.h> | ||
16 | |||
17 | #define SB1250_HPT_NUM 3 | ||
18 | #define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */ | ||
19 | |||
20 | /* | ||
21 | * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over | ||
22 | * again. | ||
23 | */ | ||
24 | static inline u64 sb1250_hpt_get_cycles(void) | ||
25 | { | ||
26 | unsigned int count; | ||
27 | void __iomem *addr; | ||
28 | |||
29 | addr = IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT)); | ||
30 | count = G_SCD_TIMER_CNT(__raw_readq(addr)); | ||
31 | |||
32 | return SB1250_HPT_VALUE - count; | ||
33 | } | ||
34 | |||
35 | static u64 sb1250_hpt_read(struct clocksource *cs) | ||
36 | { | ||
37 | return sb1250_hpt_get_cycles(); | ||
38 | } | ||
39 | |||
40 | struct clocksource bcm1250_clocksource = { | ||
41 | .name = "bcm1250-counter-3", | ||
42 | .rating = 200, | ||
43 | .read = sb1250_hpt_read, | ||
44 | .mask = CLOCKSOURCE_MASK(23), | ||
45 | .flags = CLOCK_SOURCE_IS_CONTINUOUS, | ||
46 | }; | ||
47 | |||
48 | static u64 notrace sb1250_read_sched_clock(void) | ||
49 | { | ||
50 | return sb1250_hpt_get_cycles(); | ||
51 | } | ||
52 | |||
53 | void __init sb1250_clocksource_init(void) | ||
54 | { | ||
55 | struct clocksource *cs = &bcm1250_clocksource; | ||
56 | |||
57 | /* Set up the hpt using timer #3 but do not enable its irq */ | ||
58 | __raw_writeq(0, | ||
59 | IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, | ||
60 | R_SCD_TIMER_CFG))); | ||
61 | __raw_writeq(SB1250_HPT_VALUE, | ||
62 | IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, | ||
63 | R_SCD_TIMER_INIT))); | ||
64 | __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, | ||
65 | IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, | ||
66 | R_SCD_TIMER_CFG))); | ||
67 | |||
68 | clocksource_register_hz(cs, V_SCD_TIMER_FREQ); | ||
69 | |||
70 | sched_clock_register(sb1250_read_sched_clock, 23, V_SCD_TIMER_FREQ); | ||
71 | } | ||
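Timer 3 counts down from SB1250_HPT_VALUE toward zero, so sb1250_hpt_get_cycles() subtracts the raw reading from the maximum to present the monotonically increasing count the clocksource core expects. The conversion in isolation, with an assumed 23-bit maximum:

#include <stdint.h>
#include <stdio.h>

#define HPT_VALUE 0x7fffff	/* assumed 23-bit max, like M_SCD_TIMER_CNT */

int main(void)
{
	uint32_t raw = 0x7ffff0;		/* hardware counts down... */
	uint64_t cycles = HPT_VALUE - raw;	/* ...so this counts up */

	printf("cycles = %llu\n", (unsigned long long)cycles);	/* prints 15 */
	return 0;
}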
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c new file mode 100644 index 000000000..4a1647ddf --- /dev/null +++ b/arch/mips/kernel/early_printk.c | |||
@@ -0,0 +1,41 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2002, 2003, 06, 07 Ralf Baechle (ralf@linux-mips.org) | ||
7 | * Copyright (C) 2007 MIPS Technologies, Inc. | ||
8 | * written by Ralf Baechle (ralf@linux-mips.org) | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/console.h> | ||
12 | #include <linux/printk.h> | ||
13 | #include <linux/init.h> | ||
14 | |||
15 | #include <asm/setup.h> | ||
16 | |||
17 | static void early_console_write(struct console *con, const char *s, unsigned n) | ||
18 | { | ||
19 | while (n-- && *s) { | ||
20 | if (*s == '\n') | ||
21 | prom_putchar('\r'); | ||
22 | prom_putchar(*s); | ||
23 | s++; | ||
24 | } | ||
25 | } | ||
26 | |||
27 | static struct console early_console_prom = { | ||
28 | .name = "early", | ||
29 | .write = early_console_write, | ||
30 | .flags = CON_PRINTBUFFER | CON_BOOT, | ||
31 | .index = -1 | ||
32 | }; | ||
33 | |||
34 | void __init setup_early_printk(void) | ||
35 | { | ||
36 | if (early_console) | ||
37 | return; | ||
38 | early_console = &early_console_prom; | ||
39 | |||
40 | register_console(&early_console_prom); | ||
41 | } | ||
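early_console_write() performs the classic LF to CRLF expansion needed by raw serial consoles. The same loop as a standalone sketch, with putchar() standing in for prom_putchar():

#include <stdio.h>

static void early_write(const char *s, unsigned int n)
{
	while (n-- && *s) {
		if (*s == '\n')
			putchar('\r');	/* expand LF to CRLF for raw terminals */
		putchar(*s);
		s++;
	}
}

int main(void)
{
	early_write("hello\n", 6);
	return 0;
}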
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c new file mode 100644 index 000000000..567c6ec0c --- /dev/null +++ b/arch/mips/kernel/early_printk_8250.c | |||
@@ -0,0 +1,54 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * 8250/16550-type serial ports prom_putchar() | ||
4 | * | ||
5 | * Copyright (C) 2010 Yoichi Yuasa <yuasa@linux-mips.org> | ||
6 | */ | ||
7 | #include <linux/io.h> | ||
8 | #include <linux/serial_core.h> | ||
9 | #include <linux/serial_reg.h> | ||
10 | #include <asm/setup.h> | ||
11 | |||
12 | static void __iomem *serial8250_base; | ||
13 | static unsigned int serial8250_reg_shift; | ||
14 | static unsigned int serial8250_tx_timeout; | ||
15 | |||
16 | void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift, | ||
17 | unsigned int timeout) | ||
18 | { | ||
19 | serial8250_base = (void __iomem *)base; | ||
20 | serial8250_reg_shift = reg_shift; | ||
21 | serial8250_tx_timeout = timeout; | ||
22 | } | ||
23 | |||
24 | static inline u8 serial_in(int offset) | ||
25 | { | ||
26 | return readb(serial8250_base + (offset << serial8250_reg_shift)); | ||
27 | } | ||
28 | |||
29 | static inline void serial_out(int offset, char value) | ||
30 | { | ||
31 | writeb(value, serial8250_base + (offset << serial8250_reg_shift)); | ||
32 | } | ||
33 | |||
34 | void prom_putchar(char c) | ||
35 | { | ||
36 | unsigned int timeout; | ||
37 | int status, bits; | ||
38 | |||
39 | if (!serial8250_base) | ||
40 | return; | ||
41 | |||
42 | timeout = serial8250_tx_timeout; | ||
43 | bits = UART_LSR_TEMT | UART_LSR_THRE; | ||
44 | |||
45 | do { | ||
46 | status = serial_in(UART_LSR); | ||
47 | |||
48 | if (--timeout == 0) | ||
49 | break; | ||
50 | } while ((status & bits) != bits); | ||
51 | |||
52 | if (timeout) | ||
53 | serial_out(UART_TX, c); | ||
54 | } | ||
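The reg_shift parameter accommodates SoCs that space their 8250 registers on 2- or 4-byte boundaries: register offset N is accessed at base + (N << reg_shift). With reg_shift == 2, UART_LSR (offset 5 in serial_reg.h) lands at base + 0x14. A quick check of that address arithmetic (the base address below is assumed):

#include <stdio.h>

#define UART_LSR 5	/* line status register offset, as in serial_reg.h */

int main(void)
{
	unsigned long base = 0xb8000000;	/* assumed MMIO base */
	unsigned int reg_shift = 2;		/* registers on 4-byte boundaries */

	printf("LSR at %#lx\n", base + (UART_LSR << reg_shift));
	/* prints 0xb8000014 */
	return 0;
}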
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c new file mode 100644 index 000000000..7b045d2a0 --- /dev/null +++ b/arch/mips/kernel/elf.c | |||
@@ -0,0 +1,343 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2014 Imagination Technologies | ||
4 | * Author: Paul Burton <paul.burton@mips.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/binfmts.h> | ||
8 | #include <linux/elf.h> | ||
9 | #include <linux/export.h> | ||
10 | #include <linux/sched.h> | ||
11 | |||
12 | #include <asm/cpu-features.h> | ||
13 | #include <asm/cpu-info.h> | ||
14 | |||
15 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
16 | |||
17 | /* Whether to accept legacy-NaN and 2008-NaN user binaries. */ | ||
18 | bool mips_use_nan_legacy; | ||
19 | bool mips_use_nan_2008; | ||
20 | |||
21 | /* FPU modes */ | ||
22 | enum { | ||
23 | FP_FRE, | ||
24 | FP_FR0, | ||
25 | FP_FR1, | ||
26 | }; | ||
27 | |||
28 | /** | ||
29 | * struct mode_req - ABI FPU mode requirements | ||
30 | * @single: The program being loaded needs an FPU but it will only issue | ||
31 | * single precision instructions meaning that it can execute in | ||
32 | * either FR0 or FR1. | ||
33 | * @soft: The soft(-float) requirement means that the program being | ||
34 | * loaded needs has no FPU dependency at all (i.e. it has no | ||
35 | * FPU instructions). | ||
36 | * @fr1: The program being loaded depends on FPU being in FR=1 mode. | ||
37 | * @frdefault: The program being loaded depends on the default FPU mode. | ||
38 | * That is FR0 for O32 and FR1 for N32/N64. | ||
39 | * @fre: The program being loaded depends on FPU with FRE=1. This mode is | ||
40 | * a bridge which uses FR=1 whilst still being able to maintain | ||
41 | * full compatibility with pre-existing code using the O32 FP32 | ||
42 | * ABI. | ||
43 | * | ||
44 | * More information about the FP ABIs can be found here: | ||
45 | * | ||
46 | * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up | ||
47 | * | ||
48 | */ | ||
49 | |||
50 | struct mode_req { | ||
51 | bool single; | ||
52 | bool soft; | ||
53 | bool fr1; | ||
54 | bool frdefault; | ||
55 | bool fre; | ||
56 | }; | ||
57 | |||
58 | static const struct mode_req fpu_reqs[] = { | ||
59 | [MIPS_ABI_FP_ANY] = { true, true, true, true, true }, | ||
60 | [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true }, | ||
61 | [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false }, | ||
62 | [MIPS_ABI_FP_SOFT] = { false, true, false, false, false }, | ||
63 | [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false }, | ||
64 | [MIPS_ABI_FP_XX] = { false, false, true, true, true }, | ||
65 | [MIPS_ABI_FP_64] = { false, false, true, false, false }, | ||
66 | [MIPS_ABI_FP_64A] = { false, false, true, false, true } | ||
67 | }; | ||
68 | |||
69 | /* | ||
70 | * Mode requirements when .MIPS.abiflags is not present in the ELF. | ||
71 | * Not present means that everything is acceptable except FR1. | ||
72 | */ | ||
73 | static struct mode_req none_req = { true, true, false, true, true }; | ||
74 | |||
75 | int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf, | ||
76 | bool is_interp, struct arch_elf_state *state) | ||
77 | { | ||
78 | union { | ||
79 | struct elf32_hdr e32; | ||
80 | struct elf64_hdr e64; | ||
81 | } *ehdr = _ehdr; | ||
82 | struct elf32_phdr *phdr32 = _phdr; | ||
83 | struct elf64_phdr *phdr64 = _phdr; | ||
84 | struct mips_elf_abiflags_v0 abiflags; | ||
85 | bool elf32; | ||
86 | u32 flags; | ||
87 | int ret; | ||
88 | loff_t pos; | ||
89 | |||
90 | elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32; | ||
91 | flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags; | ||
92 | |||
93 | /* Let's see if this is an O32 ELF */ | ||
94 | if (elf32) { | ||
95 | if (flags & EF_MIPS_FP64) { | ||
96 | /* | ||
97 | * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it | ||
98 | * later if needed | ||
99 | */ | ||
100 | if (is_interp) | ||
101 | state->interp_fp_abi = MIPS_ABI_FP_OLD_64; | ||
102 | else | ||
103 | state->fp_abi = MIPS_ABI_FP_OLD_64; | ||
104 | } | ||
105 | if (phdr32->p_type != PT_MIPS_ABIFLAGS) | ||
106 | return 0; | ||
107 | |||
108 | if (phdr32->p_filesz < sizeof(abiflags)) | ||
109 | return -EINVAL; | ||
110 | pos = phdr32->p_offset; | ||
111 | } else { | ||
112 | if (phdr64->p_type != PT_MIPS_ABIFLAGS) | ||
113 | return 0; | ||
114 | if (phdr64->p_filesz < sizeof(abiflags)) | ||
115 | return -EINVAL; | ||
116 | pos = phdr64->p_offset; | ||
117 | } | ||
118 | |||
119 | ret = kernel_read(elf, &abiflags, sizeof(abiflags), &pos); | ||
120 | if (ret < 0) | ||
121 | return ret; | ||
122 | if (ret != sizeof(abiflags)) | ||
123 | return -EIO; | ||
124 | |||
125 | /* Record the required FP ABIs for use by mips_check_elf */ | ||
126 | if (is_interp) | ||
127 | state->interp_fp_abi = abiflags.fp_abi; | ||
128 | else | ||
129 | state->fp_abi = abiflags.fp_abi; | ||
130 | |||
131 | return 0; | ||
132 | } | ||
133 | |||
134 | int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr, | ||
135 | struct arch_elf_state *state) | ||
136 | { | ||
137 | union { | ||
138 | struct elf32_hdr e32; | ||
139 | struct elf64_hdr e64; | ||
140 | } *ehdr = _ehdr; | ||
141 | union { | ||
142 | struct elf32_hdr e32; | ||
143 | struct elf64_hdr e64; | ||
144 | } *iehdr = _interp_ehdr; | ||
145 | struct mode_req prog_req, interp_req; | ||
146 | int fp_abi, interp_fp_abi, abi0, abi1, max_abi; | ||
147 | bool elf32; | ||
148 | u32 flags; | ||
149 | |||
150 | elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32; | ||
151 | flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags; | ||
152 | |||
153 | /* | ||
154 | * Determine the NaN personality, reject the binary if not allowed. | ||
155 | * Also ensure that any interpreter matches the executable. | ||
156 | */ | ||
157 | if (flags & EF_MIPS_NAN2008) { | ||
158 | if (mips_use_nan_2008) | ||
159 | state->nan_2008 = 1; | ||
160 | else | ||
161 | return -ENOEXEC; | ||
162 | } else { | ||
163 | if (mips_use_nan_legacy) | ||
164 | state->nan_2008 = 0; | ||
165 | else | ||
166 | return -ENOEXEC; | ||
167 | } | ||
168 | if (has_interpreter) { | ||
169 | bool ielf32; | ||
170 | u32 iflags; | ||
171 | |||
172 | ielf32 = iehdr->e32.e_ident[EI_CLASS] == ELFCLASS32; | ||
173 | iflags = ielf32 ? iehdr->e32.e_flags : iehdr->e64.e_flags; | ||
174 | |||
175 | if ((flags ^ iflags) & EF_MIPS_NAN2008) | ||
176 | return -ELIBBAD; | ||
177 | } | ||
178 | |||
179 | if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) | ||
180 | return 0; | ||
181 | |||
182 | fp_abi = state->fp_abi; | ||
183 | |||
184 | if (has_interpreter) { | ||
185 | interp_fp_abi = state->interp_fp_abi; | ||
186 | |||
187 | abi0 = min(fp_abi, interp_fp_abi); | ||
188 | abi1 = max(fp_abi, interp_fp_abi); | ||
189 | } else { | ||
190 | abi0 = abi1 = fp_abi; | ||
191 | } | ||
192 | |||
193 | if (elf32 && !(flags & EF_MIPS_ABI2)) { | ||
194 | /* Default to a mode capable of running code expecting FR=0 */ | ||
195 | state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0; | ||
196 | |||
197 | /* Allow all ABIs we know about */ | ||
198 | max_abi = MIPS_ABI_FP_64A; | ||
199 | } else { | ||
200 | /* MIPS64 code always uses FR=1, thus the default is easy */ | ||
201 | state->overall_fp_mode = FP_FR1; | ||
202 | |||
203 | /* Disallow access to the various FPXX & FP64 ABIs */ | ||
204 | max_abi = MIPS_ABI_FP_SOFT; | ||
205 | } | ||
206 | |||
207 | if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) || | ||
208 | (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN)) | ||
209 | return -ELIBBAD; | ||
210 | |||
211 | /* It's time to determine the FPU mode requirements */ | ||
212 | prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0]; | ||
213 | interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1]; | ||
214 | |||
215 | /* | ||
216 | * Check whether the program's and interp's ABIs have a matching FPU | ||
217 | * mode requirement. | ||
218 | */ | ||
219 | prog_req.single = interp_req.single && prog_req.single; | ||
220 | prog_req.soft = interp_req.soft && prog_req.soft; | ||
221 | prog_req.fr1 = interp_req.fr1 && prog_req.fr1; | ||
222 | prog_req.frdefault = interp_req.frdefault && prog_req.frdefault; | ||
223 | prog_req.fre = interp_req.fre && prog_req.fre; | ||
224 | |||
225 | /* | ||
226 | * Determine the desired FPU mode | ||
227 | * | ||
228 | * Decision making: | ||
229 | * | ||
230 | * - We want FR_FRE if FRE=1 and both FR=1 and FR=0 are false. This | ||
231 | * means that we have a combination of program and interpreter | ||
232 | * that inherently require the hybrid FP mode. | ||
233 | * - If FR1 and FRDEFAULT are true, that means we hit the any-abi or | ||
234 | * fpxx case. This is because, in any-ABI (or no-ABI) we have no FPU | ||
235 | * instructions so we don't care about the mode. We will simply use | ||
236 | * the one preferred by the hardware. In fpxx case, that ABI can | ||
237 | * handle both FR=1 and FR=0, so, again, we simply choose the one | ||
238 | * preferred by the hardware. Next, if we only use single-precision | ||
239 | * FPU instructions, and the default ABI FPU mode is not good | ||
240 | * (i.e. single + any ABI combination), we again set the FPU mode to | ||
241 | * the one preferred by the hardware. Next, if we know that the code | ||
242 | * will only use single-precision instructions, shown by single being | ||
243 | * true but frdefault being false, then we again set the FPU mode to | ||
244 | * the one that is preferred by the hardware. | ||
245 | * - We want FP_FR1 if that's the only matching mode and the default one | ||
246 | * is not good. | ||
247 | * - Return with -ELIBBAD if we can't find a matching FPU mode. | ||
248 | */ | ||
249 | if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1) | ||
250 | state->overall_fp_mode = FP_FRE; | ||
251 | else if ((prog_req.fr1 && prog_req.frdefault) || | ||
252 | (prog_req.single && !prog_req.frdefault)) | ||
253 | /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */ | ||
254 | state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) && | ||
255 | cpu_has_mips_r2_r6) ? | ||
256 | FP_FR1 : FP_FR0; | ||
257 | else if (prog_req.fr1) | ||
258 | state->overall_fp_mode = FP_FR1; | ||
259 | else if (!prog_req.fre && !prog_req.frdefault && | ||
260 | !prog_req.fr1 && !prog_req.single && !prog_req.soft) | ||
261 | return -ELIBBAD; | ||
262 | |||
263 | return 0; | ||
264 | } | ||
265 | |||
266 | static inline void set_thread_fp_mode(int hybrid, int regs32) | ||
267 | { | ||
268 | if (hybrid) | ||
269 | set_thread_flag(TIF_HYBRID_FPREGS); | ||
270 | else | ||
271 | clear_thread_flag(TIF_HYBRID_FPREGS); | ||
272 | if (regs32) | ||
273 | set_thread_flag(TIF_32BIT_FPREGS); | ||
274 | else | ||
275 | clear_thread_flag(TIF_32BIT_FPREGS); | ||
276 | } | ||
277 | |||
278 | void mips_set_personality_fp(struct arch_elf_state *state) | ||
279 | { | ||
280 | /* | ||
281 | * This function is only ever called for O32 ELFs so we should | ||
282 | * not be worried about N32/N64 binaries. | ||
283 | */ | ||
284 | |||
285 | if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) | ||
286 | return; | ||
287 | |||
288 | switch (state->overall_fp_mode) { | ||
289 | case FP_FRE: | ||
290 | set_thread_fp_mode(1, 0); | ||
291 | break; | ||
292 | case FP_FR0: | ||
293 | set_thread_fp_mode(0, 1); | ||
294 | break; | ||
295 | case FP_FR1: | ||
296 | set_thread_fp_mode(0, 0); | ||
297 | break; | ||
298 | default: | ||
299 | BUG(); | ||
300 | } | ||
301 | } | ||
302 | |||
303 | /* | ||
304 | * Select the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode | ||
305 | * in FCSR according to the ELF NaN personality. | ||
306 | */ | ||
307 | void mips_set_personality_nan(struct arch_elf_state *state) | ||
308 | { | ||
309 | struct cpuinfo_mips *c = &boot_cpu_data; | ||
310 | struct task_struct *t = current; | ||
311 | |||
312 | t->thread.fpu.fcr31 = c->fpu_csr31; | ||
313 | switch (state->nan_2008) { | ||
314 | case 0: | ||
315 | break; | ||
316 | case 1: | ||
317 | if (!(c->fpu_msk31 & FPU_CSR_NAN2008)) | ||
318 | t->thread.fpu.fcr31 |= FPU_CSR_NAN2008; | ||
319 | if (!(c->fpu_msk31 & FPU_CSR_ABS2008)) | ||
320 | t->thread.fpu.fcr31 |= FPU_CSR_ABS2008; | ||
321 | break; | ||
322 | default: | ||
323 | BUG(); | ||
324 | } | ||
325 | } | ||
326 | |||
327 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
328 | |||
329 | int mips_elf_read_implies_exec(void *elf_ex, int exstack) | ||
330 | { | ||
331 | if (exstack != EXSTACK_DISABLE_X) { | ||
332 | /* The binary doesn't request a non-executable stack */ | ||
333 | return 1; | ||
334 | } | ||
335 | |||
336 | if (!cpu_has_rixi) { | ||
337 | /* The CPU doesn't support non-executable memory */ | ||
338 | return 1; | ||
339 | } | ||
340 | |||
341 | return 0; | ||
342 | } | ||
343 | EXPORT_SYMBOL(mips_elf_read_implies_exec); | ||
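arch_check_elf() resolves the program/interpreter pair by intersecting their per-mode requirement flags and then choosing a mode in priority order. A self-contained sketch of that intersection for an assumed FPXX program with an FP_DOUBLE interpreter, using the same booleans as struct mode_req (the values mirror fpu_reqs[] above):

#include <stdbool.h>
#include <stdio.h>

struct mode_req { bool single, soft, fr1, frdefault, fre; };

int main(void)
{
	/* MIPS_ABI_FP_XX: runs under FR=1, the default mode, or FRE. */
	struct mode_req prog = { false, false, true, true, true };
	/* MIPS_ABI_FP_DOUBLE: needs the default mode or FRE. */
	struct mode_req interp = { false, false, false, true, true };

	/* Intersect: a mode is usable only if both sides accept it. */
	prog.single    &= interp.single;
	prog.soft      &= interp.soft;
	prog.fr1       &= interp.fr1;
	prog.frdefault &= interp.frdefault;
	prog.fre       &= interp.fre;

	printf("frdefault=%d fre=%d fr1=%d\n",
	       prog.frdefault, prog.fre, prog.fr1);
	/* prints 1 1 0: the default FR mode satisfies both binaries */
	return 0;
}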
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S new file mode 100644 index 000000000..4b896f502 --- /dev/null +++ b/arch/mips/kernel/entry.S | |||
@@ -0,0 +1,186 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle | ||
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
8 | * Copyright (C) 2001 MIPS Technologies, Inc. | ||
9 | */ | ||
10 | |||
11 | #include <asm/asm.h> | ||
12 | #include <asm/asmmacro.h> | ||
13 | #include <asm/compiler.h> | ||
14 | #include <asm/irqflags.h> | ||
15 | #include <asm/regdef.h> | ||
16 | #include <asm/mipsregs.h> | ||
17 | #include <asm/stackframe.h> | ||
18 | #include <asm/isadep.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/war.h> | ||
21 | |||
22 | #ifndef CONFIG_PREEMPTION | ||
23 | #define resume_kernel restore_all | ||
24 | #else | ||
25 | #define __ret_from_irq ret_from_exception | ||
26 | #endif | ||
27 | |||
28 | .text | ||
29 | .align 5 | ||
30 | #ifndef CONFIG_PREEMPTION | ||
31 | FEXPORT(ret_from_exception) | ||
32 | local_irq_disable # preempt stop | ||
33 | b __ret_from_irq | ||
34 | #endif | ||
35 | FEXPORT(ret_from_irq) | ||
36 | LONG_S s0, TI_REGS($28) | ||
37 | FEXPORT(__ret_from_irq) | ||
38 | /* | ||
39 | * We can be coming here from a syscall done in the kernel space, | ||
40 | * e.g. a failed kernel_execve(). | ||
41 | */ | ||
42 | resume_userspace_check: | ||
43 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode? | ||
44 | andi t0, t0, KU_USER | ||
45 | beqz t0, resume_kernel | ||
46 | |||
47 | resume_userspace: | ||
48 | local_irq_disable # make sure we don't miss an | ||
49 | # interrupt setting need_resched | ||
50 | # between sampling and return | ||
51 | LONG_L a2, TI_FLAGS($28) # current->work | ||
52 | andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace) | ||
53 | bnez t0, work_pending | ||
54 | j restore_all | ||
55 | |||
56 | #ifdef CONFIG_PREEMPTION | ||
57 | resume_kernel: | ||
58 | local_irq_disable | ||
59 | lw t0, TI_PRE_COUNT($28) | ||
60 | bnez t0, restore_all | ||
61 | LONG_L t0, TI_FLAGS($28) | ||
62 | andi t1, t0, _TIF_NEED_RESCHED | ||
63 | beqz t1, restore_all | ||
64 | LONG_L t0, PT_STATUS(sp) # Interrupts off? | ||
65 | andi t0, 1 | ||
66 | beqz t0, restore_all | ||
67 | PTR_LA ra, restore_all | ||
68 | j preempt_schedule_irq | ||
69 | #endif | ||
70 | |||
71 | FEXPORT(ret_from_kernel_thread) | ||
72 | jal schedule_tail # a0 = struct task_struct *prev | ||
73 | move a0, s1 | ||
74 | jal s0 | ||
75 | j syscall_exit | ||
76 | |||
77 | FEXPORT(ret_from_fork) | ||
78 | jal schedule_tail # a0 = struct task_struct *prev | ||
79 | |||
80 | FEXPORT(syscall_exit) | ||
81 | #ifdef CONFIG_DEBUG_RSEQ | ||
82 | move a0, sp | ||
83 | jal rseq_syscall | ||
84 | #endif | ||
85 | local_irq_disable # make sure need_resched and | ||
86 | # signals don't change between | ||
87 | # sampling and return | ||
88 | LONG_L a2, TI_FLAGS($28) # current->work | ||
89 | li t0, _TIF_ALLWORK_MASK | ||
90 | and t0, a2, t0 | ||
91 | bnez t0, syscall_exit_work | ||
92 | |||
93 | restore_all: # restore full frame | ||
94 | .set noat | ||
95 | RESTORE_TEMP | ||
96 | RESTORE_AT | ||
97 | RESTORE_STATIC | ||
98 | restore_partial: # restore partial frame | ||
99 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
100 | SAVE_STATIC | ||
101 | SAVE_AT | ||
102 | SAVE_TEMP | ||
103 | LONG_L v0, PT_STATUS(sp) | ||
104 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
105 | and v0, ST0_IEP | ||
106 | #else | ||
107 | and v0, ST0_IE | ||
108 | #endif | ||
109 | beqz v0, 1f | ||
110 | jal trace_hardirqs_on | ||
111 | b 2f | ||
112 | 1: jal trace_hardirqs_off | ||
113 | 2: | ||
114 | RESTORE_TEMP | ||
115 | RESTORE_AT | ||
116 | RESTORE_STATIC | ||
117 | #endif | ||
118 | RESTORE_SOME | ||
119 | RESTORE_SP_AND_RET | ||
120 | .set at | ||
121 | |||
122 | work_pending: | ||
123 | andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS | ||
124 | beqz t0, work_notifysig | ||
125 | work_resched: | ||
126 | TRACE_IRQS_OFF | ||
127 | jal schedule | ||
128 | |||
129 | local_irq_disable # make sure need_resched and | ||
130 | # signals don't change between | ||
131 | # sampling and return | ||
132 | LONG_L a2, TI_FLAGS($28) | ||
133 | andi t0, a2, _TIF_WORK_MASK # is there any work to be done | ||
134 | # other than syscall tracing? | ||
135 | beqz t0, restore_all | ||
136 | andi t0, a2, _TIF_NEED_RESCHED | ||
137 | bnez t0, work_resched | ||
138 | |||
139 | work_notifysig: # deal with pending signals and | ||
140 | # notify-resume requests | ||
141 | move a0, sp | ||
142 | li a1, 0 | ||
143 | jal do_notify_resume # a2 already loaded | ||
144 | j resume_userspace_check | ||
145 | |||
146 | FEXPORT(syscall_exit_partial) | ||
147 | #ifdef CONFIG_DEBUG_RSEQ | ||
148 | move a0, sp | ||
149 | jal rseq_syscall | ||
150 | #endif | ||
151 | local_irq_disable # make sure need_resched doesn't | ||
152 | # change between sampling and return | ||
153 | LONG_L a2, TI_FLAGS($28) # current->work | ||
154 | li t0, _TIF_ALLWORK_MASK | ||
155 | and t0, a2 | ||
156 | beqz t0, restore_partial | ||
157 | SAVE_STATIC | ||
158 | syscall_exit_work: | ||
159 | LONG_L t0, PT_STATUS(sp) # returning to kernel mode? | ||
160 | andi t0, t0, KU_USER | ||
161 | beqz t0, resume_kernel | ||
162 | li t0, _TIF_WORK_SYSCALL_EXIT | ||
163 | and t0, a2 # a2 is preloaded with TI_FLAGS | ||
164 | beqz t0, work_pending # trace bit set? | ||
165 | local_irq_enable # could let syscall_trace_leave() | ||
166 | # call schedule() instead | ||
167 | TRACE_IRQS_ON | ||
168 | move a0, sp | ||
169 | jal syscall_trace_leave | ||
170 | b resume_userspace | ||
171 | |||
172 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \ | ||
173 | defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_MIPS_MT) | ||
174 | |||
175 | /* | ||
176 | * MIPS32R2 Instruction Hazard Barrier - must be called as a subroutine. | ||
177 | * | ||
178 | * For C code use the inline version named instruction_hazard(). | ||
179 | */ | ||
180 | LEAF(mips_ihb) | ||
181 | .set MIPS_ISA_LEVEL_RAW | ||
182 | jr.hb ra | ||
183 | nop | ||
184 | END(mips_ihb) | ||
185 | |||
186 | #endif /* CONFIG_CPU_MIPSR2 - CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */ | ||
diff --git a/arch/mips/kernel/fpu-probe.c b/arch/mips/kernel/fpu-probe.c new file mode 100644 index 000000000..e689d6a83 --- /dev/null +++ b/arch/mips/kernel/fpu-probe.c | |||
@@ -0,0 +1,321 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Processor capabilities determination functions. | ||
4 | * | ||
5 | * Copyright (C) xxxx the Anonymous | ||
6 | * Copyright (C) 1994 - 2006 Ralf Baechle | ||
7 | * Copyright (C) 2003, 2004 Maciej W. Rozycki | ||
8 | * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/kernel.h> | ||
13 | |||
14 | #include <asm/bugs.h> | ||
15 | #include <asm/cpu.h> | ||
16 | #include <asm/cpu-features.h> | ||
17 | #include <asm/cpu-type.h> | ||
18 | #include <asm/elf.h> | ||
19 | #include <asm/fpu.h> | ||
20 | #include <asm/mipsregs.h> | ||
21 | |||
22 | #include "fpu-probe.h" | ||
23 | |||
24 | /* | ||
25 | * Get the FPU Implementation/Revision. | ||
26 | */ | ||
27 | static inline unsigned long cpu_get_fpu_id(void) | ||
28 | { | ||
29 | unsigned long tmp, fpu_id; | ||
30 | |||
31 | tmp = read_c0_status(); | ||
32 | __enable_fpu(FPU_AS_IS); | ||
33 | fpu_id = read_32bit_cp1_register(CP1_REVISION); | ||
34 | write_c0_status(tmp); | ||
35 | return fpu_id; | ||
36 | } | ||
37 | |||
38 | /* | ||
39 | * Check if the CPU has an external FPU. | ||
40 | */ | ||
41 | int __cpu_has_fpu(void) | ||
42 | { | ||
43 | return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE; | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * Determine the FCSR mask for FPU hardware. | ||
48 | */ | ||
49 | static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c) | ||
50 | { | ||
51 | unsigned long sr, mask, fcsr, fcsr0, fcsr1; | ||
52 | |||
53 | fcsr = c->fpu_csr31; | ||
54 | mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM; | ||
55 | |||
56 | sr = read_c0_status(); | ||
57 | __enable_fpu(FPU_AS_IS); | ||
58 | |||
59 | fcsr0 = fcsr & mask; | ||
60 | write_32bit_cp1_register(CP1_STATUS, fcsr0); | ||
61 | fcsr0 = read_32bit_cp1_register(CP1_STATUS); | ||
62 | |||
63 | fcsr1 = fcsr | ~mask; | ||
64 | write_32bit_cp1_register(CP1_STATUS, fcsr1); | ||
65 | fcsr1 = read_32bit_cp1_register(CP1_STATUS); | ||
66 | |||
67 | write_32bit_cp1_register(CP1_STATUS, fcsr); | ||
68 | |||
69 | write_c0_status(sr); | ||
70 | |||
71 | c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask; | ||
72 | } | ||
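The probe above exploits a simple identity: a writable FCSR bit toggles between the all-zeroes and all-ones writes, a hard-wired bit does not, so ~(fcsr0 ^ fcsr1) collects exactly the fixed bits. A standalone toy model of the idea (RO_BITS is an invented value standing in for whatever bits a given FPU hard-wires):

    #include <assert.h>
    #include <stdint.h>

    #define RO_BITS 0x01030000u    /* invented read-only bits */

    static uint32_t reg;           /* simulated register; RO bits stay put */

    static void wr(uint32_t v) { reg = (v & ~RO_BITS) | (reg & RO_BITS); }

    int main(void)
    {
            uint32_t v0, v1;

            wr(0x00000000u); v0 = reg;   /* try to clear every bit */
            wr(0xffffffffu); v1 = reg;   /* try to set every bit   */

            /* Bits that failed to toggle are exactly the read-only ones. */
            assert((uint32_t)~(v0 ^ v1) == RO_BITS);
            return 0;
    }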
73 | |||
74 | /* | ||
75 | * Determine the IEEE 754 NaN encodings and ABS.fmt/NEG.fmt execution modes | ||
76 | * supported by FPU hardware. | ||
77 | */ | ||
78 | static void cpu_set_fpu_2008(struct cpuinfo_mips *c) | ||
79 | { | ||
80 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | | ||
81 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | | ||
82 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | | ||
83 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { | ||
84 | unsigned long sr, fir, fcsr, fcsr0, fcsr1; | ||
85 | |||
86 | sr = read_c0_status(); | ||
87 | __enable_fpu(FPU_AS_IS); | ||
88 | |||
89 | fir = read_32bit_cp1_register(CP1_REVISION); | ||
90 | if (fir & MIPS_FPIR_HAS2008) { | ||
91 | fcsr = read_32bit_cp1_register(CP1_STATUS); | ||
92 | |||
93 | /* | ||
94 | * A MAC2008 toolchain never landed in the real world, so | ||
95 | * we only test whether it can be disabled and | ||
96 | * don't try to enable it. | ||
97 | */ | ||
98 | fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 | | ||
99 | FPU_CSR_MAC2008); | ||
100 | write_32bit_cp1_register(CP1_STATUS, fcsr0); | ||
101 | fcsr0 = read_32bit_cp1_register(CP1_STATUS); | ||
102 | |||
103 | fcsr1 = fcsr | FPU_CSR_ABS2008 | FPU_CSR_NAN2008; | ||
104 | write_32bit_cp1_register(CP1_STATUS, fcsr1); | ||
105 | fcsr1 = read_32bit_cp1_register(CP1_STATUS); | ||
106 | |||
107 | write_32bit_cp1_register(CP1_STATUS, fcsr); | ||
108 | |||
109 | if (c->isa_level & (MIPS_CPU_ISA_M32R2 | | ||
110 | MIPS_CPU_ISA_M64R2)) { | ||
111 | /* | ||
112 | * The bit for MAC2008 might be reused by R6 | ||
113 | * in the future, so we only test for R2-R5. | ||
114 | */ | ||
115 | if (fcsr0 & FPU_CSR_MAC2008) | ||
116 | c->options |= MIPS_CPU_MAC_2008_ONLY; | ||
117 | } | ||
118 | |||
119 | if (!(fcsr0 & FPU_CSR_NAN2008)) | ||
120 | c->options |= MIPS_CPU_NAN_LEGACY; | ||
121 | if (fcsr1 & FPU_CSR_NAN2008) | ||
122 | c->options |= MIPS_CPU_NAN_2008; | ||
123 | |||
124 | if ((fcsr0 ^ fcsr1) & FPU_CSR_ABS2008) | ||
125 | c->fpu_msk31 &= ~FPU_CSR_ABS2008; | ||
126 | else | ||
127 | c->fpu_csr31 |= fcsr & FPU_CSR_ABS2008; | ||
128 | |||
129 | if ((fcsr0 ^ fcsr1) & FPU_CSR_NAN2008) | ||
130 | c->fpu_msk31 &= ~FPU_CSR_NAN2008; | ||
131 | else | ||
132 | c->fpu_csr31 |= fcsr & FPU_CSR_NAN2008; | ||
133 | } else { | ||
134 | c->options |= MIPS_CPU_NAN_LEGACY; | ||
135 | } | ||
136 | |||
137 | write_c0_status(sr); | ||
138 | } else { | ||
139 | c->options |= MIPS_CPU_NAN_LEGACY; | ||
140 | } | ||
141 | } | ||
142 | |||
143 | /* | ||
144 | * IEEE 754 conformance mode to use. Affects the NaN encoding and the | ||
145 | * ABS.fmt/NEG.fmt execution mode. | ||
146 | */ | ||
147 | static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT; | ||
148 | |||
149 | /* | ||
150 | * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes | ||
151 | * to be supported by the FPU emulator according to the IEEE 754 conformance | ||
152 | * mode selected. Note that "relaxed" straps the emulator so that it | ||
153 | * allows 2008-NaN binaries even for legacy processors. | ||
154 | */ | ||
155 | static void cpu_set_nofpu_2008(struct cpuinfo_mips *c) | ||
156 | { | ||
157 | c->options &= ~(MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY); | ||
158 | c->fpu_csr31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008); | ||
159 | c->fpu_msk31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008); | ||
160 | |||
161 | switch (ieee754) { | ||
162 | case STRICT: | ||
163 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | | ||
164 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | | ||
165 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | | ||
166 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { | ||
167 | c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY; | ||
168 | } else { | ||
169 | c->options |= MIPS_CPU_NAN_LEGACY; | ||
170 | c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; | ||
171 | } | ||
172 | break; | ||
173 | case LEGACY: | ||
174 | c->options |= MIPS_CPU_NAN_LEGACY; | ||
175 | c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; | ||
176 | break; | ||
177 | case STD2008: | ||
178 | c->options |= MIPS_CPU_NAN_2008; | ||
179 | c->fpu_csr31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; | ||
180 | c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008; | ||
181 | break; | ||
182 | case RELAXED: | ||
183 | c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY; | ||
184 | break; | ||
185 | } | ||
186 | } | ||
187 | |||
188 | /* | ||
189 | * Override the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode | ||
190 | * according to the "ieee754=" parameter. | ||
191 | */ | ||
192 | static void cpu_set_nan_2008(struct cpuinfo_mips *c) | ||
193 | { | ||
194 | switch (ieee754) { | ||
195 | case STRICT: | ||
196 | mips_use_nan_legacy = !!cpu_has_nan_legacy; | ||
197 | mips_use_nan_2008 = !!cpu_has_nan_2008; | ||
198 | break; | ||
199 | case LEGACY: | ||
200 | mips_use_nan_legacy = !!cpu_has_nan_legacy; | ||
201 | mips_use_nan_2008 = !cpu_has_nan_legacy; | ||
202 | break; | ||
203 | case STD2008: | ||
204 | mips_use_nan_legacy = !cpu_has_nan_2008; | ||
205 | mips_use_nan_2008 = !!cpu_has_nan_2008; | ||
206 | break; | ||
207 | case RELAXED: | ||
208 | mips_use_nan_legacy = true; | ||
209 | mips_use_nan_2008 = true; | ||
210 | break; | ||
211 | } | ||
212 | } | ||
213 | |||
214 | /* | ||
215 | * IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode override | ||
216 | * settings: | ||
217 | * | ||
218 | * strict: accept binaries that request a NaN encoding supported by the FPU | ||
219 | * legacy: only accept legacy-NaN binaries | ||
220 | * 2008: only accept 2008-NaN binaries | ||
221 | * relaxed: accept any binaries regardless of whether supported by the FPU | ||
222 | */ | ||
223 | static int __init ieee754_setup(char *s) | ||
224 | { | ||
225 | if (!s) | ||
226 | return -1; | ||
227 | else if (!strcmp(s, "strict")) | ||
228 | ieee754 = STRICT; | ||
229 | else if (!strcmp(s, "legacy")) | ||
230 | ieee754 = LEGACY; | ||
231 | else if (!strcmp(s, "2008")) | ||
232 | ieee754 = STD2008; | ||
233 | else if (!strcmp(s, "relaxed")) | ||
234 | ieee754 = RELAXED; | ||
235 | else | ||
236 | return -1; | ||
237 | |||
238 | if (!(boot_cpu_data.options & MIPS_CPU_FPU)) | ||
239 | cpu_set_nofpu_2008(&boot_cpu_data); | ||
240 | cpu_set_nan_2008(&boot_cpu_data); | ||
241 | |||
242 | return 0; | ||
243 | } | ||
244 | |||
245 | early_param("ieee754", ieee754_setup); | ||
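A concrete example of these settings: booting with ieee754=relaxed on a legacy-NaN FPU makes both mips_use_nan_legacy and mips_use_nan_2008 true, so 2008-NaN binaries are accepted even though the hardware alone could not run them, which is the "straps the emulator" behaviour described in the comment above cpu_set_nofpu_2008().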
246 | |||
247 | /* | ||
248 | * Set the FIR feature flags for the FPU emulator. | ||
249 | */ | ||
250 | static void cpu_set_nofpu_id(struct cpuinfo_mips *c) | ||
251 | { | ||
252 | u32 value; | ||
253 | |||
254 | value = 0; | ||
255 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | | ||
256 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | | ||
257 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | | ||
258 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) | ||
259 | value |= MIPS_FPIR_D | MIPS_FPIR_S; | ||
260 | if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | | ||
261 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | | ||
262 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) | ||
263 | value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W; | ||
264 | if (c->options & MIPS_CPU_NAN_2008) | ||
265 | value |= MIPS_FPIR_HAS2008; | ||
266 | c->fpu_id = value; | ||
267 | } | ||
268 | |||
269 | /* FPU emulator FCSR mask, saved at boot for use on the boot CPU with "nofpu". */ | ||
270 | static unsigned int mips_nofpu_msk31; | ||
271 | |||
272 | /* | ||
273 | * Set options for FPU hardware. | ||
274 | */ | ||
275 | void cpu_set_fpu_opts(struct cpuinfo_mips *c) | ||
276 | { | ||
277 | c->fpu_id = cpu_get_fpu_id(); | ||
278 | mips_nofpu_msk31 = c->fpu_msk31; | ||
279 | |||
280 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | | ||
281 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | | ||
282 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | | ||
283 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { | ||
284 | if (c->fpu_id & MIPS_FPIR_3D) | ||
285 | c->ases |= MIPS_ASE_MIPS3D; | ||
286 | if (c->fpu_id & MIPS_FPIR_UFRP) | ||
287 | c->options |= MIPS_CPU_UFR; | ||
288 | if (c->fpu_id & MIPS_FPIR_FREP) | ||
289 | c->options |= MIPS_CPU_FRE; | ||
290 | } | ||
291 | |||
292 | cpu_set_fpu_fcsr_mask(c); | ||
293 | cpu_set_fpu_2008(c); | ||
294 | cpu_set_nan_2008(c); | ||
295 | } | ||
296 | |||
297 | /* | ||
298 | * Set options for the FPU emulator. | ||
299 | */ | ||
300 | void cpu_set_nofpu_opts(struct cpuinfo_mips *c) | ||
301 | { | ||
302 | c->options &= ~MIPS_CPU_FPU; | ||
303 | c->fpu_msk31 = mips_nofpu_msk31; | ||
304 | |||
305 | cpu_set_nofpu_2008(c); | ||
306 | cpu_set_nan_2008(c); | ||
307 | cpu_set_nofpu_id(c); | ||
308 | } | ||
309 | |||
310 | int mips_fpu_disabled; | ||
311 | |||
312 | static int __init fpu_disable(char *s) | ||
313 | { | ||
314 | cpu_set_nofpu_opts(&boot_cpu_data); | ||
315 | mips_fpu_disabled = 1; | ||
316 | |||
317 | return 1; | ||
318 | } | ||
319 | |||
320 | __setup("nofpu", fpu_disable); | ||
321 | |||
diff --git a/arch/mips/kernel/fpu-probe.h b/arch/mips/kernel/fpu-probe.h new file mode 100644 index 000000000..951ce5089 --- /dev/null +++ b/arch/mips/kernel/fpu-probe.h | |||
@@ -0,0 +1,40 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
2 | |||
3 | #include <linux/kernel.h> | ||
4 | |||
5 | #include <asm/cpu.h> | ||
6 | #include <asm/cpu-info.h> | ||
7 | |||
8 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
9 | |||
10 | extern int mips_fpu_disabled; | ||
11 | |||
12 | int __cpu_has_fpu(void); | ||
13 | void cpu_set_fpu_opts(struct cpuinfo_mips *c); | ||
14 | void cpu_set_nofpu_opts(struct cpuinfo_mips *c); | ||
15 | |||
16 | #else /* !CONFIG_MIPS_FP_SUPPORT */ | ||
17 | |||
18 | #define mips_fpu_disabled 1 | ||
19 | |||
20 | static inline unsigned long cpu_get_fpu_id(void) | ||
21 | { | ||
22 | return FPIR_IMP_NONE; | ||
23 | } | ||
24 | |||
25 | static inline int __cpu_has_fpu(void) | ||
26 | { | ||
27 | return 0; | ||
28 | } | ||
29 | |||
30 | static inline void cpu_set_fpu_opts(struct cpuinfo_mips *c) | ||
31 | { | ||
32 | /* no-op */ | ||
33 | } | ||
34 | |||
35 | static inline void cpu_set_nofpu_opts(struct cpuinfo_mips *c) | ||
36 | { | ||
37 | /* no-op */ | ||
38 | } | ||
39 | |||
40 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c new file mode 100644 index 000000000..f57e68f40 --- /dev/null +++ b/arch/mips/kernel/ftrace.c | |||
@@ -0,0 +1,414 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Code for replacing ftrace calls with jumps. | ||
4 | * | ||
5 | * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com> | ||
6 | * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China | ||
7 | * Author: Wu Zhangjin <wuzhangjin@gmail.com> | ||
8 | * | ||
9 | * Thanks go to Steven Rostedt for writing the original x86 version. | ||
10 | */ | ||
11 | |||
12 | #include <linux/uaccess.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/ftrace.h> | ||
15 | #include <linux/syscalls.h> | ||
16 | |||
17 | #include <asm/asm.h> | ||
18 | #include <asm/asm-offsets.h> | ||
19 | #include <asm/cacheflush.h> | ||
20 | #include <asm/syscall.h> | ||
21 | #include <asm/uasm.h> | ||
22 | #include <asm/unistd.h> | ||
23 | |||
24 | #include <asm-generic/sections.h> | ||
25 | |||
26 | #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) | ||
27 | #define MCOUNT_OFFSET_INSNS 5 | ||
28 | #else | ||
29 | #define MCOUNT_OFFSET_INSNS 4 | ||
30 | #endif | ||
31 | |||
32 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
33 | |||
34 | /* Arch override because MIPS doesn't need to run this from stop_machine() */ | ||
35 | void arch_ftrace_update_code(int command) | ||
36 | { | ||
37 | ftrace_modify_all_code(command); | ||
38 | } | ||
39 | |||
40 | #define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */ | ||
41 | #define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */ | ||
42 | #define JUMP_RANGE_MASK ((1UL << 28) - 1) | ||
43 | |||
44 | #define INSN_NOP 0x00000000 /* nop */ | ||
45 | #define INSN_JAL(addr) \ | ||
46 | ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK))) | ||
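Since jal encodes only the low 28 bits of a word-aligned target, a patched call site must sit in the same 256 MB segment as its target; that is why ftrace_dyn_arch_init_insns() below masks its targets with JUMP_RANGE_MASK. A standalone check of the encoding (the target address is arbitrary):

    #include <assert.h>
    #include <stdint.h>

    #define JAL       0x0c000000u
    #define ADDR_MASK 0x03ffffffu
    #define INSN_JAL(addr) ((uint32_t)(JAL | (((addr) >> 2) & ADDR_MASK)))

    int main(void)
    {
            /* Only the low 28 bits of the target survive, as a word index. */
            assert(INSN_JAL(0x80123458u) == 0x0c048d16u);
            return 0;
    }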
47 | |||
48 | static unsigned int insn_jal_ftrace_caller __read_mostly; | ||
49 | static unsigned int insn_la_mcount[2] __read_mostly; | ||
50 | static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly; | ||
51 | |||
52 | static inline void ftrace_dyn_arch_init_insns(void) | ||
53 | { | ||
54 | u32 *buf; | ||
55 | unsigned int v1; | ||
56 | |||
57 | /* la v1, _mcount */ | ||
58 | v1 = 3; | ||
59 | buf = (u32 *)&insn_la_mcount[0]; | ||
60 | UASM_i_LA(&buf, v1, MCOUNT_ADDR); | ||
61 | |||
62 | /* jal (ftrace_caller + 8), jump over the first two instructions */ | ||
63 | buf = (u32 *)&insn_jal_ftrace_caller; | ||
64 | uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK); | ||
65 | |||
66 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
67 | /* j ftrace_graph_caller */ | ||
68 | buf = (u32 *)&insn_j_ftrace_graph_caller; | ||
69 | uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK); | ||
70 | #endif | ||
71 | } | ||
72 | |||
73 | static int ftrace_modify_code(unsigned long ip, unsigned int new_code) | ||
74 | { | ||
75 | int faulted; | ||
76 | mm_segment_t old_fs; | ||
77 | |||
78 | /* *(unsigned int *)ip = new_code; */ | ||
79 | safe_store_code(new_code, ip, faulted); | ||
80 | |||
81 | if (unlikely(faulted)) | ||
82 | return -EFAULT; | ||
83 | |||
84 | old_fs = get_fs(); | ||
85 | set_fs(KERNEL_DS); | ||
86 | flush_icache_range(ip, ip + 8); | ||
87 | set_fs(old_fs); | ||
88 | |||
89 | return 0; | ||
90 | } | ||
91 | |||
92 | #ifndef CONFIG_64BIT | ||
93 | static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1, | ||
94 | unsigned int new_code2) | ||
95 | { | ||
96 | int faulted; | ||
97 | mm_segment_t old_fs; | ||
98 | |||
99 | safe_store_code(new_code1, ip, faulted); | ||
100 | if (unlikely(faulted)) | ||
101 | return -EFAULT; | ||
102 | |||
103 | ip += 4; | ||
104 | safe_store_code(new_code2, ip, faulted); | ||
105 | if (unlikely(faulted)) | ||
106 | return -EFAULT; | ||
107 | |||
108 | ip -= 4; | ||
109 | old_fs = get_fs(); | ||
110 | set_fs(KERNEL_DS); | ||
111 | flush_icache_range(ip, ip + 8); | ||
112 | set_fs(old_fs); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1, | ||
118 | unsigned int new_code2) | ||
119 | { | ||
120 | int faulted; | ||
121 | mm_segment_t old_fs; | ||
122 | |||
123 | ip += 4; | ||
124 | safe_store_code(new_code2, ip, faulted); | ||
125 | if (unlikely(faulted)) | ||
126 | return -EFAULT; | ||
127 | |||
128 | ip -= 4; | ||
129 | safe_store_code(new_code1, ip, faulted); | ||
130 | if (unlikely(faulted)) | ||
131 | return -EFAULT; | ||
132 | |||
133 | old_fs = get_fs(); | ||
134 | set_fs(KERNEL_DS); | ||
135 | flush_icache_range(ip, ip + 8); | ||
136 | set_fs(old_fs); | ||
137 | |||
138 | return 0; | ||
139 | } | ||
140 | #endif | ||
141 | |||
142 | /* | ||
143 | * The details about the calling site of mcount on MIPS | ||
144 | * | ||
145 | * 1. For kernel: | ||
146 | * | ||
147 | * move at, ra | ||
148 | * jal _mcount --> nop | ||
149 | * sub sp, sp, 8 --> nop (CONFIG_32BIT) | ||
150 | * | ||
151 | * 2. For modules: | ||
152 | * | ||
153 | * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT | ||
154 | * | ||
155 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005) | ||
156 | * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT) | ||
157 | * move at, ra | ||
158 | * move $12, ra_address | ||
159 | * jalr v1 | ||
160 | * sub sp, sp, 8 | ||
161 | * 1: offset = 5 instructions | ||
162 | * 2.2 For the other situations | ||
163 | * | ||
164 | * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004) | ||
165 | * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT) | ||
166 | * move at, ra | ||
167 | * jalr v1 | ||
168 | * nop | move $12, ra_address | sub sp, sp, 8 | ||
169 | * 1: offset = 4 instructions | ||
170 | */ | ||
171 | |||
172 | #define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS) | ||
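INSN_B_1F leans on the MIPS branch format: "b 1f" is really "beq $0, $0, off", opcode 000100 in the top six bits with a 16-bit word offset (counted from the delay slot) in the low bits, so hopping over MCOUNT_OFFSET_INSNS instructions encodes as 0x10000000 | MCOUNT_OFFSET_INSNS, matching the 0x10000004/0x10000005 constants in the comment above. A standalone check:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t beq_zero_zero = 4u << 26;  /* "beq $0, $0, ..." template */

            assert(beq_zero_zero == 0x10000000u);
            assert((beq_zero_zero | 5) == 0x10000005u); /* "b 1f" over 5 insns */
            return 0;
    }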
173 | |||
174 | int ftrace_make_nop(struct module *mod, | ||
175 | struct dyn_ftrace *rec, unsigned long addr) | ||
176 | { | ||
177 | unsigned int new; | ||
178 | unsigned long ip = rec->ip; | ||
179 | |||
180 | /* | ||
181 | * If ip is in kernel space no long call is needed; otherwise a | ||
182 | * long call is required to reach _mcount. | ||
183 | */ | ||
184 | new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F; | ||
185 | #ifdef CONFIG_64BIT | ||
186 | return ftrace_modify_code(ip, new); | ||
187 | #else | ||
188 | /* | ||
189 | * On 32 bit MIPS platforms, gcc adds a stack adjust | ||
190 | * instruction in the delay slot after the branch to | ||
191 | * mcount and expects mcount to restore the sp on return. | ||
192 | * This is based on a legacy API and does nothing but | ||
193 | * waste instructions so it's being removed at runtime. | ||
194 | */ | ||
195 | return ftrace_modify_code_2(ip, new, INSN_NOP); | ||
196 | #endif | ||
197 | } | ||
198 | |||
199 | int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) | ||
200 | { | ||
201 | unsigned int new; | ||
202 | unsigned long ip = rec->ip; | ||
203 | |||
204 | new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0]; | ||
205 | |||
206 | #ifdef CONFIG_64BIT | ||
207 | return ftrace_modify_code(ip, new); | ||
208 | #else | ||
209 | return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ? | ||
210 | INSN_NOP : insn_la_mcount[1]); | ||
211 | #endif | ||
212 | } | ||
213 | |||
214 | #define FTRACE_CALL_IP ((unsigned long)(&ftrace_call)) | ||
215 | |||
216 | int ftrace_update_ftrace_func(ftrace_func_t func) | ||
217 | { | ||
218 | unsigned int new; | ||
219 | |||
220 | new = INSN_JAL((unsigned long)func); | ||
221 | |||
222 | return ftrace_modify_code(FTRACE_CALL_IP, new); | ||
223 | } | ||
224 | |||
225 | int __init ftrace_dyn_arch_init(void) | ||
226 | { | ||
227 | /* Encode the instructions when booting */ | ||
228 | ftrace_dyn_arch_init_insns(); | ||
229 | |||
230 | /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */ | ||
231 | ftrace_modify_code(MCOUNT_ADDR, INSN_NOP); | ||
232 | |||
233 | return 0; | ||
234 | } | ||
235 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
236 | |||
237 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
238 | |||
239 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
240 | |||
241 | extern void ftrace_graph_call(void); | ||
242 | #define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call)) | ||
243 | |||
244 | int ftrace_enable_ftrace_graph_caller(void) | ||
245 | { | ||
246 | return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, | ||
247 | insn_j_ftrace_graph_caller); | ||
248 | } | ||
249 | |||
250 | int ftrace_disable_ftrace_graph_caller(void) | ||
251 | { | ||
252 | return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP); | ||
253 | } | ||
254 | |||
255 | #endif /* CONFIG_DYNAMIC_FTRACE */ | ||
256 | |||
257 | #ifndef KBUILD_MCOUNT_RA_ADDRESS | ||
258 | |||
259 | #define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */ | ||
260 | #define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */ | ||
261 | #define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */ | ||
262 | |||
263 | unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long | ||
264 | old_parent_ra, unsigned long parent_ra_addr, unsigned long fp) | ||
265 | { | ||
266 | unsigned long sp, ip, tmp; | ||
267 | unsigned int code; | ||
268 | int faulted; | ||
269 | |||
270 | /* | ||
271 | * For a module, move ip back from the return address to just after | ||
272 | * the "lui v1, hi_16bit_of_mcount" instruction (offset 24); for the | ||
273 | * kernel, move it to just after the "move at, ra" instruction (offset 16). | ||
274 | */ | ||
275 | ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24); | ||
276 | |||
277 | /* | ||
278 | * Scan backwards through the text until we find either a non-store | ||
279 | * instruction or the "s{d,w} ra, offset(sp)" instruction. | ||
280 | */ | ||
281 | do { | ||
282 | /* get the code at "ip": code = *(unsigned int *)ip; */ | ||
283 | safe_load_code(code, ip, faulted); | ||
284 | |||
285 | if (unlikely(faulted)) | ||
286 | return 0; | ||
287 | /* | ||
288 | * If we hit a non-store instruction before finding where the | ||
289 | * ra is stored, then this is a leaf function and it does not | ||
290 | * store the ra on the stack. | ||
291 | */ | ||
292 | if ((code & S_R_SP) != S_R_SP) | ||
293 | return parent_ra_addr; | ||
294 | |||
295 | /* Move to the previous instruction (the scan runs backwards) */ | ||
296 | ip -= 4; | ||
297 | } while ((code & S_RA_SP) != S_RA_SP); | ||
298 | |||
299 | sp = fp + (code & OFFSET_MASK); | ||
300 | |||
301 | /* tmp = *(unsigned long *)sp; */ | ||
302 | safe_load_stack(tmp, sp, faulted); | ||
303 | if (unlikely(faulted)) | ||
304 | return 0; | ||
305 | |||
306 | if (tmp == old_parent_ra) | ||
307 | return sp; | ||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | #endif /* !KBUILD_MCOUNT_RA_ADDRESS */ | ||
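The masks above work because a MIPS store keeps its opcode, base and rt fields in the upper 16 bits: any "sw ra, off(sp)" has 0xafbf there, and the sd form (opcode 0x3f instead of 0x2b) still matches under the same mask, which is why one pattern covers s{d,w}. A standalone check:

    #include <assert.h>
    #include <stdint.h>

    #define S_RA_SP (0xafbfu << 16)

    int main(void)
    {
            /* opcode | base=sp($29) | rt=ra($31) | 16-bit offset */
            uint32_t sw_ra_24_sp = (0x2bu << 26) | (29u << 21) | (31u << 16) | 24;
            uint32_t sd_ra_24_sp = (0x3fu << 26) | (29u << 21) | (31u << 16) | 24;

            assert(sw_ra_24_sp == 0xafbf0018u);
            assert((sw_ra_24_sp & S_RA_SP) == S_RA_SP); /* sw ra,24(sp) */
            assert((sd_ra_24_sp & S_RA_SP) == S_RA_SP); /* sd ra,24(sp) */
            return 0;
    }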
312 | |||
313 | /* | ||
314 | * Hook the return address and push it in the stack of return addrs | ||
315 | * in current thread info. | ||
316 | */ | ||
317 | void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra, | ||
318 | unsigned long fp) | ||
319 | { | ||
320 | unsigned long old_parent_ra; | ||
321 | unsigned long return_hooker = (unsigned long) | ||
322 | &return_to_handler; | ||
323 | int faulted, insns; | ||
324 | |||
325 | if (unlikely(ftrace_graph_is_dead())) | ||
326 | return; | ||
327 | |||
328 | if (unlikely(atomic_read(¤t->tracing_graph_pause))) | ||
329 | return; | ||
330 | |||
331 | /* | ||
332 | * "parent_ra_addr" is the stack address where the return address of | ||
333 | * the caller of _mcount is saved. | ||
334 | * | ||
335 | * If gcc < 4.5, a leaf function does not save the return address | ||
336 | * on the stack, so we "emulate" one in _mcount's stack space | ||
337 | * and hijack it directly. | ||
338 | * For a non-leaf function, it does save the return address to its own | ||
339 | * stack space, so we cannot hijack it directly, but need to find the | ||
340 | * real stack address, which is done by ftrace_get_parent_ra_addr(). | ||
341 | * | ||
342 | * If gcc >= 4.5, with the new -mmcount-ra-address option, for a | ||
343 | * non-leaf function, the location of the return address will be saved | ||
344 | * to $12 for us. | ||
345 | * For a leaf function, it just puts a zero into $12, so we handle | ||
346 | * it in ftrace_graph_caller() of mcount.S. | ||
347 | */ | ||
348 | |||
349 | /* old_parent_ra = *parent_ra_addr; */ | ||
350 | safe_load_stack(old_parent_ra, parent_ra_addr, faulted); | ||
351 | if (unlikely(faulted)) | ||
352 | goto out; | ||
353 | #ifndef KBUILD_MCOUNT_RA_ADDRESS | ||
354 | parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra, | ||
355 | old_parent_ra, (unsigned long)parent_ra_addr, fp); | ||
356 | /* | ||
357 | * If we failed to get the stack address of the non-leaf function's | ||
358 | * ra, stop the function graph tracer and return. | ||
359 | */ | ||
360 | if (parent_ra_addr == NULL) | ||
361 | goto out; | ||
362 | #endif | ||
363 | /* *parent_ra_addr = return_hooker; */ | ||
364 | safe_store_stack(return_hooker, parent_ra_addr, faulted); | ||
365 | if (unlikely(faulted)) | ||
366 | goto out; | ||
367 | |||
368 | /* | ||
369 | * Get the recorded ip of the current mcount calling site in the | ||
370 | * __mcount_loc section, which will be used to filter the function | ||
371 | * entries configured through the tracing/set_graph_function interface. | ||
372 | */ | ||
373 | |||
374 | insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1; | ||
375 | self_ra -= (MCOUNT_INSN_SIZE * insns); | ||
376 | |||
377 | if (function_graph_enter(old_parent_ra, self_ra, fp, NULL)) | ||
378 | *parent_ra_addr = old_parent_ra; | ||
379 | return; | ||
380 | out: | ||
381 | ftrace_graph_stop(); | ||
382 | WARN_ON(1); | ||
383 | } | ||
384 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
385 | |||
386 | #ifdef CONFIG_FTRACE_SYSCALLS | ||
387 | |||
388 | #ifdef CONFIG_32BIT | ||
389 | unsigned long __init arch_syscall_addr(int nr) | ||
390 | { | ||
391 | return (unsigned long)sys_call_table[nr - __NR_O32_Linux]; | ||
392 | } | ||
393 | #endif | ||
394 | |||
395 | #ifdef CONFIG_64BIT | ||
396 | |||
397 | unsigned long __init arch_syscall_addr(int nr) | ||
398 | { | ||
399 | #ifdef CONFIG_MIPS32_N32 | ||
400 | if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls) | ||
401 | return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux]; | ||
402 | #endif | ||
403 | if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls) | ||
404 | return (unsigned long)sys_call_table[nr - __NR_64_Linux]; | ||
405 | #ifdef CONFIG_MIPS32_O32 | ||
406 | if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls) | ||
407 | return (unsigned long)sys32_call_table[nr - __NR_O32_Linux]; | ||
408 | #endif | ||
409 | |||
410 | return (unsigned long) &sys_ni_syscall; | ||
411 | } | ||
412 | #endif | ||
413 | |||
414 | #endif /* CONFIG_FTRACE_SYSCALLS */ | ||
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S new file mode 100644 index 000000000..bcce32a3d --- /dev/null +++ b/arch/mips/kernel/genex.S | |||
@@ -0,0 +1,682 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle | ||
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
8 | * Copyright (C) 2002, 2007 Maciej W. Rozycki | ||
9 | * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved. | ||
10 | */ | ||
11 | #include <linux/init.h> | ||
12 | |||
13 | #include <asm/asm.h> | ||
14 | #include <asm/asmmacro.h> | ||
15 | #include <asm/cacheops.h> | ||
16 | #include <asm/irqflags.h> | ||
17 | #include <asm/regdef.h> | ||
18 | #include <asm/fpregdef.h> | ||
19 | #include <asm/mipsregs.h> | ||
20 | #include <asm/stackframe.h> | ||
21 | #include <asm/sync.h> | ||
22 | #include <asm/war.h> | ||
23 | #include <asm/thread_info.h> | ||
24 | |||
25 | __INIT | ||
26 | |||
27 | /* | ||
28 | * General exception vector for all other CPUs. | ||
29 | * | ||
30 | * Be careful when changing this, it has to be at most 128 bytes | ||
31 | * to fit into space reserved for the exception handler. | ||
32 | */ | ||
33 | NESTED(except_vec3_generic, 0, sp) | ||
34 | .set push | ||
35 | .set noat | ||
36 | mfc0 k1, CP0_CAUSE | ||
37 | andi k1, k1, 0x7c | ||
38 | #ifdef CONFIG_64BIT | ||
39 | dsll k1, k1, 1 | ||
40 | #endif | ||
41 | PTR_L k0, exception_handlers(k1) | ||
42 | jr k0 | ||
43 | .set pop | ||
44 | END(except_vec3_generic) | ||
45 | |||
46 | /* | ||
47 | * General exception handler for CPUs with virtual coherency exception. | ||
48 | * | ||
49 | * Be careful when changing this, it has to be at most 256 (as a special | ||
50 | * exception) bytes to fit into space reserved for the exception handler. | ||
51 | */ | ||
52 | NESTED(except_vec3_r4000, 0, sp) | ||
53 | .set push | ||
54 | .set arch=r4000 | ||
55 | .set noat | ||
56 | mfc0 k1, CP0_CAUSE | ||
57 | li k0, 31<<2 | ||
58 | andi k1, k1, 0x7c | ||
59 | .set push | ||
60 | .set noreorder | ||
61 | .set nomacro | ||
62 | beq k1, k0, handle_vced | ||
63 | li k0, 14<<2 | ||
64 | beq k1, k0, handle_vcei | ||
65 | #ifdef CONFIG_64BIT | ||
66 | dsll k1, k1, 1 | ||
67 | #endif | ||
68 | .set pop | ||
69 | PTR_L k0, exception_handlers(k1) | ||
70 | jr k0 | ||
71 | |||
72 | /* | ||
73 | * Big shit, we now may have two dirty primary cache lines for the same | ||
74 | * physical address. We can safely invalidate the line pointed to by | ||
75 | * c0_badvaddr because after return from this exception handler the | ||
76 | * load / store will be re-executed. | ||
77 | */ | ||
78 | handle_vced: | ||
79 | MFC0 k0, CP0_BADVADDR | ||
80 | li k1, -4 # Is this ... | ||
81 | and k0, k1 # ... really needed? | ||
82 | mtc0 zero, CP0_TAGLO | ||
83 | cache Index_Store_Tag_D, (k0) | ||
84 | cache Hit_Writeback_Inv_SD, (k0) | ||
85 | #ifdef CONFIG_PROC_FS | ||
86 | PTR_LA k0, vced_count | ||
87 | lw k1, (k0) | ||
88 | addiu k1, 1 | ||
89 | sw k1, (k0) | ||
90 | #endif | ||
91 | eret | ||
92 | |||
93 | handle_vcei: | ||
94 | MFC0 k0, CP0_BADVADDR | ||
95 | cache Hit_Writeback_Inv_SD, (k0) # also cleans pi | ||
96 | #ifdef CONFIG_PROC_FS | ||
97 | PTR_LA k0, vcei_count | ||
98 | lw k1, (k0) | ||
99 | addiu k1, 1 | ||
100 | sw k1, (k0) | ||
101 | #endif | ||
102 | eret | ||
103 | .set pop | ||
104 | END(except_vec3_r4000) | ||
105 | |||
106 | __FINIT | ||
107 | |||
108 | .align 5 /* 32 byte rollback region */ | ||
109 | LEAF(__r4k_wait) | ||
110 | .set push | ||
111 | .set noreorder | ||
112 | /* start of rollback region */ | ||
113 | LONG_L t0, TI_FLAGS($28) | ||
114 | nop | ||
115 | andi t0, _TIF_NEED_RESCHED | ||
116 | bnez t0, 1f | ||
117 | nop | ||
118 | nop | ||
119 | nop | ||
120 | #ifdef CONFIG_CPU_MICROMIPS | ||
121 | nop | ||
122 | nop | ||
123 | nop | ||
124 | nop | ||
125 | #endif | ||
126 | .set MIPS_ISA_ARCH_LEVEL_RAW | ||
127 | wait | ||
128 | /* end of rollback region (the region size must be power of two) */ | ||
129 | 1: | ||
130 | jr ra | ||
131 | nop | ||
132 | .set pop | ||
133 | END(__r4k_wait) | ||
134 | |||
135 | .macro BUILD_ROLLBACK_PROLOGUE handler | ||
136 | FEXPORT(rollback_\handler) | ||
137 | .set push | ||
138 | .set noat | ||
139 | MFC0 k0, CP0_EPC | ||
140 | PTR_LA k1, __r4k_wait | ||
141 | ori k0, 0x1f /* 32 byte rollback region */ | ||
142 | xori k0, 0x1f | ||
143 | bne k0, k1, \handler | ||
144 | MTC0 k0, CP0_EPC | ||
145 | .set pop | ||
146 | .endm | ||
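The ori/xori pair in this prologue rounds EPC down to the 32-byte rollback boundary without needing a register to hold the mask: setting the low five bits and then flipping them off is the same as ANDing with ~0x1f. The identity is easy to check in isolation:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* (x | 0x1f) ^ 0x1f clears the low five bits, like x & ~0x1f. */
            for (uint32_t x = 0; x < 0x1000; x++)
                    assert(((x | 0x1f) ^ 0x1f) == (x & ~0x1fu));
            return 0;
    }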
147 | |||
148 | .align 5 | ||
149 | BUILD_ROLLBACK_PROLOGUE handle_int | ||
150 | NESTED(handle_int, PT_SIZE, sp) | ||
151 | .cfi_signal_frame | ||
152 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
153 | /* | ||
154 | * Check to see if the interrupted code has just disabled | ||
155 | * interrupts and ignore this interrupt for now if so. | ||
156 | * | ||
157 | * local_irq_disable() disables interrupts and then calls | ||
158 | * trace_hardirqs_off() to track the state. If an interrupt is taken | ||
159 | * after interrupts are disabled but before the state is updated | ||
160 | * it will appear to restore_all that it is incorrectly returning with | ||
161 | * interrupts disabled | ||
162 | */ | ||
163 | .set push | ||
164 | .set noat | ||
165 | mfc0 k0, CP0_STATUS | ||
166 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
167 | and k0, ST0_IEP | ||
168 | bnez k0, 1f | ||
169 | |||
170 | mfc0 k0, CP0_EPC | ||
171 | .set noreorder | ||
172 | j k0 | ||
173 | rfe | ||
174 | #else | ||
175 | and k0, ST0_IE | ||
176 | bnez k0, 1f | ||
177 | |||
178 | eret | ||
179 | #endif | ||
180 | 1: | ||
181 | .set pop | ||
182 | #endif | ||
183 | SAVE_ALL docfi=1 | ||
184 | CLI | ||
185 | TRACE_IRQS_OFF | ||
186 | |||
187 | LONG_L s0, TI_REGS($28) | ||
188 | LONG_S sp, TI_REGS($28) | ||
189 | |||
190 | /* | ||
191 | * SAVE_ALL ensures we are using a valid kernel stack for the thread. | ||
192 | * Check if we are already using the IRQ stack. | ||
193 | */ | ||
194 | move s1, sp # Preserve the sp | ||
195 | |||
196 | /* Get IRQ stack for this CPU */ | ||
197 | ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG | ||
198 | #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) | ||
199 | lui k1, %hi(irq_stack) | ||
200 | #else | ||
201 | lui k1, %highest(irq_stack) | ||
202 | daddiu k1, %higher(irq_stack) | ||
203 | dsll k1, 16 | ||
204 | daddiu k1, %hi(irq_stack) | ||
205 | dsll k1, 16 | ||
206 | #endif | ||
207 | LONG_SRL k0, SMP_CPUID_PTRSHIFT | ||
208 | LONG_ADDU k1, k0 | ||
209 | LONG_L t0, %lo(irq_stack)(k1) | ||
210 | |||
211 | # Check if already on IRQ stack | ||
212 | PTR_LI t1, ~(_THREAD_SIZE-1) | ||
213 | and t1, t1, sp | ||
214 | beq t0, t1, 2f | ||
215 | |||
216 | /* Switch to IRQ stack */ | ||
217 | li t1, _IRQ_STACK_START | ||
218 | PTR_ADD sp, t0, t1 | ||
219 | |||
220 | /* Save task's sp on IRQ stack so that unwinding can follow it */ | ||
221 | LONG_S s1, 0(sp) | ||
222 | 2: | ||
223 | jal plat_irq_dispatch | ||
224 | |||
225 | /* Restore sp */ | ||
226 | move sp, s1 | ||
227 | |||
228 | j ret_from_irq | ||
229 | #ifdef CONFIG_CPU_MICROMIPS | ||
230 | nop | ||
231 | #endif | ||
232 | END(handle_int) | ||
233 | |||
234 | __INIT | ||
235 | |||
236 | /* | ||
237 | * Special interrupt vector for MIPS64 ISA & embedded MIPS processors. | ||
238 | * This is a dedicated interrupt exception vector which reduces the | ||
239 | * interrupt processing overhead. The jump instruction will be replaced | ||
240 | * at the initialization time. | ||
241 | * | ||
242 | * Be careful when changing this, it has to be at most 128 bytes | ||
243 | * to fit into space reserved for the exception handler. | ||
244 | */ | ||
245 | NESTED(except_vec4, 0, sp) | ||
246 | 1: j 1b /* Dummy, will be replaced */ | ||
247 | END(except_vec4) | ||
248 | |||
249 | /* | ||
250 | * EJTAG debug exception handler. | ||
251 | * The EJTAG debug exception entry point is 0xbfc00480, which | ||
252 | * normally is in the boot PROM, so the boot PROM must do an | ||
253 | * unconditional jump to this vector. | ||
254 | */ | ||
255 | NESTED(except_vec_ejtag_debug, 0, sp) | ||
256 | j ejtag_debug_handler | ||
257 | #ifdef CONFIG_CPU_MICROMIPS | ||
258 | nop | ||
259 | #endif | ||
260 | END(except_vec_ejtag_debug) | ||
261 | |||
262 | __FINIT | ||
263 | |||
264 | /* | ||
265 | * Vectored interrupt handler. | ||
266 | * This prototype is copied to ebase + n*IntCtl.VS and patched | ||
267 | * to invoke the handler | ||
268 | */ | ||
269 | BUILD_ROLLBACK_PROLOGUE except_vec_vi | ||
270 | NESTED(except_vec_vi, 0, sp) | ||
271 | SAVE_SOME docfi=1 | ||
272 | SAVE_AT docfi=1 | ||
273 | .set push | ||
274 | .set noreorder | ||
275 | PTR_LA v1, except_vec_vi_handler | ||
276 | FEXPORT(except_vec_vi_lui) | ||
277 | lui v0, 0 /* Patched */ | ||
278 | jr v1 | ||
279 | FEXPORT(except_vec_vi_ori) | ||
280 | ori v0, 0 /* Patched */ | ||
281 | .set pop | ||
282 | END(except_vec_vi) | ||
283 | EXPORT(except_vec_vi_end) | ||
284 | |||
285 | /* | ||
286 | * Common Vectored Interrupt code | ||
287 | * Complete the register saves and invoke the handler which is passed in $v0 | ||
288 | */ | ||
289 | NESTED(except_vec_vi_handler, 0, sp) | ||
290 | SAVE_TEMP | ||
291 | SAVE_STATIC | ||
292 | CLI | ||
293 | #ifdef CONFIG_TRACE_IRQFLAGS | ||
294 | move s0, v0 | ||
295 | TRACE_IRQS_OFF | ||
296 | move v0, s0 | ||
297 | #endif | ||
298 | |||
299 | LONG_L s0, TI_REGS($28) | ||
300 | LONG_S sp, TI_REGS($28) | ||
301 | |||
302 | /* | ||
303 | * The saves above ensure we are using a valid kernel stack for the thread. | ||
304 | * Check if we are already using the IRQ stack. | ||
305 | */ | ||
306 | move s1, sp # Preserve the sp | ||
307 | |||
308 | /* Get IRQ stack for this CPU */ | ||
309 | ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG | ||
310 | #if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32) | ||
311 | lui k1, %hi(irq_stack) | ||
312 | #else | ||
313 | lui k1, %highest(irq_stack) | ||
314 | daddiu k1, %higher(irq_stack) | ||
315 | dsll k1, 16 | ||
316 | daddiu k1, %hi(irq_stack) | ||
317 | dsll k1, 16 | ||
318 | #endif | ||
319 | LONG_SRL k0, SMP_CPUID_PTRSHIFT | ||
320 | LONG_ADDU k1, k0 | ||
321 | LONG_L t0, %lo(irq_stack)(k1) | ||
322 | |||
323 | # Check if already on IRQ stack | ||
324 | PTR_LI t1, ~(_THREAD_SIZE-1) | ||
325 | and t1, t1, sp | ||
326 | beq t0, t1, 2f | ||
327 | |||
328 | /* Switch to IRQ stack */ | ||
329 | li t1, _IRQ_STACK_START | ||
330 | PTR_ADD sp, t0, t1 | ||
331 | |||
332 | /* Save task's sp on IRQ stack so that unwinding can follow it */ | ||
333 | LONG_S s1, 0(sp) | ||
334 | 2: | ||
335 | jalr v0 | ||
336 | |||
337 | /* Restore sp */ | ||
338 | move sp, s1 | ||
339 | |||
340 | j ret_from_irq | ||
341 | END(except_vec_vi_handler) | ||
342 | |||
343 | /* | ||
344 | * EJTAG debug exception handler. | ||
345 | */ | ||
346 | NESTED(ejtag_debug_handler, PT_SIZE, sp) | ||
347 | .set push | ||
348 | .set noat | ||
349 | MTC0 k0, CP0_DESAVE | ||
350 | mfc0 k0, CP0_DEBUG | ||
351 | |||
352 | sll k0, k0, 30 # Check for SDBBP. | ||
353 | bgez k0, ejtag_return | ||
354 | |||
355 | #ifdef CONFIG_SMP | ||
356 | 1: PTR_LA k0, ejtag_debug_buffer_spinlock | ||
357 | __SYNC(full, loongson3_war) | ||
358 | 2: ll k0, 0(k0) | ||
359 | bnez k0, 2b | ||
360 | PTR_LA k0, ejtag_debug_buffer_spinlock | ||
361 | sc k0, 0(k0) | ||
362 | beqz k0, 1b | ||
363 | # ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC | ||
364 | sync | ||
365 | # endif | ||
366 | |||
367 | PTR_LA k0, ejtag_debug_buffer | ||
368 | LONG_S k1, 0(k0) | ||
369 | |||
370 | ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG | ||
371 | PTR_SRL k1, SMP_CPUID_PTRSHIFT | ||
372 | PTR_SLL k1, LONGLOG | ||
373 | PTR_LA k0, ejtag_debug_buffer_per_cpu | ||
374 | PTR_ADDU k0, k1 | ||
375 | |||
376 | PTR_LA k1, ejtag_debug_buffer | ||
377 | LONG_L k1, 0(k1) | ||
378 | LONG_S k1, 0(k0) | ||
379 | |||
380 | PTR_LA k0, ejtag_debug_buffer_spinlock | ||
381 | sw zero, 0(k0) | ||
382 | #else | ||
383 | PTR_LA k0, ejtag_debug_buffer | ||
384 | LONG_S k1, 0(k0) | ||
385 | #endif | ||
386 | |||
387 | SAVE_ALL | ||
388 | move a0, sp | ||
389 | jal ejtag_exception_handler | ||
390 | RESTORE_ALL | ||
391 | |||
392 | #ifdef CONFIG_SMP | ||
393 | ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG | ||
394 | PTR_SRL k1, SMP_CPUID_PTRSHIFT | ||
395 | PTR_SLL k1, LONGLOG | ||
396 | PTR_LA k0, ejtag_debug_buffer_per_cpu | ||
397 | PTR_ADDU k0, k1 | ||
398 | LONG_L k1, 0(k0) | ||
399 | #else | ||
400 | PTR_LA k0, ejtag_debug_buffer | ||
401 | LONG_L k1, 0(k0) | ||
402 | #endif | ||
403 | |||
404 | ejtag_return: | ||
405 | back_to_back_c0_hazard | ||
406 | MFC0 k0, CP0_DESAVE | ||
407 | .set mips32 | ||
408 | deret | ||
409 | .set pop | ||
410 | END(ejtag_debug_handler) | ||
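The ll/sc loop near the top of the SMP path is a tiny hand-rolled spinlock: only k0/k1 are usable this early in the exception, so the generic locking code is out of reach. In very rough, illustrative C (the assembly happens to store the lock's own address as the "held" value; any nonzero value would do):

    /* Illustrative sketch of the ejtag_debug_buffer_spinlock protocol. */
    do {
            while (READ_ONCE(ejtag_debug_buffer_spinlock))
                    ;                                  /* spin while held */
    } while (cmpxchg(&ejtag_debug_buffer_spinlock, 0, 1) != 0);
    /* ... stash k1 in this CPU's ejtag_debug_buffer_per_cpu slot ... */
    WRITE_ONCE(ejtag_debug_buffer_spinlock, 0);        /* the "sw zero" unlock */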
411 | |||
412 | /* | ||
413 | * This buffer is reserved for the use of the EJTAG debug | ||
414 | * handler. | ||
415 | */ | ||
416 | .data | ||
417 | EXPORT(ejtag_debug_buffer) | ||
418 | .fill LONGSIZE | ||
419 | #ifdef CONFIG_SMP | ||
420 | EXPORT(ejtag_debug_buffer_spinlock) | ||
421 | .fill LONGSIZE | ||
422 | EXPORT(ejtag_debug_buffer_per_cpu) | ||
423 | .fill LONGSIZE * NR_CPUS | ||
424 | #endif | ||
425 | .previous | ||
426 | |||
427 | __INIT | ||
428 | |||
429 | /* | ||
430 | * NMI debug exception handler for MIPS reference boards. | ||
431 | * The NMI debug exception entry point is 0xbfc00000, which | ||
432 | * normally is in the boot PROM, so the boot PROM must do an | ||
433 | * unconditional jump to this vector. | ||
434 | */ | ||
435 | NESTED(except_vec_nmi, 0, sp) | ||
436 | j nmi_handler | ||
437 | #ifdef CONFIG_CPU_MICROMIPS | ||
438 | nop | ||
439 | #endif | ||
440 | END(except_vec_nmi) | ||
441 | |||
442 | __FINIT | ||
443 | |||
444 | NESTED(nmi_handler, PT_SIZE, sp) | ||
445 | .cfi_signal_frame | ||
446 | .set push | ||
447 | .set noat | ||
448 | /* | ||
449 | * Clear ERL - restore segment mapping | ||
450 | * Clear BEV - required for page fault exception handler to work | ||
451 | */ | ||
452 | mfc0 k0, CP0_STATUS | ||
453 | ori k0, k0, ST0_EXL | ||
454 | li k1, ~(ST0_BEV | ST0_ERL) | ||
455 | and k0, k0, k1 | ||
456 | mtc0 k0, CP0_STATUS | ||
457 | _ehb | ||
458 | SAVE_ALL | ||
459 | move a0, sp | ||
460 | jal nmi_exception_handler | ||
461 | /* nmi_exception_handler never returns */ | ||
462 | .set pop | ||
463 | END(nmi_handler) | ||
464 | |||
465 | .macro __build_clear_none | ||
466 | .endm | ||
467 | |||
468 | .macro __build_clear_sti | ||
469 | TRACE_IRQS_ON | ||
470 | STI | ||
471 | .endm | ||
472 | |||
473 | .macro __build_clear_cli | ||
474 | CLI | ||
475 | TRACE_IRQS_OFF | ||
476 | .endm | ||
477 | |||
478 | .macro __build_clear_fpe | ||
479 | CLI | ||
480 | TRACE_IRQS_OFF | ||
481 | .set push | ||
482 | /* gas fails to assemble cfc1 for some archs (octeon). */ | ||
483 | .set mips1 | ||
484 | SET_HARDFLOAT | ||
485 | cfc1 a1, fcr31 | ||
486 | .set pop | ||
487 | .endm | ||
488 | |||
489 | .macro __build_clear_msa_fpe | ||
490 | CLI | ||
491 | TRACE_IRQS_OFF | ||
492 | _cfcmsa a1, MSA_CSR | ||
493 | .endm | ||
494 | |||
495 | .macro __build_clear_ade | ||
496 | MFC0 t0, CP0_BADVADDR | ||
497 | PTR_S t0, PT_BVADDR(sp) | ||
498 | KMODE | ||
499 | .endm | ||
500 | |||
501 | .macro __build_clear_gsexc | ||
502 | .set push | ||
503 | /* | ||
504 | * We need to specify a selector to access the CP0.Diag1 (GSCause) | ||
505 | * register. All GSExc-equipped processors have MIPS32. | ||
506 | */ | ||
507 | .set mips32 | ||
508 | mfc0 a1, CP0_DIAGNOSTIC1 | ||
509 | .set pop | ||
510 | TRACE_IRQS_ON | ||
511 | STI | ||
512 | .endm | ||
513 | |||
514 | .macro __BUILD_silent exception | ||
515 | .endm | ||
516 | |||
517 | /* Gas tries to parse the ASM_PRINT argument as a string containing | ||
518 | string escapes and emits bogus warnings if it believes it | ||
519 | recognizes an unknown escape code. So make the arguments | ||
520 | start with an n and gas will believe \n is ok ... */ | ||
521 | .macro __BUILD_verbose nexception | ||
522 | LONG_L a1, PT_EPC(sp) | ||
523 | #ifdef CONFIG_32BIT | ||
524 | ASM_PRINT("Got \nexception at %08lx\012") | ||
525 | #endif | ||
526 | #ifdef CONFIG_64BIT | ||
527 | ASM_PRINT("Got \nexception at %016lx\012") | ||
528 | #endif | ||
529 | .endm | ||
530 | |||
531 | .macro __BUILD_count exception | ||
532 | LONG_L t0,exception_count_\exception | ||
533 | LONG_ADDIU t0, 1 | ||
534 | LONG_S t0,exception_count_\exception | ||
535 | .comm exception_count_\exception, 8, 8 | ||
536 | .endm | ||
537 | |||
538 | .macro __BUILD_HANDLER exception handler clear verbose ext | ||
539 | .align 5 | ||
540 | NESTED(handle_\exception, PT_SIZE, sp) | ||
541 | .cfi_signal_frame | ||
542 | .set noat | ||
543 | SAVE_ALL | ||
544 | FEXPORT(handle_\exception\ext) | ||
545 | __build_clear_\clear | ||
546 | .set at | ||
547 | __BUILD_\verbose \exception | ||
548 | move a0, sp | ||
549 | jal do_\handler | ||
550 | j ret_from_exception | ||
551 | END(handle_\exception) | ||
552 | .endm | ||
553 | |||
554 | .macro BUILD_HANDLER exception handler clear verbose | ||
555 | __BUILD_HANDLER \exception \handler \clear \verbose _int | ||
556 | .endm | ||
557 | |||
558 | BUILD_HANDLER adel ade ade silent /* #4 */ | ||
559 | BUILD_HANDLER ades ade ade silent /* #5 */ | ||
560 | BUILD_HANDLER ibe be cli silent /* #6 */ | ||
561 | BUILD_HANDLER dbe be cli silent /* #7 */ | ||
562 | BUILD_HANDLER bp bp sti silent /* #9 */ | ||
563 | BUILD_HANDLER ri ri sti silent /* #10 */ | ||
564 | BUILD_HANDLER cpu cpu sti silent /* #11 */ | ||
565 | BUILD_HANDLER ov ov sti silent /* #12 */ | ||
566 | BUILD_HANDLER tr tr sti silent /* #13 */ | ||
567 | BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */ | ||
568 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
569 | BUILD_HANDLER fpe fpe fpe silent /* #15 */ | ||
570 | #endif | ||
571 | BUILD_HANDLER ftlb ftlb none silent /* #16 */ | ||
572 | BUILD_HANDLER gsexc gsexc gsexc silent /* #16 */ | ||
573 | BUILD_HANDLER msa msa sti silent /* #21 */ | ||
574 | BUILD_HANDLER mdmx mdmx sti silent /* #22 */ | ||
575 | #ifdef CONFIG_HARDWARE_WATCHPOINTS | ||
576 | /* | ||
577 | * For watch, interrupts will be enabled after the watch | ||
578 | * registers are read. | ||
579 | */ | ||
580 | BUILD_HANDLER watch watch cli silent /* #23 */ | ||
581 | #else | ||
582 | BUILD_HANDLER watch watch sti verbose /* #23 */ | ||
583 | #endif | ||
584 | BUILD_HANDLER mcheck mcheck cli verbose /* #24 */ | ||
585 | BUILD_HANDLER mt mt sti silent /* #25 */ | ||
586 | BUILD_HANDLER dsp dsp sti silent /* #26 */ | ||
587 | BUILD_HANDLER reserved reserved sti verbose /* others */ | ||
588 | |||
589 | .align 5 | ||
590 | LEAF(handle_ri_rdhwr_tlbp) | ||
591 | .set push | ||
592 | .set noat | ||
593 | .set noreorder | ||
594 | /* check if TLB contains an entry for EPC */ | ||
595 | MFC0 k1, CP0_ENTRYHI | ||
596 | andi k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX | ||
597 | MFC0 k0, CP0_EPC | ||
598 | PTR_SRL k0, _PAGE_SHIFT + 1 | ||
599 | PTR_SLL k0, _PAGE_SHIFT + 1 | ||
600 | or k1, k0 | ||
601 | MTC0 k1, CP0_ENTRYHI | ||
602 | mtc0_tlbw_hazard | ||
603 | tlbp | ||
604 | tlb_probe_hazard | ||
605 | mfc0 k1, CP0_INDEX | ||
606 | .set pop | ||
607 | bltz k1, handle_ri /* slow path */ | ||
608 | /* fall thru */ | ||
609 | END(handle_ri_rdhwr_tlbp) | ||
610 | |||
611 | LEAF(handle_ri_rdhwr) | ||
612 | .set push | ||
613 | .set noat | ||
614 | .set noreorder | ||
615 | /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */ | ||
616 | /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */ | ||
617 | MFC0 k1, CP0_EPC | ||
618 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2) | ||
619 | and k0, k1, 1 | ||
620 | beqz k0, 1f | ||
621 | xor k1, k0 | ||
622 | lhu k0, (k1) | ||
623 | lhu k1, 2(k1) | ||
624 | ins k1, k0, 16, 16 | ||
625 | lui k0, 0x007d | ||
626 | b docheck | ||
627 | ori k0, 0x6b3c | ||
628 | 1: | ||
629 | lui k0, 0x7c03 | ||
630 | lw k1, (k1) | ||
631 | ori k0, 0xe83b | ||
632 | #else | ||
633 | andi k0, k1, 1 | ||
634 | bnez k0, handle_ri | ||
635 | lui k0, 0x7c03 | ||
636 | lw k1, (k1) | ||
637 | ori k0, 0xe83b | ||
638 | #endif | ||
639 | .set reorder | ||
640 | docheck: | ||
641 | bne k0, k1, handle_ri /* if not ours */ | ||
642 | |||
643 | isrdhwr: | ||
644 | /* The insn is rdhwr. No need to check CAUSE.BD here. */ | ||
645 | get_saved_sp /* k1 := current_thread_info */ | ||
646 | .set noreorder | ||
647 | MFC0 k0, CP0_EPC | ||
648 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
649 | ori k1, _THREAD_MASK | ||
650 | xori k1, _THREAD_MASK | ||
651 | LONG_L v1, TI_TP_VALUE(k1) | ||
652 | LONG_ADDIU k0, 4 | ||
653 | jr k0 | ||
654 | rfe | ||
655 | #else | ||
656 | #ifndef CONFIG_CPU_DADDI_WORKAROUNDS | ||
657 | LONG_ADDIU k0, 4 /* stall on $k0 */ | ||
658 | #else | ||
659 | .set at=v1 | ||
660 | LONG_ADDIU k0, 4 | ||
661 | .set noat | ||
662 | #endif | ||
663 | MTC0 k0, CP0_EPC | ||
664 | /* I hope three instructions between MTC0 and ERET are enough... */ | ||
665 | ori k1, _THREAD_MASK | ||
666 | xori k1, _THREAD_MASK | ||
667 | LONG_L v1, TI_TP_VALUE(k1) | ||
668 | .set push | ||
669 | .set arch=r4000 | ||
670 | eret | ||
671 | .set pop | ||
672 | #endif | ||
673 | .set pop | ||
674 | END(handle_ri_rdhwr) | ||
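The magic numbers in the comments above decode mechanically: rdhwr is a SPECIAL3-major instruction (opcode 0x1f) with function code 0x3b, here with rt = v1 ($3) and rd = $29, the hardware register carrying the thread pointer that TI_TP_VALUE backs. A standalone check that this yields 0x7c03e83b:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* SPECIAL3 | rs=0 | rt=v1($3) | rd=$29 | sa=0 | funct=RDHWR */
            uint32_t insn = (0x1fu << 26) | (3u << 16) | (29u << 11) | 0x3b;

            assert(insn == 0x7c03e83bu);
            return 0;
    }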
675 | |||
676 | #ifdef CONFIG_CPU_R4X00_BUGS64 | ||
677 | /* A temporary overflow handler used by check_daddi(). */ | ||
678 | |||
679 | __INIT | ||
680 | |||
681 | BUILD_HANDLER daddi_ov daddi_ov none silent /* #12 */ | ||
682 | #endif | ||
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c new file mode 100644 index 000000000..8c083612d --- /dev/null +++ b/arch/mips/kernel/gpio_txx9.c | |||
@@ -0,0 +1,86 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * A gpio chip driver for TXx9 SoCs | ||
4 | * | ||
5 | * Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp> | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/spinlock.h> | ||
10 | #include <linux/gpio/driver.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/io.h> | ||
13 | #include <asm/txx9pio.h> | ||
14 | |||
15 | static DEFINE_SPINLOCK(txx9_gpio_lock); | ||
16 | |||
17 | static struct txx9_pio_reg __iomem *txx9_pioptr; | ||
18 | |||
19 | static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset) | ||
20 | { | ||
21 | return !!(__raw_readl(&txx9_pioptr->din) & (1 << offset)); | ||
22 | } | ||
23 | |||
24 | static void txx9_gpio_set_raw(unsigned int offset, int value) | ||
25 | { | ||
26 | u32 val; | ||
27 | val = __raw_readl(&txx9_pioptr->dout); | ||
28 | if (value) | ||
29 | val |= 1 << offset; | ||
30 | else | ||
31 | val &= ~(1 << offset); | ||
32 | __raw_writel(val, &txx9_pioptr->dout); | ||
33 | } | ||
34 | |||
35 | static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset, | ||
36 | int value) | ||
37 | { | ||
38 | unsigned long flags; | ||
39 | spin_lock_irqsave(&txx9_gpio_lock, flags); | ||
40 | txx9_gpio_set_raw(offset, value); | ||
41 | mmiowb(); | ||
42 | spin_unlock_irqrestore(&txx9_gpio_lock, flags); | ||
43 | } | ||
44 | |||
45 | static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset) | ||
46 | { | ||
47 | unsigned long flags; | ||
48 | spin_lock_irqsave(&txx9_gpio_lock, flags); | ||
49 | __raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset), | ||
50 | &txx9_pioptr->dir); | ||
51 | mmiowb(); | ||
52 | spin_unlock_irqrestore(&txx9_gpio_lock, flags); | ||
53 | return 0; | ||
54 | } | ||
55 | |||
56 | static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset, | ||
57 | int value) | ||
58 | { | ||
59 | unsigned long flags; | ||
60 | spin_lock_irqsave(&txx9_gpio_lock, flags); | ||
61 | txx9_gpio_set_raw(offset, value); | ||
62 | __raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset), | ||
63 | &txx9_pioptr->dir); | ||
64 | mmiowb(); | ||
65 | spin_unlock_irqrestore(&txx9_gpio_lock, flags); | ||
66 | return 0; | ||
67 | } | ||
68 | |||
69 | static struct gpio_chip txx9_gpio_chip = { | ||
70 | .get = txx9_gpio_get, | ||
71 | .set = txx9_gpio_set, | ||
72 | .direction_input = txx9_gpio_dir_in, | ||
73 | .direction_output = txx9_gpio_dir_out, | ||
74 | .label = "TXx9", | ||
75 | }; | ||
76 | |||
77 | int __init txx9_gpio_init(unsigned long baseaddr, | ||
78 | unsigned int base, unsigned int num) | ||
79 | { | ||
80 | txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg)); | ||
81 | if (!txx9_pioptr) | ||
82 | return -ENODEV; | ||
83 | txx9_gpio_chip.base = base; | ||
84 | txx9_gpio_chip.ngpio = num; | ||
85 | return gpiochip_add_data(&txx9_gpio_chip, NULL); | ||
86 | } | ||
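A sketch of how a board file might register these pins; the physical address, GPIO base and pin count below are invented placeholders rather than values from any real TXx9 board code:

    /* Hypothetical board setup; address and counts are placeholders. */
    static int __init myboard_pio_init(void)
    {
            /* Map the PIO block and expose 16 pins as GPIOs 0..15. */
            return txx9_gpio_init(0x1ff8f500UL, 0, 16);
    }
    arch_initcall(myboard_pio_init);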
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S new file mode 100644 index 000000000..61b73580b --- /dev/null +++ b/arch/mips/kernel/head.S | |||
@@ -0,0 +1,185 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994, 1995 Waldorf Electronics | ||
7 | * Written by Ralf Baechle and Andreas Busse | ||
8 | * Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle | ||
9 | * Copyright (C) 1996 Paul M. Antoine | ||
10 | * Modified for DECStation and hence R3000 support by Paul M. Antoine | ||
11 | * Further modifications by David S. Miller and Harald Koerfgen | ||
12 | * Copyright (C) 1999 Silicon Graphics, Inc. | ||
13 | * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | ||
14 | * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved. | ||
15 | */ | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/threads.h> | ||
18 | |||
19 | #include <asm/addrspace.h> | ||
20 | #include <asm/asm.h> | ||
21 | #include <asm/asmmacro.h> | ||
22 | #include <asm/irqflags.h> | ||
23 | #include <asm/regdef.h> | ||
24 | #include <asm/mipsregs.h> | ||
25 | #include <asm/stackframe.h> | ||
26 | |||
27 | #include <kernel-entry-init.h> | ||
28 | |||
29 | /* | ||
30 | * For the moment disable interrupts, mark the kernel mode and | ||
31 | * set ST0_KX so that the CPU does not spit fire when using | ||
32 | * 64-bit addresses. A full initialization of the CPU's status | ||
33 | * register is done later in per_cpu_trap_init(). | ||
34 | */ | ||
35 | .macro setup_c0_status set clr | ||
36 | .set push | ||
37 | mfc0 t0, CP0_STATUS | ||
38 | or t0, ST0_KERNEL_CUMASK|\set|0x1f|\clr | ||
39 | xor t0, 0x1f|\clr | ||
40 | mtc0 t0, CP0_STATUS | ||
41 | .set noreorder | ||
42 | sll zero,3 # ehb | ||
43 | .set pop | ||
44 | .endm | ||
45 | |||
46 | .macro setup_c0_status_pri | ||
47 | #ifdef CONFIG_64BIT | ||
48 | setup_c0_status ST0_KX 0 | ||
49 | #else | ||
50 | setup_c0_status 0 0 | ||
51 | #endif | ||
52 | .endm | ||
53 | |||
54 | .macro setup_c0_status_sec | ||
55 | #ifdef CONFIG_64BIT | ||
56 | setup_c0_status ST0_KX ST0_BEV | ||
57 | #else | ||
58 | setup_c0_status 0 ST0_BEV | ||
59 | #endif | ||
60 | .endm | ||
61 | |||
62 | #ifndef CONFIG_NO_EXCEPT_FILL | ||
63 | /* | ||
64 | * Reserved space for exception handlers. | ||
65 | * Necessary for machines which link their kernels at KSEG0. | ||
66 | */ | ||
67 | .fill 0x400 | ||
68 | #endif | ||
69 | |||
70 | EXPORT(_stext) | ||
71 | |||
72 | #ifdef CONFIG_BOOT_RAW | ||
73 | /* | ||
74 | * Give us a fighting chance of running if execution begins at the | ||
75 | * kernel load address. This is needed because this platform does | ||
76 | * not have an ELF loader yet. | ||
77 | */ | ||
78 | FEXPORT(__kernel_entry) | ||
79 | j kernel_entry | ||
80 | #endif /* CONFIG_BOOT_RAW */ | ||
81 | |||
82 | __REF | ||
83 | |||
84 | NESTED(kernel_entry, 16, sp) # kernel entry point | ||
85 | |||
86 | kernel_entry_setup # cpu specific setup | ||
87 | |||
88 | setup_c0_status_pri | ||
89 | |||
90 | /* We might not get launched at the address the kernel is linked to, | ||
91 | so we jump there. */ | ||
92 | PTR_LA t0, 0f | ||
93 | jr t0 | ||
94 | 0: | ||
95 | |||
96 | #ifdef CONFIG_USE_OF | ||
97 | #if defined(CONFIG_MIPS_RAW_APPENDED_DTB) || \ | ||
98 | defined(CONFIG_MIPS_ELF_APPENDED_DTB) | ||
99 | |||
100 | PTR_LA t2, __appended_dtb | ||
101 | |||
102 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
103 | li t1, 0xd00dfeed | ||
104 | #else /* !CONFIG_CPU_BIG_ENDIAN */ | ||
105 | li t1, 0xedfe0dd0 | ||
106 | #endif /* !CONFIG_CPU_BIG_ENDIAN */ | ||
107 | lw t0, (t2) | ||
108 | beq t0, t1, dtb_found | ||
109 | #endif /* CONFIG_MIPS_RAW_APPENDED_DTB || CONFIG_MIPS_ELF_APPENDED_DTB */ | ||
110 | li t1, -2 | ||
111 | move t2, a1 | ||
112 | beq a0, t1, dtb_found | ||
113 | |||
114 | #ifdef CONFIG_BUILTIN_DTB | ||
115 | PTR_LA t2, __dtb_start | ||
116 | PTR_LA t1, __dtb_end | ||
117 | bne t1, t2, dtb_found | ||
118 | #endif /* CONFIG_BUILTIN_DTB */ | ||
119 | |||
120 | li t2, 0 | ||
121 | dtb_found: | ||
122 | #endif /* CONFIG_USE_OF */ | ||
123 | PTR_LA t0, __bss_start # clear .bss | ||
124 | LONG_S zero, (t0) | ||
125 | PTR_LA t1, __bss_stop - LONGSIZE | ||
126 | 1: | ||
127 | PTR_ADDIU t0, LONGSIZE | ||
128 | LONG_S zero, (t0) | ||
129 | bne t0, t1, 1b | ||
130 | |||
131 | LONG_S a0, fw_arg0 # firmware arguments | ||
132 | LONG_S a1, fw_arg1 | ||
133 | LONG_S a2, fw_arg2 | ||
134 | LONG_S a3, fw_arg3 | ||
135 | |||
136 | #ifdef CONFIG_USE_OF | ||
137 | LONG_S t2, fw_passed_dtb | ||
138 | #endif | ||
139 | |||
140 | MTC0 zero, CP0_CONTEXT # clear context register | ||
141 | #ifdef CONFIG_64BIT | ||
142 | MTC0 zero, CP0_XCONTEXT | ||
143 | #endif | ||
144 | PTR_LA $28, init_thread_union | ||
145 | /* Set the SP after an empty pt_regs. */ | ||
146 | PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE | ||
147 | PTR_ADDU sp, $28 | ||
148 | back_to_back_c0_hazard | ||
149 | set_saved_sp sp, t0, t1 | ||
150 | PTR_SUBU sp, 4 * SZREG # init stack pointer | ||
151 | |||
152 | #ifdef CONFIG_RELOCATABLE | ||
153 | /* Copy kernel and apply the relocations */ | ||
154 | jal relocate_kernel | ||
155 | |||
156 | /* Repoint the sp into the new kernel image */ | ||
157 | PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE | ||
158 | PTR_ADDU sp, $28 | ||
159 | set_saved_sp sp, t0, t1 | ||
160 | PTR_SUBU sp, 4 * SZREG # init stack pointer | ||
161 | |||
162 | /* | ||
163 | * relocate_kernel returns the entry point either | ||
164 | * in the relocated kernel or the original if for | ||
165 | * some reason relocation failed - jump there now | ||
166 | * with instruction hazard barrier because of the | ||
167 | * newly sync'd icache. | ||
168 | */ | ||
169 | jr.hb v0 | ||
170 | #else /* !CONFIG_RELOCATABLE */ | ||
171 | j start_kernel | ||
172 | #endif /* !CONFIG_RELOCATABLE */ | ||
173 | END(kernel_entry) | ||
174 | |||
175 | #ifdef CONFIG_SMP | ||
176 | /* | ||
177 | * SMP slave cpus entry point. Board specific code for bootstrap calls this | ||
178 | * function after setting up the stack and gp registers. | ||
179 | */ | ||
180 | NESTED(smp_bootstrap, 16, sp) | ||
181 | smp_slave_setup | ||
182 | setup_c0_status_sec | ||
183 | j start_secondary | ||
184 | END(smp_bootstrap) | ||
185 | #endif /* CONFIG_SMP */ | ||
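head.S recognizes an appended device tree by the magic word in its header: a flattened device tree always stores 0xd00dfeed big-endian, so a little-endian CPU loading the same four bytes as a native word sees 0xedfe0dd0, which is why the two li values above differ by byte order. A hedged user-space equivalent of the check, reading the word byte-by-byte so it works on either host:

    #include <stdint.h>
    #include <stdio.h>

    #define FDT_MAGIC 0xd00dfeedu   /* stored big-endian in the blob */

    /* Return 1 if the buffer starts with a flattened device tree. */
    static int is_fdt(const void *blob)
    {
        const uint8_t *p = blob;
        uint32_t word;

        /* Assemble the first word big-endian, matching the blob layout. */
        word = (uint32_t)p[0] << 24 | (uint32_t)p[1] << 16 |
               (uint32_t)p[2] << 8 | p[3];
        return word == FDT_MAGIC;
    }

    int main(void)
    {
        static const uint8_t hdr[4] = { 0xd0, 0x0d, 0xfe, 0xed };

        printf("fdt? %d\n", is_fdt(hdr));   /* prints "fdt? 1" */
        return 0;
    }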
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c new file mode 100644 index 000000000..ca21210e0 --- /dev/null +++ b/arch/mips/kernel/i8253.c | |||
@@ -0,0 +1,38 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * i8253.c 8253/PIT functions | ||
4 | * | ||
5 | */ | ||
6 | #include <linux/clockchips.h> | ||
7 | #include <linux/i8253.h> | ||
8 | #include <linux/export.h> | ||
9 | #include <linux/smp.h> | ||
10 | #include <linux/irq.h> | ||
11 | |||
12 | #include <asm/time.h> | ||
13 | |||
14 | static irqreturn_t timer_interrupt(int irq, void *dev_id) | ||
15 | { | ||
16 | i8253_clockevent.event_handler(&i8253_clockevent); | ||
17 | |||
18 | return IRQ_HANDLED; | ||
19 | } | ||
20 | |||
21 | void __init setup_pit_timer(void) | ||
22 | { | ||
23 | unsigned long flags = IRQF_NOBALANCING | IRQF_TIMER; | ||
24 | |||
25 | clockevent_i8253_init(true); | ||
26 | if (request_irq(0, timer_interrupt, flags, "timer", NULL)) | ||
27 | pr_err("Failed to request irq 0 (timer)\n"); | ||
28 | } | ||
29 | |||
30 | static int __init init_pit_clocksource(void) | ||
31 | { | ||
32 | if (num_possible_cpus() > 1 || /* PIT does not scale! */ | ||
33 | !clockevent_state_periodic(&i8253_clockevent)) | ||
34 | return 0; | ||
35 | |||
36 | return clocksource_i8253_init(); | ||
37 | } | ||
38 | arch_initcall(init_pit_clocksource); | ||
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c new file mode 100644 index 000000000..18e69ebf5 --- /dev/null +++ b/arch/mips/kernel/idle.c | |||
@@ -0,0 +1,272 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * MIPS idle loop and WAIT instruction support. | ||
4 | * | ||
5 | * Copyright (C) xxxx the Anonymous | ||
6 | * Copyright (C) 1994 - 2006 Ralf Baechle | ||
7 | * Copyright (C) 2003, 2004 Maciej W. Rozycki | ||
8 | * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc. | ||
9 | */ | ||
10 | #include <linux/cpu.h> | ||
11 | #include <linux/export.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/irqflags.h> | ||
14 | #include <linux/printk.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <asm/cpu.h> | ||
17 | #include <asm/cpu-info.h> | ||
18 | #include <asm/cpu-type.h> | ||
19 | #include <asm/idle.h> | ||
20 | #include <asm/mipsregs.h> | ||
21 | |||
22 | /* | ||
23 | * Not all of the MIPS CPUs have the "wait" instruction available. Moreover, | ||
24 | * the implementation of the "wait" feature differs between CPU families. This | ||
25 | * points to the function that implements the CPU-specific wait. | ||
26 | * The wait instruction stops the pipeline and significantly reduces | ||
27 | * the power consumption of the CPU. | ||
28 | */ | ||
29 | void (*cpu_wait)(void); | ||
30 | EXPORT_SYMBOL(cpu_wait); | ||
31 | |||
32 | static void __cpuidle r3081_wait(void) | ||
33 | { | ||
34 | unsigned long cfg = read_c0_conf(); | ||
35 | write_c0_conf(cfg | R30XX_CONF_HALT); | ||
36 | raw_local_irq_enable(); | ||
37 | } | ||
38 | |||
39 | static void __cpuidle r39xx_wait(void) | ||
40 | { | ||
41 | if (!need_resched()) | ||
42 | write_c0_conf(read_c0_conf() | TX39_CONF_HALT); | ||
43 | raw_local_irq_enable(); | ||
44 | } | ||
45 | |||
46 | void __cpuidle r4k_wait(void) | ||
47 | { | ||
48 | raw_local_irq_enable(); | ||
49 | __r4k_wait(); | ||
50 | } | ||
51 | |||
52 | /* | ||
53 | * This variant is preferable as it allows testing need_resched and going to | ||
54 | * sleep depending on the outcome atomically. Unfortunately the "It is | ||
55 | * implementation-dependent whether the pipeline restarts when a non-enabled | ||
56 | * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes | ||
57 | * using this version a gamble. | ||
58 | */ | ||
59 | void __cpuidle r4k_wait_irqoff(void) | ||
60 | { | ||
61 | if (!need_resched()) | ||
62 | __asm__( | ||
63 | " .set push \n" | ||
64 | " .set arch=r4000 \n" | ||
65 | " wait \n" | ||
66 | " .set pop \n"); | ||
67 | raw_local_irq_enable(); | ||
68 | } | ||
69 | |||
70 | /* | ||
71 | * The RM7000 variant has to handle erratum 38. The workaround is to not | ||
72 | * have any pending stores when the WAIT instruction is executed. | ||
73 | */ | ||
74 | static void __cpuidle rm7k_wait_irqoff(void) | ||
75 | { | ||
76 | if (!need_resched()) | ||
77 | __asm__( | ||
78 | " .set push \n" | ||
79 | " .set arch=r4000 \n" | ||
80 | " .set noat \n" | ||
81 | " mfc0 $1, $12 \n" | ||
82 | " sync \n" | ||
83 | " mtc0 $1, $12 # stalls until W stage \n" | ||
84 | " wait \n" | ||
85 | " mtc0 $1, $12 # stalls until W stage \n" | ||
86 | " .set pop \n"); | ||
87 | raw_local_irq_enable(); | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Au1 'wait' is only useful when the 32kHz counter is used as timer, | ||
92 | * since coreclock (and the cp0 counter) stops upon executing it. Only an | ||
93 | * interrupt can wake it, so they must be enabled before entering idle modes. | ||
94 | */ | ||
95 | static void __cpuidle au1k_wait(void) | ||
96 | { | ||
97 | unsigned long c0status = read_c0_status() | 1; /* irqs on */ | ||
98 | |||
99 | __asm__( | ||
100 | " .set push \n" | ||
101 | " .set arch=r4000 \n" | ||
102 | " cache 0x14, 0(%0) \n" | ||
103 | " cache 0x14, 32(%0) \n" | ||
104 | " sync \n" | ||
105 | " mtc0 %1, $12 \n" /* wr c0status */ | ||
106 | " wait \n" | ||
107 | " nop \n" | ||
108 | " nop \n" | ||
109 | " nop \n" | ||
110 | " nop \n" | ||
111 | " .set pop \n" | ||
112 | : : "r" (au1k_wait), "r" (c0status)); | ||
113 | } | ||
114 | |||
115 | static int __initdata nowait; | ||
116 | |||
117 | static int __init wait_disable(char *s) | ||
118 | { | ||
119 | nowait = 1; | ||
120 | |||
121 | return 1; | ||
122 | } | ||
123 | |||
124 | __setup("nowait", wait_disable); | ||
125 | |||
126 | void __init check_wait(void) | ||
127 | { | ||
128 | struct cpuinfo_mips *c = &current_cpu_data; | ||
129 | |||
130 | if (nowait) { | ||
131 | printk("Wait instruction disabled.\n"); | ||
132 | return; | ||
133 | } | ||
134 | |||
135 | /* | ||
136 | * MIPSr6 specifies that masked interrupts should unblock an executing | ||
137 | * wait instruction, and thus that it is safe for us to use | ||
138 | * r4k_wait_irqoff. Yippee! | ||
139 | */ | ||
140 | if (cpu_has_mips_r6) { | ||
141 | cpu_wait = r4k_wait_irqoff; | ||
142 | return; | ||
143 | } | ||
144 | |||
145 | switch (current_cpu_type()) { | ||
146 | case CPU_R3081: | ||
147 | case CPU_R3081E: | ||
148 | cpu_wait = r3081_wait; | ||
149 | break; | ||
150 | case CPU_TX3927: | ||
151 | cpu_wait = r39xx_wait; | ||
152 | break; | ||
153 | case CPU_R4200: | ||
154 | case CPU_R4600: | ||
155 | case CPU_R4640: | ||
156 | case CPU_R4650: | ||
157 | case CPU_R4700: | ||
158 | case CPU_R5000: | ||
159 | case CPU_R5500: | ||
160 | case CPU_NEVADA: | ||
161 | case CPU_4KC: | ||
162 | case CPU_4KEC: | ||
163 | case CPU_4KSC: | ||
164 | case CPU_5KC: | ||
165 | case CPU_5KE: | ||
166 | case CPU_25KF: | ||
167 | case CPU_PR4450: | ||
168 | case CPU_BMIPS3300: | ||
169 | case CPU_BMIPS4350: | ||
170 | case CPU_BMIPS4380: | ||
171 | case CPU_CAVIUM_OCTEON: | ||
172 | case CPU_CAVIUM_OCTEON_PLUS: | ||
173 | case CPU_CAVIUM_OCTEON2: | ||
174 | case CPU_CAVIUM_OCTEON3: | ||
175 | case CPU_XBURST: | ||
176 | case CPU_LOONGSON32: | ||
177 | case CPU_XLR: | ||
178 | case CPU_XLP: | ||
179 | cpu_wait = r4k_wait; | ||
180 | break; | ||
181 | case CPU_LOONGSON64: | ||
182 | if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >= | ||
183 | (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) || | ||
184 | (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R) | ||
185 | cpu_wait = r4k_wait; | ||
186 | break; | ||
187 | |||
188 | case CPU_BMIPS5000: | ||
189 | cpu_wait = r4k_wait_irqoff; | ||
190 | break; | ||
191 | case CPU_RM7000: | ||
192 | cpu_wait = rm7k_wait_irqoff; | ||
193 | break; | ||
194 | |||
195 | case CPU_PROAPTIV: | ||
196 | case CPU_P5600: | ||
197 | /* | ||
198 | * Incoming Fast Debug Channel (FDC) data during a wait | ||
199 | * instruction causes the wait never to resume, even if an | ||
200 | * interrupt is received. Avoid using wait at all if FDC data is | ||
201 | * likely to be received. | ||
202 | */ | ||
203 | if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY)) | ||
204 | break; | ||
205 | fallthrough; | ||
206 | case CPU_M14KC: | ||
207 | case CPU_M14KEC: | ||
208 | case CPU_24K: | ||
209 | case CPU_34K: | ||
210 | case CPU_1004K: | ||
211 | case CPU_1074K: | ||
212 | case CPU_INTERAPTIV: | ||
213 | case CPU_M5150: | ||
214 | case CPU_QEMU_GENERIC: | ||
215 | cpu_wait = r4k_wait; | ||
216 | if (read_c0_config7() & MIPS_CONF7_WII) | ||
217 | cpu_wait = r4k_wait_irqoff; | ||
218 | break; | ||
219 | |||
220 | case CPU_74K: | ||
221 | cpu_wait = r4k_wait; | ||
222 | if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0)) | ||
223 | cpu_wait = r4k_wait_irqoff; | ||
224 | break; | ||
225 | |||
226 | case CPU_TX49XX: | ||
227 | cpu_wait = r4k_wait_irqoff; | ||
228 | break; | ||
229 | case CPU_ALCHEMY: | ||
230 | cpu_wait = au1k_wait; | ||
231 | break; | ||
232 | case CPU_20KC: | ||
233 | /* | ||
234 | * WAIT on Rev1.0 has E1, E2, E3 and E16. | ||
235 | * WAIT on Rev2.0 and Rev3.0 has E16. | ||
236 | * Rev3.1 WAIT is nop, why bother | ||
237 | */ | ||
238 | if ((c->processor_id & 0xff) <= 0x64) | ||
239 | break; | ||
240 | |||
241 | /* | ||
242 | * Another rev is incrementing c0_count at a reduced clock | ||
243 | * rate while in WAIT mode. So we basically have the choice | ||
244 | * between using the cp0 timer as clocksource or avoiding | ||
245 | * the WAIT instruction. Until more details are known, | ||
246 | * disable the use of WAIT for 20Kc entirely. | ||
247 | cpu_wait = r4k_wait; | ||
248 | */ | ||
249 | break; | ||
250 | default: | ||
251 | break; | ||
252 | } | ||
253 | } | ||
254 | |||
255 | void arch_cpu_idle(void) | ||
256 | { | ||
257 | if (cpu_wait) | ||
258 | cpu_wait(); | ||
259 | else | ||
260 | raw_local_irq_enable(); | ||
261 | } | ||
262 | |||
263 | #ifdef CONFIG_CPU_IDLE | ||
264 | |||
265 | int mips_cpuidle_wait_enter(struct cpuidle_device *dev, | ||
266 | struct cpuidle_driver *drv, int index) | ||
267 | { | ||
268 | arch_cpu_idle(); | ||
269 | return index; | ||
270 | } | ||
271 | |||
272 | #endif | ||
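check_wait() runs once at boot and fills in the cpu_wait pointer for the detected CPU; arch_cpu_idle() then calls through it on every idle entry. A stand-alone sketch of the same select-once, call-many dispatch (the CPU types and handlers are made up for illustration):

    #include <stdio.h>

    enum cpu_type { CPU_FOO, CPU_BAR, CPU_UNKNOWN };

    static void (*cpu_wait)(void);  /* NULL means "no usable wait" */

    static void foo_wait(void) { puts("foo: wait"); }
    static void bar_wait(void) { puts("bar: wait"); }

    /* Boot-time probe: pick the right idle routine for this CPU, if any. */
    static void check_wait(enum cpu_type type)
    {
        switch (type) {
        case CPU_FOO:
            cpu_wait = foo_wait;
            break;
        case CPU_BAR:
            cpu_wait = bar_wait;
            break;
        default:
            break;  /* leave cpu_wait NULL: busy idle */
        }
    }

    static void arch_cpu_idle(void)
    {
        if (cpu_wait)
            cpu_wait();     /* sleep until an interrupt */
        else
            puts("no wait: just re-enable interrupts");
    }

    int main(void)
    {
        check_wait(CPU_FOO);
        arch_cpu_idle();
        return 0;
    }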
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c new file mode 100644 index 000000000..93bcf5736 --- /dev/null +++ b/arch/mips/kernel/irq-gt641xx.c | |||
@@ -0,0 +1,118 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * GT641xx IRQ routines. | ||
4 | * | ||
5 | * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org> | ||
6 | */ | ||
7 | #include <linux/hardirq.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/irq.h> | ||
10 | #include <linux/spinlock.h> | ||
11 | #include <linux/types.h> | ||
12 | |||
13 | #include <asm/gt64120.h> | ||
14 | |||
15 | #define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE)) | ||
16 | |||
17 | static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock); | ||
18 | |||
19 | static void ack_gt641xx_irq(struct irq_data *d) | ||
20 | { | ||
21 | unsigned long flags; | ||
22 | u32 cause; | ||
23 | |||
24 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); | ||
25 | cause = GT_READ(GT_INTRCAUSE_OFS); | ||
26 | cause &= ~GT641XX_IRQ_TO_BIT(d->irq); | ||
27 | GT_WRITE(GT_INTRCAUSE_OFS, cause); | ||
28 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); | ||
29 | } | ||
30 | |||
31 | static void mask_gt641xx_irq(struct irq_data *d) | ||
32 | { | ||
33 | unsigned long flags; | ||
34 | u32 mask; | ||
35 | |||
36 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); | ||
37 | mask = GT_READ(GT_INTRMASK_OFS); | ||
38 | mask &= ~GT641XX_IRQ_TO_BIT(d->irq); | ||
39 | GT_WRITE(GT_INTRMASK_OFS, mask); | ||
40 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); | ||
41 | } | ||
42 | |||
43 | static void mask_ack_gt641xx_irq(struct irq_data *d) | ||
44 | { | ||
45 | unsigned long flags; | ||
46 | u32 cause, mask; | ||
47 | |||
48 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); | ||
49 | mask = GT_READ(GT_INTRMASK_OFS); | ||
50 | mask &= ~GT641XX_IRQ_TO_BIT(d->irq); | ||
51 | GT_WRITE(GT_INTRMASK_OFS, mask); | ||
52 | |||
53 | cause = GT_READ(GT_INTRCAUSE_OFS); | ||
54 | cause &= ~GT641XX_IRQ_TO_BIT(d->irq); | ||
55 | GT_WRITE(GT_INTRCAUSE_OFS, cause); | ||
56 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); | ||
57 | } | ||
58 | |||
59 | static void unmask_gt641xx_irq(struct irq_data *d) | ||
60 | { | ||
61 | unsigned long flags; | ||
62 | u32 mask; | ||
63 | |||
64 | raw_spin_lock_irqsave(&gt641xx_irq_lock, flags); | ||
65 | mask = GT_READ(GT_INTRMASK_OFS); | ||
66 | mask |= GT641XX_IRQ_TO_BIT(d->irq); | ||
67 | GT_WRITE(GT_INTRMASK_OFS, mask); | ||
68 | raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags); | ||
69 | } | ||
70 | |||
71 | static struct irq_chip gt641xx_irq_chip = { | ||
72 | .name = "GT641xx", | ||
73 | .irq_ack = ack_gt641xx_irq, | ||
74 | .irq_mask = mask_gt641xx_irq, | ||
75 | .irq_mask_ack = mask_ack_gt641xx_irq, | ||
76 | .irq_unmask = unmask_gt641xx_irq, | ||
77 | }; | ||
78 | |||
79 | void gt641xx_irq_dispatch(void) | ||
80 | { | ||
81 | u32 cause, mask; | ||
82 | int i; | ||
83 | |||
84 | cause = GT_READ(GT_INTRCAUSE_OFS); | ||
85 | mask = GT_READ(GT_INTRMASK_OFS); | ||
86 | cause &= mask; | ||
87 | |||
88 | /* | ||
89 | * bit0 : logical or of all the interrupt bits. | ||
90 | * bit30: logical or of bits[29:26,20:1]. | ||
91 | * bit31: logical or of bits[25:1]. | ||
92 | */ | ||
93 | for (i = 1; i < 30; i++) { | ||
94 | if (cause & (1U << i)) { | ||
95 | do_IRQ(GT641XX_IRQ_BASE + i); | ||
96 | return; | ||
97 | } | ||
98 | } | ||
99 | |||
100 | atomic_inc(&irq_err_count); | ||
101 | } | ||
102 | |||
103 | void __init gt641xx_irq_init(void) | ||
104 | { | ||
105 | int i; | ||
106 | |||
107 | GT_WRITE(GT_INTRMASK_OFS, 0); | ||
108 | GT_WRITE(GT_INTRCAUSE_OFS, 0); | ||
109 | |||
110 | /* | ||
111 | * bit0 : logical or of all the interrupt bits. | ||
112 | * bit30: logical or of bits[29:26,20:1]. | ||
113 | * bit31: logical or of bits[25:1]. | ||
114 | */ | ||
115 | for (i = 1; i < 30; i++) | ||
116 | irq_set_chip_and_handler(GT641XX_IRQ_BASE + i, | ||
117 | &gt641xx_irq_chip, handle_level_irq); | ||
118 | } | ||
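gt641xx_irq_dispatch() scans cause & mask from bit 1 upward and dispatches the first pending source; bits 0, 30 and 31 are summary bits rather than real sources, per the comment in the code. The same scan can be expressed with a find-first-set, as in this stand-alone sketch (the base value here is illustrative, not the real GT641XX_IRQ_BASE):

    #include <stdio.h>
    #include <strings.h>    /* ffs() */

    #define GT641XX_IRQ_BASE 8      /* illustrative base, not the real one */

    /* Dispatch the lowest pending, enabled source among bits 1..29. */
    static int pick_irq(unsigned int cause, unsigned int mask)
    {
        unsigned int pending = cause & mask & 0x3ffffffe;  /* bits 1..29 */
        int bit;

        if (!pending)
            return -1;          /* spurious: caller bumps irq_err_count */
        bit = ffs(pending) - 1; /* ffs() is 1-based */
        return GT641XX_IRQ_BASE + bit;
    }

    int main(void)
    {
        printf("%d\n", pick_irq(0x14, 0x10));   /* bit 4 -> IRQ base+4 */
        printf("%d\n", pick_irq(0x01, 0xff));   /* only summary bit 0 -> -1 */
        return 0;
    }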
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c new file mode 100644 index 000000000..ab511b64a --- /dev/null +++ b/arch/mips/kernel/irq-msc01.c | |||
@@ -0,0 +1,156 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * | ||
4 | * Copyright (c) 2004 MIPS Inc | ||
5 | * Author: chris@mips.com | ||
6 | * | ||
7 | * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org> | ||
8 | */ | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/kernel_stat.h> | ||
13 | #include <asm/io.h> | ||
14 | #include <asm/irq.h> | ||
15 | #include <asm/msc01_ic.h> | ||
16 | #include <asm/traps.h> | ||
17 | |||
18 | static unsigned long _icctrl_msc; | ||
19 | #define MSC01_IC_REG_BASE _icctrl_msc | ||
20 | |||
21 | #define MSCIC_WRITE(reg, data) do { *(volatile u32 *)(reg) = data; } while (0) | ||
22 | #define MSCIC_READ(reg, data) do { data = *(volatile u32 *)(reg); } while (0) | ||
23 | |||
24 | static unsigned int irq_base; | ||
25 | |||
26 | /* mask off an interrupt */ | ||
27 | static inline void mask_msc_irq(struct irq_data *d) | ||
28 | { | ||
29 | unsigned int irq = d->irq; | ||
30 | |||
31 | if (irq < (irq_base + 32)) | ||
32 | MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base)); | ||
33 | else | ||
34 | MSCIC_WRITE(MSC01_IC_DISH, 1<<(irq - irq_base - 32)); | ||
35 | } | ||
36 | |||
37 | /* unmask an interrupt */ | ||
38 | static inline void unmask_msc_irq(struct irq_data *d) | ||
39 | { | ||
40 | unsigned int irq = d->irq; | ||
41 | |||
42 | if (irq < (irq_base + 32)) | ||
43 | MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base)); | ||
44 | else | ||
45 | MSCIC_WRITE(MSC01_IC_ENAH, 1<<(irq - irq_base - 32)); | ||
46 | } | ||
47 | |||
48 | /* | ||
49 | * Masks and ACKs an IRQ | ||
50 | */ | ||
51 | static void level_mask_and_ack_msc_irq(struct irq_data *d) | ||
52 | { | ||
53 | mask_msc_irq(d); | ||
54 | if (!cpu_has_veic) | ||
55 | MSCIC_WRITE(MSC01_IC_EOI, 0); | ||
56 | } | ||
57 | |||
58 | /* | ||
59 | * Masks and ACKs an IRQ | ||
60 | */ | ||
61 | static void edge_mask_and_ack_msc_irq(struct irq_data *d) | ||
62 | { | ||
63 | unsigned int irq = d->irq; | ||
64 | |||
65 | mask_msc_irq(d); | ||
66 | if (!cpu_has_veic) | ||
67 | MSCIC_WRITE(MSC01_IC_EOI, 0); | ||
68 | else { | ||
69 | u32 r; | ||
70 | MSCIC_READ(MSC01_IC_SUP+irq*8, r); | ||
71 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); | ||
72 | MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); | ||
73 | } | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Interrupt handler for interrupts coming from SOC-it. | ||
78 | */ | ||
79 | void ll_msc_irq(void) | ||
80 | { | ||
81 | unsigned int irq; | ||
82 | |||
83 | /* read the interrupt vector register */ | ||
84 | MSCIC_READ(MSC01_IC_VEC, irq); | ||
85 | if (irq < 64) | ||
86 | do_IRQ(irq + irq_base); | ||
87 | else { | ||
88 | /* Ignore spurious interrupt */ | ||
89 | } | ||
90 | } | ||
91 | |||
92 | static void msc_bind_eic_interrupt(int irq, int set) | ||
93 | { | ||
94 | MSCIC_WRITE(MSC01_IC_RAMW, | ||
95 | (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF)); | ||
96 | } | ||
97 | |||
98 | static struct irq_chip msc_levelirq_type = { | ||
99 | .name = "SOC-it-Level", | ||
100 | .irq_ack = level_mask_and_ack_msc_irq, | ||
101 | .irq_mask = mask_msc_irq, | ||
102 | .irq_mask_ack = level_mask_and_ack_msc_irq, | ||
103 | .irq_unmask = unmask_msc_irq, | ||
104 | .irq_eoi = unmask_msc_irq, | ||
105 | }; | ||
106 | |||
107 | static struct irq_chip msc_edgeirq_type = { | ||
108 | .name = "SOC-it-Edge", | ||
109 | .irq_ack = edge_mask_and_ack_msc_irq, | ||
110 | .irq_mask = mask_msc_irq, | ||
111 | .irq_mask_ack = edge_mask_and_ack_msc_irq, | ||
112 | .irq_unmask = unmask_msc_irq, | ||
113 | .irq_eoi = unmask_msc_irq, | ||
114 | }; | ||
115 | |||
116 | |||
117 | void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq) | ||
118 | { | ||
119 | _icctrl_msc = (unsigned long) ioremap(icubase, 0x40000); | ||
120 | |||
121 | /* Reset interrupt controller - initialises all registers to 0 */ | ||
122 | MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT); | ||
123 | |||
124 | board_bind_eic_interrupt = &msc_bind_eic_interrupt; | ||
125 | |||
126 | for (; nirq > 0; nirq--, imp++) { | ||
127 | int n = imp->im_irq; | ||
128 | |||
129 | switch (imp->im_type) { | ||
130 | case MSC01_IRQ_EDGE: | ||
131 | irq_set_chip_and_handler_name(irqbase + n, | ||
132 | &msc_edgeirq_type, | ||
133 | handle_edge_irq, | ||
134 | "edge"); | ||
135 | if (cpu_has_veic) | ||
136 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT); | ||
137 | else | ||
138 | MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl); | ||
139 | break; | ||
140 | case MSC01_IRQ_LEVEL: | ||
141 | irq_set_chip_and_handler_name(irqbase + n, | ||
142 | &msc_levelirq_type, | ||
143 | handle_level_irq, | ||
144 | "level"); | ||
145 | if (cpu_has_veic) | ||
146 | MSCIC_WRITE(MSC01_IC_SUP+n*8, 0); | ||
147 | else | ||
148 | MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl); | ||
149 | } | ||
150 | } | ||
151 | |||
152 | irq_base = irqbase; | ||
153 | |||
154 | MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT); /* Enable interrupt generation */ | ||
155 | |||
156 | } | ||
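msc_bind_eic_interrupt() programs the interrupt controller's vector RAM with a single write that packs an address field and a data field into one word. A stand-alone sketch of that packing, with illustrative shift values standing in for the MSC01_IC_RAMW_*_SHF constants from <asm/msc01_ic.h>:

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative field positions, standing in for MSC01_IC_RAMW_*_SHF. */
    #define RAMW_ADDR_SHF 8
    #define RAMW_DATA_SHF 0

    /* Pack "bind interrupt 'irq' to vector 'set'" into one register word. */
    static uint32_t ramw_word(unsigned int irq, unsigned int set)
    {
        return (irq << RAMW_ADDR_SHF) | (set << RAMW_DATA_SHF);
    }

    int main(void)
    {
        printf("0x%08x\n", ramw_word(5, 3));    /* prints 0x00000503 */
        return 0;
    }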
diff --git a/arch/mips/kernel/irq-rm7000.c b/arch/mips/kernel/irq-rm7000.c new file mode 100644 index 000000000..e1a497f63 --- /dev/null +++ b/arch/mips/kernel/irq-rm7000.c | |||
@@ -0,0 +1,45 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2003 Ralf Baechle | ||
4 | * | ||
5 | * Handler for RM7000 extended interrupts. These are a non-standard | ||
6 | * feature so we handle them separately from standard interrupts. | ||
7 | */ | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/interrupt.h> | ||
10 | #include <linux/irq.h> | ||
11 | #include <linux/kernel.h> | ||
12 | |||
13 | #include <asm/irq_cpu.h> | ||
14 | #include <asm/mipsregs.h> | ||
15 | |||
16 | static inline void unmask_rm7k_irq(struct irq_data *d) | ||
17 | { | ||
18 | set_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); | ||
19 | } | ||
20 | |||
21 | static inline void mask_rm7k_irq(struct irq_data *d) | ||
22 | { | ||
23 | clear_c0_intcontrol(0x100 << (d->irq - RM7K_CPU_IRQ_BASE)); | ||
24 | } | ||
25 | |||
26 | static struct irq_chip rm7k_irq_controller = { | ||
27 | .name = "RM7000", | ||
28 | .irq_ack = mask_rm7k_irq, | ||
29 | .irq_mask = mask_rm7k_irq, | ||
30 | .irq_mask_ack = mask_rm7k_irq, | ||
31 | .irq_unmask = unmask_rm7k_irq, | ||
32 | .irq_eoi = unmask_rm7k_irq | ||
33 | }; | ||
34 | |||
35 | void __init rm7k_cpu_irq_init(void) | ||
36 | { | ||
37 | int base = RM7K_CPU_IRQ_BASE; | ||
38 | int i; | ||
39 | |||
40 | clear_c0_intcontrol(0x00000f00); /* Mask all */ | ||
41 | |||
42 | for (i = base; i < base + 4; i++) | ||
43 | irq_set_chip_and_handler(i, &rm7k_irq_controller, | ||
44 | handle_percpu_irq); | ||
45 | } | ||
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c new file mode 100644 index 000000000..85b6c60f2 --- /dev/null +++ b/arch/mips/kernel/irq.c | |||
@@ -0,0 +1,109 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Code to handle x86 style IRQs plus some generic interrupt stuff. | ||
7 | * | ||
8 | * Copyright (C) 1992 Linus Torvalds | ||
9 | * Copyright (C) 1994 - 2000 Ralf Baechle | ||
10 | */ | ||
11 | #include <linux/kernel.h> | ||
12 | #include <linux/delay.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/kernel_stat.h> | ||
16 | #include <linux/proc_fs.h> | ||
17 | #include <linux/mm.h> | ||
18 | #include <linux/random.h> | ||
19 | #include <linux/sched.h> | ||
20 | #include <linux/seq_file.h> | ||
21 | #include <linux/kallsyms.h> | ||
22 | #include <linux/kgdb.h> | ||
23 | #include <linux/ftrace.h> | ||
24 | |||
25 | #include <linux/atomic.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | |||
28 | void *irq_stack[NR_CPUS]; | ||
29 | |||
30 | /* | ||
31 | * 'what should we do if we get a hw irq event on an illegal vector'. | ||
32 | * Each architecture has to answer this itself. | ||
33 | */ | ||
34 | void ack_bad_irq(unsigned int irq) | ||
35 | { | ||
36 | printk("unexpected IRQ # %d\n", irq); | ||
37 | } | ||
38 | |||
39 | atomic_t irq_err_count; | ||
40 | |||
41 | int arch_show_interrupts(struct seq_file *p, int prec) | ||
42 | { | ||
43 | seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count)); | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | asmlinkage void spurious_interrupt(void) | ||
48 | { | ||
49 | atomic_inc(&irq_err_count); | ||
50 | } | ||
51 | |||
52 | void __init init_IRQ(void) | ||
53 | { | ||
54 | int i; | ||
55 | unsigned int order = get_order(IRQ_STACK_SIZE); | ||
56 | |||
57 | for (i = 0; i < NR_IRQS; i++) | ||
58 | irq_set_noprobe(i); | ||
59 | |||
60 | if (cpu_has_veic) | ||
61 | clear_c0_status(ST0_IM); | ||
62 | |||
63 | arch_init_irq(); | ||
64 | |||
65 | for_each_possible_cpu(i) { | ||
66 | void *s = (void *)__get_free_pages(GFP_KERNEL, order); | ||
67 | |||
68 | irq_stack[i] = s; | ||
69 | pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i, | ||
70 | irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE); | ||
71 | } | ||
72 | } | ||
73 | |||
74 | #ifdef CONFIG_DEBUG_STACKOVERFLOW | ||
75 | static inline void check_stack_overflow(void) | ||
76 | { | ||
77 | unsigned long sp; | ||
78 | |||
79 | __asm__ __volatile__("move %0, $sp" : "=r" (sp)); | ||
80 | sp &= THREAD_MASK; | ||
81 | |||
82 | /* | ||
83 | * Check for stack overflow: is there less than STACK_WARN free? | ||
84 | * STACK_WARN is defined as 1/8 of THREAD_SIZE by default. | ||
85 | */ | ||
86 | if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) { | ||
87 | printk("do_IRQ: stack overflow: %ld\n", | ||
88 | sp - sizeof(struct thread_info)); | ||
89 | dump_stack(); | ||
90 | } | ||
91 | } | ||
92 | #else | ||
93 | static inline void check_stack_overflow(void) {} | ||
94 | #endif | ||
95 | |||
96 | |||
97 | /* | ||
98 | * do_IRQ handles all normal device IRQs (the special | ||
99 | * SMP cross-CPU interrupts have their own specific | ||
100 | * handlers). | ||
101 | */ | ||
102 | void __irq_entry do_IRQ(unsigned int irq) | ||
103 | { | ||
104 | irq_enter(); | ||
105 | check_stack_overflow(); | ||
106 | generic_handle_irq(irq); | ||
107 | irq_exit(); | ||
108 | } | ||
109 | |||
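The check_stack_overflow() test works because kernel stacks are THREAD_SIZE-aligned: masking the stack pointer with THREAD_MASK yields its offset from the stack base, and the stack grows down toward the struct thread_info that sits at that base. A stand-alone sketch of the arithmetic, with illustrative sizes:

    #include <stdint.h>
    #include <stdio.h>

    #define THREAD_SIZE    0x4000UL            /* illustrative: 16 KiB stack */
    #define THREAD_MASK    (THREAD_SIZE - 1)
    #define THREAD_INFO_SZ 64UL                /* illustrative thread_info size */
    #define STACK_WARN     (THREAD_SIZE / 8)   /* warn with < 1/8 left */

    static void check_stack_overflow(uintptr_t sp)
    {
        uintptr_t off = sp & THREAD_MASK;      /* offset from stack base */

        if (off < THREAD_INFO_SZ + STACK_WARN)
            printf("stack overflow: %ld bytes past thread_info\n",
                   (long)(off - THREAD_INFO_SZ));
    }

    int main(void)
    {
        check_stack_overflow(0x80804000 + 0x3f00);  /* plenty left: silent */
        check_stack_overflow(0x80804000 + 0x0100);  /* deep: warns */
        return 0;
    }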
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c new file mode 100644 index 000000000..ab00e4904 --- /dev/null +++ b/arch/mips/kernel/irq_txx9.c | |||
@@ -0,0 +1,191 @@ | |||
1 | /* | ||
2 | * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c, | ||
3 | * linux/arch/mips/tx4927/common/tx4927_irq.c, | ||
4 | * linux/arch/mips/tx4938/common/irq.c | ||
5 | * | ||
6 | * Copyright 2001, 2003-2005 MontaVista Software Inc. | ||
7 | * Author: MontaVista Software, Inc. | ||
8 | * ahennessy@mvista.com | ||
9 | * source@mvista.com | ||
10 | * Copyright (C) 2000-2001 Toshiba Corporation | ||
11 | * | ||
12 | * This file is subject to the terms and conditions of the GNU General Public | ||
13 | * License. See the file "COPYING" in the main directory of this archive | ||
14 | * for more details. | ||
15 | */ | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/interrupt.h> | ||
18 | #include <linux/types.h> | ||
19 | #include <linux/irq.h> | ||
20 | #include <asm/txx9irq.h> | ||
21 | |||
22 | struct txx9_irc_reg { | ||
23 | u32 cer; | ||
24 | u32 cr[2]; | ||
25 | u32 unused0; | ||
26 | u32 ilr[8]; | ||
27 | u32 unused1[4]; | ||
28 | u32 imr; | ||
29 | u32 unused2[7]; | ||
30 | u32 scr; | ||
31 | u32 unused3[7]; | ||
32 | u32 ssr; | ||
33 | u32 unused4[7]; | ||
34 | u32 csr; | ||
35 | }; | ||
36 | |||
37 | /* IRCER : Int. Control Enable */ | ||
38 | #define TXx9_IRCER_ICE 0x00000001 | ||
39 | |||
40 | /* IRCR : Int. Control */ | ||
41 | #define TXx9_IRCR_LOW 0x00000000 | ||
42 | #define TXx9_IRCR_HIGH 0x00000001 | ||
43 | #define TXx9_IRCR_DOWN 0x00000002 | ||
44 | #define TXx9_IRCR_UP 0x00000003 | ||
45 | #define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002) | ||
46 | |||
47 | /* IRSCR : Int. Status Control */ | ||
48 | #define TXx9_IRSCR_EIClrE 0x00000100 | ||
49 | #define TXx9_IRSCR_EIClr_MASK 0x0000000f | ||
50 | |||
51 | /* IRCSR : Int. Current Status */ | ||
52 | #define TXx9_IRCSR_IF 0x00010000 | ||
53 | #define TXx9_IRCSR_ILV_MASK 0x00000700 | ||
54 | #define TXx9_IRCSR_IVL_MASK 0x0000001f | ||
55 | |||
56 | #define irc_dlevel 0 | ||
57 | #define irc_elevel 1 | ||
58 | |||
59 | static struct txx9_irc_reg __iomem *txx9_ircptr __read_mostly; | ||
60 | |||
61 | static struct { | ||
62 | unsigned char level; | ||
63 | unsigned char mode; | ||
64 | } txx9irq[TXx9_MAX_IR] __read_mostly; | ||
65 | |||
66 | static void txx9_irq_unmask(struct irq_data *d) | ||
67 | { | ||
68 | unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; | ||
69 | u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2]; | ||
70 | int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; | ||
71 | |||
72 | __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs)) | ||
73 | | (txx9irq[irq_nr].level << ofs), | ||
74 | ilrp); | ||
75 | #ifdef CONFIG_CPU_TX39XX | ||
76 | /* update IRCSR */ | ||
77 | __raw_writel(0, &txx9_ircptr->imr); | ||
78 | __raw_writel(irc_elevel, &txx9_ircptr->imr); | ||
79 | #endif | ||
80 | } | ||
81 | |||
82 | static inline void txx9_irq_mask(struct irq_data *d) | ||
83 | { | ||
84 | unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; | ||
85 | u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2]; | ||
86 | int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8; | ||
87 | |||
88 | __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs)) | ||
89 | | (irc_dlevel << ofs), | ||
90 | ilrp); | ||
91 | #ifdef CONFIG_CPU_TX39XX | ||
92 | /* update IRCSR */ | ||
93 | __raw_writel(0, &txx9_ircptr->imr); | ||
94 | __raw_writel(irc_elevel, &txx9_ircptr->imr); | ||
95 | /* flush write buffer */ | ||
96 | __raw_readl(&txx9_ircptr->ssr); | ||
97 | #else | ||
98 | mmiowb(); | ||
99 | #endif | ||
100 | } | ||
101 | |||
102 | static void txx9_irq_mask_ack(struct irq_data *d) | ||
103 | { | ||
104 | unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; | ||
105 | |||
106 | txx9_irq_mask(d); | ||
107 | /* clear edge detection */ | ||
108 | if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode))) | ||
109 | __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr); | ||
110 | } | ||
111 | |||
112 | static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type) | ||
113 | { | ||
114 | unsigned int irq_nr = d->irq - TXX9_IRQ_BASE; | ||
115 | u32 cr; | ||
116 | u32 __iomem *crp; | ||
117 | int ofs; | ||
118 | int mode; | ||
119 | |||
120 | if (flow_type & IRQF_TRIGGER_PROBE) | ||
121 | return 0; | ||
122 | switch (flow_type & IRQF_TRIGGER_MASK) { | ||
123 | case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break; | ||
124 | case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break; | ||
125 | case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break; | ||
126 | case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break; | ||
127 | default: | ||
128 | return -EINVAL; | ||
129 | } | ||
130 | crp = &txx9_ircptr->cr[(unsigned int)irq_nr / 8]; | ||
131 | cr = __raw_readl(crp); | ||
132 | ofs = (irq_nr & (8 - 1)) * 2; | ||
133 | cr &= ~(0x3 << ofs); | ||
134 | cr |= (mode & 0x3) << ofs; | ||
135 | __raw_writel(cr, crp); | ||
136 | txx9irq[irq_nr].mode = mode; | ||
137 | return 0; | ||
138 | } | ||
139 | |||
140 | static struct irq_chip txx9_irq_chip = { | ||
141 | .name = "TXX9", | ||
142 | .irq_ack = txx9_irq_mask_ack, | ||
143 | .irq_mask = txx9_irq_mask, | ||
144 | .irq_mask_ack = txx9_irq_mask_ack, | ||
145 | .irq_unmask = txx9_irq_unmask, | ||
146 | .irq_set_type = txx9_irq_set_type, | ||
147 | }; | ||
148 | |||
149 | void __init txx9_irq_init(unsigned long baseaddr) | ||
150 | { | ||
151 | int i; | ||
152 | |||
153 | txx9_ircptr = ioremap(baseaddr, sizeof(struct txx9_irc_reg)); | ||
154 | for (i = 0; i < TXx9_MAX_IR; i++) { | ||
155 | txx9irq[i].level = 4; /* middle level */ | ||
156 | txx9irq[i].mode = TXx9_IRCR_LOW; | ||
157 | irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip, | ||
158 | handle_level_irq); | ||
159 | } | ||
160 | |||
161 | /* mask all IRC interrupts */ | ||
162 | __raw_writel(0, &txx9_ircptr->imr); | ||
163 | for (i = 0; i < 8; i++) | ||
164 | __raw_writel(0, &txx9_ircptr->ilr[i]); | ||
165 | /* setup IRC interrupt mode (Low Active) */ | ||
166 | for (i = 0; i < 2; i++) | ||
167 | __raw_writel(0, &txx9_ircptr->cr[i]); | ||
168 | /* enable interrupt control */ | ||
169 | __raw_writel(TXx9_IRCER_ICE, &txx9_ircptr->cer); | ||
170 | __raw_writel(irc_elevel, &txx9_ircptr->imr); | ||
171 | } | ||
172 | |||
173 | int __init txx9_irq_set_pri(int irc_irq, int new_pri) | ||
174 | { | ||
175 | int old_pri; | ||
176 | |||
177 | if ((unsigned int)irc_irq >= TXx9_MAX_IR) | ||
178 | return 0; | ||
179 | old_pri = txx9irq[irc_irq].level; | ||
180 | txx9irq[irc_irq].level = new_pri; | ||
181 | return old_pri; | ||
182 | } | ||
183 | |||
184 | int txx9_irq(void) | ||
185 | { | ||
186 | u32 csr = __raw_readl(&txx9_ircptr->csr); | ||
187 | |||
188 | if (likely(!(csr & TXx9_IRCSR_IF))) | ||
189 | return TXX9_IRQ_BASE + (csr & (TXx9_MAX_IR - 1)); | ||
190 | return -1; | ||
191 | } | ||
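The ILR addressing in txx9_irq_mask()/txx9_irq_unmask() packs four 8-bit level fields into each 32-bit ilr register: IRQ n lands in ilr[(n % 16) / 2] at bit offset n / 16 * 16 + (n & 1) * 8, so ilr[0] holds the levels for IRQs 0, 1, 16 and 17. A stand-alone check of that arithmetic:

    #include <stdio.h>

    /* Mirror the ILR addressing used by txx9_irq_mask()/txx9_irq_unmask(). */
    static void ilr_slot(unsigned int irq_nr, unsigned int *idx, unsigned int *ofs)
    {
        *idx = (irq_nr % 16) / 2;
        *ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
    }

    int main(void)
    {
        unsigned int irqs[] = { 0, 1, 16, 17, 2, 31 };
        unsigned int i, idx, ofs;

        for (i = 0; i < sizeof(irqs) / sizeof(irqs[0]); i++) {
            ilr_slot(irqs[i], &idx, &ofs);
            printf("irq %2u -> ilr[%u], bits %u..%u\n",
                   irqs[i], idx, ofs, ofs + 7);
        }
        return 0;
    }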
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c new file mode 100644 index 000000000..9f5b1247b --- /dev/null +++ b/arch/mips/kernel/jump_label.c | |||
@@ -0,0 +1,90 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2010 Cavium Networks, Inc. | ||
7 | */ | ||
8 | |||
9 | #include <linux/jump_label.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/memory.h> | ||
12 | #include <linux/mutex.h> | ||
13 | #include <linux/types.h> | ||
14 | #include <linux/cpu.h> | ||
15 | |||
16 | #include <asm/cacheflush.h> | ||
17 | #include <asm/inst.h> | ||
18 | |||
19 | /* | ||
20 | * Define parameters for the standard MIPS and the microMIPS jump | ||
21 | * instruction encoding respectively: | ||
22 | * | ||
23 | * - the ISA bit of the target, either 0 or 1 respectively, | ||
24 | * | ||
25 | * - the amount the jump target address is shifted right to fit in the | ||
26 | * immediate field of the machine instruction, either 2 or 1, | ||
27 | * | ||
28 | * - the mask determining the size of the jump region relative to the | ||
29 | * delay-slot instruction, either 256MB or 128MB, | ||
30 | * | ||
31 | * - the jump target alignment, either 4 or 2 bytes. | ||
32 | */ | ||
33 | #define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS) | ||
34 | #define J_RANGE_SHIFT (2 - J_ISA_BIT) | ||
35 | #define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1) | ||
36 | #define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1) | ||
37 | |||
38 | void arch_jump_label_transform(struct jump_entry *e, | ||
39 | enum jump_label_type type) | ||
40 | { | ||
41 | union mips_instruction *insn_p; | ||
42 | union mips_instruction insn; | ||
43 | long offset; | ||
44 | |||
45 | insn_p = (union mips_instruction *)msk_isa16_mode(e->code); | ||
46 | |||
47 | /* Target must have the right alignment and ISA must be preserved. */ | ||
48 | BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT); | ||
49 | |||
50 | if (type == JUMP_LABEL_JMP) { | ||
51 | if (!IS_ENABLED(CONFIG_CPU_MICROMIPS) && MIPS_ISA_REV >= 6) { | ||
52 | offset = e->target - ((unsigned long)insn_p + 4); | ||
53 | offset >>= 2; | ||
54 | |||
55 | /* | ||
56 | * The branch offset must fit in the instruction's 26 | ||
57 | * bit field. | ||
58 | */ | ||
59 | WARN_ON((offset >= (long)BIT(25)) || | ||
60 | (offset < -(long)BIT(25))); | ||
61 | |||
62 | insn.j_format.opcode = bc6_op; | ||
63 | insn.j_format.target = offset; | ||
64 | } else { | ||
65 | /* | ||
66 | * Jump only works within the aligned region that its | ||
67 | * delay slot is in. | ||
68 | */ | ||
69 | WARN_ON((e->target & ~J_RANGE_MASK) != | ||
70 | ((e->code + 4) & ~J_RANGE_MASK)); | ||
71 | |||
72 | insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op; | ||
73 | insn.j_format.target = e->target >> J_RANGE_SHIFT; | ||
74 | } | ||
75 | } else { | ||
76 | insn.word = 0; /* nop */ | ||
77 | } | ||
78 | |||
79 | mutex_lock(&text_mutex); | ||
80 | if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) { | ||
81 | insn_p->halfword[0] = insn.word >> 16; | ||
82 | insn_p->halfword[1] = insn.word; | ||
83 | } else | ||
84 | *insn_p = insn; | ||
85 | |||
86 | flush_icache_range((unsigned long)insn_p, | ||
87 | (unsigned long)insn_p + sizeof(*insn_p)); | ||
88 | |||
89 | mutex_unlock(&text_mutex); | ||
90 | } | ||
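The range test in arch_jump_label_transform() encodes the classic MIPS j limitation: the instruction supplies only the low 28 bits of the target (a 26-bit field shifted left by 2), so a jump must stay within the 256 MB region containing its delay slot (128 MB for microMIPS, where J_RANGE_SHIFT is 1). A stand-alone sketch of the same validity test for the standard encoding:

    #include <stdio.h>

    #define J_RANGE_SHIFT 2  /* standard MIPS: targets are 4-byte aligned */
    #define J_RANGE_MASK  ((1ul << (26 + J_RANGE_SHIFT)) - 1)  /* 256 MB - 1 */

    /* A j at 'code' can reach 'target' only inside the delay slot's region. */
    static int j_in_range(unsigned long code, unsigned long target)
    {
        return (target & ~J_RANGE_MASK) == ((code + 4) & ~J_RANGE_MASK);
    }

    int main(void)
    {
        printf("%d\n", j_in_range(0x80100000, 0x80200000)); /* same region: 1 */
        printf("%d\n", j_in_range(0x8ffffffc, 0x90000000)); /* delay slot already crossed: 1 */
        printf("%d\n", j_in_range(0x80100000, 0x90000000)); /* different region: 0 */
        return 0;
    }

The second case shows why the kernel checks against code + 4 rather than code: it is the delay slot's address, not the jump's, that fixes the reachable region.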
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c new file mode 100644 index 000000000..ea781b29f --- /dev/null +++ b/arch/mips/kernel/kgdb.c | |||
@@ -0,0 +1,415 @@ | |||
1 | /* | ||
2 | * Originally written by Glenn Engel, Lake Stevens Instrument Division | ||
3 | * | ||
4 | * Contributed by HP Systems | ||
5 | * | ||
6 | * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse | ||
7 | * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de> | ||
8 | * | ||
9 | * Copyright (C) 1995 Andreas Busse | ||
10 | * | ||
11 | * Copyright (C) 2003 MontaVista Software Inc. | ||
12 | * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net | ||
13 | * | ||
14 | * Copyright (C) 2004-2005 MontaVista Software Inc. | ||
15 | * Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com | ||
16 | * | ||
17 | * Copyright (C) 2007-2008 Wind River Systems, Inc. | ||
18 | * Author/Maintainer: Jason Wessel, jason.wessel@windriver.com | ||
19 | * | ||
20 | * This file is licensed under the terms of the GNU General Public License | ||
21 | * version 2. This program is licensed "as is" without any warranty of any | ||
22 | * kind, whether express or implied. | ||
23 | */ | ||
24 | |||
25 | #include <linux/ptrace.h> /* for linux pt_regs struct */ | ||
26 | #include <linux/kgdb.h> | ||
27 | #include <linux/kdebug.h> | ||
28 | #include <linux/sched.h> | ||
29 | #include <linux/smp.h> | ||
30 | #include <asm/inst.h> | ||
31 | #include <asm/fpu.h> | ||
32 | #include <asm/cacheflush.h> | ||
33 | #include <asm/processor.h> | ||
34 | #include <asm/sigcontext.h> | ||
35 | #include <linux/uaccess.h> | ||
36 | #include <asm/irq_regs.h> | ||
37 | |||
38 | static struct hard_trap_info { | ||
39 | unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */ | ||
40 | unsigned char signo; /* Signal that we map this trap into */ | ||
41 | } hard_trap_info[] = { | ||
42 | { 6, SIGBUS }, /* instruction bus error */ | ||
43 | { 7, SIGBUS }, /* data bus error */ | ||
44 | { 9, SIGTRAP }, /* break */ | ||
45 | /* { 11, SIGILL }, */ /* CPU unusable */ | ||
46 | { 12, SIGFPE }, /* overflow */ | ||
47 | { 13, SIGTRAP }, /* trap */ | ||
48 | { 14, SIGSEGV }, /* virtual instruction cache coherency */ | ||
49 | { 15, SIGFPE }, /* floating point exception */ | ||
50 | { 23, SIGSEGV }, /* watch */ | ||
51 | { 31, SIGSEGV }, /* virtual data cache coherency */ | ||
52 | { 0, 0} /* Must be last */ | ||
53 | }; | ||
54 | |||
55 | struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = | ||
56 | { | ||
57 | { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) }, | ||
58 | { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) }, | ||
59 | { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) }, | ||
60 | { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) }, | ||
61 | { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) }, | ||
62 | { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) }, | ||
63 | { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) }, | ||
64 | { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) }, | ||
65 | { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) }, | ||
66 | { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) }, | ||
67 | { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) }, | ||
68 | { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) }, | ||
69 | { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) }, | ||
70 | { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) }, | ||
71 | { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) }, | ||
72 | { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) }, | ||
73 | { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) }, | ||
74 | { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) }, | ||
75 | { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) }, | ||
76 | { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) }, | ||
77 | { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) }, | ||
78 | { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) }, | ||
79 | { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) }, | ||
80 | { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) }, | ||
81 | { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) }, | ||
82 | { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) }, | ||
83 | { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) }, | ||
84 | { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) }, | ||
85 | { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) }, | ||
86 | { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) }, | ||
87 | { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) }, | ||
88 | { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) }, | ||
89 | { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) }, | ||
90 | { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) }, | ||
91 | { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) }, | ||
92 | { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) }, | ||
93 | { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) }, | ||
94 | { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) }, | ||
95 | { "f0", GDB_SIZEOF_REG, 0 }, | ||
96 | { "f1", GDB_SIZEOF_REG, 1 }, | ||
97 | { "f2", GDB_SIZEOF_REG, 2 }, | ||
98 | { "f3", GDB_SIZEOF_REG, 3 }, | ||
99 | { "f4", GDB_SIZEOF_REG, 4 }, | ||
100 | { "f5", GDB_SIZEOF_REG, 5 }, | ||
101 | { "f6", GDB_SIZEOF_REG, 6 }, | ||
102 | { "f7", GDB_SIZEOF_REG, 7 }, | ||
103 | { "f8", GDB_SIZEOF_REG, 8 }, | ||
104 | { "f9", GDB_SIZEOF_REG, 9 }, | ||
105 | { "f10", GDB_SIZEOF_REG, 10 }, | ||
106 | { "f11", GDB_SIZEOF_REG, 11 }, | ||
107 | { "f12", GDB_SIZEOF_REG, 12 }, | ||
108 | { "f13", GDB_SIZEOF_REG, 13 }, | ||
109 | { "f14", GDB_SIZEOF_REG, 14 }, | ||
110 | { "f15", GDB_SIZEOF_REG, 15 }, | ||
111 | { "f16", GDB_SIZEOF_REG, 16 }, | ||
112 | { "f17", GDB_SIZEOF_REG, 17 }, | ||
113 | { "f18", GDB_SIZEOF_REG, 18 }, | ||
114 | { "f19", GDB_SIZEOF_REG, 19 }, | ||
115 | { "f20", GDB_SIZEOF_REG, 20 }, | ||
116 | { "f21", GDB_SIZEOF_REG, 21 }, | ||
117 | { "f22", GDB_SIZEOF_REG, 22 }, | ||
118 | { "f23", GDB_SIZEOF_REG, 23 }, | ||
119 | { "f24", GDB_SIZEOF_REG, 24 }, | ||
120 | { "f25", GDB_SIZEOF_REG, 25 }, | ||
121 | { "f26", GDB_SIZEOF_REG, 26 }, | ||
122 | { "f27", GDB_SIZEOF_REG, 27 }, | ||
123 | { "f28", GDB_SIZEOF_REG, 28 }, | ||
124 | { "f29", GDB_SIZEOF_REG, 29 }, | ||
125 | { "f30", GDB_SIZEOF_REG, 30 }, | ||
126 | { "f31", GDB_SIZEOF_REG, 31 }, | ||
127 | { "fsr", GDB_SIZEOF_REG, 0 }, | ||
128 | { "fir", GDB_SIZEOF_REG, 0 }, | ||
129 | }; | ||
130 | |||
131 | int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) | ||
132 | { | ||
133 | int fp_reg; | ||
134 | |||
135 | if (regno < 0 || regno >= DBG_MAX_REG_NUM) | ||
136 | return -EINVAL; | ||
137 | |||
138 | if (dbg_reg_def[regno].offset != -1 && regno < 38) { | ||
139 | memcpy((void *)regs + dbg_reg_def[regno].offset, mem, | ||
140 | dbg_reg_def[regno].size); | ||
141 | } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) { | ||
142 | /* FP registers 38 -> 69 */ | ||
143 | if (!(regs->cp0_status & ST0_CU1)) | ||
144 | return 0; | ||
145 | if (regno == 70) { | ||
146 | /* Process the fcr31/fsr (register 70) */ | ||
147 | memcpy((void *)&current->thread.fpu.fcr31, mem, | ||
148 | dbg_reg_def[regno].size); | ||
149 | goto out_save; | ||
150 | } else if (regno == 71) { | ||
151 | /* Ignore the fir (register 71) */ | ||
152 | goto out_save; | ||
153 | } | ||
154 | fp_reg = dbg_reg_def[regno].offset; | ||
155 | memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem, | ||
156 | dbg_reg_def[regno].size); | ||
157 | out_save: | ||
158 | restore_fp(current); | ||
159 | } | ||
160 | |||
161 | return 0; | ||
162 | } | ||
163 | |||
164 | char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) | ||
165 | { | ||
166 | int fp_reg; | ||
167 | |||
168 | if (regno >= DBG_MAX_REG_NUM || regno < 0) | ||
169 | return NULL; | ||
170 | |||
171 | if (dbg_reg_def[regno].offset != -1 && regno < 38) { | ||
172 | /* First 38 registers */ | ||
173 | memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, | ||
174 | dbg_reg_def[regno].size); | ||
175 | } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) { | ||
176 | /* FP registers 38 -> 69 */ | ||
177 | if (!(regs->cp0_status & ST0_CU1)) | ||
178 | goto out; | ||
179 | save_fp(current); | ||
180 | if (regno == 70) { | ||
181 | /* Process the fcr31/fsr (register 70) */ | ||
182 | memcpy(mem, (void *)&current->thread.fpu.fcr31, | ||
183 | dbg_reg_def[regno].size); | ||
184 | goto out; | ||
185 | } else if (regno == 71) { | ||
186 | /* Ignore the fir (register 71) */ | ||
187 | memset(mem, 0, dbg_reg_def[regno].size); | ||
188 | goto out; | ||
189 | } | ||
190 | fp_reg = dbg_reg_def[regno].offset; | ||
191 | memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg], | ||
192 | dbg_reg_def[regno].size); | ||
193 | } | ||
194 | |||
195 | out: | ||
196 | return dbg_reg_def[regno].name; | ||
197 | |||
198 | } | ||
199 | |||
200 | void arch_kgdb_breakpoint(void) | ||
201 | { | ||
202 | __asm__ __volatile__( | ||
203 | ".globl breakinst\n\t" | ||
204 | ".set\tnoreorder\n\t" | ||
205 | "nop\n" | ||
206 | "breakinst:\tbreak\n\t" | ||
207 | "nop\n\t" | ||
208 | ".set\treorder"); | ||
209 | } | ||
210 | |||
211 | void kgdb_call_nmi_hook(void *ignored) | ||
212 | { | ||
213 | mm_segment_t old_fs; | ||
214 | |||
215 | old_fs = get_fs(); | ||
216 | set_fs(KERNEL_DS); | ||
217 | |||
218 | kgdb_nmicallback(raw_smp_processor_id(), get_irq_regs()); | ||
219 | |||
220 | set_fs(old_fs); | ||
221 | } | ||
222 | |||
223 | static int compute_signal(int tt) | ||
224 | { | ||
225 | struct hard_trap_info *ht; | ||
226 | |||
227 | for (ht = hard_trap_info; ht->tt && ht->signo; ht++) | ||
228 | if (ht->tt == tt) | ||
229 | return ht->signo; | ||
230 | |||
231 | return SIGHUP; /* default for things we don't know about */ | ||
232 | } | ||
233 | |||
234 | /* | ||
235 | * Similar to regs_to_gdb_regs() except that the process is sleeping and so | ||
236 | * we may not be able to get all the info. | ||
237 | */ | ||
238 | void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) | ||
239 | { | ||
240 | int reg; | ||
241 | #if (KGDB_GDB_REG_SIZE == 32) | ||
242 | u32 *ptr = (u32 *)gdb_regs; | ||
243 | #else | ||
244 | u64 *ptr = (u64 *)gdb_regs; | ||
245 | #endif | ||
246 | |||
247 | for (reg = 0; reg < 16; reg++) | ||
248 | *(ptr++) = 0; | ||
249 | |||
250 | /* S0 - S7 */ | ||
251 | *(ptr++) = p->thread.reg16; | ||
252 | *(ptr++) = p->thread.reg17; | ||
253 | *(ptr++) = p->thread.reg18; | ||
254 | *(ptr++) = p->thread.reg19; | ||
255 | *(ptr++) = p->thread.reg20; | ||
256 | *(ptr++) = p->thread.reg21; | ||
257 | *(ptr++) = p->thread.reg22; | ||
258 | *(ptr++) = p->thread.reg23; | ||
259 | |||
260 | for (reg = 24; reg < 28; reg++) | ||
261 | *(ptr++) = 0; | ||
262 | |||
263 | /* GP, SP, FP, RA */ | ||
264 | *(ptr++) = (long)p; | ||
265 | *(ptr++) = p->thread.reg29; | ||
266 | *(ptr++) = p->thread.reg30; | ||
267 | *(ptr++) = p->thread.reg31; | ||
268 | |||
269 | *(ptr++) = p->thread.cp0_status; | ||
270 | |||
271 | /* lo, hi */ | ||
272 | *(ptr++) = 0; | ||
273 | *(ptr++) = 0; | ||
274 | |||
275 | /* | ||
276 | * BadVAddr, Cause | ||
277 | * Ideally these would come from the last exception frame up the stack | ||
278 | * but that requires unwinding, otherwise we can't know much for sure. | ||
279 | */ | ||
280 | *(ptr++) = 0; | ||
281 | *(ptr++) = 0; | ||
282 | |||
283 | /* | ||
284 | * PC | ||
285 | * use return address (RA), i.e. the moment after return from resume() | ||
286 | */ | ||
287 | *(ptr++) = p->thread.reg31; | ||
288 | } | ||
289 | |||
290 | void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc) | ||
291 | { | ||
292 | regs->cp0_epc = pc; | ||
293 | } | ||
294 | |||
295 | /* | ||
296 | * Calls linux_debug_hook before the kernel dies. If KGDB is enabled, | ||
297 | * then try to fall into the debugger. | ||
298 | */ | ||
299 | static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, | ||
300 | void *ptr) | ||
301 | { | ||
302 | struct die_args *args = (struct die_args *)ptr; | ||
303 | struct pt_regs *regs = args->regs; | ||
304 | int trap = (regs->cp0_cause & 0x7c) >> 2; | ||
305 | mm_segment_t old_fs; | ||
306 | |||
307 | #ifdef CONFIG_KPROBES | ||
308 | /* | ||
309 | * Return immediately if the kprobes fault notifier has set | ||
310 | * DIE_PAGE_FAULT. | ||
311 | */ | ||
312 | if (cmd == DIE_PAGE_FAULT) | ||
313 | return NOTIFY_DONE; | ||
314 | #endif /* CONFIG_KPROBES */ | ||
315 | |||
316 | /* Userspace events, ignore. */ | ||
317 | if (user_mode(regs)) | ||
318 | return NOTIFY_DONE; | ||
319 | |||
320 | /* Kernel mode. Set correct address limit */ | ||
321 | old_fs = get_fs(); | ||
322 | set_fs(KERNEL_DS); | ||
323 | |||
324 | if (atomic_read(&kgdb_active) != -1) | ||
325 | kgdb_nmicallback(smp_processor_id(), regs); | ||
326 | |||
327 | if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs)) { | ||
328 | set_fs(old_fs); | ||
329 | return NOTIFY_DONE; | ||
330 | } | ||
331 | |||
332 | if (atomic_read(&kgdb_setting_breakpoint)) | ||
333 | if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst)) | ||
334 | regs->cp0_epc += 4; | ||
335 | |||
336 | /* In SMP mode, __flush_cache_all does IPI */ | ||
337 | local_irq_enable(); | ||
338 | __flush_cache_all(); | ||
339 | |||
340 | set_fs(old_fs); | ||
341 | return NOTIFY_STOP; | ||
342 | } | ||
343 | |||
344 | #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP | ||
345 | int kgdb_ll_trap(int cmd, const char *str, | ||
346 | struct pt_regs *regs, long err, int trap, int sig) | ||
347 | { | ||
348 | struct die_args args = { | ||
349 | .regs = regs, | ||
350 | .str = str, | ||
351 | .err = err, | ||
352 | .trapnr = trap, | ||
353 | .signr = sig, | ||
354 | |||
355 | }; | ||
356 | |||
357 | if (!kgdb_io_module_registered) | ||
358 | return NOTIFY_DONE; | ||
359 | |||
360 | return kgdb_mips_notify(NULL, cmd, &args); | ||
361 | } | ||
362 | #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ | ||
363 | |||
364 | static struct notifier_block kgdb_notifier = { | ||
365 | .notifier_call = kgdb_mips_notify, | ||
366 | }; | ||
367 | |||
368 | /* | ||
369 | * Handle the 'c' command | ||
370 | */ | ||
371 | int kgdb_arch_handle_exception(int vector, int signo, int err_code, | ||
372 | char *remcom_in_buffer, char *remcom_out_buffer, | ||
373 | struct pt_regs *regs) | ||
374 | { | ||
375 | char *ptr; | ||
376 | unsigned long address; | ||
377 | |||
378 | switch (remcom_in_buffer[0]) { | ||
379 | case 'c': | ||
380 | /* handle the optional parameter */ | ||
381 | ptr = &remcom_in_buffer[1]; | ||
382 | if (kgdb_hex2long(&ptr, &address)) | ||
383 | regs->cp0_epc = address; | ||
384 | |||
385 | return 0; | ||
386 | } | ||
387 | |||
388 | return -1; | ||
389 | } | ||
390 | |||
391 | const struct kgdb_arch arch_kgdb_ops = { | ||
392 | #ifdef CONFIG_CPU_BIG_ENDIAN | ||
393 | .gdb_bpt_instr = { spec_op << 2, 0x00, 0x00, break_op }, | ||
394 | #else | ||
395 | .gdb_bpt_instr = { break_op, 0x00, 0x00, spec_op << 2 }, | ||
396 | #endif | ||
397 | }; | ||
398 | |||
399 | int kgdb_arch_init(void) | ||
400 | { | ||
401 | register_die_notifier(&kgdb_notifier); | ||
402 | |||
403 | return 0; | ||
404 | } | ||
405 | |||
406 | /* | ||
407 | * kgdb_arch_exit - Perform any architecture specific uninitialization. | ||
408 | * | ||
409 | * This function will handle the uninitialization of any architecture | ||
410 | * specific callbacks, for dynamic registration and unregistration. | ||
411 | */ | ||
412 | void kgdb_arch_exit(void) | ||
413 | { | ||
414 | unregister_die_notifier(&kgdb_notifier); | ||
415 | } | ||
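compute_signal() maps a hardware trap code to a POSIX signal by walking a sentinel-terminated table and falling back to SIGHUP for anything unknown. The same pattern in stand-alone form, with the table abbreviated to three entries:

    #include <signal.h>
    #include <stdio.h>

    static struct hard_trap_info {
        unsigned char tt;       /* trap type code */
        unsigned char signo;    /* signal it maps to */
    } hard_trap_info[] = {
        { 9, SIGTRAP },         /* break */
        { 13, SIGTRAP },        /* trap */
        { 15, SIGFPE },         /* floating point exception */
        { 0, 0 }                /* sentinel: must be last */
    };

    static int compute_signal(int tt)
    {
        struct hard_trap_info *ht;

        for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
            if (ht->tt == tt)
                return ht->signo;
        return SIGHUP;  /* default for things we don't know about */
    }

    int main(void)
    {
        printf("trap 9  -> signal %d\n", compute_signal(9));
        printf("trap 42 -> signal %d\n", compute_signal(42));
        return 0;
    }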
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c new file mode 100644 index 000000000..54dfba8fa --- /dev/null +++ b/arch/mips/kernel/kprobes.c | |||
@@ -0,0 +1,518 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * Kernel Probes (KProbes) | ||
4 | * arch/mips/kernel/kprobes.c | ||
5 | * | ||
6 | * Copyright 2006 Sony Corp. | ||
7 | * Copyright 2010 Cavium Networks | ||
8 | * | ||
9 | * Some portions copied from the powerpc version. | ||
10 | * | ||
11 | * Copyright (C) IBM Corporation, 2002, 2004 | ||
12 | */ | ||
13 | |||
14 | #include <linux/kprobes.h> | ||
15 | #include <linux/preempt.h> | ||
16 | #include <linux/uaccess.h> | ||
17 | #include <linux/kdebug.h> | ||
18 | #include <linux/slab.h> | ||
19 | |||
20 | #include <asm/ptrace.h> | ||
21 | #include <asm/branch.h> | ||
22 | #include <asm/break.h> | ||
23 | |||
24 | #include "probes-common.h" | ||
25 | |||
26 | static const union mips_instruction breakpoint_insn = { | ||
27 | .b_format = { | ||
28 | .opcode = spec_op, | ||
29 | .code = BRK_KPROBE_BP, | ||
30 | .func = break_op | ||
31 | } | ||
32 | }; | ||
33 | |||
34 | static const union mips_instruction breakpoint2_insn = { | ||
35 | .b_format = { | ||
36 | .opcode = spec_op, | ||
37 | .code = BRK_KPROBE_SSTEPBP, | ||
38 | .func = break_op | ||
39 | } | ||
40 | }; | ||
41 | |||
42 | DEFINE_PER_CPU(struct kprobe *, current_kprobe); | ||
43 | DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk); | ||
44 | |||
45 | static int __kprobes insn_has_delayslot(union mips_instruction insn) | ||
46 | { | ||
47 | return __insn_has_delay_slot(insn); | ||
48 | } | ||
49 | |||
50 | /* | ||
51 | * insn_has_ll_or_sc() checks whether the instruction is an ll or sc | ||
52 | * one; putting a breakpoint on top of an atomic ll/sc pair is a bad | ||
53 | * idea, so we need to prevent that and refuse kprobe insertion for such | ||
54 | * instructions; we cannot do much about a breakpoint in the middle of an | ||
55 | * ll/sc pair; it is up to the user to avoid those places. | ||
56 | */ | ||
57 | static int __kprobes insn_has_ll_or_sc(union mips_instruction insn) | ||
58 | { | ||
59 | int ret = 0; | ||
60 | |||
61 | switch (insn.i_format.opcode) { | ||
62 | case ll_op: | ||
63 | case lld_op: | ||
64 | case sc_op: | ||
65 | case scd_op: | ||
66 | ret = 1; | ||
67 | break; | ||
68 | default: | ||
69 | break; | ||
70 | } | ||
71 | return ret; | ||
72 | } | ||
73 | |||
74 | int __kprobes arch_prepare_kprobe(struct kprobe *p) | ||
75 | { | ||
76 | union mips_instruction insn; | ||
77 | union mips_instruction prev_insn; | ||
78 | int ret = 0; | ||
79 | |||
80 | insn = p->addr[0]; | ||
81 | |||
82 | if (insn_has_ll_or_sc(insn)) { | ||
83 | pr_notice("Kprobes for ll and sc instructions are not " | ||
84 | "supported\n"); | ||
85 | ret = -EINVAL; | ||
86 | goto out; | ||
87 | } | ||
88 | |||
89 | if (copy_from_kernel_nofault(&prev_insn, p->addr - 1, | ||
90 | sizeof(mips_instruction)) == 0 && | ||
91 | insn_has_delayslot(prev_insn)) { | ||
92 | pr_notice("Kprobes for branch delayslot are not supported\n"); | ||
93 | ret = -EINVAL; | ||
94 | goto out; | ||
95 | } | ||
96 | |||
97 | if (__insn_is_compact_branch(insn)) { | ||
98 | pr_notice("Kprobes for compact branches are not supported\n"); | ||
99 | ret = -EINVAL; | ||
100 | goto out; | ||
101 | } | ||
102 | |||
103 | /* insn: must be on special executable page on mips. */ | ||
104 | p->ainsn.insn = get_insn_slot(); | ||
105 | if (!p->ainsn.insn) { | ||
106 | ret = -ENOMEM; | ||
107 | goto out; | ||
108 | } | ||
109 | |||
110 | /* | ||
111 | * In the kprobe->ainsn.insn[] array we store the original | ||
112 | * instruction at index zero and a break trap instruction at | ||
113 | * index one. | ||
114 | * | ||
115 | * On MIPS, if the instruction at the probed address is a | ||
116 | * branch instruction, we need to execute the instruction in its | ||
117 | * branch delay slot (BD) at the time of the probe hit. As MIPS | ||
118 | * doesn't have hardware single stepping support, the BD | ||
119 | * instruction cannot be executed in-line; it is executed on the | ||
120 | * SSOL slot using a normal breakpoint instruction in the next slot. | ||
121 | * So, read the instruction and save it for later execution. | ||
122 | */ | ||
123 | if (insn_has_delayslot(insn)) | ||
124 | memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t)); | ||
125 | else | ||
126 | memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t)); | ||
127 | |||
128 | p->ainsn.insn[1] = breakpoint2_insn; | ||
129 | p->opcode = *p->addr; | ||
130 | |||
131 | out: | ||
132 | return ret; | ||
133 | } | ||
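A condensed sketch of the slot-filling decision described in the comment in
arch_prepare_kprobe() above, treating instructions as plain 32-bit words;
BRK_KPROBE_SSTEPBP == 515 is assumed from asm/break.h:

    #include <stdint.h>

    /* "break BRK_KPROBE_SSTEPBP": opcode spec_op (0) in bits 31:26, the
     * break code in bits 25:6, func break_op (0x0d) in bits 5:0 */
    #define BREAK2_INSN ((515u << 6) | 0x0d)

    /* ainsn[0] is the word that gets single stepped on the SSOL slot;
     * ainsn[1] always traps back into kprobes via the second break.
     * For a branch we step the delay-slot word (addr[1]); the branch
     * itself is emulated by evaluate_branch_instruction() instead. */
    void fill_slot(const uint32_t *addr, int insn_has_delay_slot,
                   uint32_t ainsn[2])
    {
        ainsn[0] = insn_has_delay_slot ? addr[1] : addr[0];
        ainsn[1] = BREAK2_INSN;
    }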
134 | |||
135 | void __kprobes arch_arm_kprobe(struct kprobe *p) | ||
136 | { | ||
137 | *p->addr = breakpoint_insn; | ||
138 | flush_insn_slot(p); | ||
139 | } | ||
140 | |||
141 | void __kprobes arch_disarm_kprobe(struct kprobe *p) | ||
142 | { | ||
143 | *p->addr = p->opcode; | ||
144 | flush_insn_slot(p); | ||
145 | } | ||
146 | |||
147 | void __kprobes arch_remove_kprobe(struct kprobe *p) | ||
148 | { | ||
149 | if (p->ainsn.insn) { | ||
150 | free_insn_slot(p->ainsn.insn, 0); | ||
151 | p->ainsn.insn = NULL; | ||
152 | } | ||
153 | } | ||
154 | |||
155 | static void save_previous_kprobe(struct kprobe_ctlblk *kcb) | ||
156 | { | ||
157 | kcb->prev_kprobe.kp = kprobe_running(); | ||
158 | kcb->prev_kprobe.status = kcb->kprobe_status; | ||
159 | kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR; | ||
160 | kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR; | ||
161 | kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc; | ||
162 | } | ||
163 | |||
164 | static void restore_previous_kprobe(struct kprobe_ctlblk *kcb) | ||
165 | { | ||
166 | __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp); | ||
167 | kcb->kprobe_status = kcb->prev_kprobe.status; | ||
168 | kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR; | ||
169 | kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR; | ||
170 | kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc; | ||
171 | } | ||
172 | |||
173 | static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs, | ||
174 | struct kprobe_ctlblk *kcb) | ||
175 | { | ||
176 | __this_cpu_write(current_kprobe, p); | ||
177 | kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE); | ||
178 | kcb->kprobe_saved_epc = regs->cp0_epc; | ||
179 | } | ||
180 | |||
181 | /** | ||
182 | * evaluate_branch_instruction - | ||
183 | * | ||
184 | * Evaluate the branch instruction at the probed address during a probe | ||
185 | * hit. The result of the evaluation is the updated epc. The instruction | ||
186 | * in the delay slot is single stepped (using a normal breakpoint) on the SSOL slot. | ||
187 | * | ||
188 | * The result is also saved in the kprobe control block for later use, | ||
189 | * in case we need to execute the delay-slot instruction. The latter is | ||
190 | * not needed for a NOP instruction in the delay slot, or for | ||
191 | * branch-likely instructions when the branch is taken; in those cases | ||
192 | * we set the SKIP_DELAYSLOT flag in the kprobe control block. | ||
193 | */ | ||
194 | static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs, | ||
195 | struct kprobe_ctlblk *kcb) | ||
196 | { | ||
197 | union mips_instruction insn = p->opcode; | ||
198 | long epc; | ||
199 | int ret = 0; | ||
200 | |||
201 | epc = regs->cp0_epc; | ||
202 | if (epc & 3) | ||
203 | goto unaligned; | ||
204 | |||
205 | if (p->ainsn.insn->word == 0) | ||
206 | kcb->flags |= SKIP_DELAYSLOT; | ||
207 | else | ||
208 | kcb->flags &= ~SKIP_DELAYSLOT; | ||
209 | |||
210 | ret = __compute_return_epc_for_insn(regs, insn); | ||
211 | if (ret < 0) | ||
212 | return ret; | ||
213 | |||
214 | if (ret == BRANCH_LIKELY_TAKEN) | ||
215 | kcb->flags |= SKIP_DELAYSLOT; | ||
216 | |||
217 | kcb->target_epc = regs->cp0_epc; | ||
218 | |||
219 | return 0; | ||
220 | |||
221 | unaligned: | ||
222 | pr_notice("%s: unaligned epc - sending SIGBUS.\n", current->comm); | ||
223 | force_sig(SIGBUS); | ||
224 | return -EFAULT; | ||
225 | |||
226 | } | ||
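The SKIP_DELAYSLOT rule above boils down to two cases, shown here as a pure
function (the BRANCH_LIKELY_TAKEN value is a placeholder; the real constant
lives in asm/branch.h):

    #include <stdint.h>

    #define BRANCH_LIKELY_TAKEN 1   /* placeholder value */

    /* Nonzero when the delay-slot word must NOT be single stepped: a
     * NOP does nothing, and a taken branch-likely annuls its delay slot. */
    int must_skip_delayslot(uint32_t delayslot_word, int epc_ret)
    {
        return delayslot_word == 0 || epc_ret == BRANCH_LIKELY_TAKEN;
    }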
227 | |||
228 | static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs, | ||
229 | struct kprobe_ctlblk *kcb) | ||
230 | { | ||
231 | int ret = 0; | ||
232 | |||
233 | regs->cp0_status &= ~ST0_IE; | ||
234 | |||
235 | /* single step inline if the instruction is a break */ | ||
236 | if (p->opcode.word == breakpoint_insn.word || | ||
237 | p->opcode.word == breakpoint2_insn.word) | ||
238 | regs->cp0_epc = (unsigned long)p->addr; | ||
239 | else if (insn_has_delayslot(p->opcode)) { | ||
240 | ret = evaluate_branch_instruction(p, regs, kcb); | ||
241 | if (ret < 0) { | ||
242 | pr_notice("Kprobes: Error in evaluating branch\n"); | ||
243 | return; | ||
244 | } | ||
245 | } | ||
246 | regs->cp0_epc = (unsigned long)&p->ainsn.insn[0]; | ||
247 | } | ||
248 | |||
249 | /* | ||
250 | * Called after single-stepping. p->addr is the address of the | ||
251 | * instruction that has been replaced by the "break" | ||
252 | * instruction. To avoid the SMP problems that can occur when we | ||
253 | * temporarily put back the original opcode to single-step, we | ||
254 | * single-stepped a copy of the instruction. The address of this | ||
255 | * copy is p->ainsn.insn. | ||
256 | * | ||
257 | * This function prepares to return from the post-single-step | ||
258 | * breakpoint trap. In the case of branch instructions, the target | ||
259 | * epc is restored. | ||
260 | */ | ||
261 | static void __kprobes resume_execution(struct kprobe *p, | ||
262 | struct pt_regs *regs, | ||
263 | struct kprobe_ctlblk *kcb) | ||
264 | { | ||
265 | if (insn_has_delayslot(p->opcode)) | ||
266 | regs->cp0_epc = kcb->target_epc; | ||
267 | else { | ||
268 | unsigned long orig_epc = kcb->kprobe_saved_epc; | ||
269 | regs->cp0_epc = orig_epc + 4; | ||
270 | } | ||
271 | } | ||
272 | |||
273 | static int __kprobes kprobe_handler(struct pt_regs *regs) | ||
274 | { | ||
275 | struct kprobe *p; | ||
276 | int ret = 0; | ||
277 | kprobe_opcode_t *addr; | ||
278 | struct kprobe_ctlblk *kcb; | ||
279 | |||
280 | addr = (kprobe_opcode_t *) regs->cp0_epc; | ||
281 | |||
282 | /* | ||
283 | * We don't want to be preempted for the entire | ||
284 | * duration of kprobe processing | ||
285 | */ | ||
286 | preempt_disable(); | ||
287 | kcb = get_kprobe_ctlblk(); | ||
288 | |||
289 | /* Check we're not actually recursing */ | ||
290 | if (kprobe_running()) { | ||
291 | p = get_kprobe(addr); | ||
292 | if (p) { | ||
293 | if (kcb->kprobe_status == KPROBE_HIT_SS && | ||
294 | p->ainsn.insn->word == breakpoint_insn.word) { | ||
295 | regs->cp0_status &= ~ST0_IE; | ||
296 | regs->cp0_status |= kcb->kprobe_saved_SR; | ||
297 | goto no_kprobe; | ||
298 | } | ||
299 | /* | ||
300 | * We have reentered the kprobe_handler(), since | ||
301 | * another probe was hit while within the handler. | ||
302 | * Here we save the original kprobe variables and | ||
303 | * just single step on the instruction of the new probe | ||
304 | * without calling any user handlers. | ||
305 | */ | ||
306 | save_previous_kprobe(kcb); | ||
307 | set_current_kprobe(p, regs, kcb); | ||
308 | kprobes_inc_nmissed_count(p); | ||
309 | prepare_singlestep(p, regs, kcb); | ||
310 | kcb->kprobe_status = KPROBE_REENTER; | ||
311 | if (kcb->flags & SKIP_DELAYSLOT) { | ||
312 | resume_execution(p, regs, kcb); | ||
313 | restore_previous_kprobe(kcb); | ||
314 | preempt_enable_no_resched(); | ||
315 | } | ||
316 | return 1; | ||
317 | } else if (addr->word != breakpoint_insn.word) { | ||
318 | /* | ||
319 | * The breakpoint instruction was removed by | ||
320 | * another cpu right after we hit, no further | ||
321 | * handling of this interrupt is appropriate | ||
322 | */ | ||
323 | ret = 1; | ||
324 | } | ||
325 | goto no_kprobe; | ||
326 | } | ||
327 | |||
328 | p = get_kprobe(addr); | ||
329 | if (!p) { | ||
330 | if (addr->word != breakpoint_insn.word) { | ||
331 | /* | ||
332 | * The breakpoint instruction was removed right | ||
333 | * after we hit it. Another cpu has removed | ||
334 | * either a probepoint or a debugger breakpoint | ||
335 | * at this address. In either case, no further | ||
336 | * handling of this interrupt is appropriate. | ||
337 | */ | ||
338 | ret = 1; | ||
339 | } | ||
340 | /* Not one of ours: let kernel handle it */ | ||
341 | goto no_kprobe; | ||
342 | } | ||
343 | |||
344 | set_current_kprobe(p, regs, kcb); | ||
345 | kcb->kprobe_status = KPROBE_HIT_ACTIVE; | ||
346 | |||
347 | if (p->pre_handler && p->pre_handler(p, regs)) { | ||
348 | /* handler has already set things up, so skip ss setup */ | ||
349 | reset_current_kprobe(); | ||
350 | preempt_enable_no_resched(); | ||
351 | return 1; | ||
352 | } | ||
353 | |||
354 | prepare_singlestep(p, regs, kcb); | ||
355 | if (kcb->flags & SKIP_DELAYSLOT) { | ||
356 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
357 | if (p->post_handler) | ||
358 | p->post_handler(p, regs, 0); | ||
359 | resume_execution(p, regs, kcb); | ||
360 | preempt_enable_no_resched(); | ||
361 | } else | ||
362 | kcb->kprobe_status = KPROBE_HIT_SS; | ||
363 | |||
364 | return 1; | ||
365 | |||
366 | no_kprobe: | ||
367 | preempt_enable_no_resched(); | ||
368 | return ret; | ||
369 | |||
370 | } | ||
371 | |||
372 | static inline int post_kprobe_handler(struct pt_regs *regs) | ||
373 | { | ||
374 | struct kprobe *cur = kprobe_running(); | ||
375 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
376 | |||
377 | if (!cur) | ||
378 | return 0; | ||
379 | |||
380 | if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) { | ||
381 | kcb->kprobe_status = KPROBE_HIT_SSDONE; | ||
382 | cur->post_handler(cur, regs, 0); | ||
383 | } | ||
384 | |||
385 | resume_execution(cur, regs, kcb); | ||
386 | |||
387 | regs->cp0_status |= kcb->kprobe_saved_SR; | ||
388 | |||
389 | /* Restore back the original saved kprobes variables and continue. */ | ||
390 | if (kcb->kprobe_status == KPROBE_REENTER) { | ||
391 | restore_previous_kprobe(kcb); | ||
392 | goto out; | ||
393 | } | ||
394 | reset_current_kprobe(); | ||
395 | out: | ||
396 | preempt_enable_no_resched(); | ||
397 | |||
398 | return 1; | ||
399 | } | ||
400 | |||
401 | int kprobe_fault_handler(struct pt_regs *regs, int trapnr) | ||
402 | { | ||
403 | struct kprobe *cur = kprobe_running(); | ||
404 | struct kprobe_ctlblk *kcb = get_kprobe_ctlblk(); | ||
405 | |||
406 | if (cur->fault_handler && cur->fault_handler(cur, regs, trapnr)) | ||
407 | return 1; | ||
408 | |||
409 | if (kcb->kprobe_status & KPROBE_HIT_SS) { | ||
410 | resume_execution(cur, regs, kcb); | ||
411 | regs->cp0_status |= kcb->kprobe_old_SR; | ||
412 | |||
413 | reset_current_kprobe(); | ||
414 | preempt_enable_no_resched(); | ||
415 | } | ||
416 | return 0; | ||
417 | } | ||
418 | |||
419 | /* | ||
420 | * Wrapper routine for handling exceptions. | ||
421 | */ | ||
422 | int __kprobes kprobe_exceptions_notify(struct notifier_block *self, | ||
423 | unsigned long val, void *data) | ||
424 | { | ||
425 | |||
426 | struct die_args *args = (struct die_args *)data; | ||
427 | int ret = NOTIFY_DONE; | ||
428 | |||
429 | switch (val) { | ||
430 | case DIE_BREAK: | ||
431 | if (kprobe_handler(args->regs)) | ||
432 | ret = NOTIFY_STOP; | ||
433 | break; | ||
434 | case DIE_SSTEPBP: | ||
435 | if (post_kprobe_handler(args->regs)) | ||
436 | ret = NOTIFY_STOP; | ||
437 | break; | ||
438 | |||
439 | case DIE_PAGE_FAULT: | ||
440 | /* kprobe_running() needs smp_processor_id() */ | ||
441 | preempt_disable(); | ||
442 | |||
443 | if (kprobe_running() | ||
444 | && kprobe_fault_handler(args->regs, args->trapnr)) | ||
445 | ret = NOTIFY_STOP; | ||
446 | preempt_enable(); | ||
447 | break; | ||
448 | default: | ||
449 | break; | ||
450 | } | ||
451 | return ret; | ||
452 | } | ||
453 | |||
454 | /* | ||
455 | * Function return probe trampoline: | ||
456 | * - arch_init_kprobes() establishes a probepoint here | ||
457 | * - When the probed function returns, this probe causes the | ||
458 | * handlers to fire | ||
459 | */ | ||
460 | static void __used kretprobe_trampoline_holder(void) | ||
461 | { | ||
462 | asm volatile( | ||
463 | ".set push\n\t" | ||
464 | /* Keep the assembler from reordering and placing JR here. */ | ||
465 | ".set noreorder\n\t" | ||
466 | "nop\n\t" | ||
467 | ".global kretprobe_trampoline\n" | ||
468 | "kretprobe_trampoline:\n\t" | ||
469 | "nop\n\t" | ||
470 | ".set pop" | ||
471 | : : : "memory"); | ||
472 | } | ||
473 | |||
474 | void kretprobe_trampoline(void); | ||
475 | |||
476 | void __kprobes arch_prepare_kretprobe(struct kretprobe_instance *ri, | ||
477 | struct pt_regs *regs) | ||
478 | { | ||
479 | ri->ret_addr = (kprobe_opcode_t *) regs->regs[31]; | ||
480 | ri->fp = NULL; | ||
481 | |||
482 | /* Replace the return addr with trampoline addr */ | ||
483 | regs->regs[31] = (unsigned long)kretprobe_trampoline; | ||
484 | } | ||
485 | |||
486 | /* | ||
487 | * Called when the probe at kretprobe trampoline is hit | ||
488 | */ | ||
489 | static int __kprobes trampoline_probe_handler(struct kprobe *p, | ||
490 | struct pt_regs *regs) | ||
491 | { | ||
492 | instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, | ||
493 | kretprobe_trampoline, NULL); | ||
494 | /* | ||
495 | * By returning a non-zero value, we are telling | ||
496 | * kprobe_handler() that we don't want the post_handler | ||
497 | * to run (and have re-enabled preemption) | ||
498 | */ | ||
499 | return 1; | ||
500 | } | ||
501 | |||
502 | int __kprobes arch_trampoline_kprobe(struct kprobe *p) | ||
503 | { | ||
504 | if (p->addr == (kprobe_opcode_t *)kretprobe_trampoline) | ||
505 | return 1; | ||
506 | |||
507 | return 0; | ||
508 | } | ||
509 | |||
510 | static struct kprobe trampoline_p = { | ||
511 | .addr = (kprobe_opcode_t *)kretprobe_trampoline, | ||
512 | .pre_handler = trampoline_probe_handler | ||
513 | }; | ||
514 | |||
515 | int __init arch_init_kprobes(void) | ||
516 | { | ||
517 | return register_kprobe(&trampoline_p); | ||
518 | } | ||
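With the trampoline wired up by arch_init_kprobes(), the generic kretprobes
API works on MIPS unmodified. A minimal example module (the probed symbol and
maxactive value are illustrative; on MIPS the return value lives in $v0, i.e.
regs->regs[2]):

    // SPDX-License-Identifier: GPL-2.0
    #include <linux/module.h>
    #include <linux/kprobes.h>

    static int ret_handler(struct kretprobe_instance *ri, struct pt_regs *regs)
    {
        pr_info("kernel_clone() returned %ld\n", regs->regs[2]);
        return 0;
    }

    static struct kretprobe my_kretprobe = {
        .handler        = ret_handler,
        .kp.symbol_name = "kernel_clone",
        .maxactive      = 16,   /* concurrent instances to track */
    };

    static int __init rp_init(void)
    {
        return register_kretprobe(&my_kretprobe);
    }

    static void __exit rp_exit(void)
    {
        unregister_kretprobe(&my_kretprobe);
    }

    module_init(rp_init);
    module_exit(rp_exit);
    MODULE_LICENSE("GPL");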
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c new file mode 100644 index 000000000..6b61be486 --- /dev/null +++ b/arch/mips/kernel/linux32.c | |||
@@ -0,0 +1,133 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Conversion between 32-bit and 64-bit native system calls. | ||
4 | * | ||
5 | * Copyright (C) 2000 Silicon Graphics, Inc. | ||
6 | * Written by Ulf Carlsson (ulfc@engr.sgi.com) | ||
7 | */ | ||
8 | #include <linux/compiler.h> | ||
9 | #include <linux/mm.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/file.h> | ||
12 | #include <linux/highuid.h> | ||
13 | #include <linux/resource.h> | ||
14 | #include <linux/highmem.h> | ||
15 | #include <linux/time.h> | ||
16 | #include <linux/times.h> | ||
17 | #include <linux/poll.h> | ||
18 | #include <linux/skbuff.h> | ||
19 | #include <linux/filter.h> | ||
20 | #include <linux/shm.h> | ||
21 | #include <linux/sem.h> | ||
22 | #include <linux/msg.h> | ||
23 | #include <linux/icmpv6.h> | ||
24 | #include <linux/syscalls.h> | ||
25 | #include <linux/sysctl.h> | ||
26 | #include <linux/utime.h> | ||
27 | #include <linux/utsname.h> | ||
28 | #include <linux/personality.h> | ||
29 | #include <linux/dnotify.h> | ||
30 | #include <linux/binfmts.h> | ||
31 | #include <linux/security.h> | ||
32 | #include <linux/compat.h> | ||
33 | #include <linux/vfs.h> | ||
34 | #include <linux/ipc.h> | ||
35 | #include <linux/slab.h> | ||
36 | |||
37 | #include <net/sock.h> | ||
38 | #include <net/scm.h> | ||
39 | |||
40 | #include <asm/compat-signal.h> | ||
41 | #include <asm/sim.h> | ||
42 | #include <linux/uaccess.h> | ||
43 | #include <asm/mmu_context.h> | ||
44 | #include <asm/mman.h> | ||
45 | |||
46 | #ifdef __MIPSEB__ | ||
47 | #define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL)) | ||
48 | #endif | ||
49 | #ifdef __MIPSEL__ | ||
50 | #define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL)) | ||
51 | #endif | ||
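On o32, a 64-bit syscall argument arrives split across two 32-bit registers,
and which half carries the high word depends on endianness, hence the two
macro variants. A userspace sketch of the big-endian variant (this assumes a
64-bit unsigned long, as linux32.c itself is only built into 64-bit kernels):

    #include <stdio.h>

    /* Big-endian variant of merge_64: r1 carries the high 32 bits.
     * On little-endian the roles of r1 and r2 are swapped. */
    #define merge_64_be(r1, r2) \
        ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))

    int main(void)
    {
        unsigned long hi = 0x00000001UL;    /* high word of a 64-bit offset */
        unsigned long lo = 0x80000000UL;    /* low word */

        /* 0x00000001:0x80000000 -> 0x180000000, a 6 GiB offset */
        printf("%#lx\n", merge_64_be(hi, lo));
        return 0;
    }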
52 | |||
53 | SYSCALL_DEFINE4(32_truncate64, const char __user *, path, | ||
54 | unsigned long, __dummy, unsigned long, a2, unsigned long, a3) | ||
55 | { | ||
56 | return ksys_truncate(path, merge_64(a2, a3)); | ||
57 | } | ||
58 | |||
59 | SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy, | ||
60 | unsigned long, a2, unsigned long, a3) | ||
61 | { | ||
62 | return ksys_ftruncate(fd, merge_64(a2, a3)); | ||
63 | } | ||
64 | |||
65 | SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high, | ||
66 | unsigned int, offset_low, loff_t __user *, result, | ||
67 | unsigned int, origin) | ||
68 | { | ||
69 | return sys_llseek(fd, offset_high, offset_low, result, origin); | ||
70 | } | ||
71 | |||
72 | /* From the Single Unix Spec: pread & pwrite act like lseek to pos + op + | ||
73 | lseek back to original location. They fail just like lseek does on | ||
74 | non-seekable files. */ | ||
75 | |||
76 | SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count, | ||
77 | unsigned long, unused, unsigned long, a4, unsigned long, a5) | ||
78 | { | ||
79 | return ksys_pread64(fd, buf, count, merge_64(a4, a5)); | ||
80 | } | ||
81 | |||
82 | SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf, | ||
83 | size_t, count, u32, unused, u64, a4, u64, a5) | ||
84 | { | ||
85 | return ksys_pwrite64(fd, buf, count, merge_64(a4, a5)); | ||
86 | } | ||
87 | |||
88 | SYSCALL_DEFINE1(32_personality, unsigned long, personality) | ||
89 | { | ||
90 | unsigned int p = personality & 0xffffffff; | ||
91 | int ret; | ||
92 | |||
93 | if (personality(current->personality) == PER_LINUX32 && | ||
94 | personality(p) == PER_LINUX) | ||
95 | p = (p & ~PER_MASK) | PER_LINUX32; | ||
96 | ret = sys_personality(p); | ||
97 | if (ret != -1 && personality(ret) == PER_LINUX32) | ||
98 | ret = (ret & ~PER_MASK) | PER_LINUX; | ||
99 | return ret; | ||
100 | } | ||
101 | |||
102 | asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3, | ||
103 | size_t count) | ||
104 | { | ||
105 | return ksys_readahead(fd, merge_64(a2, a3), count); | ||
106 | } | ||
107 | |||
108 | asmlinkage long sys32_sync_file_range(int fd, int __pad, | ||
109 | unsigned long a2, unsigned long a3, | ||
110 | unsigned long a4, unsigned long a5, | ||
111 | int flags) | ||
112 | { | ||
113 | return ksys_sync_file_range(fd, | ||
114 | merge_64(a2, a3), merge_64(a4, a5), | ||
115 | flags); | ||
116 | } | ||
117 | |||
118 | asmlinkage long sys32_fadvise64_64(int fd, int __pad, | ||
119 | unsigned long a2, unsigned long a3, | ||
120 | unsigned long a4, unsigned long a5, | ||
121 | int flags) | ||
122 | { | ||
123 | return ksys_fadvise64_64(fd, | ||
124 | merge_64(a2, a3), merge_64(a4, a5), | ||
125 | flags); | ||
126 | } | ||
127 | |||
128 | asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2, | ||
129 | unsigned offset_a3, unsigned len_a4, unsigned len_a5) | ||
130 | { | ||
131 | return ksys_fallocate(fd, mode, merge_64(offset_a2, offset_a3), | ||
132 | merge_64(len_a4, len_a5)); | ||
133 | } | ||
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c new file mode 100644 index 000000000..432bfd3e7 --- /dev/null +++ b/arch/mips/kernel/machine_kexec.c | |||
@@ -0,0 +1,264 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * machine_kexec.c for kexec | ||
4 | * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006 | ||
5 | */ | ||
6 | #include <linux/compiler.h> | ||
7 | #include <linux/kexec.h> | ||
8 | #include <linux/mm.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/libfdt.h> | ||
11 | |||
12 | #include <asm/cacheflush.h> | ||
13 | #include <asm/page.h> | ||
14 | |||
15 | extern const unsigned char relocate_new_kernel[]; | ||
16 | extern const size_t relocate_new_kernel_size; | ||
17 | |||
18 | extern unsigned long kexec_start_address; | ||
19 | extern unsigned long kexec_indirection_page; | ||
20 | |||
21 | static unsigned long reboot_code_buffer; | ||
22 | |||
23 | #ifdef CONFIG_SMP | ||
24 | static void (*relocated_kexec_smp_wait)(void *); | ||
25 | |||
26 | atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0); | ||
27 | void (*_crash_smp_send_stop)(void) = NULL; | ||
28 | #endif | ||
29 | |||
30 | void (*_machine_kexec_shutdown)(void) = NULL; | ||
31 | void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL; | ||
32 | |||
33 | static void kexec_image_info(const struct kimage *kimage) | ||
34 | { | ||
35 | unsigned long i; | ||
36 | |||
37 | pr_debug("kexec kimage info:\n"); | ||
38 | pr_debug(" type: %d\n", kimage->type); | ||
39 | pr_debug(" start: %lx\n", kimage->start); | ||
40 | pr_debug(" head: %lx\n", kimage->head); | ||
41 | pr_debug(" nr_segments: %lu\n", kimage->nr_segments); | ||
42 | |||
43 | for (i = 0; i < kimage->nr_segments; i++) { | ||
44 | pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n", | ||
45 | i, | ||
46 | kimage->segment[i].mem, | ||
47 | kimage->segment[i].mem + kimage->segment[i].memsz, | ||
48 | (unsigned long)kimage->segment[i].memsz, | ||
49 | (unsigned long)kimage->segment[i].memsz / PAGE_SIZE); | ||
50 | } | ||
51 | } | ||
52 | |||
53 | #ifdef CONFIG_UHI_BOOT | ||
54 | |||
55 | static int uhi_machine_kexec_prepare(struct kimage *kimage) | ||
56 | { | ||
57 | int i; | ||
58 | |||
59 | /* | ||
60 | * In case a DTB file is not passed to the new kernel, a flat device | ||
61 | * tree will be created by the kexec tool. It holds the modified | ||
62 | * command line for the new kernel. | ||
63 | */ | ||
64 | for (i = 0; i < kimage->nr_segments; i++) { | ||
65 | struct fdt_header fdt; | ||
66 | |||
67 | if (kimage->segment[i].memsz <= sizeof(fdt)) | ||
68 | continue; | ||
69 | |||
70 | if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt))) | ||
71 | continue; | ||
72 | |||
73 | if (fdt_check_header(&fdt)) | ||
74 | continue; | ||
75 | |||
76 | kexec_args[0] = -2; | ||
77 | kexec_args[1] = (unsigned long) | ||
78 | phys_to_virt((unsigned long)kimage->segment[i].mem); | ||
79 | break; | ||
80 | } | ||
81 | |||
82 | return 0; | ||
83 | } | ||
84 | |||
85 | int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare; | ||
86 | |||
87 | #else | ||
88 | |||
89 | int (*_machine_kexec_prepare)(struct kimage *) = NULL; | ||
90 | |||
91 | #endif /* CONFIG_UHI_BOOT */ | ||
92 | |||
93 | int | ||
94 | machine_kexec_prepare(struct kimage *kimage) | ||
95 | { | ||
96 | #ifdef CONFIG_SMP | ||
97 | if (!kexec_nonboot_cpu_func()) | ||
98 | return -EINVAL; | ||
99 | #endif | ||
100 | |||
101 | kexec_image_info(kimage); | ||
102 | |||
103 | if (_machine_kexec_prepare) | ||
104 | return _machine_kexec_prepare(kimage); | ||
105 | |||
106 | return 0; | ||
107 | } | ||
108 | |||
109 | void | ||
110 | machine_kexec_cleanup(struct kimage *kimage) | ||
111 | { | ||
112 | } | ||
113 | |||
114 | #ifdef CONFIG_SMP | ||
115 | static void kexec_shutdown_secondary(void *param) | ||
116 | { | ||
117 | int cpu = smp_processor_id(); | ||
118 | |||
119 | if (!cpu_online(cpu)) | ||
120 | return; | ||
121 | |||
122 | /* We won't be sent IPIs any more. */ | ||
123 | set_cpu_online(cpu, false); | ||
124 | |||
125 | local_irq_disable(); | ||
126 | while (!atomic_read(&kexec_ready_to_reboot)) | ||
127 | cpu_relax(); | ||
128 | |||
129 | kexec_reboot(); | ||
130 | |||
131 | /* NOTREACHED */ | ||
132 | } | ||
133 | #endif | ||
134 | |||
135 | void | ||
136 | machine_shutdown(void) | ||
137 | { | ||
138 | if (_machine_kexec_shutdown) | ||
139 | _machine_kexec_shutdown(); | ||
140 | |||
141 | #ifdef CONFIG_SMP | ||
142 | smp_call_function(kexec_shutdown_secondary, NULL, 0); | ||
143 | |||
144 | while (num_online_cpus() > 1) { | ||
145 | cpu_relax(); | ||
146 | mdelay(1); | ||
147 | } | ||
148 | #endif | ||
149 | } | ||
150 | |||
151 | void | ||
152 | machine_crash_shutdown(struct pt_regs *regs) | ||
153 | { | ||
154 | if (_machine_crash_shutdown) | ||
155 | _machine_crash_shutdown(regs); | ||
156 | else | ||
157 | default_machine_crash_shutdown(regs); | ||
158 | } | ||
159 | |||
160 | #ifdef CONFIG_SMP | ||
161 | void kexec_nonboot_cpu_jump(void) | ||
162 | { | ||
163 | local_flush_icache_range((unsigned long)relocated_kexec_smp_wait, | ||
164 | reboot_code_buffer + relocate_new_kernel_size); | ||
165 | |||
166 | relocated_kexec_smp_wait(NULL); | ||
167 | } | ||
168 | #endif | ||
169 | |||
170 | void kexec_reboot(void) | ||
171 | { | ||
172 | void (*do_kexec)(void) __noreturn; | ||
173 | |||
174 | /* | ||
175 | * We know we were online, and there will be no incoming IPIs at | ||
176 | * this point. Mark online again before rebooting so that the crash | ||
177 | * analysis tool will see us correctly. | ||
178 | */ | ||
179 | set_cpu_online(smp_processor_id(), true); | ||
180 | |||
181 | /* Ensure remote CPUs observe that we're online before rebooting. */ | ||
182 | smp_mb__after_atomic(); | ||
183 | |||
184 | #ifdef CONFIG_SMP | ||
185 | if (smp_processor_id() > 0) { | ||
186 | /* | ||
187 | * Jumping into the relocated code, instead of cpu_relax() or | ||
188 | * wait, is needed for kexec SMP reboot. Kdump usually doesn't | ||
189 | * require an SMP new kernel, but kexec may. | ||
190 | */ | ||
191 | kexec_nonboot_cpu(); | ||
192 | |||
193 | /* NOTREACHED */ | ||
194 | } | ||
195 | #endif | ||
196 | |||
197 | /* | ||
198 | * Make sure we get correct instructions written by the | ||
199 | * machine_kexec() CPU. | ||
200 | */ | ||
201 | local_flush_icache_range(reboot_code_buffer, | ||
202 | reboot_code_buffer + relocate_new_kernel_size); | ||
203 | |||
204 | do_kexec = (void *)reboot_code_buffer; | ||
205 | do_kexec(); | ||
206 | } | ||
207 | |||
208 | void | ||
209 | machine_kexec(struct kimage *image) | ||
210 | { | ||
211 | unsigned long entry; | ||
212 | unsigned long *ptr; | ||
213 | |||
214 | reboot_code_buffer = | ||
215 | (unsigned long)page_address(image->control_code_page); | ||
216 | |||
217 | kexec_start_address = | ||
218 | (unsigned long) phys_to_virt(image->start); | ||
219 | |||
220 | if (image->type == KEXEC_TYPE_DEFAULT) { | ||
221 | kexec_indirection_page = | ||
222 | (unsigned long) phys_to_virt(image->head & PAGE_MASK); | ||
223 | } else { | ||
224 | kexec_indirection_page = (unsigned long)&image->head; | ||
225 | } | ||
226 | |||
227 | memcpy((void*)reboot_code_buffer, relocate_new_kernel, | ||
228 | relocate_new_kernel_size); | ||
229 | |||
230 | /* | ||
231 | * The generic kexec code builds a page list with physical | ||
232 | * addresses. They are directly accessible through KSEG0 (or | ||
233 | * CKSEG0 or XPHYS if on a 64-bit system), hence the | ||
234 | * phys_to_virt() call. | ||
235 | */ | ||
236 | for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE); | ||
237 | ptr = (entry & IND_INDIRECTION) ? | ||
238 | phys_to_virt(entry & PAGE_MASK) : ptr + 1) { | ||
239 | if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION || | ||
240 | *ptr & IND_DESTINATION) | ||
241 | *ptr = (unsigned long) phys_to_virt(*ptr); | ||
242 | } | ||
243 | |||
244 | /* Mark offline BEFORE disabling local irq. */ | ||
245 | set_cpu_online(smp_processor_id(), false); | ||
246 | |||
247 | /* | ||
248 | * we do not want to be bothered. | ||
249 | */ | ||
250 | local_irq_disable(); | ||
251 | |||
252 | printk("Will call new kernel at %08lx\n", image->start); | ||
253 | printk("Bye ...\n"); | ||
254 | /* Make reboot code buffer available to the boot CPU. */ | ||
255 | __flush_cache_all(); | ||
256 | #ifdef CONFIG_SMP | ||
257 | /* All secondary CPUs may now jump to the kexec_wait loop */ | ||
258 | relocated_kexec_smp_wait = reboot_code_buffer + | ||
259 | (void *)(kexec_smp_wait - relocate_new_kernel); | ||
260 | smp_wmb(); | ||
261 | atomic_set(&kexec_ready_to_reboot, 1); | ||
262 | #endif | ||
263 | kexec_reboot(); | ||
264 | } | ||
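The page-list walk in machine_kexec() above follows a small grammar: each
entry is an address with IND_* flag bits in its low bits, IND_INDIRECTION
chains to the next list page, and IND_DONE terminates the list. A stand-alone
sketch of the same walk (the IND_* values match include/linux/kexec.h; the
kernel masks with PAGE_MASK because real indirection pages are page aligned,
here the low nibble is enough):

    #include <stdio.h>

    #define IND_DESTINATION 0x1 /* start filling at this destination page */
    #define IND_INDIRECTION 0x2 /* continue the walk at this list page */
    #define IND_DONE        0x4 /* end of the list */
    #define IND_SOURCE      0x8 /* copy this source page next */

    static unsigned long second[3] __attribute__((aligned(16)));

    static void walk(unsigned long *ptr)
    {
        unsigned long entry;

        for (; (entry = *ptr) && !(entry & IND_DONE);
             ptr = (entry & IND_INDIRECTION) ?
                   (unsigned long *)(entry & ~0xfUL) : ptr + 1) {
            if (entry & IND_DESTINATION)
                printf("destination %#lx\n", entry & ~0xfUL);
            else if (entry & IND_SOURCE)
                printf("source      %#lx\n", entry & ~0xfUL);
        }
    }

    int main(void)
    {
        unsigned long first[2] = {
            0x1000 | IND_DESTINATION,
            (unsigned long)second | IND_INDIRECTION,    /* hop */
        };

        second[0] = 0x2000 | IND_SOURCE;
        second[1] = 0x3000 | IND_SOURCE;
        second[2] = IND_DONE;

        walk(first);
        return 0;
    }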
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S new file mode 100644 index 000000000..cff52b283 --- /dev/null +++ b/arch/mips/kernel/mcount.S | |||
@@ -0,0 +1,220 @@ | |||
1 | /* | ||
2 | * MIPS specific _mcount support | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive for | ||
6 | * more details. | ||
7 | * | ||
8 | * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China | ||
9 | * Copyright (C) 2010 DSLab, Lanzhou University, China | ||
10 | * Author: Wu Zhangjin <wuzhangjin@gmail.com> | ||
11 | */ | ||
12 | |||
13 | #include <asm/export.h> | ||
14 | #include <asm/regdef.h> | ||
15 | #include <asm/stackframe.h> | ||
16 | #include <asm/ftrace.h> | ||
17 | |||
18 | .text | ||
19 | .set noreorder | ||
20 | .set noat | ||
21 | |||
22 | .macro MCOUNT_SAVE_REGS | ||
23 | PTR_SUBU sp, PT_SIZE | ||
24 | PTR_S ra, PT_R31(sp) | ||
25 | PTR_S AT, PT_R1(sp) | ||
26 | PTR_S a0, PT_R4(sp) | ||
27 | PTR_S a1, PT_R5(sp) | ||
28 | PTR_S a2, PT_R6(sp) | ||
29 | PTR_S a3, PT_R7(sp) | ||
30 | #ifdef CONFIG_64BIT | ||
31 | PTR_S a4, PT_R8(sp) | ||
32 | PTR_S a5, PT_R9(sp) | ||
33 | PTR_S a6, PT_R10(sp) | ||
34 | PTR_S a7, PT_R11(sp) | ||
35 | #endif | ||
36 | .endm | ||
37 | |||
38 | .macro MCOUNT_RESTORE_REGS | ||
39 | PTR_L ra, PT_R31(sp) | ||
40 | PTR_L AT, PT_R1(sp) | ||
41 | PTR_L a0, PT_R4(sp) | ||
42 | PTR_L a1, PT_R5(sp) | ||
43 | PTR_L a2, PT_R6(sp) | ||
44 | PTR_L a3, PT_R7(sp) | ||
45 | #ifdef CONFIG_64BIT | ||
46 | PTR_L a4, PT_R8(sp) | ||
47 | PTR_L a5, PT_R9(sp) | ||
48 | PTR_L a6, PT_R10(sp) | ||
49 | PTR_L a7, PT_R11(sp) | ||
50 | #endif | ||
51 | PTR_ADDIU sp, PT_SIZE | ||
52 | .endm | ||
53 | |||
54 | .macro RETURN_BACK | ||
55 | jr ra | ||
56 | move ra, AT | ||
57 | .endm | ||
58 | |||
59 | /* | ||
60 | * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass | ||
61 | * the location of the parent's return address. | ||
62 | */ | ||
63 | #define MCOUNT_RA_ADDRESS_REG $12 | ||
64 | |||
65 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
66 | |||
67 | NESTED(ftrace_caller, PT_SIZE, ra) | ||
68 | .globl _mcount | ||
69 | _mcount: | ||
70 | EXPORT_SYMBOL(_mcount) | ||
71 | b ftrace_stub | ||
72 | #ifdef CONFIG_32BIT | ||
73 | addiu sp,sp,8 | ||
74 | #else | ||
75 | nop | ||
76 | #endif | ||
77 | |||
78 | /* When tracing is activated, it calls ftrace_caller+8 (aka here) */ | ||
79 | MCOUNT_SAVE_REGS | ||
80 | #ifdef KBUILD_MCOUNT_RA_ADDRESS | ||
81 | PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp) | ||
82 | #endif | ||
83 | |||
84 | PTR_SUBU a0, ra, 8 /* arg1: self address */ | ||
85 | PTR_LA t1, _stext | ||
86 | sltu t2, a0, t1 /* t2 = (a0 < _stext) */ | ||
87 | PTR_LA t1, _etext | ||
88 | sltu t3, t1, a0 /* t3 = (a0 > _etext) */ | ||
89 | or t1, t2, t3 | ||
90 | beqz t1, ftrace_call | ||
91 | nop | ||
92 | #if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT) | ||
93 | PTR_SUBU a0, a0, 16 /* arg1: adjust to module's recorded callsite */ | ||
94 | #else | ||
95 | PTR_SUBU a0, a0, 12 | ||
96 | #endif | ||
97 | |||
98 | .globl ftrace_call | ||
99 | ftrace_call: | ||
100 | nop /* a placeholder for the call to a real tracing function */ | ||
101 | move a1, AT /* arg2: parent's return address */ | ||
102 | |||
103 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
104 | .globl ftrace_graph_call | ||
105 | ftrace_graph_call: | ||
106 | nop | ||
107 | nop | ||
108 | #endif | ||
109 | |||
110 | MCOUNT_RESTORE_REGS | ||
111 | .globl ftrace_stub | ||
112 | ftrace_stub: | ||
113 | RETURN_BACK | ||
114 | END(ftrace_caller) | ||
115 | |||
116 | #else /* ! CONFIG_DYNAMIC_FTRACE */ | ||
117 | |||
118 | NESTED(_mcount, PT_SIZE, ra) | ||
119 | EXPORT_SYMBOL(_mcount) | ||
120 | PTR_LA t1, ftrace_stub | ||
121 | PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */ | ||
122 | beq t1, t2, fgraph_trace | ||
123 | nop | ||
124 | |||
125 | MCOUNT_SAVE_REGS | ||
126 | |||
127 | move a0, ra /* arg1: self return address */ | ||
128 | jalr t2 /* (1) call *ftrace_trace_function */ | ||
129 | move a1, AT /* arg2: parent's return address */ | ||
130 | |||
131 | MCOUNT_RESTORE_REGS | ||
132 | |||
133 | fgraph_trace: | ||
134 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
135 | PTR_LA t1, ftrace_stub | ||
136 | PTR_L t3, ftrace_graph_return | ||
137 | bne t1, t3, ftrace_graph_caller | ||
138 | nop | ||
139 | PTR_LA t1, ftrace_graph_entry_stub | ||
140 | PTR_L t3, ftrace_graph_entry | ||
141 | bne t1, t3, ftrace_graph_caller | ||
142 | nop | ||
143 | #endif | ||
144 | |||
145 | #ifdef CONFIG_32BIT | ||
146 | addiu sp, sp, 8 | ||
147 | #endif | ||
148 | |||
149 | .globl ftrace_stub | ||
150 | ftrace_stub: | ||
151 | RETURN_BACK | ||
152 | END(_mcount) | ||
153 | |||
154 | #endif /* ! CONFIG_DYNAMIC_FTRACE */ | ||
155 | |||
156 | #ifdef CONFIG_FUNCTION_GRAPH_TRACER | ||
157 | |||
158 | NESTED(ftrace_graph_caller, PT_SIZE, ra) | ||
159 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
160 | MCOUNT_SAVE_REGS | ||
161 | #endif | ||
162 | |||
163 | /* arg1: Get the location of the parent's return address */ | ||
164 | #ifdef KBUILD_MCOUNT_RA_ADDRESS | ||
165 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
166 | PTR_L a0, PT_R12(sp) | ||
167 | #else | ||
168 | move a0, MCOUNT_RA_ADDRESS_REG | ||
169 | #endif | ||
170 | bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */ | ||
171 | nop | ||
172 | #endif | ||
173 | PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */ | ||
174 | 1: | ||
175 | |||
176 | /* arg2: Get self return address */ | ||
177 | #ifdef CONFIG_DYNAMIC_FTRACE | ||
178 | PTR_L a1, PT_R31(sp) | ||
179 | #else | ||
180 | move a1, ra | ||
181 | #endif | ||
182 | |||
183 | /* arg3: Get frame pointer of current stack */ | ||
184 | #ifdef CONFIG_64BIT | ||
185 | PTR_LA a2, PT_SIZE(sp) | ||
186 | #else | ||
187 | PTR_LA a2, (PT_SIZE+8)(sp) | ||
188 | #endif | ||
189 | |||
190 | jal prepare_ftrace_return | ||
191 | nop | ||
192 | MCOUNT_RESTORE_REGS | ||
193 | #ifndef CONFIG_DYNAMIC_FTRACE | ||
194 | #ifdef CONFIG_32BIT | ||
195 | addiu sp, sp, 8 | ||
196 | #endif | ||
197 | #endif | ||
198 | RETURN_BACK | ||
199 | END(ftrace_graph_caller) | ||
200 | |||
201 | .align 2 | ||
202 | .globl return_to_handler | ||
203 | return_to_handler: | ||
204 | PTR_SUBU sp, PT_SIZE | ||
205 | PTR_S v0, PT_R2(sp) | ||
206 | |||
207 | jal ftrace_return_to_handler | ||
208 | PTR_S v1, PT_R3(sp) | ||
209 | |||
210 | /* restore the real parent address: v0 -> ra */ | ||
211 | move ra, v0 | ||
212 | |||
213 | PTR_L v0, PT_R2(sp) | ||
214 | PTR_L v1, PT_R3(sp) | ||
215 | jr ra | ||
216 | PTR_ADDIU sp, PT_SIZE | ||
217 | #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ | ||
218 | |||
219 | .set at | ||
220 | .set reorder | ||
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c new file mode 100644 index 000000000..72c8374a3 --- /dev/null +++ b/arch/mips/kernel/mips-cm.c | |||
@@ -0,0 +1,514 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2013 Imagination Technologies | ||
4 | * Author: Paul Burton <paul.burton@mips.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/errno.h> | ||
8 | #include <linux/percpu.h> | ||
9 | #include <linux/spinlock.h> | ||
10 | |||
11 | #include <asm/mips-cps.h> | ||
12 | #include <asm/mipsregs.h> | ||
13 | |||
14 | void __iomem *mips_gcr_base; | ||
15 | void __iomem *mips_cm_l2sync_base; | ||
16 | int mips_cm_is64; | ||
17 | |||
18 | static char *cm2_tr[8] = { | ||
19 | "mem", "gcr", "gic", "mmio", | ||
20 | "0x04", "cpc", "0x06", "0x07" | ||
21 | }; | ||
22 | |||
23 | /* CM3 Tag ECC transaction type */ | ||
24 | static char *cm3_tr[16] = { | ||
25 | [0x0] = "ReqNoData", | ||
26 | [0x1] = "0x1", | ||
27 | [0x2] = "ReqWData", | ||
28 | [0x3] = "0x3", | ||
29 | [0x4] = "IReqNoResp", | ||
30 | [0x5] = "IReqWResp", | ||
31 | [0x6] = "IReqNoRespDat", | ||
32 | [0x7] = "IReqWRespDat", | ||
33 | [0x8] = "RespNoData", | ||
34 | [0x9] = "RespDataFol", | ||
35 | [0xa] = "RespWData", | ||
36 | [0xb] = "RespDataOnly", | ||
37 | [0xc] = "IRespNoData", | ||
38 | [0xd] = "IRespDataFol", | ||
39 | [0xe] = "IRespWData", | ||
40 | [0xf] = "IRespDataOnly" | ||
41 | }; | ||
42 | |||
43 | static char *cm2_cmd[32] = { | ||
44 | [0x00] = "0x00", | ||
45 | [0x01] = "Legacy Write", | ||
46 | [0x02] = "Legacy Read", | ||
47 | [0x03] = "0x03", | ||
48 | [0x04] = "0x04", | ||
49 | [0x05] = "0x05", | ||
50 | [0x06] = "0x06", | ||
51 | [0x07] = "0x07", | ||
52 | [0x08] = "Coherent Read Own", | ||
53 | [0x09] = "Coherent Read Share", | ||
54 | [0x0a] = "Coherent Read Discard", | ||
55 | [0x0b] = "Coherent Ready Share Always", | ||
56 | [0x0c] = "Coherent Upgrade", | ||
57 | [0x0d] = "Coherent Writeback", | ||
58 | [0x0e] = "0x0e", | ||
59 | [0x0f] = "0x0f", | ||
60 | [0x10] = "Coherent Copyback", | ||
61 | [0x11] = "Coherent Copyback Invalidate", | ||
62 | [0x12] = "Coherent Invalidate", | ||
63 | [0x13] = "Coherent Write Invalidate", | ||
64 | [0x14] = "Coherent Completion Sync", | ||
65 | [0x15] = "0x15", | ||
66 | [0x16] = "0x16", | ||
67 | [0x17] = "0x17", | ||
68 | [0x18] = "0x18", | ||
69 | [0x19] = "0x19", | ||
70 | [0x1a] = "0x1a", | ||
71 | [0x1b] = "0x1b", | ||
72 | [0x1c] = "0x1c", | ||
73 | [0x1d] = "0x1d", | ||
74 | [0x1e] = "0x1e", | ||
75 | [0x1f] = "0x1f" | ||
76 | }; | ||
77 | |||
78 | /* CM3 Tag ECC command type */ | ||
79 | static char *cm3_cmd[16] = { | ||
80 | [0x0] = "Legacy Read", | ||
81 | [0x1] = "Legacy Write", | ||
82 | [0x2] = "Coherent Read Own", | ||
83 | [0x3] = "Coherent Read Share", | ||
84 | [0x4] = "Coherent Read Discard", | ||
85 | [0x5] = "Coherent Evicted", | ||
86 | [0x6] = "Coherent Upgrade", | ||
87 | [0x7] = "Coherent Upgrade for Store Conditional", | ||
88 | [0x8] = "Coherent Writeback", | ||
89 | [0x9] = "Coherent Write Invalidate", | ||
90 | [0xa] = "0xa", | ||
91 | [0xb] = "0xb", | ||
92 | [0xc] = "0xc", | ||
93 | [0xd] = "0xd", | ||
94 | [0xe] = "0xe", | ||
95 | [0xf] = "0xf" | ||
96 | }; | ||
97 | |||
98 | /* CM3 Tag ECC command group */ | ||
99 | static char *cm3_cmd_group[8] = { | ||
100 | [0x0] = "Normal", | ||
101 | [0x1] = "Registers", | ||
102 | [0x2] = "TLB", | ||
103 | [0x3] = "0x3", | ||
104 | [0x4] = "L1I", | ||
105 | [0x5] = "L1D", | ||
106 | [0x6] = "L3", | ||
107 | [0x7] = "L2" | ||
108 | }; | ||
109 | |||
110 | static char *cm2_core[8] = { | ||
111 | "Invalid/OK", "Invalid/Data", | ||
112 | "Shared/OK", "Shared/Data", | ||
113 | "Modified/OK", "Modified/Data", | ||
114 | "Exclusive/OK", "Exclusive/Data" | ||
115 | }; | ||
116 | |||
117 | static char *cm2_l2_type[4] = { | ||
118 | [0x0] = "None", | ||
119 | [0x1] = "Tag RAM single/double ECC error", | ||
120 | [0x2] = "Data RAM single/double ECC error", | ||
121 | [0x3] = "WS RAM uncorrectable dirty parity" | ||
122 | }; | ||
123 | |||
124 | static char *cm2_l2_instr[32] = { | ||
125 | [0x00] = "L2_NOP", | ||
126 | [0x01] = "L2_ERR_CORR", | ||
127 | [0x02] = "L2_TAG_INV", | ||
128 | [0x03] = "L2_WS_CLEAN", | ||
129 | [0x04] = "L2_RD_MDYFY_WR", | ||
130 | [0x05] = "L2_WS_MRU", | ||
131 | [0x06] = "L2_EVICT_LN2", | ||
132 | [0x07] = "0x07", | ||
133 | [0x08] = "L2_EVICT", | ||
134 | [0x09] = "L2_REFL", | ||
135 | [0x0a] = "L2_RD", | ||
136 | [0x0b] = "L2_WR", | ||
137 | [0x0c] = "L2_EVICT_MRU", | ||
138 | [0x0d] = "L2_SYNC", | ||
139 | [0x0e] = "L2_REFL_ERR", | ||
140 | [0x0f] = "0x0f", | ||
141 | [0x10] = "L2_INDX_WB_INV", | ||
142 | [0x11] = "L2_INDX_LD_TAG", | ||
143 | [0x12] = "L2_INDX_ST_TAG", | ||
144 | [0x13] = "L2_INDX_ST_DATA", | ||
145 | [0x14] = "L2_INDX_ST_ECC", | ||
146 | [0x15] = "0x15", | ||
147 | [0x16] = "0x16", | ||
148 | [0x17] = "0x17", | ||
149 | [0x18] = "L2_FTCH_AND_LCK", | ||
150 | [0x19] = "L2_HIT_INV", | ||
151 | [0x1a] = "L2_HIT_WB_INV", | ||
152 | [0x1b] = "L2_HIT_WB", | ||
153 | [0x1c] = "0x1c", | ||
154 | [0x1d] = "0x1d", | ||
155 | [0x1e] = "0x1e", | ||
156 | [0x1f] = "0x1f" | ||
157 | }; | ||
158 | |||
159 | static char *cm2_causes[32] = { | ||
160 | "None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR", | ||
161 | "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07", | ||
162 | "0x08", "0x09", "0x0a", "0x0b", | ||
163 | "0x0c", "0x0d", "0x0e", "0x0f", | ||
164 | "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13", | ||
165 | "0x14", "0x15", "0x16", "0x17", | ||
166 | "L2_RD_UNCORR", "L2_WR_UNCORR", "L2_CORR", "0x1b", | ||
167 | "0x1c", "0x1d", "0x1e", "0x1f" | ||
168 | }; | ||
169 | |||
170 | static char *cm3_causes[32] = { | ||
171 | "0x0", "MP_CORRECTABLE_ECC_ERR", "MP_REQUEST_DECODE_ERR", | ||
172 | "MP_UNCORRECTABLE_ECC_ERR", "MP_PARITY_ERR", "MP_COHERENCE_ERR", | ||
173 | "CMBIU_REQUEST_DECODE_ERR", "CMBIU_PARITY_ERR", "CMBIU_AXI_RESP_ERR", | ||
174 | "0x9", "RBI_BUS_ERR", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10", | ||
175 | "0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18", | ||
176 | "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f" | ||
177 | }; | ||
178 | |||
179 | static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock); | ||
180 | static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags); | ||
181 | |||
182 | phys_addr_t __mips_cm_phys_base(void) | ||
183 | { | ||
184 | u32 config3 = read_c0_config3(); | ||
185 | unsigned long cmgcr; | ||
186 | |||
187 | /* Check the CMGCRBase register is implemented */ | ||
188 | if (!(config3 & MIPS_CONF3_CMGCR)) | ||
189 | return 0; | ||
190 | |||
191 | /* Read the address from CMGCRBase */ | ||
192 | cmgcr = read_c0_cmgcrbase(); | ||
193 | return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32); | ||
194 | } | ||
195 | |||
196 | phys_addr_t mips_cm_phys_base(void) | ||
197 | __attribute__((weak, alias("__mips_cm_phys_base"))); | ||
198 | |||
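CMGCRBase keeps physical address bits 35:4 of the GCR block in the register,
so recovering the base is a mask plus a four-bit shift, exactly the (36 - 32)
above. A worked example (the field layout is assumed from asm/mipsregs.h, and
0x1fbf8000 is just a typical Malta-style GCR base):

    #include <stdio.h>
    #include <stdint.h>

    /* base field: everything above bit 10 of the register */
    #define MIPS_CMGCRF_BASE (~(uint64_t)((1 << 11) - 1))

    int main(void)
    {
        uint64_t cmgcr = 0x01fbf800;    /* example CMGCRBase value */
        uint64_t phys = (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);

        printf("GCR base: %#llx\n", (unsigned long long)phys);  /* 0x1fbf8000 */
        return 0;
    }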
199 | phys_addr_t __mips_cm_l2sync_phys_base(void) | ||
200 | { | ||
201 | u32 base_reg; | ||
202 | |||
203 | /* | ||
204 | * If the L2-only sync region is already enabled then leave it at its | ||
205 | * current location. | ||
206 | */ | ||
207 | base_reg = read_gcr_l2_only_sync_base(); | ||
208 | if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN) | ||
209 | return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE; | ||
210 | |||
211 | /* Default to following the CM */ | ||
212 | return mips_cm_phys_base() + MIPS_CM_GCR_SIZE; | ||
213 | } | ||
214 | |||
215 | phys_addr_t mips_cm_l2sync_phys_base(void) | ||
216 | __attribute__((weak, alias("__mips_cm_l2sync_phys_base"))); | ||
217 | |||
218 | static void mips_cm_probe_l2sync(void) | ||
219 | { | ||
220 | unsigned major_rev; | ||
221 | phys_addr_t addr; | ||
222 | |||
223 | /* L2-only sync was introduced with CM major revision 6 */ | ||
224 | major_rev = FIELD_GET(CM_GCR_REV_MAJOR, read_gcr_rev()); | ||
225 | if (major_rev < 6) | ||
226 | return; | ||
227 | |||
228 | /* Find a location for the L2 sync region */ | ||
229 | addr = mips_cm_l2sync_phys_base(); | ||
230 | BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE) != addr); | ||
231 | if (!addr) | ||
232 | return; | ||
233 | |||
234 | /* Set the region base address & enable it */ | ||
235 | write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN); | ||
236 | |||
237 | /* Map the region */ | ||
238 | mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE); | ||
239 | } | ||
240 | |||
241 | int mips_cm_probe(void) | ||
242 | { | ||
243 | phys_addr_t addr; | ||
244 | u32 base_reg; | ||
245 | unsigned cpu; | ||
246 | |||
247 | /* | ||
248 | * No need to probe again if we have already been | ||
249 | * here before. | ||
250 | */ | ||
251 | if (mips_gcr_base) | ||
252 | return 0; | ||
253 | |||
254 | addr = mips_cm_phys_base(); | ||
255 | BUG_ON((addr & CM_GCR_BASE_GCRBASE) != addr); | ||
256 | if (!addr) | ||
257 | return -ENODEV; | ||
258 | |||
259 | mips_gcr_base = ioremap(addr, MIPS_CM_GCR_SIZE); | ||
260 | if (!mips_gcr_base) | ||
261 | return -ENXIO; | ||
262 | |||
263 | /* sanity check that we're looking at a CM */ | ||
264 | base_reg = read_gcr_base(); | ||
265 | if ((base_reg & CM_GCR_BASE_GCRBASE) != addr) { | ||
266 | pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n", | ||
267 | (unsigned long)addr); | ||
268 | mips_gcr_base = NULL; | ||
269 | return -ENODEV; | ||
270 | } | ||
271 | |||
272 | /* set default target to memory */ | ||
273 | change_gcr_base(CM_GCR_BASE_CMDEFTGT, CM_GCR_BASE_CMDEFTGT_MEM); | ||
274 | |||
275 | /* disable CM regions */ | ||
276 | write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR); | ||
277 | write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK); | ||
278 | write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR); | ||
279 | write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK); | ||
280 | write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR); | ||
281 | write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK); | ||
282 | write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR); | ||
283 | write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK); | ||
284 | |||
285 | /* probe for an L2-only sync region */ | ||
286 | mips_cm_probe_l2sync(); | ||
287 | |||
288 | /* determine register width for this CM */ | ||
289 | mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3); | ||
290 | |||
291 | for_each_possible_cpu(cpu) | ||
292 | spin_lock_init(&per_cpu(cm_core_lock, cpu)); | ||
293 | |||
294 | return 0; | ||
295 | } | ||
296 | |||
297 | void mips_cm_lock_other(unsigned int cluster, unsigned int core, | ||
298 | unsigned int vp, unsigned int block) | ||
299 | { | ||
300 | unsigned int curr_core, cm_rev; | ||
301 | u32 val; | ||
302 | |||
303 | cm_rev = mips_cm_revision(); | ||
304 | preempt_disable(); | ||
305 | |||
306 | if (cm_rev >= CM_REV_CM3) { | ||
307 | val = FIELD_PREP(CM3_GCR_Cx_OTHER_CORE, core) | | ||
308 | FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp); | ||
309 | |||
310 | if (cm_rev >= CM_REV_CM3_5) { | ||
311 | val |= CM_GCR_Cx_OTHER_CLUSTER_EN; | ||
312 | val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster); | ||
313 | val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block); | ||
314 | } else { | ||
315 | WARN_ON(cluster != 0); | ||
316 | WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); | ||
317 | } | ||
318 | |||
319 | /* | ||
320 | * We need to disable interrupts in SMP systems in order to | ||
321 | * ensure that we don't interrupt the caller with code which | ||
322 | * may modify the redirect register. We do so here in a | ||
323 | * slightly obscure way by using a spin lock, since this has | ||
324 | * the neat property of also catching any nested uses of | ||
325 | * mips_cm_lock_other() leading to a deadlock or a nice warning | ||
326 | * with lockdep enabled. | ||
327 | */ | ||
328 | spin_lock_irqsave(this_cpu_ptr(&cm_core_lock), | ||
329 | *this_cpu_ptr(&cm_core_lock_flags)); | ||
330 | } else { | ||
331 | WARN_ON(cluster != 0); | ||
332 | WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL); | ||
333 | |||
334 | /* | ||
335 | * We only have a GCR_CL_OTHER per core in systems with | ||
336 | * CM 2.5 & older, so have to ensure other VP(E)s don't | ||
337 | * race with us. | ||
338 | */ | ||
339 | curr_core = cpu_core(¤t_cpu_data); | ||
340 | spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core), | ||
341 | per_cpu(cm_core_lock_flags, curr_core)); | ||
342 | |||
343 | val = FIELD_PREP(CM_GCR_Cx_OTHER_CORENUM, core); | ||
344 | } | ||
345 | |||
346 | write_gcr_cl_other(val); | ||
347 | |||
348 | /* | ||
349 | * Ensure the core-other region reflects the appropriate core & | ||
350 | * VP before any accesses to it occur. | ||
351 | */ | ||
352 | mb(); | ||
353 | } | ||
354 | |||
355 | void mips_cm_unlock_other(void) | ||
356 | { | ||
357 | unsigned int curr_core; | ||
358 | |||
359 | if (mips_cm_revision() < CM_REV_CM3) { | ||
360 | curr_core = cpu_core(¤t_cpu_data); | ||
361 | spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core), | ||
362 | per_cpu(cm_core_lock_flags, curr_core)); | ||
363 | } else { | ||
364 | spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock), | ||
365 | *this_cpu_ptr(&cm_core_lock_flags)); | ||
366 | } | ||
367 | |||
368 | preempt_enable(); | ||
369 | } | ||
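Callers bracket every access to the redirected core-other GCR block with this
lock/unlock pair. A sketch of the canonical pattern in kernel context
(read_gcr_co_coherence() is assumed to be one of the generated co_ accessors
in asm/mips-cm.h):

    #include <linux/types.h>
    #include <asm/mips-cm.h>

    /* Read another core's coherence control register via the
     * redirect block, serialized against other users of it. */
    static u32 read_other_core_coherence(unsigned int core)
    {
        u32 cohctl;

        /* point the redirect block at (cluster 0, core, VP 0) */
        mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
        cohctl = read_gcr_co_coherence();
        mips_cm_unlock_other();

        return cohctl;
    }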
370 | |||
371 | void mips_cm_error_report(void) | ||
372 | { | ||
373 | u64 cm_error, cm_addr, cm_other; | ||
374 | unsigned long revision; | ||
375 | int ocause, cause; | ||
376 | char buf[256]; | ||
377 | |||
378 | if (!mips_cm_present()) | ||
379 | return; | ||
380 | |||
381 | revision = mips_cm_revision(); | ||
382 | cm_error = read_gcr_error_cause(); | ||
383 | cm_addr = read_gcr_error_addr(); | ||
384 | cm_other = read_gcr_error_mult(); | ||
385 | |||
386 | if (revision < CM_REV_CM3) { /* CM2 */ | ||
387 | cause = FIELD_GET(CM_GCR_ERROR_CAUSE_ERRTYPE, cm_error); | ||
388 | ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other); | ||
389 | |||
390 | if (!cause) | ||
391 | return; | ||
392 | |||
393 | if (cause < 16) { | ||
394 | unsigned long cca_bits = (cm_error >> 15) & 7; | ||
395 | unsigned long tr_bits = (cm_error >> 12) & 7; | ||
396 | unsigned long cmd_bits = (cm_error >> 7) & 0x1f; | ||
397 | unsigned long stag_bits = (cm_error >> 3) & 15; | ||
398 | unsigned long sport_bits = (cm_error >> 0) & 7; | ||
399 | |||
400 | snprintf(buf, sizeof(buf), | ||
401 | "CCA=%lu TR=%s MCmd=%s STag=%lu " | ||
402 | "SPort=%lu\n", cca_bits, cm2_tr[tr_bits], | ||
403 | cm2_cmd[cmd_bits], stag_bits, sport_bits); | ||
404 | } else if (cause < 24) { | ||
405 | /* glob state & sresp together */ | ||
406 | unsigned long c3_bits = (cm_error >> 18) & 7; | ||
407 | unsigned long c2_bits = (cm_error >> 15) & 7; | ||
408 | unsigned long c1_bits = (cm_error >> 12) & 7; | ||
409 | unsigned long c0_bits = (cm_error >> 9) & 7; | ||
410 | unsigned long sc_bit = (cm_error >> 8) & 1; | ||
411 | unsigned long cmd_bits = (cm_error >> 3) & 0x1f; | ||
412 | unsigned long sport_bits = (cm_error >> 0) & 7; | ||
413 | |||
414 | snprintf(buf, sizeof(buf), | ||
415 | "C3=%s C2=%s C1=%s C0=%s SC=%s " | ||
416 | "MCmd=%s SPort=%lu\n", | ||
417 | cm2_core[c3_bits], cm2_core[c2_bits], | ||
418 | cm2_core[c1_bits], cm2_core[c0_bits], | ||
419 | sc_bit ? "True" : "False", | ||
420 | cm2_cmd[cmd_bits], sport_bits); | ||
421 | } else { | ||
422 | unsigned long muc_bit = (cm_error >> 23) & 1; | ||
423 | unsigned long ins_bits = (cm_error >> 18) & 0x1f; | ||
424 | unsigned long arr_bits = (cm_error >> 16) & 3; | ||
425 | unsigned long dw_bits = (cm_error >> 12) & 15; | ||
426 | unsigned long way_bits = (cm_error >> 9) & 7; | ||
427 | unsigned long mway_bit = (cm_error >> 8) & 1; | ||
428 | unsigned long syn_bits = (cm_error >> 0) & 0xFF; | ||
429 | |||
430 | snprintf(buf, sizeof(buf), | ||
431 | "Type=%s%s Instr=%s DW=%lu Way=%lu " | ||
432 | "MWay=%s Syndrome=0x%02lx", | ||
433 | muc_bit ? "Multi-UC " : "", | ||
434 | cm2_l2_type[arr_bits], | ||
435 | cm2_l2_instr[ins_bits], dw_bits, way_bits, | ||
436 | mway_bit ? "True" : "False", syn_bits); | ||
437 | } | ||
438 | pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error, | ||
439 | cm2_causes[cause], buf); | ||
440 | pr_err("CM_ADDR =%08llx\n", cm_addr); | ||
441 | pr_err("CM_OTHER=%08llx %s\n", cm_other, cm2_causes[ocause]); | ||
442 | } else { /* CM3 */ | ||
443 | ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits; | ||
444 | ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit; | ||
445 | |||
446 | cause = FIELD_GET(CM3_GCR_ERROR_CAUSE_ERRTYPE, cm_error); | ||
447 | ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other); | ||
448 | |||
449 | if (!cause) | ||
450 | return; | ||
451 | |||
452 | /* Used by cause == {1,2,3} */ | ||
453 | core_id_bits = (cm_error >> 22) & 0xf; | ||
454 | vp_id_bits = (cm_error >> 18) & 0xf; | ||
455 | cmd_bits = (cm_error >> 14) & 0xf; | ||
456 | cmd_group_bits = (cm_error >> 11) & 0xf; | ||
457 | cm3_cca_bits = (cm_error >> 8) & 7; | ||
458 | mcp_bits = (cm_error >> 5) & 0xf; | ||
459 | cm3_tr_bits = (cm_error >> 1) & 0xf; | ||
460 | sched_bit = cm_error & 0x1; | ||
461 | |||
462 | if (cause == 1 || cause == 3) { /* Tag ECC */ | ||
463 | unsigned long tag_ecc = (cm_error >> 57) & 0x1; | ||
464 | unsigned long tag_way_bits = (cm_error >> 29) & 0xffff; | ||
465 | unsigned long dword_bits = (cm_error >> 49) & 0xff; | ||
466 | unsigned long data_way_bits = (cm_error >> 45) & 0xf; | ||
467 | unsigned long data_sets_bits = (cm_error >> 29) & 0xfff; | ||
468 | unsigned long bank_bit = (cm_error >> 28) & 0x1; | ||
469 | snprintf(buf, sizeof(buf), | ||
470 | "%s ECC Error: Way=%lu (DWORD=%lu, Sets=%lu)" | ||
471 | "Bank=%lu CoreID=%lu VPID=%lu Command=%s" | ||
472 | "Command Group=%s CCA=%lu MCP=%d" | ||
473 | "Transaction type=%s Scheduler=%lu\n", | ||
474 | tag_ecc ? "TAG" : "DATA", | ||
475 | tag_ecc ? (unsigned long)ffs(tag_way_bits) - 1 : | ||
476 | data_way_bits, dword_bits, data_sets_bits, | ||
477 | bank_bit, | ||
478 | core_id_bits, vp_id_bits, | ||
479 | cm3_cmd[cmd_bits], | ||
480 | cm3_cmd_group[cmd_group_bits], | ||
481 | cm3_cca_bits, 1 << mcp_bits, | ||
482 | cm3_tr[cm3_tr_bits], sched_bit); | ||
483 | } else if (cause == 2) { | ||
484 | unsigned long data_error_type = (cm_error >> 41) & 0xfff; | ||
485 | unsigned long data_decode_cmd = (cm_error >> 37) & 0xf; | ||
486 | unsigned long data_decode_group = (cm_error >> 34) & 0x7; | ||
487 | unsigned long data_decode_destination_id = (cm_error >> 28) & 0x3f; | ||
488 | |||
489 | snprintf(buf, sizeof(buf), | ||
490 | "Decode Request Error: Type=%lu, Command=%lu" | ||
491 | "Command Group=%lu Destination ID=%lu" | ||
492 | "CoreID=%lu VPID=%lu Command=%s" | ||
493 | "Command Group=%s CCA=%lu MCP=%d" | ||
494 | "Transaction type=%s Scheduler=%lu\n", | ||
495 | data_error_type, data_decode_cmd, | ||
496 | data_decode_group, data_decode_destination_id, | ||
497 | core_id_bits, vp_id_bits, | ||
498 | cm3_cmd[cmd_bits], | ||
499 | cm3_cmd_group[cmd_group_bits], | ||
500 | cm3_cca_bits, 1 << mcp_bits, | ||
501 | cm3_tr[cm3_tr_bits], sched_bit); | ||
502 | } else { | ||
503 | buf[0] = 0; | ||
504 | } | ||
505 | |||
506 | pr_err("CM_ERROR=%llx %s <%s>\n", cm_error, | ||
507 | cm3_causes[cause], buf); | ||
508 | pr_err("CM_ADDR =%llx\n", cm_addr); | ||
509 | pr_err("CM_OTHER=%llx %s\n", cm_other, cm3_causes[ocause]); | ||
510 | } | ||
511 | |||
512 | /* reprime cause register */ | ||
513 | write_gcr_error_cause(cm_error); | ||
514 | } | ||
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c new file mode 100644 index 000000000..d005be84c --- /dev/null +++ b/arch/mips/kernel/mips-cpc.c | |||
@@ -0,0 +1,122 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2013 Imagination Technologies | ||
4 | * Author: Paul Burton <paul.burton@mips.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/errno.h> | ||
8 | #include <linux/percpu.h> | ||
9 | #include <linux/of.h> | ||
10 | #include <linux/of_address.h> | ||
11 | #include <linux/spinlock.h> | ||
12 | |||
13 | #include <asm/mips-cps.h> | ||
14 | |||
15 | void __iomem *mips_cpc_base; | ||
16 | |||
17 | static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock); | ||
18 | |||
19 | static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags); | ||
20 | |||
21 | phys_addr_t __weak mips_cpc_default_phys_base(void) | ||
22 | { | ||
23 | struct device_node *cpc_node; | ||
24 | struct resource res; | ||
25 | int err; | ||
26 | |||
27 | cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc"); | ||
28 | if (cpc_node) { | ||
29 | err = of_address_to_resource(cpc_node, 0, &res); | ||
30 | of_node_put(cpc_node); | ||
31 | if (!err) | ||
32 | return res.start; | ||
33 | } | ||
34 | |||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | /** | ||
39 | * mips_cpc_phys_base - retrieve the physical base address of the CPC | ||
40 | * | ||
41 | * This function returns the physical base address of the Cluster Power | ||
42 | * Controller memory mapped registers, or 0 if no Cluster Power Controller | ||
43 | * is present. | ||
44 | */ | ||
45 | static phys_addr_t mips_cpc_phys_base(void) | ||
46 | { | ||
47 | unsigned long cpc_base; | ||
48 | |||
49 | if (!mips_cm_present()) | ||
50 | return 0; | ||
51 | |||
52 | if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX)) | ||
53 | return 0; | ||
54 | |||
55 | /* If the CPC is already enabled, leave it so */ | ||
56 | cpc_base = read_gcr_cpc_base(); | ||
57 | if (cpc_base & CM_GCR_CPC_BASE_CPCEN) | ||
58 | return cpc_base & CM_GCR_CPC_BASE_CPCBASE; | ||
59 | |||
60 | /* Otherwise, use the default address */ | ||
61 | cpc_base = mips_cpc_default_phys_base(); | ||
62 | if (!cpc_base) | ||
63 | return cpc_base; | ||
64 | |||
65 | /* Enable the CPC, mapped at the default address */ | ||
66 | write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN); | ||
67 | return cpc_base; | ||
68 | } | ||
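
mips_cpc_phys_base() above follows a common "reuse if the bootloader already enabled it, otherwise program a default" pattern. A minimal userspace sketch of that pattern, with made-up register storage and field definitions (BASE_EN and BASE_MASK are assumptions for illustration, not the real CM_GCR_CPC_BASE layout):

    #include <stdio.h>
    #include <stdint.h>

    #define BASE_EN   0x1ULL            /* assumed enable bit (cf. CM_GCR_CPC_BASE_CPCEN) */
    #define BASE_MASK (~0x7fffULL)      /* assumed 32KiB-aligned base field */

    static uint64_t regval;             /* stands in for the hardware register */

    static uint64_t probe_base(uint64_t default_base)
    {
        uint64_t base = regval;

        if (base & BASE_EN)             /* firmware already enabled it: reuse */
            return base & BASE_MASK;
        if (!default_base)              /* no default known: report absence */
            return 0;
        regval = default_base | BASE_EN; /* enable at the default address */
        return default_base;
    }

    int main(void)
    {
        printf("first=0x%llx\n", (unsigned long long)probe_base(0x1bde0000ULL));
        printf("reused=0x%llx\n", (unsigned long long)probe_base(0));
        return 0;
    }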
69 | |||
70 | int mips_cpc_probe(void) | ||
71 | { | ||
72 | phys_addr_t addr; | ||
73 | unsigned int cpu; | ||
74 | |||
75 | for_each_possible_cpu(cpu) | ||
76 | spin_lock_init(&per_cpu(cpc_core_lock, cpu)); | ||
77 | |||
78 | addr = mips_cpc_phys_base(); | ||
79 | if (!addr) | ||
80 | return -ENODEV; | ||
81 | |||
82 | mips_cpc_base = ioremap(addr, 0x8000); | ||
83 | if (!mips_cpc_base) | ||
84 | return -ENXIO; | ||
85 | |||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | void mips_cpc_lock_other(unsigned int core) | ||
90 | { | ||
91 | unsigned int curr_core; | ||
92 | |||
93 | if (mips_cm_revision() >= CM_REV_CM3) | ||
94 | /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */ | ||
95 | return; | ||
96 | |||
97 | preempt_disable(); | ||
98 | curr_core = cpu_core(¤t_cpu_data); | ||
99 | spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core), | ||
100 | per_cpu(cpc_core_lock_flags, curr_core)); | ||
101 | write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM)); | ||
102 | |||
103 | /* | ||
104 | * Ensure the core-other region reflects the appropriate core & | ||
105 | * VP before any accesses to it occur. | ||
106 | */ | ||
107 | mb(); | ||
108 | } | ||
109 | |||
110 | void mips_cpc_unlock_other(void) | ||
111 | { | ||
112 | unsigned int curr_core; | ||
113 | |||
114 | if (mips_cm_revision() >= CM_REV_CM3) | ||
115 | /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */ | ||
116 | return; | ||
117 | |||
118 | curr_core = cpu_core(¤t_cpu_data); | ||
119 | spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core), | ||
120 | per_cpu(cpc_core_lock_flags, curr_core)); | ||
121 | preempt_enable(); | ||
122 | } | ||
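
mips_cpc_lock_other()/mips_cpc_unlock_other() save the IRQ flags from spin_lock_irqsave() in a per-core variable so that the matching unlock on the same core restores exactly what that core saved. A toy sketch of the discipline (plain arrays stand in for the per-CPU spinlock and flags):

    #include <stdio.h>

    #define NCORES 2

    static int lock_taken[NCORES];
    static unsigned long saved_flags[NCORES];  /* like cpc_core_lock_flags */

    static void lock_other(int core, unsigned long irqflags)
    {
        saved_flags[core] = irqflags;   /* kept per core: other cores may */
        lock_taken[core] = 1;           /* lock/unlock concurrently */
    }

    static unsigned long unlock_other(int core)
    {
        lock_taken[core] = 0;
        return saved_flags[core];       /* restore exactly what this core saved */
    }

    int main(void)
    {
        lock_other(0, 0x1);
        lock_other(1, 0x0);
        printf("core0 flags=%lu core1 flags=%lu\n",
               unlock_other(0), unlock_other(1));
        return 0;
    }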
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c new file mode 100644 index 000000000..6c590ef27 --- /dev/null +++ b/arch/mips/kernel/mips-mt-fpaff.c | |||
@@ -0,0 +1,219 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * General MIPS MT support routines, usable in AP/SP and SMVP. | ||
4 | * Copyright (C) 2005 Mips Technologies, Inc | ||
5 | */ | ||
6 | #include <linux/cpu.h> | ||
7 | #include <linux/cpuset.h> | ||
8 | #include <linux/cpumask.h> | ||
9 | #include <linux/delay.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/sched/task.h> | ||
14 | #include <linux/cred.h> | ||
15 | #include <linux/security.h> | ||
16 | #include <linux/types.h> | ||
17 | #include <linux/uaccess.h> | ||
18 | |||
19 | /* | ||
20 | * CPU mask used to set process affinity for MT VPEs/TCs with FPUs | ||
21 | */ | ||
22 | cpumask_t mt_fpu_cpumask; | ||
23 | |||
24 | static int fpaff_threshold = -1; | ||
25 | unsigned long mt_fpemul_threshold; | ||
26 | |||
27 | /* | ||
28 | * Replacement functions for the sys_sched_setaffinity() and | ||
29 | * sys_sched_getaffinity() system calls, so that we can integrate | ||
30 | * FPU affinity with the user's requested processor affinity. | ||
31 | * This code is 98% identical to the sys_sched_setaffinity() | ||
32 | * and sys_sched_getaffinity() system calls, and should be | ||
33 | * updated when kernel/sched/core.c changes. | ||
34 | */ | ||
35 | |||
36 | /* | ||
37 | * find_process_by_pid - find a process with a matching PID value. | ||
38 | * Used in sys_sched_set/getaffinity() in kernel/sched/core.c, so | ||
39 | * cloned here. | ||
40 | */ | ||
41 | static inline struct task_struct *find_process_by_pid(pid_t pid) | ||
42 | { | ||
43 | return pid ? find_task_by_vpid(pid) : current; | ||
44 | } | ||
45 | |||
46 | /* | ||
47 | * Check that the target process has a UID that matches the current process's. | ||
48 | */ | ||
49 | static bool check_same_owner(struct task_struct *p) | ||
50 | { | ||
51 | const struct cred *cred = current_cred(), *pcred; | ||
52 | bool match; | ||
53 | |||
54 | rcu_read_lock(); | ||
55 | pcred = __task_cred(p); | ||
56 | match = (uid_eq(cred->euid, pcred->euid) || | ||
57 | uid_eq(cred->euid, pcred->uid)); | ||
58 | rcu_read_unlock(); | ||
59 | return match; | ||
60 | } | ||
61 | |||
62 | /* | ||
63 | * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process | ||
64 | */ | ||
65 | asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len, | ||
66 | unsigned long __user *user_mask_ptr) | ||
67 | { | ||
68 | cpumask_var_t cpus_allowed, new_mask, effective_mask; | ||
69 | struct thread_info *ti; | ||
70 | struct task_struct *p; | ||
71 | int retval; | ||
72 | |||
73 | if (len < sizeof(new_mask)) | ||
74 | return -EINVAL; | ||
75 | |||
76 | if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask))) | ||
77 | return -EFAULT; | ||
78 | |||
79 | get_online_cpus(); | ||
80 | rcu_read_lock(); | ||
81 | |||
82 | p = find_process_by_pid(pid); | ||
83 | if (!p) { | ||
84 | rcu_read_unlock(); | ||
85 | put_online_cpus(); | ||
86 | return -ESRCH; | ||
87 | } | ||
88 | |||
89 | /* Prevent p going away */ | ||
90 | get_task_struct(p); | ||
91 | rcu_read_unlock(); | ||
92 | |||
93 | if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) { | ||
94 | retval = -ENOMEM; | ||
95 | goto out_put_task; | ||
96 | } | ||
97 | if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) { | ||
98 | retval = -ENOMEM; | ||
99 | goto out_free_cpus_allowed; | ||
100 | } | ||
101 | if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) { | ||
102 | retval = -ENOMEM; | ||
103 | goto out_free_new_mask; | ||
104 | } | ||
105 | if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) { | ||
106 | retval = -EPERM; | ||
107 | goto out_unlock; | ||
108 | } | ||
109 | |||
110 | retval = security_task_setscheduler(p); | ||
111 | if (retval) | ||
112 | goto out_unlock; | ||
113 | |||
114 | /* Record new user-specified CPU set for future reference */ | ||
115 | cpumask_copy(&p->thread.user_cpus_allowed, new_mask); | ||
116 | |||
117 | again: | ||
118 | /* Compute new global allowed CPU set if necessary */ | ||
119 | ti = task_thread_info(p); | ||
120 | if (test_ti_thread_flag(ti, TIF_FPUBOUND) && | ||
121 | cpumask_intersects(new_mask, &mt_fpu_cpumask)) { | ||
122 | cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask); | ||
123 | retval = set_cpus_allowed_ptr(p, effective_mask); | ||
124 | } else { | ||
125 | cpumask_copy(effective_mask, new_mask); | ||
126 | clear_ti_thread_flag(ti, TIF_FPUBOUND); | ||
127 | retval = set_cpus_allowed_ptr(p, new_mask); | ||
128 | } | ||
129 | |||
130 | if (!retval) { | ||
131 | cpuset_cpus_allowed(p, cpus_allowed); | ||
132 | if (!cpumask_subset(effective_mask, cpus_allowed)) { | ||
133 | /* | ||
134 | * We must have raced with a concurrent cpuset | ||
135 | * update. Just reset the cpus_allowed to the | ||
136 | * cpuset's cpus_allowed | ||
137 | */ | ||
138 | cpumask_copy(new_mask, cpus_allowed); | ||
139 | goto again; | ||
140 | } | ||
141 | } | ||
142 | out_unlock: | ||
143 | free_cpumask_var(effective_mask); | ||
144 | out_free_new_mask: | ||
145 | free_cpumask_var(new_mask); | ||
146 | out_free_cpus_allowed: | ||
147 | free_cpumask_var(cpus_allowed); | ||
148 | out_put_task: | ||
149 | put_task_struct(p); | ||
150 | put_online_cpus(); | ||
151 | return retval; | ||
152 | } | ||
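
The heart of mipsmt_sys_sched_setaffinity() is the effective-mask computation: if the thread is FPU-bound and the requested mask overlaps the FPU-capable CPUs, the two are intersected; otherwise the binding is dropped and the request is used as-is. A worked sketch with plain bitmasks standing in for cpumasks (the CPU layout here is assumed):

    #include <stdio.h>

    int main(void)
    {
        unsigned fpu_mask = 0x5;   /* assume CPUs 0 and 2 have an FPU */
        unsigned new_mask = 0x7;   /* user asked for CPUs 0-2 */
        int fpubound = 1;          /* TIF_FPUBOUND is set */

        unsigned effective = (fpubound && (new_mask & fpu_mask))
                             ? (new_mask & fpu_mask)  /* keep FPU binding */
                             : new_mask;              /* binding dropped */

        printf("effective=0x%x\n", effective);        /* prints 0x5 */
        return 0;
    }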
153 | |||
154 | /* | ||
155 | * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process | ||
156 | */ | ||
157 | asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len, | ||
158 | unsigned long __user *user_mask_ptr) | ||
159 | { | ||
160 | unsigned int real_len; | ||
161 | cpumask_t allowed, mask; | ||
162 | int retval; | ||
163 | struct task_struct *p; | ||
164 | |||
165 | real_len = sizeof(mask); | ||
166 | if (len < real_len) | ||
167 | return -EINVAL; | ||
168 | |||
169 | get_online_cpus(); | ||
170 | rcu_read_lock(); | ||
171 | |||
172 | retval = -ESRCH; | ||
173 | p = find_process_by_pid(pid); | ||
174 | if (!p) | ||
175 | goto out_unlock; | ||
176 | retval = security_task_getscheduler(p); | ||
177 | if (retval) | ||
178 | goto out_unlock; | ||
179 | |||
180 | cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr); | ||
181 | cpumask_and(&mask, &allowed, cpu_active_mask); | ||
182 | |||
183 | out_unlock: | ||
184 | rcu_read_unlock(); | ||
185 | put_online_cpus(); | ||
186 | if (retval) | ||
187 | return retval; | ||
188 | if (copy_to_user(user_mask_ptr, &mask, real_len)) | ||
189 | return -EFAULT; | ||
190 | return real_len; | ||
191 | } | ||
192 | |||
193 | |||
194 | static int __init fpaff_thresh(char *str) | ||
195 | { | ||
196 | get_option(&str, &fpaff_threshold); | ||
197 | return 1; | ||
198 | } | ||
199 | __setup("fpaff=", fpaff_thresh); | ||
200 | |||
201 | /* | ||
202 | * FPU Use Factor empirically derived from experiments on 34K | ||
203 | */ | ||
204 | #define FPUSEFACTOR 2000 | ||
205 | |||
206 | static __init int mt_fp_affinity_init(void) | ||
207 | { | ||
208 | if (fpaff_threshold >= 0) { | ||
209 | mt_fpemul_threshold = fpaff_threshold; | ||
210 | } else { | ||
211 | mt_fpemul_threshold = | ||
212 | (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ; | ||
213 | } | ||
214 | printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n", | ||
215 | mt_fpemul_threshold); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | arch_initcall(mt_fp_affinity_init); | ||
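
The default threshold above is derived from loops_per_jiffy. A worked example of the arithmetic with assumed values (HZ=100, loops_per_jiffy=49152, roughly 9.8 BogoMIPS):

    #include <stdio.h>

    int main(void)
    {
        unsigned long factor = 2000;    /* FPUSEFACTOR */
        unsigned long hz = 100;
        unsigned long loops_per_jiffy = 49152;

        unsigned long thresh =
            (factor * (loops_per_jiffy / (500000 / hz))) / hz;

        printf("FPU Affinity set after %lu emulations\n", thresh); /* 180 */
        return 0;
    }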
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c new file mode 100644 index 000000000..d5f7362e8 --- /dev/null +++ b/arch/mips/kernel/mips-mt.c | |||
@@ -0,0 +1,246 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * General MIPS MT support routines, usable in AP/SP and SMVP. | ||
4 | * Copyright (C) 2005 Mips Technologies, Inc | ||
5 | */ | ||
6 | |||
7 | #include <linux/device.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/export.h> | ||
11 | #include <linux/interrupt.h> | ||
12 | #include <linux/security.h> | ||
13 | |||
14 | #include <asm/cpu.h> | ||
15 | #include <asm/processor.h> | ||
16 | #include <linux/atomic.h> | ||
17 | #include <asm/hardirq.h> | ||
18 | #include <asm/mmu_context.h> | ||
19 | #include <asm/mipsmtregs.h> | ||
20 | #include <asm/r4kcache.h> | ||
21 | #include <asm/cacheflush.h> | ||
22 | |||
23 | int vpelimit; | ||
24 | |||
25 | static int __init maxvpes(char *str) | ||
26 | { | ||
27 | get_option(&str, &vpelimit); | ||
28 | |||
29 | return 1; | ||
30 | } | ||
31 | |||
32 | __setup("maxvpes=", maxvpes); | ||
33 | |||
34 | int tclimit; | ||
35 | |||
36 | static int __init maxtcs(char *str) | ||
37 | { | ||
38 | get_option(&str, &tclimit); | ||
39 | |||
40 | return 1; | ||
41 | } | ||
42 | |||
43 | __setup("maxtcs=", maxtcs); | ||
44 | |||
45 | /* | ||
46 | * Dump new MIPS MT state for the core. Does not leave TCs halted. | ||
47 | * Takes an argument which is taken to be a pre-call MVPControl value. | ||
48 | */ | ||
49 | |||
50 | void mips_mt_regdump(unsigned long mvpctl) | ||
51 | { | ||
52 | unsigned long flags; | ||
53 | unsigned long vpflags; | ||
54 | unsigned long mvpconf0; | ||
55 | int nvpe; | ||
56 | int ntc; | ||
57 | int i; | ||
58 | int tc; | ||
59 | unsigned long haltval; | ||
60 | unsigned long tcstatval; | ||
61 | |||
62 | local_irq_save(flags); | ||
63 | vpflags = dvpe(); | ||
64 | printk("=== MIPS MT State Dump ===\n"); | ||
65 | printk("-- Global State --\n"); | ||
66 | printk(" MVPControl Passed: %08lx\n", mvpctl); | ||
67 | printk(" MVPControl Read: %08lx\n", vpflags); | ||
68 | printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0())); | ||
69 | nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
70 | ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1; | ||
71 | printk("-- per-VPE State --\n"); | ||
72 | for (i = 0; i < nvpe; i++) { | ||
73 | for (tc = 0; tc < ntc; tc++) { | ||
74 | settc(tc); | ||
75 | if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) { | ||
76 | printk(" VPE %d\n", i); | ||
77 | printk(" VPEControl : %08lx\n", | ||
78 | read_vpe_c0_vpecontrol()); | ||
79 | printk(" VPEConf0 : %08lx\n", | ||
80 | read_vpe_c0_vpeconf0()); | ||
81 | printk(" VPE%d.Status : %08lx\n", | ||
82 | i, read_vpe_c0_status()); | ||
83 | printk(" VPE%d.EPC : %08lx %pS\n", | ||
84 | i, read_vpe_c0_epc(), | ||
85 | (void *) read_vpe_c0_epc()); | ||
86 | printk(" VPE%d.Cause : %08lx\n", | ||
87 | i, read_vpe_c0_cause()); | ||
88 | printk(" VPE%d.Config7 : %08lx\n", | ||
89 | i, read_vpe_c0_config7()); | ||
90 | break; /* Next VPE */ | ||
91 | } | ||
92 | } | ||
93 | } | ||
94 | printk("-- per-TC State --\n"); | ||
95 | for (tc = 0; tc < ntc; tc++) { | ||
96 | settc(tc); | ||
97 | if (read_tc_c0_tcbind() == read_c0_tcbind()) { | ||
98 | /* Are we dumping ourselves? */ | ||
99 | haltval = 0; /* Then we're not halted, and mustn't be */ | ||
100 | tcstatval = flags; /* And pre-dump TCStatus is flags */ | ||
101 | printk(" TC %d (current TC with VPE EPC above)\n", tc); | ||
102 | } else { | ||
103 | haltval = read_tc_c0_tchalt(); | ||
104 | write_tc_c0_tchalt(1); | ||
105 | tcstatval = read_tc_c0_tcstatus(); | ||
106 | printk(" TC %d\n", tc); | ||
107 | } | ||
108 | printk(" TCStatus : %08lx\n", tcstatval); | ||
109 | printk(" TCBind : %08lx\n", read_tc_c0_tcbind()); | ||
110 | printk(" TCRestart : %08lx %pS\n", | ||
111 | read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart()); | ||
112 | printk(" TCHalt : %08lx\n", haltval); | ||
113 | printk(" TCContext : %08lx\n", read_tc_c0_tccontext()); | ||
114 | if (!haltval) | ||
115 | write_tc_c0_tchalt(0); | ||
116 | } | ||
117 | printk("===========================\n"); | ||
118 | evpe(vpflags); | ||
119 | local_irq_restore(flags); | ||
120 | } | ||
121 | |||
122 | static int mt_opt_rpsctl = -1; | ||
123 | static int mt_opt_nblsu = -1; | ||
124 | static int mt_opt_forceconfig7; | ||
125 | static int mt_opt_config7 = -1; | ||
126 | |||
127 | static int __init rpsctl_set(char *str) | ||
128 | { | ||
129 | get_option(&str, &mt_opt_rpsctl); | ||
130 | return 1; | ||
131 | } | ||
132 | __setup("rpsctl=", rpsctl_set); | ||
133 | |||
134 | static int __init nblsu_set(char *str) | ||
135 | { | ||
136 | get_option(&str, &mt_opt_nblsu); | ||
137 | return 1; | ||
138 | } | ||
139 | __setup("nblsu=", nblsu_set); | ||
140 | |||
141 | static int __init config7_set(char *str) | ||
142 | { | ||
143 | get_option(&str, &mt_opt_config7); | ||
144 | mt_opt_forceconfig7 = 1; | ||
145 | return 1; | ||
146 | } | ||
147 | __setup("config7=", config7_set); | ||
148 | |||
149 | static unsigned int itc_base; | ||
150 | |||
151 | static int __init set_itc_base(char *str) | ||
152 | { | ||
153 | get_option(&str, &itc_base); | ||
154 | return 1; | ||
155 | } | ||
156 | |||
157 | __setup("itcbase=", set_itc_base); | ||
158 | |||
159 | void mips_mt_set_cpuoptions(void) | ||
160 | { | ||
161 | unsigned int oconfig7 = read_c0_config7(); | ||
162 | unsigned int nconfig7 = oconfig7; | ||
163 | |||
164 | if (mt_opt_rpsctl >= 0) { | ||
165 | printk("34K return prediction stack override set to %d.\n", | ||
166 | mt_opt_rpsctl); | ||
167 | if (mt_opt_rpsctl) | ||
168 | nconfig7 |= (1 << 2); | ||
169 | else | ||
170 | nconfig7 &= ~(1 << 2); | ||
171 | } | ||
172 | if (mt_opt_nblsu >= 0) { | ||
173 | printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu); | ||
174 | if (mt_opt_nblsu) | ||
175 | nconfig7 |= (1 << 5); | ||
176 | else | ||
177 | nconfig7 &= ~(1 << 5); | ||
178 | } | ||
179 | if (mt_opt_forceconfig7) { | ||
180 | printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7); | ||
181 | nconfig7 = mt_opt_config7; | ||
182 | } | ||
183 | if (oconfig7 != nconfig7) { | ||
184 | __asm__ __volatile__("sync"); | ||
185 | write_c0_config7(nconfig7); | ||
186 | ehb(); | ||
187 | printk("Config7: 0x%08x\n", read_c0_config7()); | ||
188 | } | ||
189 | |||
190 | if (itc_base != 0) { | ||
191 | /* | ||
192 | * Configure ITC mapping. This code is very | ||
193 | * specific to the 34K core family, which uses | ||
194 | * a special mode bit ("ITC") in the ErrCtl | ||
195 | * register to enable access to ITC control | ||
196 | * registers via cache "tag" operations. | ||
197 | */ | ||
198 | unsigned long ectlval; | ||
199 | unsigned long itcblkgrn; | ||
200 | |||
201 | /* ErrCtl register is known as "ecc" to Linux */ | ||
202 | ectlval = read_c0_ecc(); | ||
203 | write_c0_ecc(ectlval | (0x1 << 26)); | ||
204 | ehb(); | ||
205 | #define INDEX_0 (0x80000000) | ||
206 | #define INDEX_8 (0x80000008) | ||
207 | /* Read "cache tag" for Dcache pseudo-index 8 */ | ||
208 | cache_op(Index_Load_Tag_D, INDEX_8); | ||
209 | ehb(); | ||
210 | itcblkgrn = read_c0_dtaglo(); | ||
211 | itcblkgrn &= 0xfffe0000; | ||
212 | /* Set for 128 byte pitch of ITC cells */ | ||
213 | itcblkgrn |= 0x00000c00; | ||
214 | /* Stage in Tag register */ | ||
215 | write_c0_dtaglo(itcblkgrn); | ||
216 | ehb(); | ||
217 | /* Write out to ITU with CACHE op */ | ||
218 | cache_op(Index_Store_Tag_D, INDEX_8); | ||
219 | /* Now set base address, and turn ITC on with 0x1 bit */ | ||
220 | write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1 ); | ||
221 | ehb(); | ||
222 | /* Write out to ITU with CACHE op */ | ||
223 | cache_op(Index_Store_Tag_D, INDEX_0); | ||
224 | write_c0_ecc(ectlval); | ||
225 | ehb(); | ||
226 | printk("Mapped %ld ITC cells starting at 0x%08x\n", | ||
227 | ((itcblkgrn & 0x7fe00000) >> 20), itc_base); | ||
228 | } | ||
229 | } | ||
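
The Config7 update in mips_mt_set_cpuoptions() treats each option as tri-state: -1 (not given on the command line) leaves the bit alone, 0 clears it, nonzero sets it. A small sketch of that read-modify-write pattern (bit positions follow the 34K usage above; the starting value is made up):

    #include <stdio.h>

    static unsigned apply_opt(unsigned cfg, int opt, unsigned bit)
    {
        if (opt < 0)
            return cfg;                 /* option not given: leave bit alone */
        return opt ? (cfg | (1u << bit)) : (cfg & ~(1u << bit));
    }

    int main(void)
    {
        unsigned config7 = 0x24;                /* made-up initial value */
        config7 = apply_opt(config7, 1, 2);     /* rpsctl=1: set bit 2 (already set) */
        config7 = apply_opt(config7, 0, 5);     /* nblsu=0: clear bit 5 */
        printf("Config7: 0x%08x\n", config7);   /* prints 0x00000004 */
        return 0;
    }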
230 | |||
231 | struct class *mt_class; | ||
232 | |||
233 | static int __init mt_init(void) | ||
234 | { | ||
235 | struct class *mtc; | ||
236 | |||
237 | mtc = class_create(THIS_MODULE, "mt"); | ||
238 | if (IS_ERR(mtc)) | ||
239 | return PTR_ERR(mtc); | ||
240 | |||
241 | mt_class = mtc; | ||
242 | |||
243 | return 0; | ||
244 | } | ||
245 | |||
246 | subsys_initcall(mt_init); | ||
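
mt_init() relies on the kernel's IS_ERR()/PTR_ERR() convention, where small negative errnos are encoded directly in the pointer value. A minimal illustration of that encoding (simplified; the real macros live in linux/err.h):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static int is_err(const void *p)
    {
        /* the top 4095 addresses are reserved for error codes */
        return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
        void *ok  = &ok;                 /* a real pointer */
        void *bad = (void *)(long)-12;   /* -ENOMEM encoded as a pointer */

        printf("%d %d\n", is_err(ok), is_err(bad));  /* prints 0 1 */
        return 0;
    }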
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c new file mode 100644 index 000000000..a39ec755e --- /dev/null +++ b/arch/mips/kernel/mips-r2-to-r6-emul.c | |||
@@ -0,0 +1,2363 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (c) 2014 Imagination Technologies Ltd. | ||
7 | * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com> | ||
8 | * Author: Markos Chandras <markos.chandras@imgtec.com> | ||
9 | * | ||
10 | * MIPS R2 user space instruction emulator for MIPS R6 | ||
11 | * | ||
12 | */ | ||
13 | #include <linux/bug.h> | ||
14 | #include <linux/compiler.h> | ||
15 | #include <linux/debugfs.h> | ||
16 | #include <linux/init.h> | ||
17 | #include <linux/kernel.h> | ||
18 | #include <linux/ptrace.h> | ||
19 | #include <linux/seq_file.h> | ||
20 | |||
21 | #include <asm/asm.h> | ||
22 | #include <asm/branch.h> | ||
23 | #include <asm/break.h> | ||
24 | #include <asm/debug.h> | ||
25 | #include <asm/fpu.h> | ||
26 | #include <asm/fpu_emulator.h> | ||
27 | #include <asm/inst.h> | ||
28 | #include <asm/mips-r2-to-r6-emul.h> | ||
29 | #include <asm/local.h> | ||
30 | #include <asm/mipsregs.h> | ||
31 | #include <asm/ptrace.h> | ||
32 | #include <linux/uaccess.h> | ||
33 | |||
34 | #ifdef CONFIG_64BIT | ||
35 | #define ADDIU "daddiu " | ||
36 | #define INS "dins " | ||
37 | #define EXT "dext " | ||
38 | #else | ||
39 | #define ADDIU "addiu " | ||
40 | #define INS "ins " | ||
41 | #define EXT "ext " | ||
42 | #endif /* CONFIG_64BIT */ | ||
43 | |||
44 | #define SB "sb " | ||
45 | #define LB "lb " | ||
46 | #define LL "ll " | ||
47 | #define SC "sc " | ||
48 | |||
49 | #ifdef CONFIG_DEBUG_FS | ||
50 | static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats); | ||
51 | static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats); | ||
52 | static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats); | ||
53 | #endif | ||
54 | |||
55 | extern const unsigned int fpucondbit[8]; | ||
56 | |||
57 | #define MIPS_R2_EMUL_TOTAL_PASS 10 | ||
58 | |||
59 | int mipsr2_emulation = 0; | ||
60 | |||
61 | static int __init mipsr2emu_enable(char *s) | ||
62 | { | ||
63 | mipsr2_emulation = 1; | ||
64 | |||
65 | pr_info("MIPS R2-to-R6 Emulator Enabled!\n"); | ||
66 | |||
67 | return 1; | ||
68 | } | ||
69 | __setup("mipsr2emu", mipsr2emu_enable); | ||
70 | |||
71 | /** | ||
72 | * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in a delay slot, | ||
73 | * for performance, instead of the traditional (and rather slow) approach of | ||
74 | * using a stack trampoline. | ||
75 | * @regs: Process register set | ||
76 | * @ir: Instruction | ||
77 | */ | ||
78 | static inline int mipsr6_emul(struct pt_regs *regs, u32 ir) | ||
79 | { | ||
80 | switch (MIPSInst_OPCODE(ir)) { | ||
81 | case addiu_op: | ||
82 | if (MIPSInst_RT(ir)) | ||
83 | regs->regs[MIPSInst_RT(ir)] = | ||
84 | (s32)regs->regs[MIPSInst_RS(ir)] + | ||
85 | (s32)MIPSInst_SIMM(ir); | ||
86 | return 0; | ||
87 | case daddiu_op: | ||
88 | if (IS_ENABLED(CONFIG_32BIT)) | ||
89 | break; | ||
90 | |||
91 | if (MIPSInst_RT(ir)) | ||
92 | regs->regs[MIPSInst_RT(ir)] = | ||
93 | (s64)regs->regs[MIPSInst_RS(ir)] + | ||
94 | (s64)MIPSInst_SIMM(ir); | ||
95 | return 0; | ||
96 | case lwc1_op: | ||
97 | case swc1_op: | ||
98 | case cop1_op: | ||
99 | case cop1x_op: | ||
100 | /* FPU instructions in delay slot */ | ||
101 | return -SIGFPE; | ||
102 | case spec_op: | ||
103 | switch (MIPSInst_FUNC(ir)) { | ||
104 | case or_op: | ||
105 | if (MIPSInst_RD(ir)) | ||
106 | regs->regs[MIPSInst_RD(ir)] = | ||
107 | regs->regs[MIPSInst_RS(ir)] | | ||
108 | regs->regs[MIPSInst_RT(ir)]; | ||
109 | return 0; | ||
110 | case sll_op: | ||
111 | if (MIPSInst_RS(ir)) | ||
112 | break; | ||
113 | |||
114 | if (MIPSInst_RD(ir)) | ||
115 | regs->regs[MIPSInst_RD(ir)] = | ||
116 | (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) << | ||
117 | MIPSInst_FD(ir)); | ||
118 | return 0; | ||
119 | case srl_op: | ||
120 | if (MIPSInst_RS(ir)) | ||
121 | break; | ||
122 | |||
123 | if (MIPSInst_RD(ir)) | ||
124 | regs->regs[MIPSInst_RD(ir)] = | ||
125 | (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >> | ||
126 | MIPSInst_FD(ir)); | ||
127 | return 0; | ||
128 | case addu_op: | ||
129 | if (MIPSInst_FD(ir)) | ||
130 | break; | ||
131 | |||
132 | if (MIPSInst_RD(ir)) | ||
133 | regs->regs[MIPSInst_RD(ir)] = | ||
134 | (s32)((u32)regs->regs[MIPSInst_RS(ir)] + | ||
135 | (u32)regs->regs[MIPSInst_RT(ir)]); | ||
136 | return 0; | ||
137 | case subu_op: | ||
138 | if (MIPSInst_FD(ir)) | ||
139 | break; | ||
140 | |||
141 | if (MIPSInst_RD(ir)) | ||
142 | regs->regs[MIPSInst_RD(ir)] = | ||
143 | (s32)((u32)regs->regs[MIPSInst_RS(ir)] - | ||
144 | (u32)regs->regs[MIPSInst_RT(ir)]); | ||
145 | return 0; | ||
146 | case dsll_op: | ||
147 | if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir)) | ||
148 | break; | ||
149 | |||
150 | if (MIPSInst_RD(ir)) | ||
151 | regs->regs[MIPSInst_RD(ir)] = | ||
152 | (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) << | ||
153 | MIPSInst_FD(ir)); | ||
154 | return 0; | ||
155 | case dsrl_op: | ||
156 | if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir)) | ||
157 | break; | ||
158 | |||
159 | if (MIPSInst_RD(ir)) | ||
160 | regs->regs[MIPSInst_RD(ir)] = | ||
161 | (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >> | ||
162 | MIPSInst_FD(ir)); | ||
163 | return 0; | ||
164 | case daddu_op: | ||
165 | if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir)) | ||
166 | break; | ||
167 | |||
168 | if (MIPSInst_RD(ir)) | ||
169 | regs->regs[MIPSInst_RD(ir)] = | ||
170 | (u64)regs->regs[MIPSInst_RS(ir)] + | ||
171 | (u64)regs->regs[MIPSInst_RT(ir)]; | ||
172 | return 0; | ||
173 | case dsubu_op: | ||
174 | if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir)) | ||
175 | break; | ||
176 | |||
177 | if (MIPSInst_RD(ir)) | ||
178 | regs->regs[MIPSInst_RD(ir)] = | ||
179 | (s64)((u64)regs->regs[MIPSInst_RS(ir)] - | ||
180 | (u64)regs->regs[MIPSInst_RT(ir)]); | ||
181 | return 0; | ||
182 | } | ||
183 | break; | ||
184 | default: | ||
185 | pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n", | ||
186 | ir, MIPSInst_OPCODE(ir)); | ||
187 | } | ||
188 | |||
189 | return SIGILL; | ||
190 | } | ||
191 | |||
192 | /** | ||
193 | * movf_func - Emulate a MOVF instruction | ||
194 | * @regs: Process register set | ||
195 | * @ir: Instruction | ||
196 | * | ||
197 | * Returns 0 since it always succeeds. | ||
198 | */ | ||
199 | static int movf_func(struct pt_regs *regs, u32 ir) | ||
200 | { | ||
201 | u32 csr; | ||
202 | u32 cond; | ||
203 | |||
204 | csr = current->thread.fpu.fcr31; | ||
205 | cond = fpucondbit[MIPSInst_RT(ir) >> 2]; | ||
206 | |||
207 | if (((csr & cond) == 0) && MIPSInst_RD(ir)) | ||
208 | regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; | ||
209 | |||
210 | MIPS_R2_STATS(movs); | ||
211 | |||
212 | return 0; | ||
213 | } | ||
214 | |||
215 | /** | ||
216 | * movt_func - Emulate a MOVT instruction | ||
217 | * @regs: Process register set | ||
218 | * @ir: Instruction | ||
219 | * | ||
220 | * Returns 0 since it always succeeds. | ||
221 | */ | ||
222 | static int movt_func(struct pt_regs *regs, u32 ir) | ||
223 | { | ||
224 | u32 csr; | ||
225 | u32 cond; | ||
226 | |||
227 | csr = current->thread.fpu.fcr31; | ||
228 | cond = fpucondbit[MIPSInst_RT(ir) >> 2]; | ||
229 | |||
230 | if (((csr & cond) != 0) && MIPSInst_RD(ir)) | ||
231 | regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; | ||
232 | |||
233 | MIPS_R2_STATS(movs); | ||
234 | |||
235 | return 0; | ||
236 | } | ||
237 | |||
238 | /** | ||
239 | * jr_func - Emulate a JR instruction. | ||
241 | * @regs: Process register set | ||
241 | * @ir: Instruction | ||
242 | * | ||
243 | * Returns SIGILL if JR was in delay slot, SIGEMT if we | ||
244 | * can't compute the EPC, SIGSEGV if we can't access the | ||
245 | * userland instruction or 0 on success. | ||
246 | */ | ||
247 | static int jr_func(struct pt_regs *regs, u32 ir) | ||
248 | { | ||
249 | int err; | ||
250 | unsigned long cepc, epc, nepc; | ||
251 | u32 nir; | ||
252 | |||
253 | if (delay_slot(regs)) | ||
254 | return SIGILL; | ||
255 | |||
256 | /* EPC after the RI/JR instruction */ | ||
257 | nepc = regs->cp0_epc; | ||
258 | /* Roll back to the reserved R2 JR instruction */ | ||
259 | regs->cp0_epc -= 4; | ||
260 | epc = regs->cp0_epc; | ||
261 | err = __compute_return_epc(regs); | ||
262 | |||
263 | if (err < 0) | ||
264 | return SIGEMT; | ||
265 | |||
266 | |||
267 | /* Computed EPC */ | ||
268 | cepc = regs->cp0_epc; | ||
269 | |||
270 | /* Get DS instruction */ | ||
271 | err = __get_user(nir, (u32 __user *)nepc); | ||
272 | if (err) | ||
273 | return SIGSEGV; | ||
274 | |||
275 | MIPS_R2BR_STATS(jrs); | ||
276 | |||
277 | /* If nir == 0 (NOP), there is nothing else to do */ | ||
278 | if (nir) { | ||
279 | /* | ||
280 | * Negative err means FPU instruction in BD-slot, | ||
281 | * Zero err means 'BD-slot emulation done' | ||
282 | * For anything else we go back to trampoline emulation. | ||
283 | */ | ||
284 | err = mipsr6_emul(regs, nir); | ||
285 | if (err > 0) { | ||
286 | regs->cp0_epc = nepc; | ||
287 | err = mips_dsemul(regs, nir, epc, cepc); | ||
288 | if (err == SIGILL) | ||
289 | err = SIGEMT; | ||
290 | MIPS_R2_STATS(dsemul); | ||
291 | } | ||
292 | } | ||
293 | |||
294 | return err; | ||
295 | } | ||
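
jr_func() leans on a three-way return convention from mipsr6_emul(): zero means the delay-slot instruction was fully emulated, a negative value flags an FPU instruction for the FPU emulator, and any positive value falls back to mips_dsemul() trampoline emulation. A compact, purely illustrative sketch of that dispatch:

    #include <stdio.h>
    #include <signal.h>

    static int dispatch(int rc)
    {
        if (rc == 0)
            return 0;        /* delay-slot instruction fully emulated */
        if (rc < 0)
            return -rc;      /* FPU instruction: hand off to the FPU emulator */
        return SIGILL;       /* anything else: fall back to the trampoline */
    }

    int main(void)
    {
        printf("%d %d %d\n",
               dispatch(0), dispatch(-SIGFPE), dispatch(SIGILL));
        return 0;
    }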
296 | |||
297 | /** | ||
298 | * movz_func - Emulate a MOVZ instruction | ||
299 | * @regs: Process register set | ||
300 | * @ir: Instruction | ||
301 | * | ||
302 | * Returns 0 since it always succeeds. | ||
303 | */ | ||
304 | static int movz_func(struct pt_regs *regs, u32 ir) | ||
305 | { | ||
306 | if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir)) | ||
307 | regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; | ||
308 | MIPS_R2_STATS(movs); | ||
309 | |||
310 | return 0; | ||
311 | } | ||
312 | |||
313 | /** | ||
314 | * movn_func - Emulate a MOVN instruction | ||
315 | * @regs: Process register set | ||
316 | * @ir: Instruction | ||
317 | * | ||
318 | * Returns 0 since it always succeeds. | ||
319 | */ | ||
320 | static int movn_func(struct pt_regs *regs, u32 ir) | ||
321 | { | ||
322 | if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir)) | ||
323 | regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)]; | ||
324 | MIPS_R2_STATS(movs); | ||
325 | |||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | /** | ||
330 | * mfhi_func - Emulate a MFHI instruction | ||
331 | * @regs: Process register set | ||
332 | * @ir: Instruction | ||
333 | * | ||
334 | * Returns 0 since it always succeeds. | ||
335 | */ | ||
336 | static int mfhi_func(struct pt_regs *regs, u32 ir) | ||
337 | { | ||
338 | if (MIPSInst_RD(ir)) | ||
339 | regs->regs[MIPSInst_RD(ir)] = regs->hi; | ||
340 | |||
341 | MIPS_R2_STATS(hilo); | ||
342 | |||
343 | return 0; | ||
344 | } | ||
345 | |||
346 | /** | ||
347 | * mthi_func - Emulate a MTHI instruction | ||
348 | * @regs: Process register set | ||
349 | * @ir: Instruction | ||
350 | * | ||
351 | * Returns 0 since it always succeeds. | ||
352 | */ | ||
353 | static int mthi_func(struct pt_regs *regs, u32 ir) | ||
354 | { | ||
355 | regs->hi = regs->regs[MIPSInst_RS(ir)]; | ||
356 | |||
357 | MIPS_R2_STATS(hilo); | ||
358 | |||
359 | return 0; | ||
360 | } | ||
361 | |||
362 | /** | ||
363 | * mflo_func - Emulate a MFLO instruction | ||
364 | * @regs: Process register set | ||
365 | * @ir: Instruction | ||
366 | * | ||
367 | * Returns 0 since it always succeeds. | ||
368 | */ | ||
369 | static int mflo_func(struct pt_regs *regs, u32 ir) | ||
370 | { | ||
371 | if (MIPSInst_RD(ir)) | ||
372 | regs->regs[MIPSInst_RD(ir)] = regs->lo; | ||
373 | |||
374 | MIPS_R2_STATS(hilo); | ||
375 | |||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | /** | ||
380 | * mtlo_func - Emulate a MTLO instruction | ||
381 | * @regs: Process register set | ||
382 | * @ir: Instruction | ||
383 | * | ||
384 | * Returns 0 since it always succeeds. | ||
385 | */ | ||
386 | static int mtlo_func(struct pt_regs *regs, u32 ir) | ||
387 | { | ||
388 | regs->lo = regs->regs[MIPSInst_RS(ir)]; | ||
389 | |||
390 | MIPS_R2_STATS(hilo); | ||
391 | |||
392 | return 0; | ||
393 | } | ||
394 | |||
395 | /** | ||
396 | * mult_func - Emulate a MULT instruction | ||
397 | * @regs: Process register set | ||
398 | * @ir: Instruction | ||
399 | * | ||
400 | * Returns 0 since it always succeeds. | ||
401 | */ | ||
402 | static int mult_func(struct pt_regs *regs, u32 ir) | ||
403 | { | ||
404 | s64 res; | ||
405 | s32 rt, rs; | ||
406 | |||
407 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
408 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
409 | res = (s64)rt * (s64)rs; | ||
410 | |||
411 | rs = res; | ||
412 | regs->lo = (s64)rs; | ||
413 | rt = res >> 32; | ||
414 | res = (s64)rt; | ||
415 | regs->hi = res; | ||
416 | |||
417 | MIPS_R2_STATS(muls); | ||
418 | |||
419 | return 0; | ||
420 | } | ||
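
mult_func() splits the 64-bit product into HI/LO through s32/s64 round trips because MIPS64 keeps 32-bit values sign-extended in 64-bit registers. A worked example of that split (standalone C):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        int32_t rs = -3, rt = 2;
        int64_t res = (int64_t)rs * (int64_t)rt;   /* -6 = 0xfff...fffa */

        long long lo = (int32_t)res;               /* low 32 bits, sign-extended */
        long long hi = (int32_t)(res >> 32);       /* high 32 bits, sign-extended */

        printf("lo=%llx hi=%llx\n",
               (unsigned long long)lo, (unsigned long long)hi);
        return 0;
    }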
421 | |||
422 | /** | ||
423 | * multu_func - Emulate a MULTU instruction | ||
424 | * @regs: Process register set | ||
425 | * @ir: Instruction | ||
426 | * | ||
427 | * Returns 0 since it always succeeds. | ||
428 | */ | ||
429 | static int multu_func(struct pt_regs *regs, u32 ir) | ||
430 | { | ||
431 | u64 res; | ||
432 | u32 rt, rs; | ||
433 | |||
434 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
435 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
436 | res = (u64)rt * (u64)rs; | ||
437 | rt = res; | ||
438 | regs->lo = (s64)(s32)rt; | ||
439 | regs->hi = (s64)(s32)(res >> 32); | ||
440 | |||
441 | MIPS_R2_STATS(muls); | ||
442 | |||
443 | return 0; | ||
444 | } | ||
445 | |||
446 | /** | ||
447 | * div_func - Emulate a DIV instruction | ||
448 | * @regs: Process register set | ||
449 | * @ir: Instruction | ||
450 | * | ||
451 | * Returns 0 since it always succeeds. | ||
452 | */ | ||
453 | static int div_func(struct pt_regs *regs, u32 ir) | ||
454 | { | ||
455 | s32 rt, rs; | ||
456 | |||
457 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
458 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
459 | |||
460 | regs->lo = (s64)(rs / rt); | ||
461 | regs->hi = (s64)(rs % rt); | ||
462 | |||
463 | MIPS_R2_STATS(divs); | ||
464 | |||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | /** | ||
469 | * divu_func - Emulate a DIVU instruction | ||
470 | * @regs: Process register set | ||
471 | * @ir: Instruction | ||
472 | * | ||
473 | * Returns 0 since it always succeeds. | ||
474 | */ | ||
475 | static int divu_func(struct pt_regs *regs, u32 ir) | ||
476 | { | ||
477 | u32 rt, rs; | ||
478 | |||
479 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
480 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
481 | |||
482 | regs->lo = (s64)(rs / rt); | ||
483 | regs->hi = (s64)(rs % rt); | ||
484 | |||
485 | MIPS_R2_STATS(divs); | ||
486 | |||
487 | return 0; | ||
488 | } | ||
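
div_func()/divu_func() emulate the classic MIPS convention of quotient in LO and remainder in HI. For example (standalone C):

    #include <stdio.h>

    int main(void)
    {
        int rs = 7, rt = 3;

        /* DIV: quotient goes to LO, remainder to HI */
        printf("lo=%d hi=%d\n", rs / rt, rs % rt);  /* lo=2 hi=1 */
        return 0;
    }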
489 | |||
490 | /** | ||
491 | * dmult_func - Emulate a DMULT instruction | ||
492 | * @regs: Process register set | ||
493 | * @ir: Instruction | ||
494 | * | ||
495 | * Returns 0 on success or SIGILL for 32-bit kernels. | ||
496 | */ | ||
497 | static int dmult_func(struct pt_regs *regs, u32 ir) | ||
498 | { | ||
499 | s64 res; | ||
500 | s64 rt, rs; | ||
501 | |||
502 | if (IS_ENABLED(CONFIG_32BIT)) | ||
503 | return SIGILL; | ||
504 | |||
505 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
506 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
507 | res = rt * rs; | ||
508 | |||
509 | regs->lo = res; | ||
510 | __asm__ __volatile__( | ||
511 | "dmuh %0, %1, %2\t\n" | ||
512 | : "=r"(res) | ||
513 | : "r"(rt), "r"(rs)); | ||
514 | |||
515 | regs->hi = res; | ||
516 | |||
517 | MIPS_R2_STATS(muls); | ||
518 | |||
519 | return 0; | ||
520 | } | ||
521 | |||
522 | /** | ||
523 | * dmultu_func - Emulate a DMULTU instruction | ||
524 | * @regs: Process register set | ||
525 | * @ir: Instruction | ||
526 | * | ||
527 | * Returns 0 on success or SIGILL for 32-bit kernels. | ||
528 | */ | ||
529 | static int dmultu_func(struct pt_regs *regs, u32 ir) | ||
530 | { | ||
531 | u64 res; | ||
532 | u64 rt, rs; | ||
533 | |||
534 | if (IS_ENABLED(CONFIG_32BIT)) | ||
535 | return SIGILL; | ||
536 | |||
537 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
538 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
539 | res = rt * rs; | ||
540 | |||
541 | regs->lo = res; | ||
542 | __asm__ __volatile__( | ||
543 | "dmuhu %0, %1, %2\t\n" | ||
544 | : "=r"(res) | ||
545 | : "r"(rt), "r"(rs)); | ||
546 | |||
547 | regs->hi = res; | ||
548 | |||
549 | MIPS_R2_STATS(muls); | ||
550 | |||
551 | return 0; | ||
552 | } | ||
553 | |||
554 | /** | ||
555 | * ddiv_func - Emulate a DDIV instruction | ||
556 | * @regs: Process register set | ||
557 | * @ir: Instruction | ||
558 | * | ||
559 | * Returns 0 on success or SIGILL for 32-bit kernels. | ||
560 | */ | ||
561 | static int ddiv_func(struct pt_regs *regs, u32 ir) | ||
562 | { | ||
563 | s64 rt, rs; | ||
564 | |||
565 | if (IS_ENABLED(CONFIG_32BIT)) | ||
566 | return SIGILL; | ||
567 | |||
568 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
569 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
570 | |||
571 | regs->lo = rs / rt; | ||
572 | regs->hi = rs % rt; | ||
573 | |||
574 | MIPS_R2_STATS(divs); | ||
575 | |||
576 | return 0; | ||
577 | } | ||
578 | |||
579 | /** | ||
580 | * ddivu_func - Emulate a DDIVU instruction | ||
581 | * @regs: Process register set | ||
582 | * @ir: Instruction | ||
583 | * | ||
584 | * Returns 0 on success or SIGILL for 32-bit kernels. | ||
585 | */ | ||
586 | static int ddivu_func(struct pt_regs *regs, u32 ir) | ||
587 | { | ||
588 | u64 rt, rs; | ||
589 | |||
590 | if (IS_ENABLED(CONFIG_32BIT)) | ||
591 | return SIGILL; | ||
592 | |||
593 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
594 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
595 | |||
596 | regs->lo = rs / rt; | ||
597 | regs->hi = rs % rt; | ||
598 | |||
599 | MIPS_R2_STATS(divs); | ||
600 | |||
601 | return 0; | ||
602 | } | ||
603 | |||
604 | /* R6 removed instructions for the SPECIAL opcode */ | ||
605 | static const struct r2_decoder_table spec_op_table[] = { | ||
606 | { 0xfc1ff83f, 0x00000008, jr_func }, | ||
607 | { 0xfc00ffff, 0x00000018, mult_func }, | ||
608 | { 0xfc00ffff, 0x00000019, multu_func }, | ||
609 | { 0xfc00ffff, 0x0000001c, dmult_func }, | ||
610 | { 0xfc00ffff, 0x0000001d, dmultu_func }, | ||
611 | { 0xffff07ff, 0x00000010, mfhi_func }, | ||
612 | { 0xfc1fffff, 0x00000011, mthi_func }, | ||
613 | { 0xffff07ff, 0x00000012, mflo_func }, | ||
614 | { 0xfc1fffff, 0x00000013, mtlo_func }, | ||
615 | { 0xfc0307ff, 0x00000001, movf_func }, | ||
616 | { 0xfc0307ff, 0x00010001, movt_func }, | ||
617 | { 0xfc0007ff, 0x0000000a, movz_func }, | ||
618 | { 0xfc0007ff, 0x0000000b, movn_func }, | ||
619 | { 0xfc00ffff, 0x0000001a, div_func }, | ||
620 | { 0xfc00ffff, 0x0000001b, divu_func }, | ||
621 | { 0xfc00ffff, 0x0000001e, ddiv_func }, | ||
622 | { 0xfc00ffff, 0x0000001f, ddivu_func }, | ||
623 | {} | ||
624 | }; | ||
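
Each table entry matches when (inst & mask) == code: the mask keeps the opcode and function fields and ignores the register fields. A worked example against the MULT entry above, using the encoding of "mult $1, $2":

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t inst = 0x00220018;  /* mult $1, $2: SPECIAL, rs=1, rt=2, funct=0x18 */
        uint32_t mask = 0xfc00ffff;  /* keep opcode + rd/sa/funct, drop rs/rt */
        uint32_t code = 0x00000018;

        printf("match=%d\n", (inst & mask) == code);  /* prints 1 */
        return 0;
    }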
625 | |||
626 | /** | ||
627 | * madd_func - Emulate a MADD instruction | ||
628 | * @regs: Process register set | ||
629 | * @ir: Instruction | ||
630 | * | ||
631 | * Returns 0 since it always succeeds. | ||
632 | */ | ||
633 | static int madd_func(struct pt_regs *regs, u32 ir) | ||
634 | { | ||
635 | s64 res; | ||
636 | s32 rt, rs; | ||
637 | |||
638 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
639 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
640 | res = (s64)rt * (s64)rs; | ||
641 | rt = regs->hi; | ||
642 | rs = regs->lo; | ||
643 | res += ((((s64)rt) << 32) | (u32)rs); | ||
644 | |||
645 | rt = res; | ||
646 | regs->lo = (s64)rt; | ||
647 | rs = res >> 32; | ||
648 | regs->hi = (s64)rs; | ||
649 | |||
650 | MIPS_R2_STATS(dsps); | ||
651 | |||
652 | return 0; | ||
653 | } | ||
654 | |||
655 | /** | ||
656 | * maddu_func - Emulate a MADDU instruction | ||
657 | * @regs: Process register set | ||
658 | * @ir: Instruction | ||
659 | * | ||
660 | * Returns 0 since it always succeeds. | ||
661 | */ | ||
662 | static int maddu_func(struct pt_regs *regs, u32 ir) | ||
663 | { | ||
664 | u64 res; | ||
665 | u32 rt, rs; | ||
666 | |||
667 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
668 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
669 | res = (u64)rt * (u64)rs; | ||
670 | rt = regs->hi; | ||
671 | rs = regs->lo; | ||
672 | res += ((((s64)rt) << 32) | (u32)rs); | ||
673 | |||
674 | rt = res; | ||
675 | regs->lo = (s64)(s32)rt; | ||
676 | rs = res >> 32; | ||
677 | regs->hi = (s64)(s32)rs; | ||
678 | |||
679 | MIPS_R2_STATS(dsps); | ||
680 | |||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | /** | ||
685 | * msub_func - Emulate a MSUB instruction | ||
686 | * @regs: Process register set | ||
687 | * @ir: Instruction | ||
688 | * | ||
689 | * Returns 0 since it always succeeds. | ||
690 | */ | ||
691 | static int msub_func(struct pt_regs *regs, u32 ir) | ||
692 | { | ||
693 | s64 res; | ||
694 | s32 rt, rs; | ||
695 | |||
696 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
697 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
698 | res = (s64)rt * (s64)rs; | ||
699 | rt = regs->hi; | ||
700 | rs = regs->lo; | ||
701 | res = ((((s64)rt) << 32) | (u32)rs) - res; | ||
702 | |||
703 | rt = res; | ||
704 | regs->lo = (s64)rt; | ||
705 | rs = res >> 32; | ||
706 | regs->hi = (s64)rs; | ||
707 | |||
708 | MIPS_R2_STATS(dsps); | ||
709 | |||
710 | return 0; | ||
711 | } | ||
712 | |||
713 | /** | ||
714 | * msubu_func - Emulate a MSUBU instruction | ||
715 | * @regs: Process register set | ||
716 | * @ir: Instruction | ||
717 | * | ||
718 | * Returns 0 since it always succeeds. | ||
719 | */ | ||
720 | static int msubu_func(struct pt_regs *regs, u32 ir) | ||
721 | { | ||
722 | u64 res; | ||
723 | u32 rt, rs; | ||
724 | |||
725 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
726 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
727 | res = (u64)rt * (u64)rs; | ||
728 | rt = regs->hi; | ||
729 | rs = regs->lo; | ||
730 | res = ((((s64)rt) << 32) | (u32)rs) - res; | ||
731 | |||
732 | rt = res; | ||
733 | regs->lo = (s64)(s32)rt; | ||
734 | rs = res >> 32; | ||
735 | regs->hi = (s64)(s32)rs; | ||
736 | |||
737 | MIPS_R2_STATS(dsps); | ||
738 | |||
739 | return 0; | ||
740 | } | ||
741 | |||
742 | /** | ||
743 | * mul_func - Emulate a MUL instruction | ||
744 | * @regs: Process register set | ||
745 | * @ir: Instruction | ||
746 | * | ||
747 | * Returns 0 since it always succeeds. | ||
748 | */ | ||
749 | static int mul_func(struct pt_regs *regs, u32 ir) | ||
750 | { | ||
751 | s64 res; | ||
752 | s32 rt, rs; | ||
753 | |||
754 | if (!MIPSInst_RD(ir)) | ||
755 | return 0; | ||
756 | rt = regs->regs[MIPSInst_RT(ir)]; | ||
757 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
758 | res = (s64)rt * (s64)rs; | ||
759 | |||
760 | rs = res; | ||
761 | regs->regs[MIPSInst_RD(ir)] = (s64)rs; | ||
762 | |||
763 | MIPS_R2_STATS(muls); | ||
764 | |||
765 | return 0; | ||
766 | } | ||
767 | |||
768 | /** | ||
769 | * clz_func - Emulate a CLZ instruction | ||
770 | * @regs: Process register set | ||
771 | * @ir: Instruction | ||
772 | * | ||
773 | * Returns 0 since it always succeeds. | ||
774 | */ | ||
775 | static int clz_func(struct pt_regs *regs, u32 ir) | ||
776 | { | ||
777 | u32 res; | ||
778 | u32 rs; | ||
779 | |||
780 | if (!MIPSInst_RD(ir)) | ||
781 | return 0; | ||
782 | |||
783 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
784 | __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs)); | ||
785 | regs->regs[MIPSInst_RD(ir)] = res; | ||
786 | |||
787 | MIPS_R2_STATS(bops); | ||
788 | |||
789 | return 0; | ||
790 | } | ||
791 | |||
792 | /** | ||
793 | * clo_func - Emulate a CLO instruction | ||
794 | * @regs: Process register set | ||
795 | * @ir: Instruction | ||
796 | * | ||
797 | * Returns 0 since it always succeeds. | ||
798 | */ | ||
799 | |||
800 | static int clo_func(struct pt_regs *regs, u32 ir) | ||
801 | { | ||
802 | u32 res; | ||
803 | u32 rs; | ||
804 | |||
805 | if (!MIPSInst_RD(ir)) | ||
806 | return 0; | ||
807 | |||
808 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
809 | __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs)); | ||
810 | regs->regs[MIPSInst_RD(ir)] = res; | ||
811 | |||
812 | MIPS_R2_STATS(bops); | ||
813 | |||
814 | return 0; | ||
815 | } | ||
816 | |||
817 | /** | ||
818 | * dclz_func - Emulate a DCLZ instruction | ||
819 | * @regs: Process register set | ||
820 | * @ir: Instruction | ||
821 | * | ||
822 | * Returns 0 since it always succeeds. | ||
823 | */ | ||
824 | static int dclz_func(struct pt_regs *regs, u32 ir) | ||
825 | { | ||
826 | u64 res; | ||
827 | u64 rs; | ||
828 | |||
829 | if (IS_ENABLED(CONFIG_32BIT)) | ||
830 | return SIGILL; | ||
831 | |||
832 | if (!MIPSInst_RD(ir)) | ||
833 | return 0; | ||
834 | |||
835 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
836 | __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs)); | ||
837 | regs->regs[MIPSInst_RD(ir)] = res; | ||
838 | |||
839 | MIPS_R2_STATS(bops); | ||
840 | |||
841 | return 0; | ||
842 | } | ||
843 | |||
844 | /** | ||
845 | * dclo_func - Emulate a DCLO instruction | ||
846 | * @regs: Process register set | ||
847 | * @ir: Instruction | ||
848 | * | ||
849 | * Returns 0 since it always succeeds. | ||
850 | */ | ||
851 | static int dclo_func(struct pt_regs *regs, u32 ir) | ||
852 | { | ||
853 | u64 res; | ||
854 | u64 rs; | ||
855 | |||
856 | if (IS_ENABLED(CONFIG_32BIT)) | ||
857 | return SIGILL; | ||
858 | |||
859 | if (!MIPSInst_RD(ir)) | ||
860 | return 0; | ||
861 | |||
862 | rs = regs->regs[MIPSInst_RS(ir)]; | ||
863 | __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs)); | ||
864 | regs->regs[MIPSInst_RD(ir)] = res; | ||
865 | |||
866 | MIPS_R2_STATS(bops); | ||
867 | |||
868 | return 0; | ||
869 | } | ||
870 | |||
871 | /* R6 removed instructions for the SPECIAL2 opcode */ | ||
872 | static const struct r2_decoder_table spec2_op_table[] = { | ||
873 | { 0xfc00ffff, 0x70000000, madd_func }, | ||
874 | { 0xfc00ffff, 0x70000001, maddu_func }, | ||
875 | { 0xfc0007ff, 0x70000002, mul_func }, | ||
876 | { 0xfc00ffff, 0x70000004, msub_func }, | ||
877 | { 0xfc00ffff, 0x70000005, msubu_func }, | ||
878 | { 0xfc0007ff, 0x70000020, clz_func }, | ||
879 | { 0xfc0007ff, 0x70000021, clo_func }, | ||
880 | { 0xfc0007ff, 0x70000024, dclz_func }, | ||
881 | { 0xfc0007ff, 0x70000025, dclo_func }, | ||
882 | { } | ||
883 | }; | ||
884 | |||
885 | static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst, | ||
886 | const struct r2_decoder_table *table) | ||
887 | { | ||
888 | const struct r2_decoder_table *p; | ||
889 | int err; | ||
890 | |||
891 | for (p = table; p->func; p++) { | ||
892 | if ((inst & p->mask) == p->code) { | ||
893 | err = (p->func)(regs, inst); | ||
894 | return err; | ||
895 | } | ||
896 | } | ||
897 | return SIGILL; | ||
898 | } | ||
899 | |||
900 | /** | ||
901 | * mipsr2_decoder: Decode and emulate a MIPS R2 instruction | ||
902 | * @regs: Process register set | ||
903 | * @inst: Instruction to decode and emulate | ||
904 | * @fcr31: Floating Point Control and Status Register Cause bits returned | ||
905 | */ | ||
906 | int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31) | ||
907 | { | ||
908 | int err = 0; | ||
909 | unsigned long vaddr; | ||
910 | u32 nir; | ||
911 | unsigned long cpc, epc, nepc, r31, res, rs, rt; | ||
912 | |||
913 | void __user *fault_addr = NULL; | ||
914 | int pass = 0; | ||
915 | |||
916 | repeat: | ||
917 | r31 = regs->regs[31]; | ||
918 | epc = regs->cp0_epc; | ||
919 | err = compute_return_epc(regs); | ||
920 | if (err < 0) { | ||
921 | BUG(); | ||
922 | return SIGEMT; | ||
923 | } | ||
924 | pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d)\n", | ||
925 | inst, epc, pass); | ||
926 | |||
927 | switch (MIPSInst_OPCODE(inst)) { | ||
928 | case spec_op: | ||
929 | err = mipsr2_find_op_func(regs, inst, spec_op_table); | ||
930 | if (err < 0) { | ||
931 | /* FPU instruction under JR */ | ||
932 | regs->cp0_cause |= CAUSEF_BD; | ||
933 | goto fpu_emul; | ||
934 | } | ||
935 | break; | ||
936 | case spec2_op: | ||
937 | err = mipsr2_find_op_func(regs, inst, spec2_op_table); | ||
938 | break; | ||
939 | case bcond_op: | ||
940 | rt = MIPSInst_RT(inst); | ||
941 | rs = MIPSInst_RS(inst); | ||
942 | switch (rt) { | ||
943 | case tgei_op: | ||
944 | if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst)) | ||
945 | do_trap_or_bp(regs, 0, 0, "TGEI"); | ||
946 | |||
947 | MIPS_R2_STATS(traps); | ||
948 | |||
949 | break; | ||
950 | case tgeiu_op: | ||
951 | if (regs->regs[rs] >= MIPSInst_UIMM(inst)) | ||
952 | do_trap_or_bp(regs, 0, 0, "TGEIU"); | ||
953 | |||
954 | MIPS_R2_STATS(traps); | ||
955 | |||
956 | break; | ||
957 | case tlti_op: | ||
958 | if ((long)regs->regs[rs] < MIPSInst_SIMM(inst)) | ||
959 | do_trap_or_bp(regs, 0, 0, "TLTI"); | ||
960 | |||
961 | MIPS_R2_STATS(traps); | ||
962 | |||
963 | break; | ||
964 | case tltiu_op: | ||
965 | if (regs->regs[rs] < MIPSInst_UIMM(inst)) | ||
966 | do_trap_or_bp(regs, 0, 0, "TLTIU"); | ||
967 | |||
968 | MIPS_R2_STATS(traps); | ||
969 | |||
970 | break; | ||
971 | case teqi_op: | ||
972 | if (regs->regs[rs] == MIPSInst_SIMM(inst)) | ||
973 | do_trap_or_bp(regs, 0, 0, "TEQI"); | ||
974 | |||
975 | MIPS_R2_STATS(traps); | ||
976 | |||
977 | break; | ||
978 | case tnei_op: | ||
979 | if (regs->regs[rs] != MIPSInst_SIMM(inst)) | ||
980 | do_trap_or_bp(regs, 0, 0, "TNEI"); | ||
981 | |||
982 | MIPS_R2_STATS(traps); | ||
983 | |||
984 | break; | ||
985 | case bltzl_op: | ||
986 | case bgezl_op: | ||
987 | case bltzall_op: | ||
988 | case bgezall_op: | ||
989 | if (delay_slot(regs)) { | ||
990 | err = SIGILL; | ||
991 | break; | ||
992 | } | ||
993 | regs->regs[31] = r31; | ||
994 | regs->cp0_epc = epc; | ||
995 | err = __compute_return_epc(regs); | ||
996 | if (err < 0) | ||
997 | return SIGEMT; | ||
998 | if (err != BRANCH_LIKELY_TAKEN) | ||
999 | break; | ||
1000 | cpc = regs->cp0_epc; | ||
1001 | nepc = epc + 4; | ||
1002 | err = __get_user(nir, (u32 __user *)nepc); | ||
1003 | if (err) { | ||
1004 | err = SIGSEGV; | ||
1005 | break; | ||
1006 | } | ||
1007 | /* | ||
1008 | * This will probably be optimized away when | ||
1009 | * CONFIG_DEBUG_FS is not enabled | ||
1010 | */ | ||
1011 | switch (rt) { | ||
1012 | case bltzl_op: | ||
1013 | MIPS_R2BR_STATS(bltzl); | ||
1014 | break; | ||
1015 | case bgezl_op: | ||
1016 | MIPS_R2BR_STATS(bgezl); | ||
1017 | break; | ||
1018 | case bltzall_op: | ||
1019 | MIPS_R2BR_STATS(bltzall); | ||
1020 | break; | ||
1021 | case bgezall_op: | ||
1022 | MIPS_R2BR_STATS(bgezall); | ||
1023 | break; | ||
1024 | } | ||
1025 | |||
1026 | switch (MIPSInst_OPCODE(nir)) { | ||
1027 | case cop1_op: | ||
1028 | case cop1x_op: | ||
1029 | case lwc1_op: | ||
1030 | case swc1_op: | ||
1031 | regs->cp0_cause |= CAUSEF_BD; | ||
1032 | goto fpu_emul; | ||
1033 | } | ||
1034 | if (nir) { | ||
1035 | err = mipsr6_emul(regs, nir); | ||
1036 | if (err > 0) { | ||
1037 | err = mips_dsemul(regs, nir, epc, cpc); | ||
1038 | if (err == SIGILL) | ||
1039 | err = SIGEMT; | ||
1040 | MIPS_R2_STATS(dsemul); | ||
1041 | } | ||
1042 | } | ||
1043 | break; | ||
1044 | case bltzal_op: | ||
1045 | case bgezal_op: | ||
1046 | if (delay_slot(regs)) { | ||
1047 | err = SIGILL; | ||
1048 | break; | ||
1049 | } | ||
1050 | regs->regs[31] = r31; | ||
1051 | regs->cp0_epc = epc; | ||
1052 | err = __compute_return_epc(regs); | ||
1053 | if (err < 0) | ||
1054 | return SIGEMT; | ||
1055 | cpc = regs->cp0_epc; | ||
1056 | nepc = epc + 4; | ||
1057 | err = __get_user(nir, (u32 __user *)nepc); | ||
1058 | if (err) { | ||
1059 | err = SIGSEGV; | ||
1060 | break; | ||
1061 | } | ||
1062 | /* | ||
1063 | * This will probably be optimized away when | ||
1064 | * CONFIG_DEBUG_FS is not enabled | ||
1065 | */ | ||
1066 | switch (rt) { | ||
1067 | case bltzal_op: | ||
1068 | MIPS_R2BR_STATS(bltzal); | ||
1069 | break; | ||
1070 | case bgezal_op: | ||
1071 | MIPS_R2BR_STATS(bgezal); | ||
1072 | break; | ||
1073 | } | ||
1074 | |||
1075 | switch (MIPSInst_OPCODE(nir)) { | ||
1076 | case cop1_op: | ||
1077 | case cop1x_op: | ||
1078 | case lwc1_op: | ||
1079 | case swc1_op: | ||
1080 | regs->cp0_cause |= CAUSEF_BD; | ||
1081 | goto fpu_emul; | ||
1082 | } | ||
1083 | if (nir) { | ||
1084 | err = mipsr6_emul(regs, nir); | ||
1085 | if (err > 0) { | ||
1086 | err = mips_dsemul(regs, nir, epc, cpc); | ||
1087 | if (err == SIGILL) | ||
1088 | err = SIGEMT; | ||
1089 | MIPS_R2_STATS(dsemul); | ||
1090 | } | ||
1091 | } | ||
1092 | break; | ||
1093 | default: | ||
1094 | regs->regs[31] = r31; | ||
1095 | regs->cp0_epc = epc; | ||
1096 | err = SIGILL; | ||
1097 | break; | ||
1098 | } | ||
1099 | break; | ||
1100 | |||
1101 | case blezl_op: | ||
1102 | case bgtzl_op: | ||
1103 | /* | ||
1104 | * For BLEZL and BGTZL, the rt field must be set to 0. If this | ||
1105 | * is not the case, this may be an encoding of a MIPS R6 | ||
1106 | * instruction, so return to CPU execution if this occurs. | ||
1107 | */ | ||
1108 | if (MIPSInst_RT(inst)) { | ||
1109 | err = SIGILL; | ||
1110 | break; | ||
1111 | } | ||
1112 | fallthrough; | ||
1113 | case beql_op: | ||
1114 | case bnel_op: | ||
1115 | if (delay_slot(regs)) { | ||
1116 | err = SIGILL; | ||
1117 | break; | ||
1118 | } | ||
1119 | regs->regs[31] = r31; | ||
1120 | regs->cp0_epc = epc; | ||
1121 | err = __compute_return_epc(regs); | ||
1122 | if (err < 0) | ||
1123 | return SIGEMT; | ||
1124 | if (err != BRANCH_LIKELY_TAKEN) | ||
1125 | break; | ||
1126 | cpc = regs->cp0_epc; | ||
1127 | nepc = epc + 4; | ||
1128 | err = __get_user(nir, (u32 __user *)nepc); | ||
1129 | if (err) { | ||
1130 | err = SIGSEGV; | ||
1131 | break; | ||
1132 | } | ||
1133 | /* | ||
1134 | * This will probably be optimized away when | ||
1135 | * CONFIG_DEBUG_FS is not enabled | ||
1136 | */ | ||
1137 | switch (MIPSInst_OPCODE(inst)) { | ||
1138 | case beql_op: | ||
1139 | MIPS_R2BR_STATS(beql); | ||
1140 | break; | ||
1141 | case bnel_op: | ||
1142 | MIPS_R2BR_STATS(bnel); | ||
1143 | break; | ||
1144 | case blezl_op: | ||
1145 | MIPS_R2BR_STATS(blezl); | ||
1146 | break; | ||
1147 | case bgtzl_op: | ||
1148 | MIPS_R2BR_STATS(bgtzl); | ||
1149 | break; | ||
1150 | } | ||
1151 | |||
1152 | switch (MIPSInst_OPCODE(nir)) { | ||
1153 | case cop1_op: | ||
1154 | case cop1x_op: | ||
1155 | case lwc1_op: | ||
1156 | case swc1_op: | ||
1157 | regs->cp0_cause |= CAUSEF_BD; | ||
1158 | goto fpu_emul; | ||
1159 | } | ||
1160 | if (nir) { | ||
1161 | err = mipsr6_emul(regs, nir); | ||
1162 | if (err > 0) { | ||
1163 | err = mips_dsemul(regs, nir, epc, cpc); | ||
1164 | if (err == SIGILL) | ||
1165 | err = SIGEMT; | ||
1166 | MIPS_R2_STATS(dsemul); | ||
1167 | } | ||
1168 | } | ||
1169 | break; | ||
1170 | case lwc1_op: | ||
1171 | case swc1_op: | ||
1172 | case cop1_op: | ||
1173 | case cop1x_op: | ||
1174 | fpu_emul: | ||
1175 | regs->regs[31] = r31; | ||
1176 | regs->cp0_epc = epc; | ||
1177 | |||
1178 | err = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 0, | ||
1179 | &fault_addr); | ||
1180 | |||
1181 | /* | ||
1182 | * We can't allow the emulated instruction to leave any | ||
1183 | * enabled Cause bits set in $fcr31. | ||
1184 | */ | ||
1185 | *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31); | ||
1186 | current->thread.fpu.fcr31 &= ~res; | ||
1187 | |||
1188 | /* | ||
1189 | * This is a tricky issue: lose_fpu() uses LL/SC atomics if the | ||
1190 | * FPU is owned, which effectively cancels a user-level LL/SC. | ||
1191 | * So it could seem logical not to restore FPU ownership here. | ||
1192 | * But sequences of multiple FPU instructions are far more | ||
1193 | * common than LL-FPU-SC, so we prefer to loop here until the | ||
1194 | * next scheduler cycle cancels FPU ownership. | ||
1195 | */ | ||
1196 | own_fpu(1); /* Restore FPU state. */ | ||
1197 | |||
1198 | if (err) | ||
1199 | current->thread.cp0_baduaddr = (unsigned long)fault_addr; | ||
1200 | |||
1201 | MIPS_R2_STATS(fpus); | ||
1202 | |||
1203 | break; | ||
1204 | |||
1205 | case lwl_op: | ||
1206 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1207 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1208 | if (!access_ok((void __user *)vaddr, 4)) { | ||
1209 | current->thread.cp0_baduaddr = vaddr; | ||
1210 | err = SIGSEGV; | ||
1211 | break; | ||
1212 | } | ||
1213 | __asm__ __volatile__( | ||
1214 | " .set push\n" | ||
1215 | " .set reorder\n" | ||
1216 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1217 | "1:" LB "%1, 0(%2)\n" | ||
1218 | INS "%0, %1, 24, 8\n" | ||
1219 | " andi %1, %2, 0x3\n" | ||
1220 | " beq $0, %1, 9f\n" | ||
1221 | ADDIU "%2, %2, -1\n" | ||
1222 | "2:" LB "%1, 0(%2)\n" | ||
1223 | INS "%0, %1, 16, 8\n" | ||
1224 | " andi %1, %2, 0x3\n" | ||
1225 | " beq $0, %1, 9f\n" | ||
1226 | ADDIU "%2, %2, -1\n" | ||
1227 | "3:" LB "%1, 0(%2)\n" | ||
1228 | INS "%0, %1, 8, 8\n" | ||
1229 | " andi %1, %2, 0x3\n" | ||
1230 | " beq $0, %1, 9f\n" | ||
1231 | ADDIU "%2, %2, -1\n" | ||
1232 | "4:" LB "%1, 0(%2)\n" | ||
1233 | INS "%0, %1, 0, 8\n" | ||
1234 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1235 | "1:" LB "%1, 0(%2)\n" | ||
1236 | INS "%0, %1, 24, 8\n" | ||
1237 | ADDIU "%2, %2, 1\n" | ||
1238 | " andi %1, %2, 0x3\n" | ||
1239 | " beq $0, %1, 9f\n" | ||
1240 | "2:" LB "%1, 0(%2)\n" | ||
1241 | INS "%0, %1, 16, 8\n" | ||
1242 | ADDIU "%2, %2, 1\n" | ||
1243 | " andi %1, %2, 0x3\n" | ||
1244 | " beq $0, %1, 9f\n" | ||
1245 | "3:" LB "%1, 0(%2)\n" | ||
1246 | INS "%0, %1, 8, 8\n" | ||
1247 | ADDIU "%2, %2, 1\n" | ||
1248 | " andi %1, %2, 0x3\n" | ||
1249 | " beq $0, %1, 9f\n" | ||
1250 | "4:" LB "%1, 0(%2)\n" | ||
1251 | INS "%0, %1, 0, 8\n" | ||
1252 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1253 | "9: sll %0, %0, 0\n" | ||
1254 | "10:\n" | ||
1255 | " .insn\n" | ||
1256 | " .section .fixup,\"ax\"\n" | ||
1257 | "8: li %3,%4\n" | ||
1258 | " j 10b\n" | ||
1259 | " .previous\n" | ||
1260 | " .section __ex_table,\"a\"\n" | ||
1261 | STR(PTR) " 1b,8b\n" | ||
1262 | STR(PTR) " 2b,8b\n" | ||
1263 | STR(PTR) " 3b,8b\n" | ||
1264 | STR(PTR) " 4b,8b\n" | ||
1265 | " .previous\n" | ||
1266 | " .set pop\n" | ||
1267 | : "+&r"(rt), "=&r"(rs), | ||
1268 | "+&r"(vaddr), "+&r"(err) | ||
1269 | : "i"(SIGSEGV)); | ||
1270 | |||
1271 | if (MIPSInst_RT(inst) && !err) | ||
1272 | regs->regs[MIPSInst_RT(inst)] = rt; | ||
1273 | |||
1274 | MIPS_R2_STATS(loads); | ||
1275 | |||
1276 | break; | ||
1277 | |||
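/*
 * C-equivalent sketch of the little-endian LWL loop above (the helper
 * name is illustrative): starting at the byte the unaligned address
 * points to, bytes are merged into the destination from bit 24
 * downwards until the address becomes word-aligned:
 *
 *	static u32 sketch_lwl_le(const u8 *mem, unsigned long vaddr, u32 rt)
 *	{
 *		int pos = 24;
 *
 *		for (;;) {
 *			rt = (rt & ~(0xffU << pos)) | ((u32)mem[vaddr] << pos);
 *			if (!(vaddr & 0x3) || !pos)
 *				break;
 *			vaddr--;
 *			pos -= 8;
 *		}
 *		return rt;
 *	}
 *
 * With (vaddr & 3) == 1, for example, two bytes land in bits 31:16 and
 * the low half of rt is preserved, matching labels 1: and 2: above.
 */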
1278 | case lwr_op: | ||
1279 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1280 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1281 | if (!access_ok((void __user *)vaddr, 4)) { | ||
1282 | current->thread.cp0_baduaddr = vaddr; | ||
1283 | err = SIGSEGV; | ||
1284 | break; | ||
1285 | } | ||
1286 | __asm__ __volatile__( | ||
1287 | " .set push\n" | ||
1288 | " .set reorder\n" | ||
1289 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1290 | "1:" LB "%1, 0(%2)\n" | ||
1291 | INS "%0, %1, 0, 8\n" | ||
1292 | ADDIU "%2, %2, 1\n" | ||
1293 | " andi %1, %2, 0x3\n" | ||
1294 | " beq $0, %1, 9f\n" | ||
1295 | "2:" LB "%1, 0(%2)\n" | ||
1296 | INS "%0, %1, 8, 8\n" | ||
1297 | ADDIU "%2, %2, 1\n" | ||
1298 | " andi %1, %2, 0x3\n" | ||
1299 | " beq $0, %1, 9f\n" | ||
1300 | "3:" LB "%1, 0(%2)\n" | ||
1301 | INS "%0, %1, 16, 8\n" | ||
1302 | ADDIU "%2, %2, 1\n" | ||
1303 | " andi %1, %2, 0x3\n" | ||
1304 | " beq $0, %1, 9f\n" | ||
1305 | "4:" LB "%1, 0(%2)\n" | ||
1306 | INS "%0, %1, 24, 8\n" | ||
1307 | " sll %0, %0, 0\n" | ||
1308 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1309 | "1:" LB "%1, 0(%2)\n" | ||
1310 | INS "%0, %1, 0, 8\n" | ||
1311 | " andi %1, %2, 0x3\n" | ||
1312 | " beq $0, %1, 9f\n" | ||
1313 | ADDIU "%2, %2, -1\n" | ||
1314 | "2:" LB "%1, 0(%2)\n" | ||
1315 | INS "%0, %1, 8, 8\n" | ||
1316 | " andi %1, %2, 0x3\n" | ||
1317 | " beq $0, %1, 9f\n" | ||
1318 | ADDIU "%2, %2, -1\n" | ||
1319 | "3:" LB "%1, 0(%2)\n" | ||
1320 | INS "%0, %1, 16, 8\n" | ||
1321 | " andi %1, %2, 0x3\n" | ||
1322 | " beq $0, %1, 9f\n" | ||
1323 | ADDIU "%2, %2, -1\n" | ||
1324 | "4:" LB "%1, 0(%2)\n" | ||
1325 | INS "%0, %1, 24, 8\n" | ||
1326 | " sll %0, %0, 0\n" | ||
1327 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1328 | "9:\n" | ||
1329 | "10:\n" | ||
1330 | " .insn\n" | ||
1331 | " .section .fixup,\"ax\"\n" | ||
1332 | "8: li %3,%4\n" | ||
1333 | " j 10b\n" | ||
1334 | " .previous\n" | ||
1335 | " .section __ex_table,\"a\"\n" | ||
1336 | STR(PTR) " 1b,8b\n" | ||
1337 | STR(PTR) " 2b,8b\n" | ||
1338 | STR(PTR) " 3b,8b\n" | ||
1339 | STR(PTR) " 4b,8b\n" | ||
1340 | " .previous\n" | ||
1341 | " .set pop\n" | ||
1342 | : "+&r"(rt), "=&r"(rs), | ||
1343 | "+&r"(vaddr), "+&r"(err) | ||
1344 | : "i"(SIGSEGV)); | ||
1345 | if (MIPSInst_RT(inst) && !err) | ||
1346 | regs->regs[MIPSInst_RT(inst)] = rt; | ||
1347 | |||
1348 | MIPS_R2_STATS(loads); | ||
1349 | |||
1350 | break; | ||
1351 | |||
1352 | case swl_op: | ||
1353 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1354 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1355 | if (!access_ok((void __user *)vaddr, 4)) { | ||
1356 | current->thread.cp0_baduaddr = vaddr; | ||
1357 | err = SIGSEGV; | ||
1358 | break; | ||
1359 | } | ||
1360 | __asm__ __volatile__( | ||
1361 | " .set push\n" | ||
1362 | " .set reorder\n" | ||
1363 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1364 | EXT "%1, %0, 24, 8\n" | ||
1365 | "1:" SB "%1, 0(%2)\n" | ||
1366 | " andi %1, %2, 0x3\n" | ||
1367 | " beq $0, %1, 9f\n" | ||
1368 | ADDIU "%2, %2, -1\n" | ||
1369 | EXT "%1, %0, 16, 8\n" | ||
1370 | "2:" SB "%1, 0(%2)\n" | ||
1371 | " andi %1, %2, 0x3\n" | ||
1372 | " beq $0, %1, 9f\n" | ||
1373 | ADDIU "%2, %2, -1\n" | ||
1374 | EXT "%1, %0, 8, 8\n" | ||
1375 | "3:" SB "%1, 0(%2)\n" | ||
1376 | " andi %1, %2, 0x3\n" | ||
1377 | " beq $0, %1, 9f\n" | ||
1378 | ADDIU "%2, %2, -1\n" | ||
1379 | EXT "%1, %0, 0, 8\n" | ||
1380 | "4:" SB "%1, 0(%2)\n" | ||
1381 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1382 | EXT "%1, %0, 24, 8\n" | ||
1383 | "1:" SB "%1, 0(%2)\n" | ||
1384 | ADDIU "%2, %2, 1\n" | ||
1385 | " andi %1, %2, 0x3\n" | ||
1386 | " beq $0, %1, 9f\n" | ||
1387 | EXT "%1, %0, 16, 8\n" | ||
1388 | "2:" SB "%1, 0(%2)\n" | ||
1389 | ADDIU "%2, %2, 1\n" | ||
1390 | " andi %1, %2, 0x3\n" | ||
1391 | " beq $0, %1, 9f\n" | ||
1392 | EXT "%1, %0, 8, 8\n" | ||
1393 | "3:" SB "%1, 0(%2)\n" | ||
1394 | ADDIU "%2, %2, 1\n" | ||
1395 | " andi %1, %2, 0x3\n" | ||
1396 | " beq $0, %1, 9f\n" | ||
1397 | EXT "%1, %0, 0, 8\n" | ||
1398 | "4:" SB "%1, 0(%2)\n" | ||
1399 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1400 | "9:\n" | ||
1401 | " .insn\n" | ||
1402 | " .section .fixup,\"ax\"\n" | ||
1403 | "8: li %3,%4\n" | ||
1404 | " j 9b\n" | ||
1405 | " .previous\n" | ||
1406 | " .section __ex_table,\"a\"\n" | ||
1407 | STR(PTR) " 1b,8b\n" | ||
1408 | STR(PTR) " 2b,8b\n" | ||
1409 | STR(PTR) " 3b,8b\n" | ||
1410 | STR(PTR) " 4b,8b\n" | ||
1411 | " .previous\n" | ||
1412 | " .set pop\n" | ||
1413 | : "+&r"(rt), "=&r"(rs), | ||
1414 | "+&r"(vaddr), "+&r"(err) | ||
1415 | : "i"(SIGSEGV) | ||
1416 | : "memory"); | ||
1417 | |||
1418 | MIPS_R2_STATS(stores); | ||
1419 | |||
1420 | break; | ||
1421 | |||
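/*
 * Worked example for the little-endian SWL above: with
 * rt == 0xAABBCCDD and (vaddr & 3) == 2, the sequence stores 0xAA at
 * offset 2, 0xBB at offset 1 and 0xCC at offset 0, then stops because
 * the address is now word-aligned; byte 0xDD is left to the matching
 * SWR of an unaligned-store pair.
 */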
1422 | case swr_op: | ||
1423 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1424 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1425 | if (!access_ok((void __user *)vaddr, 4)) { | ||
1426 | current->thread.cp0_baduaddr = vaddr; | ||
1427 | err = SIGSEGV; | ||
1428 | break; | ||
1429 | } | ||
1430 | __asm__ __volatile__( | ||
1431 | " .set push\n" | ||
1432 | " .set reorder\n" | ||
1433 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1434 | EXT "%1, %0, 0, 8\n" | ||
1435 | "1:" SB "%1, 0(%2)\n" | ||
1436 | ADDIU "%2, %2, 1\n" | ||
1437 | " andi %1, %2, 0x3\n" | ||
1438 | " beq $0, %1, 9f\n" | ||
1439 | EXT "%1, %0, 8, 8\n" | ||
1440 | "2:" SB "%1, 0(%2)\n" | ||
1441 | ADDIU "%2, %2, 1\n" | ||
1442 | " andi %1, %2, 0x3\n" | ||
1443 | " beq $0, %1, 9f\n" | ||
1444 | EXT "%1, %0, 16, 8\n" | ||
1445 | "3:" SB "%1, 0(%2)\n" | ||
1446 | ADDIU "%2, %2, 1\n" | ||
1447 | " andi %1, %2, 0x3\n" | ||
1448 | " beq $0, %1, 9f\n" | ||
1449 | EXT "%1, %0, 24, 8\n" | ||
1450 | "4:" SB "%1, 0(%2)\n" | ||
1451 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1452 | EXT "%1, %0, 0, 8\n" | ||
1453 | "1:" SB "%1, 0(%2)\n" | ||
1454 | " andi %1, %2, 0x3\n" | ||
1455 | " beq $0, %1, 9f\n" | ||
1456 | ADDIU "%2, %2, -1\n" | ||
1457 | EXT "%1, %0, 8, 8\n" | ||
1458 | "2:" SB "%1, 0(%2)\n" | ||
1459 | " andi %1, %2, 0x3\n" | ||
1460 | " beq $0, %1, 9f\n" | ||
1461 | ADDIU "%2, %2, -1\n" | ||
1462 | EXT "%1, %0, 16, 8\n" | ||
1463 | "3:" SB "%1, 0(%2)\n" | ||
1464 | " andi %1, %2, 0x3\n" | ||
1465 | " beq $0, %1, 9f\n" | ||
1466 | ADDIU "%2, %2, -1\n" | ||
1467 | EXT "%1, %0, 24, 8\n" | ||
1468 | "4:" SB "%1, 0(%2)\n" | ||
1469 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1470 | "9:\n" | ||
1471 | " .insn\n" | ||
1472 | " .section .fixup,\"ax\"\n" | ||
1473 | "8: li %3,%4\n" | ||
1474 | " j 9b\n" | ||
1475 | " .previous\n" | ||
1476 | " .section __ex_table,\"a\"\n" | ||
1477 | STR(PTR) " 1b,8b\n" | ||
1478 | STR(PTR) " 2b,8b\n" | ||
1479 | STR(PTR) " 3b,8b\n" | ||
1480 | STR(PTR) " 4b,8b\n" | ||
1481 | " .previous\n" | ||
1482 | " .set pop\n" | ||
1483 | : "+&r"(rt), "=&r"(rs), | ||
1484 | "+&r"(vaddr), "+&r"(err) | ||
1485 | : "i"(SIGSEGV) | ||
1486 | : "memory"); | ||
1487 | |||
1488 | MIPS_R2_STATS(stores); | ||
1489 | |||
1490 | break; | ||
1491 | |||
1492 | case ldl_op: | ||
1493 | if (IS_ENABLED(CONFIG_32BIT)) { | ||
1494 | err = SIGILL; | ||
1495 | break; | ||
1496 | } | ||
1497 | |||
1498 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1499 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1500 | if (!access_ok((void __user *)vaddr, 8)) { | ||
1501 | current->thread.cp0_baduaddr = vaddr; | ||
1502 | err = SIGSEGV; | ||
1503 | break; | ||
1504 | } | ||
1505 | __asm__ __volatile__( | ||
1506 | " .set push\n" | ||
1507 | " .set reorder\n" | ||
1508 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1509 | "1: lb %1, 0(%2)\n" | ||
1510 | " dinsu %0, %1, 56, 8\n" | ||
1511 | " andi %1, %2, 0x7\n" | ||
1512 | " beq $0, %1, 9f\n" | ||
1513 | " daddiu %2, %2, -1\n" | ||
1514 | "2: lb %1, 0(%2)\n" | ||
1515 | " dinsu %0, %1, 48, 8\n" | ||
1516 | " andi %1, %2, 0x7\n" | ||
1517 | " beq $0, %1, 9f\n" | ||
1518 | " daddiu %2, %2, -1\n" | ||
1519 | "3: lb %1, 0(%2)\n" | ||
1520 | " dinsu %0, %1, 40, 8\n" | ||
1521 | " andi %1, %2, 0x7\n" | ||
1522 | " beq $0, %1, 9f\n" | ||
1523 | " daddiu %2, %2, -1\n" | ||
1524 | "4: lb %1, 0(%2)\n" | ||
1525 | " dinsu %0, %1, 32, 8\n" | ||
1526 | " andi %1, %2, 0x7\n" | ||
1527 | " beq $0, %1, 9f\n" | ||
1528 | " daddiu %2, %2, -1\n" | ||
1529 | "5: lb %1, 0(%2)\n" | ||
1530 | " dins %0, %1, 24, 8\n" | ||
1531 | " andi %1, %2, 0x7\n" | ||
1532 | " beq $0, %1, 9f\n" | ||
1533 | " daddiu %2, %2, -1\n" | ||
1534 | "6: lb %1, 0(%2)\n" | ||
1535 | " dins %0, %1, 16, 8\n" | ||
1536 | " andi %1, %2, 0x7\n" | ||
1537 | " beq $0, %1, 9f\n" | ||
1538 | " daddiu %2, %2, -1\n" | ||
1539 | "7: lb %1, 0(%2)\n" | ||
1540 | " dins %0, %1, 8, 8\n" | ||
1541 | " andi %1, %2, 0x7\n" | ||
1542 | " beq $0, %1, 9f\n" | ||
1543 | " daddiu %2, %2, -1\n" | ||
1544 | "0: lb %1, 0(%2)\n" | ||
1545 | " dins %0, %1, 0, 8\n" | ||
1546 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1547 | "1: lb %1, 0(%2)\n" | ||
1548 | " dinsu %0, %1, 56, 8\n" | ||
1549 | " daddiu %2, %2, 1\n" | ||
1550 | " andi %1, %2, 0x7\n" | ||
1551 | " beq $0, %1, 9f\n" | ||
1552 | "2: lb %1, 0(%2)\n" | ||
1553 | " dinsu %0, %1, 48, 8\n" | ||
1554 | " daddiu %2, %2, 1\n" | ||
1555 | " andi %1, %2, 0x7\n" | ||
1556 | " beq $0, %1, 9f\n" | ||
1557 | "3: lb %1, 0(%2)\n" | ||
1558 | " dinsu %0, %1, 40, 8\n" | ||
1559 | " daddiu %2, %2, 1\n" | ||
1560 | " andi %1, %2, 0x7\n" | ||
1561 | " beq $0, %1, 9f\n" | ||
1562 | "4: lb %1, 0(%2)\n" | ||
1563 | " dinsu %0, %1, 32, 8\n" | ||
1564 | " daddiu %2, %2, 1\n" | ||
1565 | " andi %1, %2, 0x7\n" | ||
1566 | " beq $0, %1, 9f\n" | ||
1567 | "5: lb %1, 0(%2)\n" | ||
1568 | " dins %0, %1, 24, 8\n" | ||
1569 | " daddiu %2, %2, 1\n" | ||
1570 | " andi %1, %2, 0x7\n" | ||
1571 | " beq $0, %1, 9f\n" | ||
1572 | "6: lb %1, 0(%2)\n" | ||
1573 | " dins %0, %1, 16, 8\n" | ||
1574 | " daddiu %2, %2, 1\n" | ||
1575 | " andi %1, %2, 0x7\n" | ||
1576 | " beq $0, %1, 9f\n" | ||
1577 | "7: lb %1, 0(%2)\n" | ||
1578 | " dins %0, %1, 8, 8\n" | ||
1579 | " daddiu %2, %2, 1\n" | ||
1580 | " andi %1, %2, 0x7\n" | ||
1581 | " beq $0, %1, 9f\n" | ||
1582 | "0: lb %1, 0(%2)\n" | ||
1583 | " dins %0, %1, 0, 8\n" | ||
1584 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1585 | "9:\n" | ||
1586 | " .insn\n" | ||
1587 | " .section .fixup,\"ax\"\n" | ||
1588 | "8: li %3,%4\n" | ||
1589 | " j 9b\n" | ||
1590 | " .previous\n" | ||
1591 | " .section __ex_table,\"a\"\n" | ||
1592 | STR(PTR) " 1b,8b\n" | ||
1593 | STR(PTR) " 2b,8b\n" | ||
1594 | STR(PTR) " 3b,8b\n" | ||
1595 | STR(PTR) " 4b,8b\n" | ||
1596 | STR(PTR) " 5b,8b\n" | ||
1597 | STR(PTR) " 6b,8b\n" | ||
1598 | STR(PTR) " 7b,8b\n" | ||
1599 | STR(PTR) " 0b,8b\n" | ||
1600 | " .previous\n" | ||
1601 | " .set pop\n" | ||
1602 | : "+&r"(rt), "=&r"(rs), | ||
1603 | "+&r"(vaddr), "+&r"(err) | ||
1604 | : "i"(SIGSEGV)); | ||
1605 | if (MIPSInst_RT(inst) && !err) | ||
1606 | regs->regs[MIPSInst_RT(inst)] = rt; | ||
1607 | |||
1608 | MIPS_R2_STATS(loads); | ||
1609 | break; | ||
1610 | |||
1611 | case ldr_op: | ||
1612 | if (IS_ENABLED(CONFIG_32BIT)) { | ||
1613 | err = SIGILL; | ||
1614 | break; | ||
1615 | } | ||
1616 | |||
1617 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1618 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1619 | if (!access_ok((void __user *)vaddr, 8)) { | ||
1620 | current->thread.cp0_baduaddr = vaddr; | ||
1621 | err = SIGSEGV; | ||
1622 | break; | ||
1623 | } | ||
1624 | __asm__ __volatile__( | ||
1625 | " .set push\n" | ||
1626 | " .set reorder\n" | ||
1627 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1628 | "1: lb %1, 0(%2)\n" | ||
1629 | " dins %0, %1, 0, 8\n" | ||
1630 | " daddiu %2, %2, 1\n" | ||
1631 | " andi %1, %2, 0x7\n" | ||
1632 | " beq $0, %1, 9f\n" | ||
1633 | "2: lb %1, 0(%2)\n" | ||
1634 | " dins %0, %1, 8, 8\n" | ||
1635 | " daddiu %2, %2, 1\n" | ||
1636 | " andi %1, %2, 0x7\n" | ||
1637 | " beq $0, %1, 9f\n" | ||
1638 | "3: lb %1, 0(%2)\n" | ||
1639 | " dins %0, %1, 16, 8\n" | ||
1640 | " daddiu %2, %2, 1\n" | ||
1641 | " andi %1, %2, 0x7\n" | ||
1642 | " beq $0, %1, 9f\n" | ||
1643 | "4: lb %1, 0(%2)\n" | ||
1644 | " dins %0, %1, 24, 8\n" | ||
1645 | " daddiu %2, %2, 1\n" | ||
1646 | " andi %1, %2, 0x7\n" | ||
1647 | " beq $0, %1, 9f\n" | ||
1648 | "5: lb %1, 0(%2)\n" | ||
1649 | " dinsu %0, %1, 32, 8\n" | ||
1650 | " daddiu %2, %2, 1\n" | ||
1651 | " andi %1, %2, 0x7\n" | ||
1652 | " beq $0, %1, 9f\n" | ||
1653 | "6: lb %1, 0(%2)\n" | ||
1654 | " dinsu %0, %1, 40, 8\n" | ||
1655 | " daddiu %2, %2, 1\n" | ||
1656 | " andi %1, %2, 0x7\n" | ||
1657 | " beq $0, %1, 9f\n" | ||
1658 | "7: lb %1, 0(%2)\n" | ||
1659 | " dinsu %0, %1, 48, 8\n" | ||
1660 | " daddiu %2, %2, 1\n" | ||
1661 | " andi %1, %2, 0x7\n" | ||
1662 | " beq $0, %1, 9f\n" | ||
1663 | "0: lb %1, 0(%2)\n" | ||
1664 | " dinsu %0, %1, 56, 8\n" | ||
1665 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1666 | "1: lb %1, 0(%2)\n" | ||
1667 | " dins %0, %1, 0, 8\n" | ||
1668 | " andi %1, %2, 0x7\n" | ||
1669 | " beq $0, %1, 9f\n" | ||
1670 | " daddiu %2, %2, -1\n" | ||
1671 | "2: lb %1, 0(%2)\n" | ||
1672 | " dins %0, %1, 8, 8\n" | ||
1673 | " andi %1, %2, 0x7\n" | ||
1674 | " beq $0, %1, 9f\n" | ||
1675 | " daddiu %2, %2, -1\n" | ||
1676 | "3: lb %1, 0(%2)\n" | ||
1677 | " dins %0, %1, 16, 8\n" | ||
1678 | " andi %1, %2, 0x7\n" | ||
1679 | " beq $0, %1, 9f\n" | ||
1680 | " daddiu %2, %2, -1\n" | ||
1681 | "4: lb %1, 0(%2)\n" | ||
1682 | " dins %0, %1, 24, 8\n" | ||
1683 | " andi %1, %2, 0x7\n" | ||
1684 | " beq $0, %1, 9f\n" | ||
1685 | " daddiu %2, %2, -1\n" | ||
1686 | "5: lb %1, 0(%2)\n" | ||
1687 | " dinsu %0, %1, 32, 8\n" | ||
1688 | " andi %1, %2, 0x7\n" | ||
1689 | " beq $0, %1, 9f\n" | ||
1690 | " daddiu %2, %2, -1\n" | ||
1691 | "6: lb %1, 0(%2)\n" | ||
1692 | " dinsu %0, %1, 40, 8\n" | ||
1693 | " andi %1, %2, 0x7\n" | ||
1694 | " beq $0, %1, 9f\n" | ||
1695 | " daddiu %2, %2, -1\n" | ||
1696 | "7: lb %1, 0(%2)\n" | ||
1697 | " dinsu %0, %1, 48, 8\n" | ||
1698 | " andi %1, %2, 0x7\n" | ||
1699 | " beq $0, %1, 9f\n" | ||
1700 | " daddiu %2, %2, -1\n" | ||
1701 | "0: lb %1, 0(%2)\n" | ||
1702 | " dinsu %0, %1, 56, 8\n" | ||
1703 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1704 | "9:\n" | ||
1705 | " .insn\n" | ||
1706 | " .section .fixup,\"ax\"\n" | ||
1707 | "8: li %3,%4\n" | ||
1708 | " j 9b\n" | ||
1709 | " .previous\n" | ||
1710 | " .section __ex_table,\"a\"\n" | ||
1711 | STR(PTR) " 1b,8b\n" | ||
1712 | STR(PTR) " 2b,8b\n" | ||
1713 | STR(PTR) " 3b,8b\n" | ||
1714 | STR(PTR) " 4b,8b\n" | ||
1715 | STR(PTR) " 5b,8b\n" | ||
1716 | STR(PTR) " 6b,8b\n" | ||
1717 | STR(PTR) " 7b,8b\n" | ||
1718 | STR(PTR) " 0b,8b\n" | ||
1719 | " .previous\n" | ||
1720 | " .set pop\n" | ||
1721 | : "+&r"(rt), "=&r"(rs), | ||
1722 | "+&r"(vaddr), "+&r"(err) | ||
1723 | : "i"(SIGSEGV)); | ||
1724 | if (MIPSInst_RT(inst) && !err) | ||
1725 | regs->regs[MIPSInst_RT(inst)] = rt; | ||
1726 | |||
1727 | MIPS_R2_STATS(loads); | ||
1728 | break; | ||
1729 | |||
1730 | case sdl_op: | ||
1731 | if (IS_ENABLED(CONFIG_32BIT)) { | ||
1732 | err = SIGILL; | ||
1733 | break; | ||
1734 | } | ||
1735 | |||
1736 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1737 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1738 | if (!access_ok((void __user *)vaddr, 8)) { | ||
1739 | current->thread.cp0_baduaddr = vaddr; | ||
1740 | err = SIGSEGV; | ||
1741 | break; | ||
1742 | } | ||
1743 | __asm__ __volatile__( | ||
1744 | " .set push\n" | ||
1745 | " .set reorder\n" | ||
1746 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1747 | " dextu %1, %0, 56, 8\n" | ||
1748 | "1: sb %1, 0(%2)\n" | ||
1749 | " andi %1, %2, 0x7\n" | ||
1750 | " beq $0, %1, 9f\n" | ||
1751 | " daddiu %2, %2, -1\n" | ||
1752 | " dextu %1, %0, 48, 8\n" | ||
1753 | "2: sb %1, 0(%2)\n" | ||
1754 | " andi %1, %2, 0x7\n" | ||
1755 | " beq $0, %1, 9f\n" | ||
1756 | " daddiu %2, %2, -1\n" | ||
1757 | " dextu %1, %0, 40, 8\n" | ||
1758 | "3: sb %1, 0(%2)\n" | ||
1759 | " andi %1, %2, 0x7\n" | ||
1760 | " beq $0, %1, 9f\n" | ||
1761 | " daddiu %2, %2, -1\n" | ||
1762 | " dextu %1, %0, 32, 8\n" | ||
1763 | "4: sb %1, 0(%2)\n" | ||
1764 | " andi %1, %2, 0x7\n" | ||
1765 | " beq $0, %1, 9f\n" | ||
1766 | " daddiu %2, %2, -1\n" | ||
1767 | " dext %1, %0, 24, 8\n" | ||
1768 | "5: sb %1, 0(%2)\n" | ||
1769 | " andi %1, %2, 0x7\n" | ||
1770 | " beq $0, %1, 9f\n" | ||
1771 | " daddiu %2, %2, -1\n" | ||
1772 | " dext %1, %0, 16, 8\n" | ||
1773 | "6: sb %1, 0(%2)\n" | ||
1774 | " andi %1, %2, 0x7\n" | ||
1775 | " beq $0, %1, 9f\n" | ||
1776 | " daddiu %2, %2, -1\n" | ||
1777 | " dext %1, %0, 8, 8\n" | ||
1778 | "7: sb %1, 0(%2)\n" | ||
1779 | " andi %1, %2, 0x7\n" | ||
1780 | " beq $0, %1, 9f\n" | ||
1781 | " daddiu %2, %2, -1\n" | ||
1782 | " dext %1, %0, 0, 8\n" | ||
1783 | "0: sb %1, 0(%2)\n" | ||
1784 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1785 | " dextu %1, %0, 56, 8\n" | ||
1786 | "1: sb %1, 0(%2)\n" | ||
1787 | " daddiu %2, %2, 1\n" | ||
1788 | " andi %1, %2, 0x7\n" | ||
1789 | " beq $0, %1, 9f\n" | ||
1790 | " dextu %1, %0, 48, 8\n" | ||
1791 | "2: sb %1, 0(%2)\n" | ||
1792 | " daddiu %2, %2, 1\n" | ||
1793 | " andi %1, %2, 0x7\n" | ||
1794 | " beq $0, %1, 9f\n" | ||
1795 | " dextu %1, %0, 40, 8\n" | ||
1796 | "3: sb %1, 0(%2)\n" | ||
1797 | " daddiu %2, %2, 1\n" | ||
1798 | " andi %1, %2, 0x7\n" | ||
1799 | " beq $0, %1, 9f\n" | ||
1800 | " dextu %1, %0, 32, 8\n" | ||
1801 | "4: sb %1, 0(%2)\n" | ||
1802 | " daddiu %2, %2, 1\n" | ||
1803 | " andi %1, %2, 0x7\n" | ||
1804 | " beq $0, %1, 9f\n" | ||
1805 | " dext %1, %0, 24, 8\n" | ||
1806 | "5: sb %1, 0(%2)\n" | ||
1807 | " daddiu %2, %2, 1\n" | ||
1808 | " andi %1, %2, 0x7\n" | ||
1809 | " beq $0, %1, 9f\n" | ||
1810 | " dext %1, %0, 16, 8\n" | ||
1811 | "6: sb %1, 0(%2)\n" | ||
1812 | " daddiu %2, %2, 1\n" | ||
1813 | " andi %1, %2, 0x7\n" | ||
1814 | " beq $0, %1, 9f\n" | ||
1815 | " dext %1, %0, 8, 8\n" | ||
1816 | "7: sb %1, 0(%2)\n" | ||
1817 | " daddiu %2, %2, 1\n" | ||
1818 | " andi %1, %2, 0x7\n" | ||
1819 | " beq $0, %1, 9f\n" | ||
1820 | " dext %1, %0, 0, 8\n" | ||
1821 | "0: sb %1, 0(%2)\n" | ||
1822 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1823 | "9:\n" | ||
1824 | " .insn\n" | ||
1825 | " .section .fixup,\"ax\"\n" | ||
1826 | "8: li %3,%4\n" | ||
1827 | " j 9b\n" | ||
1828 | " .previous\n" | ||
1829 | " .section __ex_table,\"a\"\n" | ||
1830 | STR(PTR) " 1b,8b\n" | ||
1831 | STR(PTR) " 2b,8b\n" | ||
1832 | STR(PTR) " 3b,8b\n" | ||
1833 | STR(PTR) " 4b,8b\n" | ||
1834 | STR(PTR) " 5b,8b\n" | ||
1835 | STR(PTR) " 6b,8b\n" | ||
1836 | STR(PTR) " 7b,8b\n" | ||
1837 | STR(PTR) " 0b,8b\n" | ||
1838 | " .previous\n" | ||
1839 | " .set pop\n" | ||
1840 | : "+&r"(rt), "=&r"(rs), | ||
1841 | "+&r"(vaddr), "+&r"(err) | ||
1842 | : "i"(SIGSEGV) | ||
1843 | : "memory"); | ||
1844 | |||
1845 | MIPS_R2_STATS(stores); | ||
1846 | break; | ||
1847 | |||
1848 | case sdr_op: | ||
1849 | if (IS_ENABLED(CONFIG_32BIT)) { | ||
1850 | err = SIGILL; | ||
1851 | break; | ||
1852 | } | ||
1853 | |||
1854 | rt = regs->regs[MIPSInst_RT(inst)]; | ||
1855 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1856 | if (!access_ok((void __user *)vaddr, 8)) { | ||
1857 | current->thread.cp0_baduaddr = vaddr; | ||
1858 | err = SIGSEGV; | ||
1859 | break; | ||
1860 | } | ||
1861 | __asm__ __volatile__( | ||
1862 | " .set push\n" | ||
1863 | " .set reorder\n" | ||
1864 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
1865 | " dext %1, %0, 0, 8\n" | ||
1866 | "1: sb %1, 0(%2)\n" | ||
1867 | " daddiu %2, %2, 1\n" | ||
1868 | " andi %1, %2, 0x7\n" | ||
1869 | " beq $0, %1, 9f\n" | ||
1870 | " dext %1, %0, 8, 8\n" | ||
1871 | "2: sb %1, 0(%2)\n" | ||
1872 | " daddiu %2, %2, 1\n" | ||
1873 | " andi %1, %2, 0x7\n" | ||
1874 | " beq $0, %1, 9f\n" | ||
1875 | " dext %1, %0, 16, 8\n" | ||
1876 | "3: sb %1, 0(%2)\n" | ||
1877 | " daddiu %2, %2, 1\n" | ||
1878 | " andi %1, %2, 0x7\n" | ||
1879 | " beq $0, %1, 9f\n" | ||
1880 | " dext %1, %0, 24, 8\n" | ||
1881 | "4: sb %1, 0(%2)\n" | ||
1882 | " daddiu %2, %2, 1\n" | ||
1883 | " andi %1, %2, 0x7\n" | ||
1884 | " beq $0, %1, 9f\n" | ||
1885 | " dextu %1, %0, 32, 8\n" | ||
1886 | "5: sb %1, 0(%2)\n" | ||
1887 | " daddiu %2, %2, 1\n" | ||
1888 | " andi %1, %2, 0x7\n" | ||
1889 | " beq $0, %1, 9f\n" | ||
1890 | " dextu %1, %0, 40, 8\n" | ||
1891 | "6: sb %1, 0(%2)\n" | ||
1892 | " daddiu %2, %2, 1\n" | ||
1893 | " andi %1, %2, 0x7\n" | ||
1894 | " beq $0, %1, 9f\n" | ||
1895 | " dextu %1, %0, 48, 8\n" | ||
1896 | "7: sb %1, 0(%2)\n" | ||
1897 | " daddiu %2, %2, 1\n" | ||
1898 | " andi %1, %2, 0x7\n" | ||
1899 | " beq $0, %1, 9f\n" | ||
1900 | " dextu %1, %0, 56, 8\n" | ||
1901 | "0: sb %1, 0(%2)\n" | ||
1902 | #else /* !CONFIG_CPU_LITTLE_ENDIAN */ | ||
1903 | " dext %1, %0, 0, 8\n" | ||
1904 | "1: sb %1, 0(%2)\n" | ||
1905 | " andi %1, %2, 0x7\n" | ||
1906 | " beq $0, %1, 9f\n" | ||
1907 | " daddiu %2, %2, -1\n" | ||
1908 | " dext %1, %0, 8, 8\n" | ||
1909 | "2: sb %1, 0(%2)\n" | ||
1910 | " andi %1, %2, 0x7\n" | ||
1911 | " beq $0, %1, 9f\n" | ||
1912 | " daddiu %2, %2, -1\n" | ||
1913 | " dext %1, %0, 16, 8\n" | ||
1914 | "3: sb %1, 0(%2)\n" | ||
1915 | " andi %1, %2, 0x7\n" | ||
1916 | " beq $0, %1, 9f\n" | ||
1917 | " daddiu %2, %2, -1\n" | ||
1918 | " dext %1, %0, 24, 8\n" | ||
1919 | "4: sb %1, 0(%2)\n" | ||
1920 | " andi %1, %2, 0x7\n" | ||
1921 | " beq $0, %1, 9f\n" | ||
1922 | " daddiu %2, %2, -1\n" | ||
1923 | " dextu %1, %0, 32, 8\n" | ||
1924 | "5: sb %1, 0(%2)\n" | ||
1925 | " andi %1, %2, 0x7\n" | ||
1926 | " beq $0, %1, 9f\n" | ||
1927 | " daddiu %2, %2, -1\n" | ||
1928 | " dextu %1, %0, 40, 8\n" | ||
1929 | "6: sb %1, 0(%2)\n" | ||
1930 | " andi %1, %2, 0x7\n" | ||
1931 | " beq $0, %1, 9f\n" | ||
1932 | " daddiu %2, %2, -1\n" | ||
1933 | " dextu %1, %0, 48, 8\n" | ||
1934 | "7: sb %1, 0(%2)\n" | ||
1935 | " andi %1, %2, 0x7\n" | ||
1936 | " beq $0, %1, 9f\n" | ||
1937 | " daddiu %2, %2, -1\n" | ||
1938 | " dextu %1, %0, 56, 8\n" | ||
1939 | "0: sb %1, 0(%2)\n" | ||
1940 | #endif /* CONFIG_CPU_LITTLE_ENDIAN */ | ||
1941 | "9:\n" | ||
1942 | " .insn\n" | ||
1943 | " .section .fixup,\"ax\"\n" | ||
1944 | "8: li %3,%4\n" | ||
1945 | " j 9b\n" | ||
1946 | " .previous\n" | ||
1947 | " .section __ex_table,\"a\"\n" | ||
1948 | STR(PTR) " 1b,8b\n" | ||
1949 | STR(PTR) " 2b,8b\n" | ||
1950 | STR(PTR) " 3b,8b\n" | ||
1951 | STR(PTR) " 4b,8b\n" | ||
1952 | STR(PTR) " 5b,8b\n" | ||
1953 | STR(PTR) " 6b,8b\n" | ||
1954 | STR(PTR) " 7b,8b\n" | ||
1955 | STR(PTR) " 0b,8b\n" | ||
1956 | " .previous\n" | ||
1957 | " .set pop\n" | ||
1958 | : "+&r"(rt), "=&r"(rs), | ||
1959 | "+&r"(vaddr), "+&r"(err) | ||
1960 | : "i"(SIGSEGV) | ||
1961 | : "memory"); | ||
1962 | |||
1963 | MIPS_R2_STATS(stores); | ||
1964 | |||
1965 | break; | ||
1966 | case ll_op: | ||
1967 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
1968 | if (vaddr & 0x3) { | ||
1969 | current->thread.cp0_baduaddr = vaddr; | ||
1970 | err = SIGBUS; | ||
1971 | break; | ||
1972 | } | ||
1973 | if (!access_ok((void __user *)vaddr, 4)) { | ||
1974 | current->thread.cp0_baduaddr = vaddr; | ||
1975 | err = SIGBUS; | ||
1976 | break; | ||
1977 | } | ||
1978 | |||
1979 | if (!cpu_has_rw_llb) { | ||
1980 | /* | ||
1981 | * An LL/SC block can't be safely emulated without | ||
1982 | * Config5/LLB being available. So it's probably time to | ||
1983 | * kill our process before things get any worse. This is | ||
1984 | * because Config5/LLB allows us to use ERETNC so that | ||
1985 | * the LLAddr/LLB bit is not cleared when we return from | ||
1986 | * an exception. MIPS R2 LL/SC instructions trap with an | ||
1987 | * RI exception, so once we emulate them here, we return | ||
1988 | * to userland with ERETNC. That preserves the | ||
1989 | * LLAddr/LLB bit, so the subsequent SC instruction will | ||
1990 | * succeed, preserving the atomic semantics of the LL/SC | ||
1991 | * block. Without that, there is no safe way to emulate | ||
1992 | * an LL/SC block in MIPSR2 userland. | ||
1993 | */ | ||
1994 | pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); | ||
1995 | err = SIGKILL; | ||
1996 | break; | ||
1997 | } | ||
1998 | |||
1999 | __asm__ __volatile__( | ||
2000 | "1:\n" | ||
2001 | "ll %0, 0(%2)\n" | ||
2002 | "2:\n" | ||
2003 | ".insn\n" | ||
2004 | ".section .fixup,\"ax\"\n" | ||
2005 | "3:\n" | ||
2006 | "li %1, %3\n" | ||
2007 | "j 2b\n" | ||
2008 | ".previous\n" | ||
2009 | ".section __ex_table,\"a\"\n" | ||
2010 | STR(PTR) " 1b,3b\n" | ||
2011 | ".previous\n" | ||
2012 | : "=&r"(res), "+&r"(err) | ||
2013 | : "r"(vaddr), "i"(SIGSEGV) | ||
2014 | : "memory"); | ||
2015 | |||
2016 | if (MIPSInst_RT(inst) && !err) | ||
2017 | regs->regs[MIPSInst_RT(inst)] = res; | ||
2018 | MIPS_R2_STATS(llsc); | ||
2019 | |||
2020 | break; | ||
2021 | |||
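/*
 * The kind of userland sequence this path must keep atomic (sketch,
 * not taken from the original source):
 *
 *	retry:
 *		ll	t0, 0(a0)	# traps with RI on R6, emulated here
 *		addiu	t0, t0, 1
 *		sc	t0, 0(a0)	# handled by the sc_op case below
 *		beqz	t0, retry
 *
 * Returning with ERETNC instead of ERET leaves LLAddr/LLB intact, so
 * the emulated SC can still succeed and the loop stays atomic.
 */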
2022 | case sc_op: | ||
2023 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
2024 | if (vaddr & 0x3) { | ||
2025 | current->thread.cp0_baduaddr = vaddr; | ||
2026 | err = SIGBUS; | ||
2027 | break; | ||
2028 | } | ||
2029 | if (!access_ok((void __user *)vaddr, 4)) { | ||
2030 | current->thread.cp0_baduaddr = vaddr; | ||
2031 | err = SIGBUS; | ||
2032 | break; | ||
2033 | } | ||
2034 | |||
2035 | if (!cpu_has_rw_llb) { | ||
2036 | /* | ||
2037 | * An LL/SC block can't be safely emulated without | ||
2038 | * Config5/LLB being available. So it's probably time to | ||
2039 | * kill our process before things get any worse. This is | ||
2040 | * because Config5/LLB allows us to use ERETNC so that | ||
2041 | * the LLAddr/LLB bit is not cleared when we return from | ||
2042 | * an exception. MIPS R2 LL/SC instructions trap with an | ||
2043 | * RI exception, so once we emulate them here, we return | ||
2044 | * to userland with ERETNC. That preserves the | ||
2045 | * LLAddr/LLB bit, so the subsequent SC instruction will | ||
2046 | * succeed, preserving the atomic semantics of the LL/SC | ||
2047 | * block. Without that, there is no safe way to emulate | ||
2048 | * an LL/SC block in MIPSR2 userland. | ||
2049 | */ | ||
2050 | pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); | ||
2051 | err = SIGKILL; | ||
2052 | break; | ||
2053 | } | ||
2054 | |||
2055 | res = regs->regs[MIPSInst_RT(inst)]; | ||
2056 | |||
2057 | __asm__ __volatile__( | ||
2058 | "1:\n" | ||
2059 | "sc %0, 0(%2)\n" | ||
2060 | "2:\n" | ||
2061 | ".insn\n" | ||
2062 | ".section .fixup,\"ax\"\n" | ||
2063 | "3:\n" | ||
2064 | "li %1, %3\n" | ||
2065 | "j 2b\n" | ||
2066 | ".previous\n" | ||
2067 | ".section __ex_table,\"a\"\n" | ||
2068 | STR(PTR) " 1b,3b\n" | ||
2069 | ".previous\n" | ||
2070 | : "+&r"(res), "+&r"(err) | ||
2071 | : "r"(vaddr), "i"(SIGSEGV)); | ||
2072 | |||
2073 | if (MIPSInst_RT(inst) && !err) | ||
2074 | regs->regs[MIPSInst_RT(inst)] = res; | ||
2075 | |||
2076 | MIPS_R2_STATS(llsc); | ||
2077 | |||
2078 | break; | ||
2079 | |||
2080 | case lld_op: | ||
2081 | if (IS_ENABLED(CONFIG_32BIT)) { | ||
2082 | err = SIGILL; | ||
2083 | break; | ||
2084 | } | ||
2085 | |||
2086 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
2087 | if (vaddr & 0x7) { | ||
2088 | current->thread.cp0_baduaddr = vaddr; | ||
2089 | err = SIGBUS; | ||
2090 | break; | ||
2091 | } | ||
2092 | if (!access_ok((void __user *)vaddr, 8)) { | ||
2093 | current->thread.cp0_baduaddr = vaddr; | ||
2094 | err = SIGBUS; | ||
2095 | break; | ||
2096 | } | ||
2097 | |||
2098 | if (!cpu_has_rw_llb) { | ||
2099 | /* | ||
2100 | * An LL/SC block can't be safely emulated without | ||
2101 | * Config5/LLB being available. So it's probably time to | ||
2102 | * kill our process before things get any worse. This is | ||
2103 | * because Config5/LLB allows us to use ERETNC so that | ||
2104 | * the LLAddr/LLB bit is not cleared when we return from | ||
2105 | * an exception. MIPS R2 LL/SC instructions trap with an | ||
2106 | * RI exception, so once we emulate them here, we return | ||
2107 | * to userland with ERETNC. That preserves the | ||
2108 | * LLAddr/LLB bit, so the subsequent SC instruction will | ||
2109 | * succeed, preserving the atomic semantics of the LL/SC | ||
2110 | * block. Without that, there is no safe way to emulate | ||
2111 | * an LL/SC block in MIPSR2 userland. | ||
2112 | */ | ||
2113 | pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); | ||
2114 | err = SIGKILL; | ||
2115 | break; | ||
2116 | } | ||
2117 | |||
2118 | __asm__ __volatile__( | ||
2119 | "1:\n" | ||
2120 | "lld %0, 0(%2)\n" | ||
2121 | "2:\n" | ||
2122 | ".insn\n" | ||
2123 | ".section .fixup,\"ax\"\n" | ||
2124 | "3:\n" | ||
2125 | "li %1, %3\n" | ||
2126 | "j 2b\n" | ||
2127 | ".previous\n" | ||
2128 | ".section __ex_table,\"a\"\n" | ||
2129 | STR(PTR) " 1b,3b\n" | ||
2130 | ".previous\n" | ||
2131 | : "=&r"(res), "+&r"(err) | ||
2132 | : "r"(vaddr), "i"(SIGSEGV) | ||
2133 | : "memory"); | ||
2134 | if (MIPSInst_RT(inst) && !err) | ||
2135 | regs->regs[MIPSInst_RT(inst)] = res; | ||
2136 | |||
2137 | MIPS_R2_STATS(llsc); | ||
2138 | |||
2139 | break; | ||
2140 | |||
2141 | case scd_op: | ||
2142 | if (IS_ENABLED(CONFIG_32BIT)) { | ||
2143 | err = SIGILL; | ||
2144 | break; | ||
2145 | } | ||
2146 | |||
2147 | vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst); | ||
2148 | if (vaddr & 0x7) { | ||
2149 | current->thread.cp0_baduaddr = vaddr; | ||
2150 | err = SIGBUS; | ||
2151 | break; | ||
2152 | } | ||
2153 | if (!access_ok((void __user *)vaddr, 8)) { | ||
2154 | current->thread.cp0_baduaddr = vaddr; | ||
2155 | err = SIGBUS; | ||
2156 | break; | ||
2157 | } | ||
2158 | |||
2159 | if (!cpu_has_rw_llb) { | ||
2160 | /* | ||
2161 | * An LL/SC block can't be safely emulated without | ||
2162 | * Config5/LLB being available. So it's probably time to | ||
2163 | * kill our process before things get any worse. This is | ||
2164 | * because Config5/LLB allows us to use ERETNC so that | ||
2165 | * the LLAddr/LLB bit is not cleared when we return from | ||
2166 | * an exception. MIPS R2 LL/SC instructions trap with an | ||
2167 | * RI exception, so once we emulate them here, we return | ||
2168 | * to userland with ERETNC. That preserves the | ||
2169 | * LLAddr/LLB bit, so the subsequent SC instruction will | ||
2170 | * succeed, preserving the atomic semantics of the LL/SC | ||
2171 | * block. Without that, there is no safe way to emulate | ||
2172 | * an LL/SC block in MIPSR2 userland. | ||
2173 | */ | ||
2174 | pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n"); | ||
2175 | err = SIGKILL; | ||
2176 | break; | ||
2177 | } | ||
2178 | |||
2179 | res = regs->regs[MIPSInst_RT(inst)]; | ||
2180 | |||
2181 | __asm__ __volatile__( | ||
2182 | "1:\n" | ||
2183 | "scd %0, 0(%2)\n" | ||
2184 | "2:\n" | ||
2185 | ".insn\n" | ||
2186 | ".section .fixup,\"ax\"\n" | ||
2187 | "3:\n" | ||
2188 | "li %1, %3\n" | ||
2189 | "j 2b\n" | ||
2190 | ".previous\n" | ||
2191 | ".section __ex_table,\"a\"\n" | ||
2192 | STR(PTR) " 1b,3b\n" | ||
2193 | ".previous\n" | ||
2194 | : "+&r"(res), "+&r"(err) | ||
2195 | : "r"(vaddr), "i"(SIGSEGV)); | ||
2196 | |||
2197 | if (MIPSInst_RT(inst) && !err) | ||
2198 | regs->regs[MIPSInst_RT(inst)] = res; | ||
2199 | |||
2200 | MIPS_R2_STATS(llsc); | ||
2201 | |||
2202 | break; | ||
2203 | case pref_op: | ||
2204 | /* skip it */ | ||
2205 | break; | ||
2206 | default: | ||
2207 | err = SIGILL; | ||
2208 | } | ||
2209 | |||
2210 | /* | ||
2211 | * Let's not return to userland just yet. It's costly and | ||
2212 | * it's likely we have more R2 instructions to emulate | ||
2213 | */ | ||
2214 | if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) { | ||
2215 | regs->cp0_cause &= ~CAUSEF_BD; | ||
2216 | err = get_user(inst, (u32 __user *)regs->cp0_epc); | ||
2217 | if (!err) | ||
2218 | goto repeat; | ||
2219 | |||
2220 | if (err < 0) | ||
2221 | err = SIGSEGV; | ||
2222 | } | ||
2223 | |||
2224 | if (err && (err != SIGEMT)) { | ||
2225 | regs->regs[31] = r31; | ||
2226 | regs->cp0_epc = epc; | ||
2227 | } | ||
2228 | |||
2229 | /* Likely a MIPS R6 compatible instruction */ | ||
2230 | if (pass && (err == SIGILL)) | ||
2231 | err = 0; | ||
2232 | |||
2233 | return err; | ||
2234 | } | ||
2235 | |||
2236 | #ifdef CONFIG_DEBUG_FS | ||
2237 | |||
2238 | static int mipsr2_emul_show(struct seq_file *s, void *unused) | ||
2239 | { | ||
2240 | |||
2241 | seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n"); | ||
2242 | seq_printf(s, "movs\t\t%ld\t%ld\n", | ||
2243 | (unsigned long)__this_cpu_read(mipsr2emustats.movs), | ||
2244 | (unsigned long)__this_cpu_read(mipsr2bdemustats.movs)); | ||
2245 | seq_printf(s, "hilo\t\t%ld\t%ld\n", | ||
2246 | (unsigned long)__this_cpu_read(mipsr2emustats.hilo), | ||
2247 | (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo)); | ||
2248 | seq_printf(s, "muls\t\t%ld\t%ld\n", | ||
2249 | (unsigned long)__this_cpu_read(mipsr2emustats.muls), | ||
2250 | (unsigned long)__this_cpu_read(mipsr2bdemustats.muls)); | ||
2251 | seq_printf(s, "divs\t\t%ld\t%ld\n", | ||
2252 | (unsigned long)__this_cpu_read(mipsr2emustats.divs), | ||
2253 | (unsigned long)__this_cpu_read(mipsr2bdemustats.divs)); | ||
2254 | seq_printf(s, "dsps\t\t%ld\t%ld\n", | ||
2255 | (unsigned long)__this_cpu_read(mipsr2emustats.dsps), | ||
2256 | (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps)); | ||
2257 | seq_printf(s, "bops\t\t%ld\t%ld\n", | ||
2258 | (unsigned long)__this_cpu_read(mipsr2emustats.bops), | ||
2259 | (unsigned long)__this_cpu_read(mipsr2bdemustats.bops)); | ||
2260 | seq_printf(s, "traps\t\t%ld\t%ld\n", | ||
2261 | (unsigned long)__this_cpu_read(mipsr2emustats.traps), | ||
2262 | (unsigned long)__this_cpu_read(mipsr2bdemustats.traps)); | ||
2263 | seq_printf(s, "fpus\t\t%ld\t%ld\n", | ||
2264 | (unsigned long)__this_cpu_read(mipsr2emustats.fpus), | ||
2265 | (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus)); | ||
2266 | seq_printf(s, "loads\t\t%ld\t%ld\n", | ||
2267 | (unsigned long)__this_cpu_read(mipsr2emustats.loads), | ||
2268 | (unsigned long)__this_cpu_read(mipsr2bdemustats.loads)); | ||
2269 | seq_printf(s, "stores\t\t%ld\t%ld\n", | ||
2270 | (unsigned long)__this_cpu_read(mipsr2emustats.stores), | ||
2271 | (unsigned long)__this_cpu_read(mipsr2bdemustats.stores)); | ||
2272 | seq_printf(s, "llsc\t\t%ld\t%ld\n", | ||
2273 | (unsigned long)__this_cpu_read(mipsr2emustats.llsc), | ||
2274 | (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc)); | ||
2275 | seq_printf(s, "dsemul\t\t%ld\t%ld\n", | ||
2276 | (unsigned long)__this_cpu_read(mipsr2emustats.dsemul), | ||
2277 | (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul)); | ||
2278 | seq_printf(s, "jr\t\t%ld\n", | ||
2279 | (unsigned long)__this_cpu_read(mipsr2bremustats.jrs)); | ||
2280 | seq_printf(s, "bltzl\t\t%ld\n", | ||
2281 | (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl)); | ||
2282 | seq_printf(s, "bgezl\t\t%ld\n", | ||
2283 | (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl)); | ||
2284 | seq_printf(s, "bltzll\t\t%ld\n", | ||
2285 | (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll)); | ||
2286 | seq_printf(s, "bgezll\t\t%ld\n", | ||
2287 | (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll)); | ||
2288 | seq_printf(s, "bltzal\t\t%ld\n", | ||
2289 | (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal)); | ||
2290 | seq_printf(s, "bgezal\t\t%ld\n", | ||
2291 | (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal)); | ||
2292 | seq_printf(s, "beql\t\t%ld\n", | ||
2293 | (unsigned long)__this_cpu_read(mipsr2bremustats.beql)); | ||
2294 | seq_printf(s, "bnel\t\t%ld\n", | ||
2295 | (unsigned long)__this_cpu_read(mipsr2bremustats.bnel)); | ||
2296 | seq_printf(s, "blezl\t\t%ld\n", | ||
2297 | (unsigned long)__this_cpu_read(mipsr2bremustats.blezl)); | ||
2298 | seq_printf(s, "bgtzl\t\t%ld\n", | ||
2299 | (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl)); | ||
2300 | |||
2301 | return 0; | ||
2302 | } | ||
2303 | |||
2304 | static int mipsr2_clear_show(struct seq_file *s, void *unused) | ||
2305 | { | ||
2306 | mipsr2_emul_show(s, unused); | ||
2307 | |||
2308 | __this_cpu_write((mipsr2emustats).movs, 0); | ||
2309 | __this_cpu_write((mipsr2bdemustats).movs, 0); | ||
2310 | __this_cpu_write((mipsr2emustats).hilo, 0); | ||
2311 | __this_cpu_write((mipsr2bdemustats).hilo, 0); | ||
2312 | __this_cpu_write((mipsr2emustats).muls, 0); | ||
2313 | __this_cpu_write((mipsr2bdemustats).muls, 0); | ||
2314 | __this_cpu_write((mipsr2emustats).divs, 0); | ||
2315 | __this_cpu_write((mipsr2bdemustats).divs, 0); | ||
2316 | __this_cpu_write((mipsr2emustats).dsps, 0); | ||
2317 | __this_cpu_write((mipsr2bdemustats).dsps, 0); | ||
2318 | __this_cpu_write((mipsr2emustats).bops, 0); | ||
2319 | __this_cpu_write((mipsr2bdemustats).bops, 0); | ||
2320 | __this_cpu_write((mipsr2emustats).traps, 0); | ||
2321 | __this_cpu_write((mipsr2bdemustats).traps, 0); | ||
2322 | __this_cpu_write((mipsr2emustats).fpus, 0); | ||
2323 | __this_cpu_write((mipsr2bdemustats).fpus, 0); | ||
2324 | __this_cpu_write((mipsr2emustats).loads, 0); | ||
2325 | __this_cpu_write((mipsr2bdemustats).loads, 0); | ||
2326 | __this_cpu_write((mipsr2emustats).stores, 0); | ||
2327 | __this_cpu_write((mipsr2bdemustats).stores, 0); | ||
2328 | __this_cpu_write((mipsr2emustats).llsc, 0); | ||
2329 | __this_cpu_write((mipsr2bdemustats).llsc, 0); | ||
2330 | __this_cpu_write((mipsr2emustats).dsemul, 0); | ||
2331 | __this_cpu_write((mipsr2bdemustats).dsemul, 0); | ||
2332 | __this_cpu_write((mipsr2bremustats).jrs, 0); | ||
2333 | __this_cpu_write((mipsr2bremustats).bltzl, 0); | ||
2334 | __this_cpu_write((mipsr2bremustats).bgezl, 0); | ||
2335 | __this_cpu_write((mipsr2bremustats).bltzll, 0); | ||
2336 | __this_cpu_write((mipsr2bremustats).bgezll, 0); | ||
2337 | __this_cpu_write((mipsr2bremustats).bltzall, 0); | ||
2338 | __this_cpu_write((mipsr2bremustats).bgezall, 0); | ||
2339 | __this_cpu_write((mipsr2bremustats).bltzal, 0); | ||
2340 | __this_cpu_write((mipsr2bremustats).bgezal, 0); | ||
2341 | __this_cpu_write((mipsr2bremustats).beql, 0); | ||
2342 | __this_cpu_write((mipsr2bremustats).bnel, 0); | ||
2343 | __this_cpu_write((mipsr2bremustats).blezl, 0); | ||
2344 | __this_cpu_write((mipsr2bremustats).bgtzl, 0); | ||
2345 | |||
2346 | return 0; | ||
2347 | } | ||
2348 | |||
2349 | DEFINE_SHOW_ATTRIBUTE(mipsr2_emul); | ||
2350 | DEFINE_SHOW_ATTRIBUTE(mipsr2_clear); | ||
2351 | |||
2352 | static int __init mipsr2_init_debugfs(void) | ||
2353 | { | ||
2354 | debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL, | ||
2355 | &mipsr2_emul_fops); | ||
2356 | debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir, | ||
2357 | NULL, &mipsr2_clear_fops); | ||
2358 | return 0; | ||
2359 | } | ||
2360 | |||
2361 | device_initcall(mipsr2_init_debugfs); | ||
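/*
 * Expected usage, assuming debugfs is mounted at /sys/kernel/debug and
 * mips_debugfs_dir is the usual "mips" directory:
 *
 *	# cat /sys/kernel/debug/mips/r2_emul_stats
 *	Instruction	Total	BDslot
 *	------------------------------
 *	movs		12	0
 *	...
 *
 * Reading r2_emul_stats_clear prints the same table and then zeroes
 * the counters. Note that __this_cpu_read() reports only the counters
 * of the CPU servicing the read.
 */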
2362 | |||
2363 | #endif /* CONFIG_DEBUG_FS */ | ||
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c new file mode 100644 index 000000000..3c0c3d126 --- /dev/null +++ b/arch/mips/kernel/module.c | |||
@@ -0,0 +1,457 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * | ||
4 | * Copyright (C) 2001 Rusty Russell. | ||
5 | * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org) | ||
6 | * Copyright (C) 2005 Thiemo Seufer | ||
7 | */ | ||
8 | |||
9 | #undef DEBUG | ||
10 | |||
11 | #include <linux/extable.h> | ||
12 | #include <linux/moduleloader.h> | ||
13 | #include <linux/elf.h> | ||
14 | #include <linux/mm.h> | ||
15 | #include <linux/numa.h> | ||
16 | #include <linux/vmalloc.h> | ||
17 | #include <linux/slab.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/string.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/spinlock.h> | ||
22 | #include <linux/jump_label.h> | ||
23 | |||
24 | |||
25 | struct mips_hi16 { | ||
26 | struct mips_hi16 *next; | ||
27 | Elf_Addr *addr; | ||
28 | Elf_Addr value; | ||
29 | }; | ||
30 | |||
31 | static LIST_HEAD(dbe_list); | ||
32 | static DEFINE_SPINLOCK(dbe_lock); | ||
33 | |||
34 | #ifdef MODULE_START | ||
35 | void *module_alloc(unsigned long size) | ||
36 | { | ||
37 | return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END, | ||
38 | GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, | ||
39 | __builtin_return_address(0)); | ||
40 | } | ||
41 | #endif | ||
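/*
 * Design note: when MODULE_START is defined, module memory is carved
 * from a dedicated window instead of generic vmalloc space, keeping
 * module text at sign-extended 32-bit addresses that the HI16/LO16
 * and 26-bit jump relocations below are able to encode.
 */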
42 | |||
43 | static int apply_r_mips_none(struct module *me, u32 *location, | ||
44 | u32 base, Elf_Addr v, bool rela) | ||
45 | { | ||
46 | return 0; | ||
47 | } | ||
48 | |||
49 | static int apply_r_mips_32(struct module *me, u32 *location, | ||
50 | u32 base, Elf_Addr v, bool rela) | ||
51 | { | ||
52 | *location = base + v; | ||
53 | |||
54 | return 0; | ||
55 | } | ||
56 | |||
57 | static int apply_r_mips_26(struct module *me, u32 *location, | ||
58 | u32 base, Elf_Addr v, bool rela) | ||
59 | { | ||
60 | if (v % 4) { | ||
61 | pr_err("module %s: dangerous R_MIPS_26 relocation\n", | ||
62 | me->name); | ||
63 | return -ENOEXEC; | ||
64 | } | ||
65 | |||
66 | if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
67 | pr_err("module %s: relocation overflow\n", | ||
68 | me->name); | ||
69 | return -ENOEXEC; | ||
70 | } | ||
71 | |||
72 | *location = (*location & ~0x03ffffff) | | ||
73 | ((base + (v >> 2)) & 0x03ffffff); | ||
74 | |||
75 | return 0; | ||
76 | } | ||
77 | |||
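/*
 * Worked example (illustrative values, RELA-style so base == 0): a
 * jump at location 0x8010000c targeting v = 0x80200000 shares the top
 * nibble 0x8 with (location + 4), so it passes the overflow check and
 * the index field becomes (v >> 2) & 0x03ffffff = 0x00080000. A target
 * in a different 256 MB segment would be rejected above.
 */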
78 | static int apply_r_mips_hi16(struct module *me, u32 *location, | ||
79 | u32 base, Elf_Addr v, bool rela) | ||
80 | { | ||
81 | struct mips_hi16 *n; | ||
82 | |||
83 | if (rela) { | ||
84 | *location = (*location & 0xffff0000) | | ||
85 | ((((long long) v + 0x8000LL) >> 16) & 0xffff); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * We cannot relocate this one now because we don't know the value of | ||
91 | * the carry we need to add. Save the information, and let LO16 do the | ||
92 | * actual relocation. | ||
93 | */ | ||
94 | n = kmalloc(sizeof *n, GFP_KERNEL); | ||
95 | if (!n) | ||
96 | return -ENOMEM; | ||
97 | |||
98 | n->addr = (Elf_Addr *)location; | ||
99 | n->value = v; | ||
100 | n->next = me->arch.r_mips_hi16_list; | ||
101 | me->arch.r_mips_hi16_list = n; | ||
102 | |||
103 | return 0; | ||
104 | } | ||
105 | |||
106 | static void free_relocation_chain(struct mips_hi16 *l) | ||
107 | { | ||
108 | struct mips_hi16 *next; | ||
109 | |||
110 | while (l) { | ||
111 | next = l->next; | ||
112 | kfree(l); | ||
113 | l = next; | ||
114 | } | ||
115 | } | ||
116 | |||
117 | static int apply_r_mips_lo16(struct module *me, u32 *location, | ||
118 | u32 base, Elf_Addr v, bool rela) | ||
119 | { | ||
120 | unsigned long insnlo = base; | ||
121 | struct mips_hi16 *l; | ||
122 | Elf_Addr val, vallo; | ||
123 | |||
124 | if (rela) { | ||
125 | *location = (*location & 0xffff0000) | (v & 0xffff); | ||
126 | return 0; | ||
127 | } | ||
128 | |||
129 | /* Sign extend the addend we extract from the lo insn. */ | ||
130 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; | ||
131 | |||
132 | if (me->arch.r_mips_hi16_list != NULL) { | ||
133 | l = me->arch.r_mips_hi16_list; | ||
134 | while (l != NULL) { | ||
135 | struct mips_hi16 *next; | ||
136 | unsigned long insn; | ||
137 | |||
138 | /* | ||
139 | * The value for the HI16 had best be the same. | ||
140 | */ | ||
141 | if (v != l->value) | ||
142 | goto out_danger; | ||
143 | |||
144 | /* | ||
145 | * Do the HI16 relocation. Note that we actually don't | ||
146 | * need to know anything about the LO16 itself, except | ||
147 | * where to find the low 16 bits of the addend needed | ||
148 | * by the LO16. | ||
149 | */ | ||
150 | insn = *l->addr; | ||
151 | val = ((insn & 0xffff) << 16) + vallo; | ||
152 | val += v; | ||
153 | |||
154 | /* | ||
155 | * Account for the sign extension that will happen in | ||
156 | * the low bits. | ||
157 | */ | ||
158 | val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; | ||
159 | |||
160 | insn = (insn & ~0xffff) | val; | ||
161 | *l->addr = insn; | ||
162 | |||
163 | next = l->next; | ||
164 | kfree(l); | ||
165 | l = next; | ||
166 | } | ||
167 | |||
168 | me->arch.r_mips_hi16_list = NULL; | ||
169 | } | ||
170 | |||
171 | /* | ||
172 | * Ok, we're done with the HI16 relocs. Now deal with the LO16. | ||
173 | */ | ||
174 | val = v + vallo; | ||
175 | insnlo = (insnlo & ~0xffff) | (val & 0xffff); | ||
176 | *location = insnlo; | ||
177 | |||
178 | return 0; | ||
179 | |||
180 | out_danger: | ||
181 | free_relocation_chain(l); | ||
182 | me->arch.r_mips_hi16_list = NULL; | ||
183 | |||
184 | pr_err("module %s: dangerous R_MIPS_LO16 relocation\n", me->name); | ||
185 | |||
186 | return -ENOEXEC; | ||
187 | } | ||
188 | |||
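/*
 * Worked example of the HI16/LO16 carry (illustrative numbers): for a
 * final value val = 0x00408234, the LO16 field holds 0x8234, which the
 * CPU sign-extends to 0xffff8234 when the pair executes. The HI16
 * field must compensate by storing (val >> 16) + 1 = 0x0041 rather
 * than 0x0040, which is exactly what the "(val & 0x8000) != 0"
 * rounding term above produces.
 */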
189 | static int apply_r_mips_pc(struct module *me, u32 *location, u32 base, | ||
190 | Elf_Addr v, unsigned int bits) | ||
191 | { | ||
192 | unsigned long mask = GENMASK(bits - 1, 0); | ||
193 | unsigned long se_bits; | ||
194 | long offset; | ||
195 | |||
196 | if (v % 4) { | ||
197 | pr_err("module %s: dangerous R_MIPS_PC%u relocation\n", | ||
198 | me->name, bits); | ||
199 | return -ENOEXEC; | ||
200 | } | ||
201 | |||
202 | /* retrieve & sign extend implicit addend if any */ | ||
203 | offset = base & mask; | ||
204 | offset |= (offset & BIT(bits - 1)) ? ~mask : 0; | ||
205 | |||
206 | offset += ((long)v - (long)location) >> 2; | ||
207 | |||
208 | /* check the sign bit onwards are identical - ie. we didn't overflow */ | ||
209 | se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0; | ||
210 | if ((offset & ~mask) != (se_bits & ~mask)) { | ||
211 | pr_err("module %s: relocation overflow\n", me->name); | ||
212 | return -ENOEXEC; | ||
213 | } | ||
214 | |||
215 | *location = (*location & ~mask) | (offset & mask); | ||
216 | |||
217 | return 0; | ||
218 | } | ||
219 | |||
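/*
 * Worked example (illustrative, assuming a zero implicit addend): a
 * PC16 branch at location 0x100 resolving to v = 0x90 encodes
 * offset = (0x90 - 0x100) >> 2 = -28, i.e. 0xffe4 in the low 16 bits;
 * the check above then verifies that all bits from the sign bit
 * upwards are ones, confirming the offset did not overflow.
 */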
220 | static int apply_r_mips_pc16(struct module *me, u32 *location, | ||
221 | u32 base, Elf_Addr v, bool rela) | ||
222 | { | ||
223 | return apply_r_mips_pc(me, location, base, v, 16); | ||
224 | } | ||
225 | |||
226 | static int apply_r_mips_pc21(struct module *me, u32 *location, | ||
227 | u32 base, Elf_Addr v, bool rela) | ||
228 | { | ||
229 | return apply_r_mips_pc(me, location, base, v, 21); | ||
230 | } | ||
231 | |||
232 | static int apply_r_mips_pc26(struct module *me, u32 *location, | ||
233 | u32 base, Elf_Addr v, bool rela) | ||
234 | { | ||
235 | return apply_r_mips_pc(me, location, base, v, 26); | ||
236 | } | ||
237 | |||
238 | static int apply_r_mips_64(struct module *me, u32 *location, | ||
239 | u32 base, Elf_Addr v, bool rela) | ||
240 | { | ||
241 | if (WARN_ON(!rela)) | ||
242 | return -EINVAL; | ||
243 | |||
244 | *(Elf_Addr *)location = v; | ||
245 | |||
246 | return 0; | ||
247 | } | ||
248 | |||
249 | static int apply_r_mips_higher(struct module *me, u32 *location, | ||
250 | u32 base, Elf_Addr v, bool rela) | ||
251 | { | ||
252 | if (WARN_ON(!rela)) | ||
253 | return -EINVAL; | ||
254 | |||
255 | *location = (*location & 0xffff0000) | | ||
256 | ((((long long)v + 0x80008000LL) >> 32) & 0xffff); | ||
257 | |||
258 | return 0; | ||
259 | } | ||
260 | |||
261 | static int apply_r_mips_highest(struct module *me, u32 *location, | ||
262 | u32 base, Elf_Addr v, bool rela) | ||
263 | { | ||
264 | if (WARN_ON(!rela)) | ||
265 | return -EINVAL; | ||
266 | |||
267 | *location = (*location & 0xffff0000) | | ||
268 | ((((long long)v + 0x800080008000LL) >> 48) & 0xffff); | ||
269 | |||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | /** | ||
274 | * reloc_handler() - Apply a particular relocation to a module | ||
275 | * @me: the module to apply the reloc to | ||
276 | * @location: the address at which the reloc is to be applied | ||
277 | * @base: the existing value at location for REL-style; 0 for RELA-style | ||
278 | * @v: the value of the reloc, with addend for RELA-style | ||
279 | * | ||
280 | * Each implemented reloc_handler function applies a particular type of | ||
281 | * relocation to the module @me. Relocs that may be found in either REL or RELA | ||
282 | * variants can be handled by making use of the @base & @v parameters which are | ||
283 | * set to values which abstract the difference away from the particular reloc | ||
284 | * implementations. | ||
285 | * | ||
286 | * Return: 0 upon success, else -ERRNO | ||
287 | */ | ||
288 | typedef int (*reloc_handler)(struct module *me, u32 *location, | ||
289 | u32 base, Elf_Addr v, bool rela); | ||
290 | |||
291 | /* The handlers for known reloc types */ | ||
292 | static reloc_handler reloc_handlers[] = { | ||
293 | [R_MIPS_NONE] = apply_r_mips_none, | ||
294 | [R_MIPS_32] = apply_r_mips_32, | ||
295 | [R_MIPS_26] = apply_r_mips_26, | ||
296 | [R_MIPS_HI16] = apply_r_mips_hi16, | ||
297 | [R_MIPS_LO16] = apply_r_mips_lo16, | ||
298 | [R_MIPS_PC16] = apply_r_mips_pc16, | ||
299 | [R_MIPS_64] = apply_r_mips_64, | ||
300 | [R_MIPS_HIGHER] = apply_r_mips_higher, | ||
301 | [R_MIPS_HIGHEST] = apply_r_mips_highest, | ||
302 | [R_MIPS_PC21_S2] = apply_r_mips_pc21, | ||
303 | [R_MIPS_PC26_S2] = apply_r_mips_pc26, | ||
304 | }; | ||
305 | |||
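/*
 * How the dispatch in __apply_relocate() below abstracts REL vs RELA,
 * sketched as the two call shapes it produces:
 *
 *	// REL: the addend is whatever already sits in the instruction
 *	handler(me, location, *location, sym->st_value, false);
 *	// RELA: the addend travels in the relocation record itself
 *	handler(me, location, 0, sym->st_value + r.rela->r_addend, true);
 *
 * A handler such as apply_r_mips_32() can then compute "base + v"
 * without caring which variant supplied the addend.
 */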
306 | static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab, | ||
307 | unsigned int symindex, unsigned int relsec, | ||
308 | struct module *me, bool rela) | ||
309 | { | ||
310 | union { | ||
311 | Elf_Mips_Rel *rel; | ||
312 | Elf_Mips_Rela *rela; | ||
313 | } r; | ||
314 | reloc_handler handler; | ||
315 | Elf_Sym *sym; | ||
316 | u32 *location, base; | ||
317 | unsigned int i, type; | ||
318 | Elf_Addr v; | ||
319 | int err = 0; | ||
320 | size_t reloc_sz; | ||
321 | |||
322 | pr_debug("Applying relocate section %u to %u\n", relsec, | ||
323 | sechdrs[relsec].sh_info); | ||
324 | |||
325 | r.rel = (void *)sechdrs[relsec].sh_addr; | ||
326 | reloc_sz = rela ? sizeof(*r.rela) : sizeof(*r.rel); | ||
327 | me->arch.r_mips_hi16_list = NULL; | ||
328 | for (i = 0; i < sechdrs[relsec].sh_size / reloc_sz; i++) { | ||
329 | /* This is where to make the change */ | ||
330 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
331 | + r.rel->r_offset; | ||
332 | /* This is the symbol it is referring to */ | ||
333 | sym = (Elf_Sym *)sechdrs[symindex].sh_addr | ||
334 | + ELF_MIPS_R_SYM(*r.rel); | ||
335 | if (sym->st_value >= -MAX_ERRNO) { | ||
336 | /* Ignore unresolved weak symbol */ | ||
337 | if (ELF_ST_BIND(sym->st_info) == STB_WEAK) | ||
338 | continue; | ||
339 | pr_warn("%s: Unknown symbol %s\n", | ||
340 | me->name, strtab + sym->st_name); | ||
341 | err = -ENOENT; | ||
342 | goto out; | ||
343 | } | ||
344 | |||
345 | type = ELF_MIPS_R_TYPE(*r.rel); | ||
346 | if (type < ARRAY_SIZE(reloc_handlers)) | ||
347 | handler = reloc_handlers[type]; | ||
348 | else | ||
349 | handler = NULL; | ||
350 | |||
351 | if (!handler) { | ||
352 | pr_err("%s: Unknown relocation type %u\n", | ||
353 | me->name, type); | ||
354 | err = -EINVAL; | ||
355 | goto out; | ||
356 | } | ||
357 | |||
358 | if (rela) { | ||
359 | v = sym->st_value + r.rela->r_addend; | ||
360 | base = 0; | ||
361 | r.rela = &r.rela[1]; | ||
362 | } else { | ||
363 | v = sym->st_value; | ||
364 | base = *location; | ||
365 | r.rel = &r.rel[1]; | ||
366 | } | ||
367 | |||
368 | err = handler(me, location, base, v, rela); | ||
369 | if (err) | ||
370 | goto out; | ||
371 | } | ||
372 | |||
373 | out: | ||
374 | /* | ||
375 | * Normally the hi16 list should be deallocated at this point. A | ||
376 | * malformed binary however could contain a series of R_MIPS_HI16 | ||
377 | * relocations not followed by a R_MIPS_LO16 relocation, or if we hit | ||
378 | * an error processing a reloc we might have gotten here before | ||
379 | * reaching the R_MIPS_LO16. In either case, free up the list and | ||
380 | * return an error. | ||
381 | */ | ||
382 | if (me->arch.r_mips_hi16_list) { | ||
383 | free_relocation_chain(me->arch.r_mips_hi16_list); | ||
384 | me->arch.r_mips_hi16_list = NULL; | ||
385 | err = err ?: -ENOEXEC; | ||
386 | } | ||
387 | |||
388 | return err; | ||
389 | } | ||
390 | |||
391 | int apply_relocate(Elf_Shdr *sechdrs, const char *strtab, | ||
392 | unsigned int symindex, unsigned int relsec, | ||
393 | struct module *me) | ||
394 | { | ||
395 | return __apply_relocate(sechdrs, strtab, symindex, relsec, me, false); | ||
396 | } | ||
397 | |||
398 | #ifdef CONFIG_MODULES_USE_ELF_RELA | ||
399 | int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, | ||
400 | unsigned int symindex, unsigned int relsec, | ||
401 | struct module *me) | ||
402 | { | ||
403 | return __apply_relocate(sechdrs, strtab, symindex, relsec, me, true); | ||
404 | } | ||
405 | #endif /* CONFIG_MODULES_USE_ELF_RELA */ | ||
406 | |||
407 | /* Given an address, look for it in the module exception tables. */ | ||
408 | const struct exception_table_entry *search_module_dbetables(unsigned long addr) | ||
409 | { | ||
410 | unsigned long flags; | ||
411 | const struct exception_table_entry *e = NULL; | ||
412 | struct mod_arch_specific *dbe; | ||
413 | |||
414 | spin_lock_irqsave(&dbe_lock, flags); | ||
415 | list_for_each_entry(dbe, &dbe_list, dbe_list) { | ||
416 | e = search_extable(dbe->dbe_start, | ||
417 | dbe->dbe_end - dbe->dbe_start, addr); | ||
418 | if (e) | ||
419 | break; | ||
420 | } | ||
421 | spin_unlock_irqrestore(&dbe_lock, flags); | ||
422 | |||
423 | /* If we found one, we are currently running inside it, so the | ||
424 | module cannot be unloaded and no refcount is needed. */ | ||
425 | return e; | ||
426 | } | ||
427 | |||
428 | /* Put in dbe list if necessary. */ | ||
429 | int module_finalize(const Elf_Ehdr *hdr, | ||
430 | const Elf_Shdr *sechdrs, | ||
431 | struct module *me) | ||
432 | { | ||
433 | const Elf_Shdr *s; | ||
434 | char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
435 | |||
436 | /* Make jump label nops. */ | ||
437 | jump_label_apply_nops(me); | ||
438 | |||
439 | INIT_LIST_HEAD(&me->arch.dbe_list); | ||
440 | for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) { | ||
441 | if (strcmp("__dbe_table", secstrings + s->sh_name) != 0) | ||
442 | continue; | ||
443 | me->arch.dbe_start = (void *)s->sh_addr; | ||
444 | me->arch.dbe_end = (void *)s->sh_addr + s->sh_size; | ||
445 | spin_lock_irq(&dbe_lock); | ||
446 | list_add(&me->arch.dbe_list, &dbe_list); | ||
447 | spin_unlock_irq(&dbe_lock); | ||
448 | } | ||
449 | return 0; | ||
450 | } | ||
451 | |||
452 | void module_arch_cleanup(struct module *mod) | ||
453 | { | ||
454 | spin_lock_irq(&dbe_lock); | ||
455 | list_del(&mod->arch.dbe_list); | ||
456 | spin_unlock_irq(&dbe_lock); | ||
457 | } | ||
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S new file mode 100644 index 000000000..896080b44 --- /dev/null +++ b/arch/mips/kernel/octeon_switch.S | |||
@@ -0,0 +1,554 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle | ||
7 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) | ||
8 | * Copyright (C) 1994, 1995, 1996, by Andreas Busse | ||
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | ||
10 | * Copyright (C) 2000 MIPS Technologies, Inc. | ||
11 | * written by Carsten Langgaard, carstenl@mips.com | ||
12 | */ | ||
13 | #include <asm/asm.h> | ||
14 | #include <asm/export.h> | ||
15 | #include <asm/asm-offsets.h> | ||
16 | #include <asm/mipsregs.h> | ||
17 | #include <asm/regdef.h> | ||
18 | #include <asm/stackframe.h> | ||
19 | |||
20 | /* | ||
21 | * task_struct *resume(task_struct *prev, task_struct *next, | ||
22 | * struct thread_info *next_ti) | ||
23 | */ | ||
24 | .align 7 | ||
25 | LEAF(resume) | ||
26 | .set arch=octeon | ||
27 | mfc0 t1, CP0_STATUS | ||
28 | LONG_S t1, THREAD_STATUS(a0) | ||
29 | cpu_save_nonscratch a0 | ||
30 | LONG_S ra, THREAD_REG31(a0) | ||
31 | |||
32 | #if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0 | ||
33 | /* Check if we need to store CVMSEG state */ | ||
34 | dmfc0 t0, $11,7 /* CvmMemCtl */ | ||
35 | bbit0 t0, 6, 3f /* Is user access enabled? */ | ||
36 | |||
37 | /* Store the CVMSEG state */ | ||
38 | /* Extract the size of CVMSEG */ | ||
39 | andi t0, 0x3f | ||
40 | /* Multiply by (cache line size / sizeof(long) / 2) */ | ||
41 | sll t0, 7-LONGLOG-1 | ||
42 | li t1, -32768 /* Base address of CVMSEG */ | ||
43 | LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */ | ||
44 | synciobdma | ||
45 | 2: | ||
46 | .set noreorder | ||
47 | LONG_L t8, 0(t1) /* Load from CVMSEG */ | ||
48 | subu t0, 1 /* Decrement loop var */ | ||
49 | LONG_L t9, LONGSIZE(t1) /* Load from CVMSEG */ | ||
50 | LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */ | ||
51 | LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */ | ||
52 | LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */ | ||
53 | bnez t0, 2b /* Loop until we've copied it all */ | ||
54 | LONG_S t9, -LONGSIZE(t2) /* Store CVMSEG to thread storage */ | ||
55 | .set reorder | ||
56 | |||
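
Worked numbers for the copy loop above: the low six bits of CvmMemCtl give the CVMSEG size in 128-byte cache lines (the 7 in the shift is log2(128)). On a 64-bit kernel LONGLOG = 3, so the shift is 7 - 3 - 1 = 3 and t0 = lines * 8; each pass through the loop copies two longs (16 bytes), and 8 passes * 16 bytes covers exactly one 128-byte line.
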
57 | /* Disable access to CVMSEG */ | ||
58 | dmfc0 t0, $11,7 /* CvmMemCtl */ | ||
59 | xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */ | ||
60 | dmtc0 t0, $11,7 /* CvmMemCtl */ | ||
61 | #endif | ||
62 | 3: | ||
63 | |||
64 | #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) | ||
65 | PTR_LA t8, __stack_chk_guard | ||
66 | LONG_L t9, TASK_STACK_CANARY(a1) | ||
67 | LONG_S t9, 0(t8) | ||
68 | #endif | ||
69 | |||
70 | /* | ||
71 | * The order in which the registers are restored avoids the race | ||
72 | * when updating $28, $29 and kernelsp without disabling interrupts. | ||
73 | */ | ||
74 | move $28, a2 | ||
75 | cpu_restore_nonscratch a1 | ||
76 | |||
77 | PTR_ADDU t0, $28, _THREAD_SIZE - 32 | ||
78 | set_saved_sp t0, t1, t2 | ||
79 | |||
80 | mfc0 t1, CP0_STATUS /* Do we really need this? */ | ||
81 | li a3, 0xff01 | ||
82 | and t1, a3 | ||
83 | LONG_L a2, THREAD_STATUS(a1) | ||
84 | nor a3, $0, a3 | ||
85 | and a2, a3 | ||
86 | or a2, t1 | ||
87 | mtc0 a2, CP0_STATUS | ||
88 | move v0, a0 | ||
89 | jr ra | ||
90 | END(resume) | ||
91 | |||
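
For context, resume() is reached through the architecture's switch_to() machinery. A hedged sketch of the call site, loosely modeled on arch/mips/include/asm/switch_to.h (the real macro also hands Octeon COP2 state to octeon_cop2_save() below, among other things omitted here):

    #define switch_to_sketch(prev, next, last)                          \
    do {                                                                \
            /* resume() saves prev's context, installs next's stack */  \
            /* and Status register, and returns prev (in v0).       */  \
            (last) = resume(prev, next, task_thread_info(next));        \
    } while (0)
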
92 | /* | ||
93 | * void octeon_cop2_save(struct octeon_cop2_state *a0) | ||
94 | */ | ||
95 | .align 7 | ||
96 | .set push | ||
97 | .set noreorder | ||
98 | LEAF(octeon_cop2_save) | ||
99 | |||
100 | dmfc0 t9, $9,7 /* CvmCtl register. */ | ||
101 | |||
102 | /* Save the COP2 CRC state */ | ||
103 | dmfc2 t0, 0x0201 | ||
104 | dmfc2 t1, 0x0202 | ||
105 | dmfc2 t2, 0x0200 | ||
106 | sd t0, OCTEON_CP2_CRC_IV(a0) | ||
107 | sd t1, OCTEON_CP2_CRC_LENGTH(a0) | ||
108 | /* Skip next instructions if CvmCtl[NODFA_CP2] set */ | ||
109 | bbit1 t9, 28, 1f | ||
110 | sd t2, OCTEON_CP2_CRC_POLY(a0) | ||
111 | |||
112 | /* Save the LLM state */ | ||
113 | dmfc2 t0, 0x0402 | ||
114 | dmfc2 t1, 0x040A | ||
115 | sd t0, OCTEON_CP2_LLM_DAT(a0) | ||
116 | |||
117 | 1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */ | ||
118 | sd t1, OCTEON_CP2_LLM_DAT+8(a0) | ||
119 | |||
120 | /* Save the COP2 crypto state */ | ||
121 | /* this part is mostly common to both pass 1 and later revisions */ | ||
122 | dmfc2 t0, 0x0084 | ||
123 | dmfc2 t1, 0x0080 | ||
124 | dmfc2 t2, 0x0081 | ||
125 | dmfc2 t3, 0x0082 | ||
126 | sd t0, OCTEON_CP2_3DES_IV(a0) | ||
127 | dmfc2 t0, 0x0088 | ||
128 | sd t1, OCTEON_CP2_3DES_KEY(a0) | ||
129 | dmfc2 t1, 0x0111 /* only necessary for pass 1 */ | ||
130 | sd t2, OCTEON_CP2_3DES_KEY+8(a0) | ||
131 | dmfc2 t2, 0x0102 | ||
132 | sd t3, OCTEON_CP2_3DES_KEY+16(a0) | ||
133 | dmfc2 t3, 0x0103 | ||
134 | sd t0, OCTEON_CP2_3DES_RESULT(a0) | ||
135 | dmfc2 t0, 0x0104 | ||
136 | sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */ | ||
137 | dmfc2 t1, 0x0105 | ||
138 | sd t2, OCTEON_CP2_AES_IV(a0) | ||
139 | dmfc2 t2, 0x0106 | ||
140 | sd t3, OCTEON_CP2_AES_IV+8(a0) | ||
141 | dmfc2 t3, 0x0107 | ||
142 | sd t0, OCTEON_CP2_AES_KEY(a0) | ||
143 | dmfc2 t0, 0x0110 | ||
144 | sd t1, OCTEON_CP2_AES_KEY+8(a0) | ||
145 | dmfc2 t1, 0x0100 | ||
146 | sd t2, OCTEON_CP2_AES_KEY+16(a0) | ||
147 | dmfc2 t2, 0x0101 | ||
148 | sd t3, OCTEON_CP2_AES_KEY+24(a0) | ||
149 | mfc0 v0, $15,0 /* Get the processor ID register */ | ||
150 | sd t0, OCTEON_CP2_AES_KEYLEN(a0) | ||
151 | li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ | ||
152 | sd t1, OCTEON_CP2_AES_RESULT(a0) | ||
153 | /* Skip to the Pass1 version of the remainder of the COP2 state */ | ||
154 | beq v0, v1, 2f | ||
155 | sd t2, OCTEON_CP2_AES_RESULT+8(a0) | ||
156 | |||
157 | /* the non-pass1 state when !CvmCtl[NOCRYPTO] */ | ||
158 | dmfc2 t1, 0x0240 | ||
159 | dmfc2 t2, 0x0241 | ||
160 | ori v1, v1, 0x9500 /* lowest OCTEON III PrId */ | ||
161 | dmfc2 t3, 0x0242 | ||
162 | subu v1, v0, v1 /* prid - lowest OCTEON III PrId */ | ||
163 | dmfc2 t0, 0x0243 | ||
164 | sd t1, OCTEON_CP2_HSH_DATW(a0) | ||
165 | dmfc2 t1, 0x0244 | ||
166 | sd t2, OCTEON_CP2_HSH_DATW+8(a0) | ||
167 | dmfc2 t2, 0x0245 | ||
168 | sd t3, OCTEON_CP2_HSH_DATW+16(a0) | ||
169 | dmfc2 t3, 0x0246 | ||
170 | sd t0, OCTEON_CP2_HSH_DATW+24(a0) | ||
171 | dmfc2 t0, 0x0247 | ||
172 | sd t1, OCTEON_CP2_HSH_DATW+32(a0) | ||
173 | dmfc2 t1, 0x0248 | ||
174 | sd t2, OCTEON_CP2_HSH_DATW+40(a0) | ||
175 | dmfc2 t2, 0x0249 | ||
176 | sd t3, OCTEON_CP2_HSH_DATW+48(a0) | ||
177 | dmfc2 t3, 0x024A | ||
178 | sd t0, OCTEON_CP2_HSH_DATW+56(a0) | ||
179 | dmfc2 t0, 0x024B | ||
180 | sd t1, OCTEON_CP2_HSH_DATW+64(a0) | ||
181 | dmfc2 t1, 0x024C | ||
182 | sd t2, OCTEON_CP2_HSH_DATW+72(a0) | ||
183 | dmfc2 t2, 0x024D | ||
184 | sd t3, OCTEON_CP2_HSH_DATW+80(a0) | ||
185 | dmfc2 t3, 0x024E | ||
186 | sd t0, OCTEON_CP2_HSH_DATW+88(a0) | ||
187 | dmfc2 t0, 0x0250 | ||
188 | sd t1, OCTEON_CP2_HSH_DATW+96(a0) | ||
189 | dmfc2 t1, 0x0251 | ||
190 | sd t2, OCTEON_CP2_HSH_DATW+104(a0) | ||
191 | dmfc2 t2, 0x0252 | ||
192 | sd t3, OCTEON_CP2_HSH_DATW+112(a0) | ||
193 | dmfc2 t3, 0x0253 | ||
194 | sd t0, OCTEON_CP2_HSH_IVW(a0) | ||
195 | dmfc2 t0, 0x0254 | ||
196 | sd t1, OCTEON_CP2_HSH_IVW+8(a0) | ||
197 | dmfc2 t1, 0x0255 | ||
198 | sd t2, OCTEON_CP2_HSH_IVW+16(a0) | ||
199 | dmfc2 t2, 0x0256 | ||
200 | sd t3, OCTEON_CP2_HSH_IVW+24(a0) | ||
201 | dmfc2 t3, 0x0257 | ||
202 | sd t0, OCTEON_CP2_HSH_IVW+32(a0) | ||
203 | dmfc2 t0, 0x0258 | ||
204 | sd t1, OCTEON_CP2_HSH_IVW+40(a0) | ||
205 | dmfc2 t1, 0x0259 | ||
206 | sd t2, OCTEON_CP2_HSH_IVW+48(a0) | ||
207 | dmfc2 t2, 0x025E | ||
208 | sd t3, OCTEON_CP2_HSH_IVW+56(a0) | ||
209 | dmfc2 t3, 0x025A | ||
210 | sd t0, OCTEON_CP2_GFM_MULT(a0) | ||
211 | dmfc2 t0, 0x025B | ||
212 | sd t1, OCTEON_CP2_GFM_MULT+8(a0) | ||
213 | sd t2, OCTEON_CP2_GFM_POLY(a0) | ||
214 | sd t3, OCTEON_CP2_GFM_RESULT(a0) | ||
215 | bltz v1, 4f | ||
216 | sd t0, OCTEON_CP2_GFM_RESULT+8(a0) | ||
217 | /* OCTEON III things */ | ||
218 | dmfc2 t0, 0x024F | ||
219 | dmfc2 t1, 0x0050 | ||
220 | sd t0, OCTEON_CP2_SHA3(a0) | ||
221 | sd t1, OCTEON_CP2_SHA3+8(a0) | ||
222 | 4: | ||
223 | jr ra | ||
224 | nop | ||
225 | |||
226 | 2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */ | ||
227 | dmfc2 t3, 0x0040 | ||
228 | dmfc2 t0, 0x0041 | ||
229 | dmfc2 t1, 0x0042 | ||
230 | dmfc2 t2, 0x0043 | ||
231 | sd t3, OCTEON_CP2_HSH_DATW(a0) | ||
232 | dmfc2 t3, 0x0044 | ||
233 | sd t0, OCTEON_CP2_HSH_DATW+8(a0) | ||
234 | dmfc2 t0, 0x0045 | ||
235 | sd t1, OCTEON_CP2_HSH_DATW+16(a0) | ||
236 | dmfc2 t1, 0x0046 | ||
237 | sd t2, OCTEON_CP2_HSH_DATW+24(a0) | ||
238 | dmfc2 t2, 0x0048 | ||
239 | sd t3, OCTEON_CP2_HSH_DATW+32(a0) | ||
240 | dmfc2 t3, 0x0049 | ||
241 | sd t0, OCTEON_CP2_HSH_DATW+40(a0) | ||
242 | dmfc2 t0, 0x004A | ||
243 | sd t1, OCTEON_CP2_HSH_DATW+48(a0) | ||
244 | sd t2, OCTEON_CP2_HSH_IVW(a0) | ||
245 | sd t3, OCTEON_CP2_HSH_IVW+8(a0) | ||
246 | sd t0, OCTEON_CP2_HSH_IVW+16(a0) | ||
247 | |||
248 | 3: /* pass 1 or CvmCtl[NOCRYPTO] set */ | ||
249 | jr ra | ||
250 | nop | ||
251 | END(octeon_cop2_save) | ||
252 | .set pop | ||
253 | |||
254 | /* | ||
255 | * void octeon_cop2_restore(struct octeon_cop2_state *a0) | ||
256 | */ | ||
257 | .align 7 | ||
258 | .set push | ||
259 | .set noreorder | ||
260 | LEAF(octeon_cop2_restore) | ||
261 | /* First cache line was prefetched before the call */ | ||
262 | pref 4, 128(a0) | ||
263 | dmfc0 t9, $9,7 /* CvmCtl register. */ | ||
264 | |||
265 | pref 4, 256(a0) | ||
266 | ld t0, OCTEON_CP2_CRC_IV(a0) | ||
267 | pref 4, 384(a0) | ||
268 | ld t1, OCTEON_CP2_CRC_LENGTH(a0) | ||
269 | ld t2, OCTEON_CP2_CRC_POLY(a0) | ||
270 | |||
271 | /* Restore the COP2 CRC state */ | ||
272 | dmtc2 t0, 0x0201 | ||
273 | dmtc2 t1, 0x1202 | ||
274 | bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */ | ||
275 | dmtc2 t2, 0x4200 | ||
276 | |||
277 | /* Restore the LLM state */ | ||
278 | ld t0, OCTEON_CP2_LLM_DAT(a0) | ||
279 | ld t1, OCTEON_CP2_LLM_DAT+8(a0) | ||
280 | dmtc2 t0, 0x0402 | ||
281 | dmtc2 t1, 0x040A | ||
282 | |||
283 | 2: | ||
284 | bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */ | ||
285 | nop | ||
286 | |||
287 | /* Restore the COP2 crypto state common to pass 1 and pass 2 */ | ||
288 | ld t0, OCTEON_CP2_3DES_IV(a0) | ||
289 | ld t1, OCTEON_CP2_3DES_KEY(a0) | ||
290 | ld t2, OCTEON_CP2_3DES_KEY+8(a0) | ||
291 | dmtc2 t0, 0x0084 | ||
292 | ld t0, OCTEON_CP2_3DES_KEY+16(a0) | ||
293 | dmtc2 t1, 0x0080 | ||
294 | ld t1, OCTEON_CP2_3DES_RESULT(a0) | ||
295 | dmtc2 t2, 0x0081 | ||
296 | ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */ | ||
297 | dmtc2 t0, 0x0082 | ||
298 | ld t0, OCTEON_CP2_AES_IV(a0) | ||
299 | dmtc2 t1, 0x0098 | ||
300 | ld t1, OCTEON_CP2_AES_IV+8(a0) | ||
301 | dmtc2 t2, 0x010A /* only really needed for pass 1 */ | ||
302 | ld t2, OCTEON_CP2_AES_KEY(a0) | ||
303 | dmtc2 t0, 0x0102 | ||
304 | ld t0, OCTEON_CP2_AES_KEY+8(a0) | ||
305 | dmtc2 t1, 0x0103 | ||
306 | ld t1, OCTEON_CP2_AES_KEY+16(a0) | ||
307 | dmtc2 t2, 0x0104 | ||
308 | ld t2, OCTEON_CP2_AES_KEY+24(a0) | ||
309 | dmtc2 t0, 0x0105 | ||
310 | ld t0, OCTEON_CP2_AES_KEYLEN(a0) | ||
311 | dmtc2 t1, 0x0106 | ||
312 | ld t1, OCTEON_CP2_AES_RESULT(a0) | ||
313 | dmtc2 t2, 0x0107 | ||
314 | ld t2, OCTEON_CP2_AES_RESULT+8(a0) | ||
315 | mfc0 t3, $15,0 /* Get the processor ID register */ | ||
316 | dmtc2 t0, 0x0110 | ||
317 | li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */ | ||
318 | dmtc2 t1, 0x0100 | ||
319 | bne v0, t3, 3f /* Skip the next stuff for non-pass1 */ | ||
320 | dmtc2 t2, 0x0101 | ||
321 | |||
322 | /* this code is specific for pass 1 */ | ||
323 | ld t0, OCTEON_CP2_HSH_DATW(a0) | ||
324 | ld t1, OCTEON_CP2_HSH_DATW+8(a0) | ||
325 | ld t2, OCTEON_CP2_HSH_DATW+16(a0) | ||
326 | dmtc2 t0, 0x0040 | ||
327 | ld t0, OCTEON_CP2_HSH_DATW+24(a0) | ||
328 | dmtc2 t1, 0x0041 | ||
329 | ld t1, OCTEON_CP2_HSH_DATW+32(a0) | ||
330 | dmtc2 t2, 0x0042 | ||
331 | ld t2, OCTEON_CP2_HSH_DATW+40(a0) | ||
332 | dmtc2 t0, 0x0043 | ||
333 | ld t0, OCTEON_CP2_HSH_DATW+48(a0) | ||
334 | dmtc2 t1, 0x0044 | ||
335 | ld t1, OCTEON_CP2_HSH_IVW(a0) | ||
336 | dmtc2 t2, 0x0045 | ||
337 | ld t2, OCTEON_CP2_HSH_IVW+8(a0) | ||
338 | dmtc2 t0, 0x0046 | ||
339 | ld t0, OCTEON_CP2_HSH_IVW+16(a0) | ||
340 | dmtc2 t1, 0x0048 | ||
341 | dmtc2 t2, 0x0049 | ||
342 | b done_restore /* unconditional branch */ | ||
343 | dmtc2 t0, 0x004A | ||
344 | |||
345 | 3: /* this is post-pass1 code */ | ||
346 | ld t2, OCTEON_CP2_HSH_DATW(a0) | ||
347 | ori v0, v0, 0x9500 /* lowest OCTEON III PrId */ | ||
348 | ld t0, OCTEON_CP2_HSH_DATW+8(a0) | ||
349 | ld t1, OCTEON_CP2_HSH_DATW+16(a0) | ||
350 | dmtc2 t2, 0x0240 | ||
351 | ld t2, OCTEON_CP2_HSH_DATW+24(a0) | ||
352 | dmtc2 t0, 0x0241 | ||
353 | ld t0, OCTEON_CP2_HSH_DATW+32(a0) | ||
354 | dmtc2 t1, 0x0242 | ||
355 | ld t1, OCTEON_CP2_HSH_DATW+40(a0) | ||
356 | dmtc2 t2, 0x0243 | ||
357 | ld t2, OCTEON_CP2_HSH_DATW+48(a0) | ||
358 | dmtc2 t0, 0x0244 | ||
359 | ld t0, OCTEON_CP2_HSH_DATW+56(a0) | ||
360 | dmtc2 t1, 0x0245 | ||
361 | ld t1, OCTEON_CP2_HSH_DATW+64(a0) | ||
362 | dmtc2 t2, 0x0246 | ||
363 | ld t2, OCTEON_CP2_HSH_DATW+72(a0) | ||
364 | dmtc2 t0, 0x0247 | ||
365 | ld t0, OCTEON_CP2_HSH_DATW+80(a0) | ||
366 | dmtc2 t1, 0x0248 | ||
367 | ld t1, OCTEON_CP2_HSH_DATW+88(a0) | ||
368 | dmtc2 t2, 0x0249 | ||
369 | ld t2, OCTEON_CP2_HSH_DATW+96(a0) | ||
370 | dmtc2 t0, 0x024A | ||
371 | ld t0, OCTEON_CP2_HSH_DATW+104(a0) | ||
372 | dmtc2 t1, 0x024B | ||
373 | ld t1, OCTEON_CP2_HSH_DATW+112(a0) | ||
374 | dmtc2 t2, 0x024C | ||
375 | ld t2, OCTEON_CP2_HSH_IVW(a0) | ||
376 | dmtc2 t0, 0x024D | ||
377 | ld t0, OCTEON_CP2_HSH_IVW+8(a0) | ||
378 | dmtc2 t1, 0x024E | ||
379 | ld t1, OCTEON_CP2_HSH_IVW+16(a0) | ||
380 | dmtc2 t2, 0x0250 | ||
381 | ld t2, OCTEON_CP2_HSH_IVW+24(a0) | ||
382 | dmtc2 t0, 0x0251 | ||
383 | ld t0, OCTEON_CP2_HSH_IVW+32(a0) | ||
384 | dmtc2 t1, 0x0252 | ||
385 | ld t1, OCTEON_CP2_HSH_IVW+40(a0) | ||
386 | dmtc2 t2, 0x0253 | ||
387 | ld t2, OCTEON_CP2_HSH_IVW+48(a0) | ||
388 | dmtc2 t0, 0x0254 | ||
389 | ld t0, OCTEON_CP2_HSH_IVW+56(a0) | ||
390 | dmtc2 t1, 0x0255 | ||
391 | ld t1, OCTEON_CP2_GFM_MULT(a0) | ||
392 | dmtc2 t2, 0x0256 | ||
393 | ld t2, OCTEON_CP2_GFM_MULT+8(a0) | ||
394 | dmtc2 t0, 0x0257 | ||
395 | ld t0, OCTEON_CP2_GFM_POLY(a0) | ||
396 | dmtc2 t1, 0x0258 | ||
397 | ld t1, OCTEON_CP2_GFM_RESULT(a0) | ||
398 | dmtc2 t2, 0x0259 | ||
399 | ld t2, OCTEON_CP2_GFM_RESULT+8(a0) | ||
400 | dmtc2 t0, 0x025E | ||
401 | subu v0, t3, v0 /* prid - lowest OCTEON III PrId */ | ||
402 | dmtc2 t1, 0x025A | ||
403 | bltz v0, done_restore | ||
404 | dmtc2 t2, 0x025B | ||
405 | /* OCTEON III things */ | ||
406 | ld t0, OCTEON_CP2_SHA3(a0) | ||
407 | ld t1, OCTEON_CP2_SHA3+8(a0) | ||
408 | dmtc2 t0, 0x0051 | ||
409 | dmtc2 t1, 0x0050 | ||
410 | done_restore: | ||
411 | jr ra | ||
412 | nop | ||
413 | END(octeon_cop2_restore) | ||
414 | .set pop | ||
415 | |||
416 | /* | ||
417 | * void octeon_mult_save() | ||
418 | * sp is assumed to point to a struct pt_regs | ||
419 | * | ||
420 | * NOTE: This is called in SAVE_TEMP in stackframe.h. It can | ||
421 | * safely modify v1, k0, k1, $10-$15, and $24. It will | ||
422 | * be overwritten with a processor specific version of the code. | ||
423 | */ | ||
424 | .p2align 7 | ||
425 | .set push | ||
426 | .set noreorder | ||
427 | LEAF(octeon_mult_save) | ||
428 | jr ra | ||
429 | nop | ||
430 | .space 30 * 4, 0 | ||
431 | octeon_mult_save_end: | ||
432 | EXPORT(octeon_mult_save_end) | ||
433 | END(octeon_mult_save) | ||
434 | |||
435 | LEAF(octeon_mult_save2) | ||
436 | /* Save the multiplier state, OCTEON II and earlier */ | ||
437 | v3mulu k0, $0, $0 | ||
438 | v3mulu k1, $0, $0 | ||
439 | sd k0, PT_MTP(sp) /* PT_MTP has P0 */ | ||
440 | v3mulu k0, $0, $0 | ||
441 | sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */ | ||
442 | ori k1, $0, 1 | ||
443 | v3mulu k1, k1, $0 | ||
444 | sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */ | ||
445 | v3mulu k0, $0, $0 | ||
446 | sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */ | ||
447 | v3mulu k1, $0, $0 | ||
448 | sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */ | ||
449 | jr ra | ||
450 | sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */ | ||
451 | octeon_mult_save2_end: | ||
452 | EXPORT(octeon_mult_save2_end) | ||
453 | END(octeon_mult_save2) | ||
454 | |||
455 | LEAF(octeon_mult_save3) | ||
456 | /* Save the multiplier state, OCTEON III */ | ||
457 | v3mulu $10, $0, $0 /* read P0 */ | ||
458 | v3mulu $11, $0, $0 /* read P1 */ | ||
459 | v3mulu $12, $0, $0 /* read P2 */ | ||
460 | sd $10, PT_MTP+(0*8)(sp) /* store P0 */ | ||
461 | v3mulu $10, $0, $0 /* read P3 */ | ||
462 | sd $11, PT_MTP+(1*8)(sp) /* store P1 */ | ||
463 | v3mulu $11, $0, $0 /* read P4 */ | ||
464 | sd $12, PT_MTP+(2*8)(sp) /* store P2 */ | ||
465 | ori $13, $0, 1 | ||
466 | v3mulu $12, $0, $0 /* read P5 */ | ||
467 | sd $10, PT_MTP+(3*8)(sp) /* store P3 */ | ||
468 | v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */ | ||
469 | sd $11, PT_MTP+(4*8)(sp) /* store P4 */ | ||
470 | v3mulu $10, $0, $0 /* read MPL1 */ | ||
471 | sd $12, PT_MTP+(5*8)(sp) /* store P5 */ | ||
472 | v3mulu $11, $0, $0 /* read MPL2 */ | ||
473 | sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */ | ||
474 | v3mulu $12, $0, $0 /* read MPL3 */ | ||
475 | sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */ | ||
476 | v3mulu $10, $0, $0 /* read MPL4 */ | ||
477 | sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */ | ||
478 | v3mulu $11, $0, $0 /* read MPL5 */ | ||
479 | sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */ | ||
480 | sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */ | ||
481 | jr ra | ||
482 | sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */ | ||
483 | octeon_mult_save3_end: | ||
484 | EXPORT(octeon_mult_save3_end) | ||
485 | END(octeon_mult_save3) | ||
486 | .set pop | ||
487 | |||
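
The NOTE above says the generic octeon_mult_save stub is overwritten at boot with a processor-specific body; the exported *_end labels exist so a patcher can size the copy. A hedged sketch of such a boot-time hook (the real one lives in the Octeon platform setup code; the function name, extern declarations and cache-flush call are illustrative assumptions):

    extern u32 octeon_mult_save;
    extern u32 octeon_mult_save2, octeon_mult_save2_end;
    extern u32 octeon_mult_save3, octeon_mult_save3_end;

    static void __init patch_mult_save_sketch(bool octeon3)
    {
            u32 *src = octeon3 ? &octeon_mult_save3 : &octeon_mult_save2;
            u32 *end = octeon3 ? &octeon_mult_save3_end : &octeon_mult_save2_end;
            size_t len = (end - src) * sizeof(u32);

            /* Copy the model-specific body over the generic stub,
             * then make it visible to instruction fetch. */
            memcpy(&octeon_mult_save, src, len);
            flush_icache_range((unsigned long)&octeon_mult_save,
                               (unsigned long)&octeon_mult_save + len);
    }
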
488 | /* | ||
489 | * void octeon_mult_restore() | ||
490 | * sp is assumed to point to a struct pt_regs | ||
491 | * | ||
492 | * NOTE: This is called in RESTORE_TEMP in stackframe.h. | ||
493 | */ | ||
494 | .p2align 7 | ||
495 | .set push | ||
496 | .set noreorder | ||
497 | LEAF(octeon_mult_restore) | ||
498 | jr ra | ||
499 | nop | ||
500 | .space 30 * 4, 0 | ||
501 | octeon_mult_restore_end: | ||
502 | EXPORT(octeon_mult_restore_end) | ||
503 | END(octeon_mult_restore) | ||
504 | |||
505 | LEAF(octeon_mult_restore2) | ||
506 | ld v0, PT_MPL(sp) /* MPL0 */ | ||
507 | ld v1, PT_MPL+8(sp) /* MPL1 */ | ||
508 | ld k0, PT_MPL+16(sp) /* MPL2 */ | ||
509 | /* Restore the multiplier state */ | ||
510 | ld k1, PT_MTP+16(sp) /* P2 */ | ||
511 | mtm0 v0 /* MPL0 */ | ||
512 | ld v0, PT_MTP+8(sp) /* P1 */ | ||
513 | mtm1 v1 /* MPL1 */ | ||
514 | ld v1, PT_MTP(sp) /* P0 */ | ||
515 | mtm2 k0 /* MPL2 */ | ||
516 | mtp2 k1 /* P2 */ | ||
517 | mtp1 v0 /* P1 */ | ||
518 | jr ra | ||
519 | mtp0 v1 /* P0 */ | ||
520 | octeon_mult_restore2_end: | ||
521 | EXPORT(octeon_mult_restore2_end) | ||
522 | END(octeon_mult_restore2) | ||
523 | |||
524 | LEAF(octeon_mult_restore3) | ||
525 | ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */ | ||
526 | ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */ | ||
527 | ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */ | ||
528 | ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */ | ||
529 | .word 0x718d0008 | ||
530 | /* mtm0 $12, $13 restore MPL0 and MPL3 */ | ||
531 | ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */ | ||
532 | .word 0x714b000c | ||
533 | /* mtm1 $10, $11 restore MPL1 and MPL4 */ | ||
534 | ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */ | ||
535 | ld $10, PT_MTP+(0*8)(sp) /* read P0 */ | ||
536 | ld $11, PT_MTP+(3*8)(sp) /* read P3 */ | ||
537 | .word 0x718d000d | ||
538 | /* mtm2 $12, $13 restore MPL2 and MPL5 */ | ||
539 | ld $12, PT_MTP+(1*8)(sp) /* read P1 */ | ||
540 | .word 0x714b0009 | ||
541 | /* mtp0 $10, $11 restore P0 and P3 */ | ||
542 | ld $13, PT_MTP+(4*8)(sp) /* read P4 */ | ||
543 | ld $10, PT_MTP+(2*8)(sp) /* read P2 */ | ||
544 | ld $11, PT_MTP+(5*8)(sp) /* read P5 */ | ||
545 | .word 0x718d000a | ||
546 | /* mtp1 $12, $13 restore P1 and P4 */ | ||
547 | jr ra | ||
548 | .word 0x714b000b | ||
549 | /* mtp2 $10, $11 restore P2 and P5 */ | ||
550 | |||
551 | octeon_mult_restore3_end: | ||
552 | EXPORT(octeon_mult_restore3_end) | ||
553 | END(octeon_mult_restore3) | ||
554 | .set pop | ||
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c new file mode 100644 index 000000000..5d7a9c039 --- /dev/null +++ b/arch/mips/kernel/perf_event.c | |||
@@ -0,0 +1,67 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * Linux performance counter support for MIPS. | ||
4 | * | ||
5 | * Copyright (C) 2010 MIPS Technologies, Inc. | ||
6 | * Author: Deng-Cheng Zhu | ||
7 | * | ||
8 | * This code is based on the implementation for ARM, which is in turn | ||
9 | * based on the sparc64 perf event code and the x86 code. Performance | ||
10 | * counter access is based on the MIPS Oprofile code. And the callchain | ||
11 | * support references the code of MIPS stacktrace.c. | ||
12 | */ | ||
13 | |||
14 | #include <linux/perf_event.h> | ||
15 | #include <linux/sched/task_stack.h> | ||
16 | |||
17 | #include <asm/stacktrace.h> | ||
18 | |||
19 | /* Callchain handling code. */ | ||
20 | |||
21 | /* | ||
22 | * Leave userspace callchain empty for now. When we find a way to trace | ||
23 | * the user stack callchains, we will add it here. | ||
24 | */ | ||
25 | |||
26 | static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry, | ||
27 | unsigned long reg29) | ||
28 | { | ||
29 | unsigned long *sp = (unsigned long *)reg29; | ||
30 | unsigned long addr; | ||
31 | |||
32 | while (!kstack_end(sp)) { | ||
33 | addr = *sp++; | ||
34 | if (__kernel_text_address(addr)) { | ||
35 | perf_callchain_store(entry, addr); | ||
36 | if (entry->nr >= entry->max_stack) | ||
37 | break; | ||
38 | } | ||
39 | } | ||
40 | } | ||
41 | |||
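
The scan above is a heuristic: it records every word on the kernel stack that looks like a kernel text address, so it needs no frame information but may pick up stale return addresses left over from earlier calls. The KALLSYMS path below prefers a proper unwind_stack() walk and falls back to this raw scan only when the PC is not in kernel text (or when raw_show_trace is set).
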
42 | void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, | ||
43 | struct pt_regs *regs) | ||
44 | { | ||
45 | unsigned long sp = regs->regs[29]; | ||
46 | #ifdef CONFIG_KALLSYMS | ||
47 | unsigned long ra = regs->regs[31]; | ||
48 | unsigned long pc = regs->cp0_epc; | ||
49 | |||
50 | if (raw_show_trace || !__kernel_text_address(pc)) { | ||
51 | unsigned long stack_page = | ||
52 | (unsigned long)task_stack_page(current); | ||
53 | if (stack_page && sp >= stack_page && | ||
54 | sp <= stack_page + THREAD_SIZE - 32) | ||
55 | save_raw_perf_callchain(entry, sp); | ||
56 | return; | ||
57 | } | ||
58 | do { | ||
59 | perf_callchain_store(entry, pc); | ||
60 | if (entry->nr >= entry->max_stack) | ||
61 | break; | ||
62 | pc = unwind_stack(current, &sp, pc, &ra); | ||
63 | } while (pc); | ||
64 | #else | ||
65 | save_raw_perf_callchain(entry, sp); | ||
66 | #endif | ||
67 | } | ||
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c new file mode 100644 index 000000000..011eb6bbf --- /dev/null +++ b/arch/mips/kernel/perf_event_mipsxx.c | |||
@@ -0,0 +1,2138 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * Linux performance counter support for MIPS. | ||
4 | * | ||
5 | * Copyright (C) 2010 MIPS Technologies, Inc. | ||
6 | * Copyright (C) 2011 Cavium Networks, Inc. | ||
7 | * Author: Deng-Cheng Zhu | ||
8 | * | ||
9 | * This code is based on the implementation for ARM, which is in turn | ||
10 | * based on the sparc64 perf event code and the x86 code. Performance | ||
11 | * counter access is based on the MIPS Oprofile code. And the callchain | ||
12 | * support references the code of MIPS stacktrace.c. | ||
13 | */ | ||
14 | |||
15 | #include <linux/cpumask.h> | ||
16 | #include <linux/interrupt.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/perf_event.h> | ||
20 | #include <linux/uaccess.h> | ||
21 | |||
22 | #include <asm/irq.h> | ||
23 | #include <asm/irq_regs.h> | ||
24 | #include <asm/stacktrace.h> | ||
25 | #include <asm/time.h> /* For perf_irq */ | ||
26 | |||
27 | #define MIPS_MAX_HWEVENTS 4 | ||
28 | #define MIPS_TCS_PER_COUNTER 2 | ||
29 | #define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1) | ||
30 | |||
31 | struct cpu_hw_events { | ||
32 | /* Array of events on this cpu. */ | ||
33 | struct perf_event *events[MIPS_MAX_HWEVENTS]; | ||
34 | |||
35 | /* | ||
36 | * Set the bit (indexed by the counter number) when the counter | ||
37 | * is used for an event. | ||
38 | */ | ||
39 | unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)]; | ||
40 | |||
41 | /* | ||
42 | * Software copy of the control register for each performance counter. | ||
43 | * MIPS CPUs vary in their performance counters: different CPUs use | ||
44 | * this differently, and some may not use it at all. | ||
45 | */ | ||
46 | unsigned int saved_ctrl[MIPS_MAX_HWEVENTS]; | ||
47 | }; | ||
48 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { | ||
49 | .saved_ctrl = {0}, | ||
50 | }; | ||
51 | |||
52 | /* The description of MIPS performance events. */ | ||
53 | struct mips_perf_event { | ||
54 | unsigned int event_id; | ||
55 | /* | ||
56 | * MIPS performance counters are indexed starting from 0. | ||
57 | * CNTR_EVEN indicates that the counters to be used have even | ||
58 | * indexes. | ||
59 | */ | ||
60 | unsigned int cntr_mask; | ||
61 | #define CNTR_EVEN 0x55555555 | ||
62 | #define CNTR_ODD 0xaaaaaaaa | ||
63 | #define CNTR_ALL 0xffffffff | ||
64 | enum { | ||
65 | T = 0, | ||
66 | V = 1, | ||
67 | P = 2, | ||
68 | } range; | ||
69 | }; | ||
70 | |||
71 | static struct mips_perf_event raw_event; | ||
72 | static DEFINE_MUTEX(raw_event_mutex); | ||
73 | |||
74 | #define C(x) PERF_COUNT_HW_CACHE_##x | ||
75 | |||
76 | struct mips_pmu { | ||
77 | u64 max_period; | ||
78 | u64 valid_count; | ||
79 | u64 overflow; | ||
80 | const char *name; | ||
81 | int irq; | ||
82 | u64 (*read_counter)(unsigned int idx); | ||
83 | void (*write_counter)(unsigned int idx, u64 val); | ||
84 | const struct mips_perf_event *(*map_raw_event)(u64 config); | ||
85 | const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX]; | ||
86 | const struct mips_perf_event (*cache_event_map) | ||
87 | [PERF_COUNT_HW_CACHE_MAX] | ||
88 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
89 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
90 | unsigned int num_counters; | ||
91 | }; | ||
92 | |||
93 | static int counter_bits; | ||
94 | static struct mips_pmu mipspmu; | ||
95 | |||
96 | #define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \ | ||
97 | MIPS_PERFCTRL_EVENT) | ||
98 | #define M_PERFCTL_VPEID(vpe) ((vpe) << MIPS_PERFCTRL_VPEID_S) | ||
99 | |||
100 | #ifdef CONFIG_CPU_BMIPS5000 | ||
101 | #define M_PERFCTL_MT_EN(filter) 0 | ||
102 | #else /* !CONFIG_CPU_BMIPS5000 */ | ||
103 | #define M_PERFCTL_MT_EN(filter) (filter) | ||
104 | #endif /* CONFIG_CPU_BMIPS5000 */ | ||
105 | |||
106 | #define M_TC_EN_ALL M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL) | ||
107 | #define M_TC_EN_VPE M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE) | ||
108 | #define M_TC_EN_TC M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC) | ||
109 | |||
110 | #define M_PERFCTL_COUNT_EVENT_WHENEVER (MIPS_PERFCTRL_EXL | \ | ||
111 | MIPS_PERFCTRL_K | \ | ||
112 | MIPS_PERFCTRL_U | \ | ||
113 | MIPS_PERFCTRL_S | \ | ||
114 | MIPS_PERFCTRL_IE) | ||
115 | |||
116 | #ifdef CONFIG_MIPS_MT_SMP | ||
117 | #define M_PERFCTL_CONFIG_MASK 0x3fff801f | ||
118 | #else | ||
119 | #define M_PERFCTL_CONFIG_MASK 0x1f | ||
120 | #endif | ||
121 | |||
122 | #define CNTR_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1)) | ||
123 | |||
124 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | ||
125 | static DEFINE_RWLOCK(pmuint_rwlock); | ||
126 | |||
127 | #if defined(CONFIG_CPU_BMIPS5000) | ||
128 | #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ | ||
129 | 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK)) | ||
130 | #else | ||
131 | #define vpe_id() (cpu_has_mipsmt_pertccounters ? \ | ||
132 | 0 : cpu_vpe_id(¤t_cpu_data)) | ||
133 | #endif | ||
134 | |||
135 | /* Copied from op_model_mipsxx.c */ | ||
136 | static unsigned int vpe_shift(void) | ||
137 | { | ||
138 | if (num_possible_cpus() > 1) | ||
139 | return 1; | ||
140 | |||
141 | return 0; | ||
142 | } | ||
143 | |||
144 | static unsigned int counters_total_to_per_cpu(unsigned int counters) | ||
145 | { | ||
146 | return counters >> vpe_shift(); | ||
147 | } | ||
148 | |||
149 | #else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */ | ||
150 | #define vpe_id() 0 | ||
151 | |||
152 | #endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */ | ||
153 | |||
154 | static void resume_local_counters(void); | ||
155 | static void pause_local_counters(void); | ||
156 | static irqreturn_t mipsxx_pmu_handle_irq(int, void *); | ||
157 | static int mipsxx_pmu_handle_shared_irq(void); | ||
158 | |||
159 | /* 0: Not Loongson-3 | ||
160 | * 1: Loongson-3A1000/3B1000/3B1500 | ||
161 | * 2: Loongson-3A2000/3A3000 | ||
162 | * 3: Loongson-3A4000+ | ||
163 | */ | ||
164 | |||
165 | #define LOONGSON_PMU_TYPE0 0 | ||
166 | #define LOONGSON_PMU_TYPE1 1 | ||
167 | #define LOONGSON_PMU_TYPE2 2 | ||
168 | #define LOONGSON_PMU_TYPE3 3 | ||
169 | |||
170 | static inline int get_loongson3_pmu_type(void) | ||
171 | { | ||
172 | if (boot_cpu_type() != CPU_LOONGSON64) | ||
173 | return LOONGSON_PMU_TYPE0; | ||
174 | if ((boot_cpu_data.processor_id & PRID_COMP_MASK) == PRID_COMP_LEGACY) | ||
175 | return LOONGSON_PMU_TYPE1; | ||
176 | if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C) | ||
177 | return LOONGSON_PMU_TYPE2; | ||
178 | if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G) | ||
179 | return LOONGSON_PMU_TYPE3; | ||
180 | |||
181 | return LOONGSON_PMU_TYPE0; | ||
182 | } | ||
183 | |||
184 | static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx) | ||
185 | { | ||
186 | if (vpe_id() == 1) | ||
187 | idx = (idx + 2) & 3; | ||
188 | return idx; | ||
189 | } | ||
190 | |||
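
Worked example: when vpe_id() == 1, (idx + 2) & 3 maps 0 -> 2, 1 -> 3, 2 -> 0 and 3 -> 1, so the second VPE's logical counters 0/1 land on physical counters 2/3 while VPE 0 keeps 0/1.
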
191 | static u64 mipsxx_pmu_read_counter(unsigned int idx) | ||
192 | { | ||
193 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
194 | |||
195 | switch (idx) { | ||
196 | case 0: | ||
197 | /* | ||
198 | * The counters are unsigned; we must cast to truncate | ||
199 | * off the high bits. | ||
200 | */ | ||
201 | return (u32)read_c0_perfcntr0(); | ||
202 | case 1: | ||
203 | return (u32)read_c0_perfcntr1(); | ||
204 | case 2: | ||
205 | return (u32)read_c0_perfcntr2(); | ||
206 | case 3: | ||
207 | return (u32)read_c0_perfcntr3(); | ||
208 | default: | ||
209 | WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); | ||
210 | return 0; | ||
211 | } | ||
212 | } | ||
213 | |||
214 | static u64 mipsxx_pmu_read_counter_64(unsigned int idx) | ||
215 | { | ||
216 | u64 mask = CNTR_BIT_MASK(counter_bits); | ||
217 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
218 | |||
219 | switch (idx) { | ||
220 | case 0: | ||
221 | return read_c0_perfcntr0_64() & mask; | ||
222 | case 1: | ||
223 | return read_c0_perfcntr1_64() & mask; | ||
224 | case 2: | ||
225 | return read_c0_perfcntr2_64() & mask; | ||
226 | case 3: | ||
227 | return read_c0_perfcntr3_64() & mask; | ||
228 | default: | ||
229 | WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); | ||
230 | return 0; | ||
231 | } | ||
232 | } | ||
233 | |||
234 | static void mipsxx_pmu_write_counter(unsigned int idx, u64 val) | ||
235 | { | ||
236 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
237 | |||
238 | switch (idx) { | ||
239 | case 0: | ||
240 | write_c0_perfcntr0(val); | ||
241 | return; | ||
242 | case 1: | ||
243 | write_c0_perfcntr1(val); | ||
244 | return; | ||
245 | case 2: | ||
246 | write_c0_perfcntr2(val); | ||
247 | return; | ||
248 | case 3: | ||
249 | write_c0_perfcntr3(val); | ||
250 | return; | ||
251 | } | ||
252 | } | ||
253 | |||
254 | static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val) | ||
255 | { | ||
256 | val &= CNTR_BIT_MASK(counter_bits); | ||
257 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
258 | |||
259 | switch (idx) { | ||
260 | case 0: | ||
261 | write_c0_perfcntr0_64(val); | ||
262 | return; | ||
263 | case 1: | ||
264 | write_c0_perfcntr1_64(val); | ||
265 | return; | ||
266 | case 2: | ||
267 | write_c0_perfcntr2_64(val); | ||
268 | return; | ||
269 | case 3: | ||
270 | write_c0_perfcntr3_64(val); | ||
271 | return; | ||
272 | } | ||
273 | } | ||
274 | |||
275 | static unsigned int mipsxx_pmu_read_control(unsigned int idx) | ||
276 | { | ||
277 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
278 | |||
279 | switch (idx) { | ||
280 | case 0: | ||
281 | return read_c0_perfctrl0(); | ||
282 | case 1: | ||
283 | return read_c0_perfctrl1(); | ||
284 | case 2: | ||
285 | return read_c0_perfctrl2(); | ||
286 | case 3: | ||
287 | return read_c0_perfctrl3(); | ||
288 | default: | ||
289 | WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx); | ||
290 | return 0; | ||
291 | } | ||
292 | } | ||
293 | |||
294 | static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val) | ||
295 | { | ||
296 | idx = mipsxx_pmu_swizzle_perf_idx(idx); | ||
297 | |||
298 | switch (idx) { | ||
299 | case 0: | ||
300 | write_c0_perfctrl0(val); | ||
301 | return; | ||
302 | case 1: | ||
303 | write_c0_perfctrl1(val); | ||
304 | return; | ||
305 | case 2: | ||
306 | write_c0_perfctrl2(val); | ||
307 | return; | ||
308 | case 3: | ||
309 | write_c0_perfctrl3(val); | ||
310 | return; | ||
311 | } | ||
312 | } | ||
313 | |||
314 | static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc, | ||
315 | struct hw_perf_event *hwc) | ||
316 | { | ||
317 | int i; | ||
318 | unsigned long cntr_mask; | ||
319 | |||
320 | /* | ||
321 | * We only need to care about the counter mask here; the range | ||
322 | * has already been checked. | ||
323 | */ | ||
324 | if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) | ||
325 | cntr_mask = (hwc->event_base >> 10) & 0xffff; | ||
326 | else | ||
327 | cntr_mask = (hwc->event_base >> 8) & 0xffff; | ||
328 | |||
329 | for (i = mipspmu.num_counters - 1; i >= 0; i--) { | ||
330 | /* | ||
331 | * Note that some MIPS perf events can be counted by both | ||
332 | * even and odd counters, wheresas many other are only by | ||
333 | * even _or_ odd counters. This introduces an issue that | ||
334 | * when the former kind of event takes the counter the | ||
335 | * latter kind of event wants to use, then the "counter | ||
336 | * allocation" for the latter event will fail. In fact if | ||
337 | * they can be dynamically swapped, they both feel happy. | ||
338 | * But here we leave this issue alone for now. | ||
339 | */ | ||
340 | if (test_bit(i, &cntr_mask) && | ||
341 | !test_and_set_bit(i, cpuc->used_mask)) | ||
342 | return i; | ||
343 | } | ||
344 | |||
345 | return -EAGAIN; | ||
346 | } | ||
347 | |||
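
A concrete instance of the issue described in the comment above: with two counters, an event whose mask is CNTR_EVEN | CNTR_ODD is allocated top-down and grabs counter 1 (odd); a later event restricted to CNTR_ODD then finds counter 1 taken and fails with -EAGAIN, even though moving the first event to counter 0 would have let both run.
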
348 | static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx) | ||
349 | { | ||
350 | struct perf_event *event = container_of(evt, struct perf_event, hw); | ||
351 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
352 | unsigned int range = evt->event_base >> 24; | ||
353 | |||
354 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); | ||
355 | |||
356 | if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) | ||
357 | cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) | | ||
358 | (evt->config_base & M_PERFCTL_CONFIG_MASK) | | ||
359 | /* Make sure interrupt enabled. */ | ||
360 | MIPS_PERFCTRL_IE; | ||
361 | else | ||
362 | cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) | | ||
363 | (evt->config_base & M_PERFCTL_CONFIG_MASK) | | ||
364 | /* Make sure interrupt enabled. */ | ||
365 | MIPS_PERFCTRL_IE; | ||
366 | |||
367 | if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) { | ||
368 | /* enable the counter for the calling thread */ | ||
369 | cpuc->saved_ctrl[idx] |= | ||
370 | (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC; | ||
371 | } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) { | ||
372 | /* The counter is processor wide. Set it up to count all TCs. */ | ||
373 | pr_debug("Enabling perf counter for all TCs\n"); | ||
374 | cpuc->saved_ctrl[idx] |= M_TC_EN_ALL; | ||
375 | } else { | ||
376 | unsigned int cpu, ctrl; | ||
377 | |||
378 | /* | ||
379 | * Set up the counter for a particular CPU when event->cpu is | ||
380 | * a valid CPU number. Otherwise set up the counter for the CPU | ||
381 | * scheduling this thread. | ||
382 | */ | ||
383 | cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id(); | ||
384 | |||
385 | ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu])); | ||
386 | ctrl |= M_TC_EN_VPE; | ||
387 | cpuc->saved_ctrl[idx] |= ctrl; | ||
388 | pr_debug("Enabling perf counter for CPU%d\n", cpu); | ||
389 | } | ||
390 | /* | ||
391 | * We do not actually let the counter run. Leave it until start(). | ||
392 | */ | ||
393 | } | ||
394 | |||
395 | static void mipsxx_pmu_disable_event(int idx) | ||
396 | { | ||
397 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
398 | unsigned long flags; | ||
399 | |||
400 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); | ||
401 | |||
402 | local_irq_save(flags); | ||
403 | cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) & | ||
404 | ~M_PERFCTL_COUNT_EVENT_WHENEVER; | ||
405 | mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]); | ||
406 | local_irq_restore(flags); | ||
407 | } | ||
408 | |||
409 | static int mipspmu_event_set_period(struct perf_event *event, | ||
410 | struct hw_perf_event *hwc, | ||
411 | int idx) | ||
412 | { | ||
413 | u64 left = local64_read(&hwc->period_left); | ||
414 | u64 period = hwc->sample_period; | ||
415 | int ret = 0; | ||
416 | |||
417 | if (unlikely((left + period) & (1ULL << 63))) { | ||
418 | /* left underflowed by more than period. */ | ||
419 | left = period; | ||
420 | local64_set(&hwc->period_left, left); | ||
421 | hwc->last_period = period; | ||
422 | ret = 1; | ||
423 | } else if (unlikely((left + period) <= period)) { | ||
424 | /* left underflowed by less than period. */ | ||
425 | left += period; | ||
426 | local64_set(&hwc->period_left, left); | ||
427 | hwc->last_period = period; | ||
428 | ret = 1; | ||
429 | } | ||
430 | |||
431 | if (left > mipspmu.max_period) { | ||
432 | left = mipspmu.max_period; | ||
433 | local64_set(&hwc->period_left, left); | ||
434 | } | ||
435 | |||
436 | local64_set(&hwc->prev_count, mipspmu.overflow - left); | ||
437 | |||
438 | if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) | ||
439 | mipsxx_pmu_write_control(idx, | ||
440 | M_PERFCTL_EVENT(hwc->event_base & 0x3ff)); | ||
441 | |||
442 | mipspmu.write_counter(idx, mipspmu.overflow - left); | ||
443 | |||
444 | perf_event_update_userpage(event); | ||
445 | |||
446 | return ret; | ||
447 | } | ||
448 | |||
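
Worked numbers for the reload arithmetic: the counter is programmed to mipspmu.overflow - left, so the overflow condition asserts after exactly `left` more events. For example, assuming a 32-bit counter whose overflow threshold is 2^31 (the counter MSB), left = 1000 programs the counter to 0x7ffffc18; prev_count is set to the same value so that the next mipspmu_event_update() computes a correct delta.
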
449 | static void mipspmu_event_update(struct perf_event *event, | ||
450 | struct hw_perf_event *hwc, | ||
451 | int idx) | ||
452 | { | ||
453 | u64 prev_raw_count, new_raw_count; | ||
454 | u64 delta; | ||
455 | |||
456 | again: | ||
457 | prev_raw_count = local64_read(&hwc->prev_count); | ||
458 | new_raw_count = mipspmu.read_counter(idx); | ||
459 | |||
460 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
461 | new_raw_count) != prev_raw_count) | ||
462 | goto again; | ||
463 | |||
464 | delta = new_raw_count - prev_raw_count; | ||
465 | |||
466 | local64_add(delta, &event->count); | ||
467 | local64_sub(delta, &hwc->period_left); | ||
468 | } | ||
469 | |||
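
The local64_cmpxchg() retry loop above makes the update lock-free against concurrent updates of the same event (for example, the overflow interrupt updating the count while mipspmu_read() runs): whichever path installs its new_raw_count first computes the delta, and the loser retries with the refreshed prev_count.
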
470 | static void mipspmu_start(struct perf_event *event, int flags) | ||
471 | { | ||
472 | struct hw_perf_event *hwc = &event->hw; | ||
473 | |||
474 | if (flags & PERF_EF_RELOAD) | ||
475 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | ||
476 | |||
477 | hwc->state = 0; | ||
478 | |||
479 | /* Set the period for the event. */ | ||
480 | mipspmu_event_set_period(event, hwc, hwc->idx); | ||
481 | |||
482 | /* Enable the event. */ | ||
483 | mipsxx_pmu_enable_event(hwc, hwc->idx); | ||
484 | } | ||
485 | |||
486 | static void mipspmu_stop(struct perf_event *event, int flags) | ||
487 | { | ||
488 | struct hw_perf_event *hwc = &event->hw; | ||
489 | |||
490 | if (!(hwc->state & PERF_HES_STOPPED)) { | ||
491 | /* We are working on a local event. */ | ||
492 | mipsxx_pmu_disable_event(hwc->idx); | ||
493 | barrier(); | ||
494 | mipspmu_event_update(event, hwc, hwc->idx); | ||
495 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
496 | } | ||
497 | } | ||
498 | |||
499 | static int mipspmu_add(struct perf_event *event, int flags) | ||
500 | { | ||
501 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
502 | struct hw_perf_event *hwc = &event->hw; | ||
503 | int idx; | ||
504 | int err = 0; | ||
505 | |||
506 | perf_pmu_disable(event->pmu); | ||
507 | |||
508 | /* Look for a free counter for this event. */ | ||
509 | idx = mipsxx_pmu_alloc_counter(cpuc, hwc); | ||
510 | if (idx < 0) { | ||
511 | err = idx; | ||
512 | goto out; | ||
513 | } | ||
514 | |||
515 | /* | ||
516 | * If there is an event in the counter we are going to use then | ||
517 | * make sure it is disabled. | ||
518 | */ | ||
519 | event->hw.idx = idx; | ||
520 | mipsxx_pmu_disable_event(idx); | ||
521 | cpuc->events[idx] = event; | ||
522 | |||
523 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | ||
524 | if (flags & PERF_EF_START) | ||
525 | mipspmu_start(event, PERF_EF_RELOAD); | ||
526 | |||
527 | /* Propagate our changes to the userspace mapping. */ | ||
528 | perf_event_update_userpage(event); | ||
529 | |||
530 | out: | ||
531 | perf_pmu_enable(event->pmu); | ||
532 | return err; | ||
533 | } | ||
534 | |||
535 | static void mipspmu_del(struct perf_event *event, int flags) | ||
536 | { | ||
537 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
538 | struct hw_perf_event *hwc = &event->hw; | ||
539 | int idx = hwc->idx; | ||
540 | |||
541 | WARN_ON(idx < 0 || idx >= mipspmu.num_counters); | ||
542 | |||
543 | mipspmu_stop(event, PERF_EF_UPDATE); | ||
544 | cpuc->events[idx] = NULL; | ||
545 | clear_bit(idx, cpuc->used_mask); | ||
546 | |||
547 | perf_event_update_userpage(event); | ||
548 | } | ||
549 | |||
550 | static void mipspmu_read(struct perf_event *event) | ||
551 | { | ||
552 | struct hw_perf_event *hwc = &event->hw; | ||
553 | |||
554 | /* Don't read disabled counters! */ | ||
555 | if (hwc->idx < 0) | ||
556 | return; | ||
557 | |||
558 | mipspmu_event_update(event, hwc, hwc->idx); | ||
559 | } | ||
560 | |||
561 | static void mipspmu_enable(struct pmu *pmu) | ||
562 | { | ||
563 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | ||
564 | write_unlock(&pmuint_rwlock); | ||
565 | #endif | ||
566 | resume_local_counters(); | ||
567 | } | ||
568 | |||
569 | /* | ||
570 | * MIPS performance counters can be per-TC. The control registers | ||
571 | * cannot be directly accessed across CPUs. Hence, if we want global | ||
572 | * control, we need cross-CPU calls; on_each_cpu() can help, but we | ||
573 | * cannot make sure this function is called with interrupts enabled. | ||
574 | * So we pause local counters, then grab a rwlock, and leave counters | ||
575 | * on other CPUs alone. If a counter interrupt is raised while we own | ||
576 | * the write lock, we simply pause local counters on that CPU and | ||
577 | * spin in the handler. Also, we know we won't be switched to another | ||
578 | * CPU after pausing local counters and before grabbing the lock. | ||
579 | */ | ||
580 | static void mipspmu_disable(struct pmu *pmu) | ||
581 | { | ||
582 | pause_local_counters(); | ||
583 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | ||
584 | write_lock(&pmuint_rwlock); | ||
585 | #endif | ||
586 | } | ||
587 | |||
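
A minimal sketch of the interrupt-handler side of this protocol (the real logic lives in mipsxx_pmu_handle_shared_irq(), defined later in this file; the shape below is illustrative only):

    static irqreturn_t pmu_irq_sketch(int irq, void *dev)
    {
    #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
            if (!read_trylock(&pmuint_rwlock)) {
                    /* A writer owns global control: pause our local
                     * counters and spin until it is finished. */
                    pause_local_counters();
                    read_lock(&pmuint_rwlock);
            }
    #endif
            /* ... read the counters and call handle_associated_event()
             * for each overflowed one ... */
    #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
            read_unlock(&pmuint_rwlock);
    #endif
            return IRQ_HANDLED;
    }
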
588 | static atomic_t active_events = ATOMIC_INIT(0); | ||
589 | static DEFINE_MUTEX(pmu_reserve_mutex); | ||
590 | static int (*save_perf_irq)(void); | ||
591 | |||
592 | static int mipspmu_get_irq(void) | ||
593 | { | ||
594 | int err; | ||
595 | |||
596 | if (mipspmu.irq >= 0) { | ||
597 | /* Request my own irq handler. */ | ||
598 | err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq, | ||
599 | IRQF_PERCPU | IRQF_NOBALANCING | | ||
600 | IRQF_NO_THREAD | IRQF_NO_SUSPEND | | ||
601 | IRQF_SHARED, | ||
602 | "mips_perf_pmu", &mipspmu); | ||
603 | if (err) { | ||
604 | pr_warn("Unable to request IRQ%d for MIPS performance counters!\n", | ||
605 | mipspmu.irq); | ||
606 | } | ||
607 | } else if (cp0_perfcount_irq < 0) { | ||
608 | /* | ||
609 | * We are sharing the irq number with the timer interrupt. | ||
610 | */ | ||
611 | save_perf_irq = perf_irq; | ||
612 | perf_irq = mipsxx_pmu_handle_shared_irq; | ||
613 | err = 0; | ||
614 | } else { | ||
615 | pr_warn("The platform hasn't properly defined its interrupt controller\n"); | ||
616 | err = -ENOENT; | ||
617 | } | ||
618 | |||
619 | return err; | ||
620 | } | ||
621 | |||
622 | static void mipspmu_free_irq(void) | ||
623 | { | ||
624 | if (mipspmu.irq >= 0) | ||
625 | free_irq(mipspmu.irq, &mipspmu); | ||
626 | else if (cp0_perfcount_irq < 0) | ||
627 | perf_irq = save_perf_irq; | ||
628 | } | ||
629 | |||
630 | /* | ||
631 | * mipsxx/rm9000/loongson2 have different performance counters; each | ||
632 | * has its own specific low-level init routines. | ||
633 | */ | ||
634 | static void reset_counters(void *arg); | ||
635 | static int __hw_perf_event_init(struct perf_event *event); | ||
636 | |||
637 | static void hw_perf_event_destroy(struct perf_event *event) | ||
638 | { | ||
639 | if (atomic_dec_and_mutex_lock(&active_events, | ||
640 | &pmu_reserve_mutex)) { | ||
641 | /* | ||
642 | * We must not call the destroy function with interrupts | ||
643 | * disabled. | ||
644 | */ | ||
645 | on_each_cpu(reset_counters, | ||
646 | (void *)(long)mipspmu.num_counters, 1); | ||
647 | mipspmu_free_irq(); | ||
648 | mutex_unlock(&pmu_reserve_mutex); | ||
649 | } | ||
650 | } | ||
651 | |||
652 | static int mipspmu_event_init(struct perf_event *event) | ||
653 | { | ||
654 | int err = 0; | ||
655 | |||
656 | /* does not support taken branch sampling */ | ||
657 | if (has_branch_stack(event)) | ||
658 | return -EOPNOTSUPP; | ||
659 | |||
660 | switch (event->attr.type) { | ||
661 | case PERF_TYPE_RAW: | ||
662 | case PERF_TYPE_HARDWARE: | ||
663 | case PERF_TYPE_HW_CACHE: | ||
664 | break; | ||
665 | |||
666 | default: | ||
667 | return -ENOENT; | ||
668 | } | ||
669 | |||
670 | if (event->cpu >= 0 && !cpu_online(event->cpu)) | ||
671 | return -ENODEV; | ||
672 | |||
673 | if (!atomic_inc_not_zero(&active_events)) { | ||
674 | mutex_lock(&pmu_reserve_mutex); | ||
675 | if (atomic_read(&active_events) == 0) | ||
676 | err = mipspmu_get_irq(); | ||
677 | |||
678 | if (!err) | ||
679 | atomic_inc(&active_events); | ||
680 | mutex_unlock(&pmu_reserve_mutex); | ||
681 | } | ||
682 | |||
683 | if (err) | ||
684 | return err; | ||
685 | |||
686 | return __hw_perf_event_init(event); | ||
687 | } | ||
688 | |||
689 | static struct pmu pmu = { | ||
690 | .pmu_enable = mipspmu_enable, | ||
691 | .pmu_disable = mipspmu_disable, | ||
692 | .event_init = mipspmu_event_init, | ||
693 | .add = mipspmu_add, | ||
694 | .del = mipspmu_del, | ||
695 | .start = mipspmu_start, | ||
696 | .stop = mipspmu_stop, | ||
697 | .read = mipspmu_read, | ||
698 | }; | ||
699 | |||
700 | static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev) | ||
701 | { | ||
702 | /* | ||
703 | * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for | ||
704 | * event_id. | ||
705 | */ | ||
706 | #ifdef CONFIG_MIPS_MT_SMP | ||
707 | if (num_possible_cpus() > 1) | ||
708 | return ((unsigned int)pev->range << 24) | | ||
709 | (pev->cntr_mask & 0xffff00) | | ||
710 | (pev->event_id & 0xff); | ||
711 | else | ||
712 | #endif /* CONFIG_MIPS_MT_SMP */ | ||
713 | { | ||
714 | if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) | ||
715 | return (pev->cntr_mask & 0xfffc00) | | ||
716 | (pev->event_id & 0x3ff); | ||
717 | else | ||
718 | return (pev->cntr_mask & 0xffff00) | | ||
719 | (pev->event_id & 0xff); | ||
720 | } | ||
721 | } | ||
722 | |||
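
Worked example of this encoding on an MT_SMP kernel: pev = { .event_id = 0x02, .cntr_mask = CNTR_ODD, .range = P } gives (2 << 24) | (0xaaaaaaaa & 0xffff00) | 0x02 = 0x02aaaa02. The consumers peel the fields back apart: mipsxx_pmu_enable_event() takes the range from event_base >> 24, mipsxx_pmu_alloc_counter() takes the mask from (event_base >> 8) & 0xffff, and M_PERFCTL_EVENT() keeps the low event-id bits.
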
723 | static const struct mips_perf_event *mipspmu_map_general_event(int idx) | ||
724 | { | ||
725 | |||
726 | if ((*mipspmu.general_event_map)[idx].cntr_mask == 0) | ||
727 | return ERR_PTR(-EOPNOTSUPP); | ||
728 | return &(*mipspmu.general_event_map)[idx]; | ||
729 | } | ||
730 | |||
731 | static const struct mips_perf_event *mipspmu_map_cache_event(u64 config) | ||
732 | { | ||
733 | unsigned int cache_type, cache_op, cache_result; | ||
734 | const struct mips_perf_event *pev; | ||
735 | |||
736 | cache_type = (config >> 0) & 0xff; | ||
737 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | ||
738 | return ERR_PTR(-EINVAL); | ||
739 | |||
740 | cache_op = (config >> 8) & 0xff; | ||
741 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | ||
742 | return ERR_PTR(-EINVAL); | ||
743 | |||
744 | cache_result = (config >> 16) & 0xff; | ||
745 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
746 | return ERR_PTR(-EINVAL); | ||
747 | |||
748 | pev = &((*mipspmu.cache_event_map) | ||
749 | [cache_type] | ||
750 | [cache_op] | ||
751 | [cache_result]); | ||
752 | |||
753 | if (pev->cntr_mask == 0) | ||
754 | return ERR_PTR(-EOPNOTSUPP); | ||
755 | |||
756 | return pev; | ||
757 | |||
758 | } | ||
759 | |||
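
Worked example of the config layout decoded above (this is the generic perf cache-event encoding): an L1D read miss is config = C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) = 0x00 | 0x0000 | 0x10000 = 0x10000, which the three extractions yield back as cache_type = 0, cache_op = 0 and cache_result = 1.
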
760 | static int validate_group(struct perf_event *event) | ||
761 | { | ||
762 | struct perf_event *sibling, *leader = event->group_leader; | ||
763 | struct cpu_hw_events fake_cpuc; | ||
764 | |||
765 | memset(&fake_cpuc, 0, sizeof(fake_cpuc)); | ||
766 | |||
767 | if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0) | ||
768 | return -EINVAL; | ||
769 | |||
770 | for_each_sibling_event(sibling, leader) { | ||
771 | if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0) | ||
772 | return -EINVAL; | ||
773 | } | ||
774 | |||
775 | if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0) | ||
776 | return -EINVAL; | ||
777 | |||
778 | return 0; | ||
779 | } | ||
780 | |||
781 | /* This is needed by specific irq handlers in perf_event_*.c */ | ||
782 | static void handle_associated_event(struct cpu_hw_events *cpuc, | ||
783 | int idx, struct perf_sample_data *data, | ||
784 | struct pt_regs *regs) | ||
785 | { | ||
786 | struct perf_event *event = cpuc->events[idx]; | ||
787 | struct hw_perf_event *hwc = &event->hw; | ||
788 | |||
789 | mipspmu_event_update(event, hwc, idx); | ||
790 | data->period = event->hw.last_period; | ||
791 | if (!mipspmu_event_set_period(event, hwc, idx)) | ||
792 | return; | ||
793 | |||
794 | if (perf_event_overflow(event, data, regs)) | ||
795 | mipsxx_pmu_disable_event(idx); | ||
796 | } | ||
797 | |||
798 | |||
799 | static int __n_counters(void) | ||
800 | { | ||
801 | if (!cpu_has_perf) | ||
802 | return 0; | ||
803 | if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M)) | ||
804 | return 1; | ||
805 | if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M)) | ||
806 | return 2; | ||
807 | if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M)) | ||
808 | return 3; | ||
809 | |||
810 | return 4; | ||
811 | } | ||
812 | |||
813 | static int n_counters(void) | ||
814 | { | ||
815 | int counters; | ||
816 | |||
817 | switch (current_cpu_type()) { | ||
818 | case CPU_R10000: | ||
819 | counters = 2; | ||
820 | break; | ||
821 | |||
822 | case CPU_R12000: | ||
823 | case CPU_R14000: | ||
824 | case CPU_R16000: | ||
825 | counters = 4; | ||
826 | break; | ||
827 | |||
828 | default: | ||
829 | counters = __n_counters(); | ||
830 | } | ||
831 | |||
832 | return counters; | ||
833 | } | ||
834 | |||
835 | static void loongson3_reset_counters(void *arg) | ||
836 | { | ||
837 | int counters = (int)(long)arg; | ||
838 | |||
839 | switch (counters) { | ||
840 | case 4: | ||
841 | mipsxx_pmu_write_control(3, 0); | ||
842 | mipspmu.write_counter(3, 0); | ||
843 | mipsxx_pmu_write_control(3, 127<<5); | ||
844 | mipspmu.write_counter(3, 0); | ||
845 | mipsxx_pmu_write_control(3, 191<<5); | ||
846 | mipspmu.write_counter(3, 0); | ||
847 | mipsxx_pmu_write_control(3, 255<<5); | ||
848 | mipspmu.write_counter(3, 0); | ||
849 | mipsxx_pmu_write_control(3, 319<<5); | ||
850 | mipspmu.write_counter(3, 0); | ||
851 | mipsxx_pmu_write_control(3, 383<<5); | ||
852 | mipspmu.write_counter(3, 0); | ||
853 | mipsxx_pmu_write_control(3, 575<<5); | ||
854 | mipspmu.write_counter(3, 0); | ||
855 | fallthrough; | ||
856 | case 3: | ||
857 | mipsxx_pmu_write_control(2, 0); | ||
858 | mipspmu.write_counter(2, 0); | ||
859 | mipsxx_pmu_write_control(2, 127<<5); | ||
860 | mipspmu.write_counter(2, 0); | ||
861 | mipsxx_pmu_write_control(2, 191<<5); | ||
862 | mipspmu.write_counter(2, 0); | ||
863 | mipsxx_pmu_write_control(2, 255<<5); | ||
864 | mipspmu.write_counter(2, 0); | ||
865 | mipsxx_pmu_write_control(2, 319<<5); | ||
866 | mipspmu.write_counter(2, 0); | ||
867 | mipsxx_pmu_write_control(2, 383<<5); | ||
868 | mipspmu.write_counter(2, 0); | ||
869 | mipsxx_pmu_write_control(2, 575<<5); | ||
870 | mipspmu.write_counter(2, 0); | ||
871 | fallthrough; | ||
872 | case 2: | ||
873 | mipsxx_pmu_write_control(1, 0); | ||
874 | mipspmu.write_counter(1, 0); | ||
875 | mipsxx_pmu_write_control(1, 127<<5); | ||
876 | mipspmu.write_counter(1, 0); | ||
877 | mipsxx_pmu_write_control(1, 191<<5); | ||
878 | mipspmu.write_counter(1, 0); | ||
879 | mipsxx_pmu_write_control(1, 255<<5); | ||
880 | mipspmu.write_counter(1, 0); | ||
881 | mipsxx_pmu_write_control(1, 319<<5); | ||
882 | mipspmu.write_counter(1, 0); | ||
883 | mipsxx_pmu_write_control(1, 383<<5); | ||
884 | mipspmu.write_counter(1, 0); | ||
885 | mipsxx_pmu_write_control(1, 575<<5); | ||
886 | mipspmu.write_counter(1, 0); | ||
887 | fallthrough; | ||
888 | case 1: | ||
889 | mipsxx_pmu_write_control(0, 0); | ||
890 | mipspmu.write_counter(0, 0); | ||
891 | mipsxx_pmu_write_control(0, 127<<5); | ||
892 | mipspmu.write_counter(0, 0); | ||
893 | mipsxx_pmu_write_control(0, 191<<5); | ||
894 | mipspmu.write_counter(0, 0); | ||
895 | mipsxx_pmu_write_control(0, 255<<5); | ||
896 | mipspmu.write_counter(0, 0); | ||
897 | mipsxx_pmu_write_control(0, 319<<5); | ||
898 | mipspmu.write_counter(0, 0); | ||
899 | mipsxx_pmu_write_control(0, 383<<5); | ||
900 | mipspmu.write_counter(0, 0); | ||
901 | mipsxx_pmu_write_control(0, 575<<5); | ||
902 | mipspmu.write_counter(0, 0); | ||
903 | break; | ||
904 | } | ||
905 | } | ||
906 | |||
907 | static void reset_counters(void *arg) | ||
908 | { | ||
909 | int counters = (int)(long)arg; | ||
910 | |||
911 | if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) { | ||
912 | loongson3_reset_counters(arg); | ||
913 | return; | ||
914 | } | ||
915 | |||
916 | switch (counters) { | ||
917 | case 4: | ||
918 | mipsxx_pmu_write_control(3, 0); | ||
919 | mipspmu.write_counter(3, 0); | ||
920 | fallthrough; | ||
921 | case 3: | ||
922 | mipsxx_pmu_write_control(2, 0); | ||
923 | mipspmu.write_counter(2, 0); | ||
924 | fallthrough; | ||
925 | case 2: | ||
926 | mipsxx_pmu_write_control(1, 0); | ||
927 | mipspmu.write_counter(1, 0); | ||
928 | fallthrough; | ||
929 | case 1: | ||
930 | mipsxx_pmu_write_control(0, 0); | ||
931 | mipspmu.write_counter(0, 0); | ||
932 | break; | ||
933 | } | ||
934 | } | ||
935 | |||
936 | /* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */ | ||
937 | static const struct mips_perf_event mipsxxcore_event_map | ||
938 | [PERF_COUNT_HW_MAX] = { | ||
939 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, | ||
940 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, | ||
941 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T }, | ||
942 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, | ||
943 | }; | ||
944 | |||
945 | /* 74K/proAptiv core has different branch event code. */ | ||
946 | static const struct mips_perf_event mipsxxcore_event_map2 | ||
947 | [PERF_COUNT_HW_MAX] = { | ||
948 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P }, | ||
949 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, | ||
950 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T }, | ||
951 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T }, | ||
952 | }; | ||
953 | |||
954 | static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = { | ||
955 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD }, | ||
956 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD }, | ||
957 | /* These only count dcache, not icache */ | ||
958 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x45, CNTR_EVEN | CNTR_ODD }, | ||
959 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x48, CNTR_EVEN | CNTR_ODD }, | ||
960 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD }, | ||
961 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x16, CNTR_EVEN | CNTR_ODD }, | ||
962 | }; | ||
963 | |||
964 | static const struct mips_perf_event loongson3_event_map1[PERF_COUNT_HW_MAX] = { | ||
965 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN }, | ||
966 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD }, | ||
967 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN }, | ||
968 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD }, | ||
969 | }; | ||
970 | |||
971 | static const struct mips_perf_event loongson3_event_map2[PERF_COUNT_HW_MAX] = { | ||
972 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x80, CNTR_ALL }, | ||
973 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x81, CNTR_ALL }, | ||
974 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x18, CNTR_ALL }, | ||
975 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x94, CNTR_ALL }, | ||
976 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x9c, CNTR_ALL }, | ||
977 | }; | ||
978 | |||
979 | static const struct mips_perf_event loongson3_event_map3[PERF_COUNT_HW_MAX] = { | ||
980 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_ALL }, | ||
981 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_ALL }, | ||
982 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x1c, CNTR_ALL }, | ||
983 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x1d, CNTR_ALL }, | ||
984 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_ALL }, | ||
985 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x08, CNTR_ALL }, | ||
986 | }; | ||
987 | |||
988 | static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = { | ||
989 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, | ||
990 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL }, | ||
991 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL }, | ||
992 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL }, | ||
993 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL }, | ||
994 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL }, | ||
995 | [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL }, | ||
996 | }; | ||
997 | |||
998 | static const struct mips_perf_event bmips5000_event_map | ||
999 | [PERF_COUNT_HW_MAX] = { | ||
1000 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T }, | ||
1001 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T }, | ||
1002 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T }, | ||
1003 | }; | ||
1004 | |||
1005 | static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = { | ||
1006 | [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL }, | ||
1007 | [PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */ | ||
1008 | [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */ | ||
1009 | [PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ | ||
1010 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */ | ||
1011 | [PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */ | ||
1012 | }; | ||
1013 | |||
1014 | /* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */ | ||
1015 | static const struct mips_perf_event mipsxxcore_cache_map | ||
1016 | [PERF_COUNT_HW_CACHE_MAX] | ||
1017 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1018 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1019 | [C(L1D)] = { | ||
1020 | /* | ||
1021 | * Like some other architectures (e.g. ARM), the performance | ||
1022 | * counters don't differentiate between read and write | ||
1023 | * accesses/misses, so this isn't strictly correct, but it's the | ||
1024 | * best we can do. Writes and reads get combined. | ||
1025 | */ | ||
1026 | [C(OP_READ)] = { | ||
1027 | [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T }, | ||
1028 | [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T }, | ||
1029 | }, | ||
1030 | [C(OP_WRITE)] = { | ||
1031 | [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T }, | ||
1032 | [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T }, | ||
1033 | }, | ||
1034 | }, | ||
1035 | [C(L1I)] = { | ||
1036 | [C(OP_READ)] = { | ||
1037 | [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T }, | ||
1038 | [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T }, | ||
1039 | }, | ||
1040 | [C(OP_WRITE)] = { | ||
1041 | [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T }, | ||
1042 | [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T }, | ||
1043 | }, | ||
1044 | [C(OP_PREFETCH)] = { | ||
1045 | [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T }, | ||
1046 | /* | ||
1047 | * Note that MIPS has only "hit" events countable for | ||
1048 | * the prefetch operation. | ||
1049 | */ | ||
1050 | }, | ||
1051 | }, | ||
1052 | [C(LL)] = { | ||
1053 | [C(OP_READ)] = { | ||
1054 | [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P }, | ||
1055 | [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P }, | ||
1056 | }, | ||
1057 | [C(OP_WRITE)] = { | ||
1058 | [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P }, | ||
1059 | [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P }, | ||
1060 | }, | ||
1061 | }, | ||
1062 | [C(DTLB)] = { | ||
1063 | [C(OP_READ)] = { | ||
1064 | [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, | ||
1065 | [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, | ||
1066 | }, | ||
1067 | [C(OP_WRITE)] = { | ||
1068 | [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, | ||
1069 | [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, | ||
1070 | }, | ||
1071 | }, | ||
1072 | [C(ITLB)] = { | ||
1073 | [C(OP_READ)] = { | ||
1074 | [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T }, | ||
1075 | [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T }, | ||
1076 | }, | ||
1077 | [C(OP_WRITE)] = { | ||
1078 | [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T }, | ||
1079 | [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T }, | ||
1080 | }, | ||
1081 | }, | ||
1082 | [C(BPU)] = { | ||
1083 | /* Using the same code for *HW_BRANCH* */ | ||
1084 | [C(OP_READ)] = { | ||
1085 | [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T }, | ||
1086 | [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, | ||
1087 | }, | ||
1088 | [C(OP_WRITE)] = { | ||
1089 | [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T }, | ||
1090 | [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, | ||
1091 | }, | ||
1092 | }, | ||
1093 | }; | ||
1094 | |||
1095 | /* The 74K/proAptiv cores have a completely different cache event map. */ | ||
1096 | static const struct mips_perf_event mipsxxcore_cache_map2 | ||
1097 | [PERF_COUNT_HW_CACHE_MAX] | ||
1098 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1099 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1100 | [C(L1D)] = { | ||
1101 | /* | ||
1102 | * Like some other architectures (e.g. ARM), the performance | ||
1103 | * counters don't differentiate between read and write | ||
1104 | * accesses/misses, so this isn't strictly correct, but it's the | ||
1105 | * best we can do. Writes and reads get combined. | ||
1106 | */ | ||
1107 | [C(OP_READ)] = { | ||
1108 | [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T }, | ||
1109 | [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T }, | ||
1110 | }, | ||
1111 | [C(OP_WRITE)] = { | ||
1112 | [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T }, | ||
1113 | [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T }, | ||
1114 | }, | ||
1115 | }, | ||
1116 | [C(L1I)] = { | ||
1117 | [C(OP_READ)] = { | ||
1118 | [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, | ||
1119 | [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, | ||
1120 | }, | ||
1121 | [C(OP_WRITE)] = { | ||
1122 | [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T }, | ||
1123 | [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T }, | ||
1124 | }, | ||
1125 | [C(OP_PREFETCH)] = { | ||
1126 | [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T }, | ||
1127 | /* | ||
1128 | * Note that MIPS has only "hit" events countable for | ||
1129 | * the prefetch operation. | ||
1130 | */ | ||
1131 | }, | ||
1132 | }, | ||
1133 | [C(LL)] = { | ||
1134 | [C(OP_READ)] = { | ||
1135 | [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, | ||
1136 | [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P }, | ||
1137 | }, | ||
1138 | [C(OP_WRITE)] = { | ||
1139 | [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P }, | ||
1140 | [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P }, | ||
1141 | }, | ||
1142 | }, | ||
1143 | /* | ||
1144 | * 74K core does not have specific DTLB events. proAptiv core has | ||
1145 | * "speculative" DTLB events which are numbered 0x63 (even/odd) and | ||
1146 | * not included here. One can use raw events if really needed. | ||
1147 | */ | ||
1148 | [C(ITLB)] = { | ||
1149 | [C(OP_READ)] = { | ||
1150 | [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, | ||
1151 | [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T }, | ||
1152 | }, | ||
1153 | [C(OP_WRITE)] = { | ||
1154 | [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T }, | ||
1155 | [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T }, | ||
1156 | }, | ||
1157 | }, | ||
1158 | [C(BPU)] = { | ||
1159 | /* Using the same code for *HW_BRANCH* */ | ||
1160 | [C(OP_READ)] = { | ||
1161 | [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T }, | ||
1162 | [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T }, | ||
1163 | }, | ||
1164 | [C(OP_WRITE)] = { | ||
1165 | [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T }, | ||
1166 | [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T }, | ||
1167 | }, | ||
1168 | }, | ||
1169 | }; | ||
1170 | |||
1171 | static const struct mips_perf_event i6x00_cache_map | ||
1172 | [PERF_COUNT_HW_CACHE_MAX] | ||
1173 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1174 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1175 | [C(L1D)] = { | ||
1176 | [C(OP_READ)] = { | ||
1177 | [C(RESULT_ACCESS)] = { 0x46, CNTR_EVEN | CNTR_ODD }, | ||
1178 | [C(RESULT_MISS)] = { 0x49, CNTR_EVEN | CNTR_ODD }, | ||
1179 | }, | ||
1180 | [C(OP_WRITE)] = { | ||
1181 | [C(RESULT_ACCESS)] = { 0x47, CNTR_EVEN | CNTR_ODD }, | ||
1182 | [C(RESULT_MISS)] = { 0x4a, CNTR_EVEN | CNTR_ODD }, | ||
1183 | }, | ||
1184 | }, | ||
1185 | [C(L1I)] = { | ||
1186 | [C(OP_READ)] = { | ||
1187 | [C(RESULT_ACCESS)] = { 0x84, CNTR_EVEN | CNTR_ODD }, | ||
1188 | [C(RESULT_MISS)] = { 0x85, CNTR_EVEN | CNTR_ODD }, | ||
1189 | }, | ||
1190 | }, | ||
1191 | [C(DTLB)] = { | ||
1192 | /* Can't distinguish read & write */ | ||
1193 | [C(OP_READ)] = { | ||
1194 | [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD }, | ||
1195 | [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD }, | ||
1196 | }, | ||
1197 | [C(OP_WRITE)] = { | ||
1198 | [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD }, | ||
1199 | [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD }, | ||
1200 | }, | ||
1201 | }, | ||
1202 | [C(BPU)] = { | ||
1203 | /* Conditional branches / mispredicted */ | ||
1204 | [C(OP_READ)] = { | ||
1205 | [C(RESULT_ACCESS)] = { 0x15, CNTR_EVEN | CNTR_ODD }, | ||
1206 | [C(RESULT_MISS)] = { 0x16, CNTR_EVEN | CNTR_ODD }, | ||
1207 | }, | ||
1208 | }, | ||
1209 | }; | ||
1210 | |||
1211 | static const struct mips_perf_event loongson3_cache_map1 | ||
1212 | [PERF_COUNT_HW_CACHE_MAX] | ||
1213 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1214 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1215 | [C(L1D)] = { | ||
1216 | /* | ||
1217 | * Like some other architectures (e.g. ARM), the performance | ||
1218 | * counters don't differentiate between read and write | ||
1219 | * accesses/misses, so this isn't strictly correct, but it's the | ||
1220 | * best we can do. Writes and reads get combined. | ||
1221 | */ | ||
1222 | [C(OP_READ)] = { | ||
1223 | [C(RESULT_MISS)] = { 0x04, CNTR_ODD }, | ||
1224 | }, | ||
1225 | [C(OP_WRITE)] = { | ||
1226 | [C(RESULT_MISS)] = { 0x04, CNTR_ODD }, | ||
1227 | }, | ||
1228 | }, | ||
1229 | [C(L1I)] = { | ||
1230 | [C(OP_READ)] = { | ||
1231 | [C(RESULT_MISS)] = { 0x04, CNTR_EVEN }, | ||
1232 | }, | ||
1233 | [C(OP_WRITE)] = { | ||
1234 | [C(RESULT_MISS)] = { 0x04, CNTR_EVEN }, | ||
1235 | }, | ||
1236 | }, | ||
1237 | [C(DTLB)] = { | ||
1238 | [C(OP_READ)] = { | ||
1239 | [C(RESULT_MISS)] = { 0x09, CNTR_ODD }, | ||
1240 | }, | ||
1241 | [C(OP_WRITE)] = { | ||
1242 | [C(RESULT_MISS)] = { 0x09, CNTR_ODD }, | ||
1243 | }, | ||
1244 | }, | ||
1245 | [C(ITLB)] = { | ||
1246 | [C(OP_READ)] = { | ||
1247 | [C(RESULT_MISS)] = { 0x0c, CNTR_ODD }, | ||
1248 | }, | ||
1249 | [C(OP_WRITE)] = { | ||
1250 | [C(RESULT_MISS)] = { 0x0c, CNTR_ODD }, | ||
1251 | }, | ||
1252 | }, | ||
1253 | [C(BPU)] = { | ||
1254 | /* Using the same code for *HW_BRANCH* */ | ||
1255 | [C(OP_READ)] = { | ||
1256 | [C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN }, | ||
1257 | [C(RESULT_MISS)] = { 0x01, CNTR_ODD }, | ||
1258 | }, | ||
1259 | [C(OP_WRITE)] = { | ||
1260 | [C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN }, | ||
1261 | [C(RESULT_MISS)] = { 0x01, CNTR_ODD }, | ||
1262 | }, | ||
1263 | }, | ||
1264 | }; | ||
1265 | |||
1266 | static const struct mips_perf_event loongson3_cache_map2 | ||
1267 | [PERF_COUNT_HW_CACHE_MAX] | ||
1268 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1269 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1270 | [C(L1D)] = { | ||
1271 | /* | ||
1272 | * Like some other architectures (e.g. ARM), the performance | ||
1273 | * counters don't differentiate between read and write | ||
1274 | * accesses/misses, so this isn't strictly correct, but it's the | ||
1275 | * best we can do. Writes and reads get combined. | ||
1276 | */ | ||
1277 | [C(OP_READ)] = { | ||
1278 | [C(RESULT_ACCESS)] = { 0x156, CNTR_ALL }, | ||
1279 | }, | ||
1280 | [C(OP_WRITE)] = { | ||
1281 | [C(RESULT_ACCESS)] = { 0x155, CNTR_ALL }, | ||
1282 | [C(RESULT_MISS)] = { 0x153, CNTR_ALL }, | ||
1283 | }, | ||
1284 | }, | ||
1285 | [C(L1I)] = { | ||
1286 | [C(OP_READ)] = { | ||
1287 | [C(RESULT_MISS)] = { 0x18, CNTR_ALL }, | ||
1288 | }, | ||
1289 | [C(OP_WRITE)] = { | ||
1290 | [C(RESULT_MISS)] = { 0x18, CNTR_ALL }, | ||
1291 | }, | ||
1292 | }, | ||
1293 | [C(LL)] = { | ||
1294 | [C(OP_READ)] = { | ||
1295 | [C(RESULT_ACCESS)] = { 0x1b6, CNTR_ALL }, | ||
1296 | }, | ||
1297 | [C(OP_WRITE)] = { | ||
1298 | [C(RESULT_ACCESS)] = { 0x1b7, CNTR_ALL }, | ||
1299 | }, | ||
1300 | [C(OP_PREFETCH)] = { | ||
1301 | [C(RESULT_ACCESS)] = { 0x1bf, CNTR_ALL }, | ||
1302 | }, | ||
1303 | }, | ||
1304 | [C(DTLB)] = { | ||
1305 | [C(OP_READ)] = { | ||
1306 | [C(RESULT_MISS)] = { 0x92, CNTR_ALL }, | ||
1307 | }, | ||
1308 | [C(OP_WRITE)] = { | ||
1309 | [C(RESULT_MISS)] = { 0x92, CNTR_ALL }, | ||
1310 | }, | ||
1311 | }, | ||
1312 | [C(ITLB)] = { | ||
1313 | [C(OP_READ)] = { | ||
1314 | [C(RESULT_MISS)] = { 0x1a, CNTR_ALL }, | ||
1315 | }, | ||
1316 | [C(OP_WRITE)] = { | ||
1317 | [C(RESULT_MISS)] = { 0x1a, CNTR_ALL }, | ||
1318 | }, | ||
1319 | }, | ||
1320 | [C(BPU)] = { | ||
1321 | /* Using the same code for *HW_BRANCH* */ | ||
1322 | [C(OP_READ)] = { | ||
1323 | [C(RESULT_ACCESS)] = { 0x94, CNTR_ALL }, | ||
1324 | [C(RESULT_MISS)] = { 0x9c, CNTR_ALL }, | ||
1325 | }, | ||
1326 | }, | ||
1327 | }; | ||
1328 | |||
1329 | static const struct mips_perf_event loongson3_cache_map3 | ||
1330 | [PERF_COUNT_HW_CACHE_MAX] | ||
1331 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1332 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1333 | [C(L1D)] = { | ||
1334 | /* | ||
1335 | * Like some other architectures (e.g. ARM), the performance | ||
1336 | * counters don't differentiate between read and write | ||
1337 | * accesses/misses, so this isn't strictly correct, but it's the | ||
1338 | * best we can do. Writes and reads get combined. | ||
1339 | */ | ||
1340 | [C(OP_READ)] = { | ||
1341 | [C(RESULT_ACCESS)] = { 0x1e, CNTR_ALL }, | ||
1342 | [C(RESULT_MISS)] = { 0x1f, CNTR_ALL }, | ||
1343 | }, | ||
1344 | [C(OP_PREFETCH)] = { | ||
1345 | [C(RESULT_ACCESS)] = { 0xaa, CNTR_ALL }, | ||
1346 | [C(RESULT_MISS)] = { 0xa9, CNTR_ALL }, | ||
1347 | }, | ||
1348 | }, | ||
1349 | [C(L1I)] = { | ||
1350 | [C(OP_READ)] = { | ||
1351 | [C(RESULT_ACCESS)] = { 0x1c, CNTR_ALL }, | ||
1352 | [C(RESULT_MISS)] = { 0x1d, CNTR_ALL }, | ||
1353 | }, | ||
1354 | }, | ||
1355 | [C(LL)] = { | ||
1356 | [C(OP_READ)] = { | ||
1357 | [C(RESULT_ACCESS)] = { 0x2e, CNTR_ALL }, | ||
1358 | [C(RESULT_MISS)] = { 0x2f, CNTR_ALL }, | ||
1359 | }, | ||
1360 | }, | ||
1361 | [C(DTLB)] = { | ||
1362 | [C(OP_READ)] = { | ||
1363 | [C(RESULT_ACCESS)] = { 0x14, CNTR_ALL }, | ||
1364 | [C(RESULT_MISS)] = { 0x1b, CNTR_ALL }, | ||
1365 | }, | ||
1366 | }, | ||
1367 | [C(ITLB)] = { | ||
1368 | [C(OP_READ)] = { | ||
1369 | [C(RESULT_MISS)] = { 0x1a, CNTR_ALL }, | ||
1370 | }, | ||
1371 | }, | ||
1372 | [C(BPU)] = { | ||
1373 | /* Using the same code for *HW_BRANCH* */ | ||
1374 | [C(OP_READ)] = { | ||
1375 | [C(RESULT_ACCESS)] = { 0x02, CNTR_ALL }, | ||
1376 | [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, | ||
1377 | }, | ||
1378 | }, | ||
1379 | }; | ||
1380 | |||
1381 | /* BMIPS5000 */ | ||
1382 | static const struct mips_perf_event bmips5000_cache_map | ||
1383 | [PERF_COUNT_HW_CACHE_MAX] | ||
1384 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1385 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1386 | [C(L1D)] = { | ||
1387 | /* | ||
1388 | * Like some other architectures (e.g. ARM), the performance | ||
1389 | * counters don't differentiate between read and write | ||
1390 | * accesses/misses, so this isn't strictly correct, but it's the | ||
1391 | * best we can do. Writes and reads get combined. | ||
1392 | */ | ||
1393 | [C(OP_READ)] = { | ||
1394 | [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T }, | ||
1395 | [C(RESULT_MISS)] = { 12, CNTR_ODD, T }, | ||
1396 | }, | ||
1397 | [C(OP_WRITE)] = { | ||
1398 | [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T }, | ||
1399 | [C(RESULT_MISS)] = { 12, CNTR_ODD, T }, | ||
1400 | }, | ||
1401 | }, | ||
1402 | [C(L1I)] = { | ||
1403 | [C(OP_READ)] = { | ||
1404 | [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T }, | ||
1405 | [C(RESULT_MISS)] = { 10, CNTR_ODD, T }, | ||
1406 | }, | ||
1407 | [C(OP_WRITE)] = { | ||
1408 | [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T }, | ||
1409 | [C(RESULT_MISS)] = { 10, CNTR_ODD, T }, | ||
1410 | }, | ||
1411 | [C(OP_PREFETCH)] = { | ||
1412 | [C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T }, | ||
1413 | /* | ||
1414 | * Note that MIPS has only "hit" events countable for | ||
1415 | * the prefetch operation. | ||
1416 | */ | ||
1417 | }, | ||
1418 | }, | ||
1419 | [C(LL)] = { | ||
1420 | [C(OP_READ)] = { | ||
1421 | [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P }, | ||
1422 | [C(RESULT_MISS)] = { 28, CNTR_ODD, P }, | ||
1423 | }, | ||
1424 | [C(OP_WRITE)] = { | ||
1425 | [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P }, | ||
1426 | [C(RESULT_MISS)] = { 28, CNTR_ODD, P }, | ||
1427 | }, | ||
1428 | }, | ||
1429 | [C(BPU)] = { | ||
1430 | /* Using the same code for *HW_BRANCH* */ | ||
1431 | [C(OP_READ)] = { | ||
1432 | [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, | ||
1433 | }, | ||
1434 | [C(OP_WRITE)] = { | ||
1435 | [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T }, | ||
1436 | }, | ||
1437 | }, | ||
1438 | }; | ||
1439 | |||
1440 | static const struct mips_perf_event octeon_cache_map | ||
1441 | [PERF_COUNT_HW_CACHE_MAX] | ||
1442 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1443 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1444 | [C(L1D)] = { | ||
1445 | [C(OP_READ)] = { | ||
1446 | [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL }, | ||
1447 | [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, | ||
1448 | }, | ||
1449 | [C(OP_WRITE)] = { | ||
1450 | [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL }, | ||
1451 | }, | ||
1452 | }, | ||
1453 | [C(L1I)] = { | ||
1454 | [C(OP_READ)] = { | ||
1455 | [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL }, | ||
1456 | }, | ||
1457 | [C(OP_PREFETCH)] = { | ||
1458 | [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL }, | ||
1459 | }, | ||
1460 | }, | ||
1461 | [C(DTLB)] = { | ||
1462 | /* | ||
1463 | * Only general DTLB misses are counted, so use the same event | ||
1464 | * for read and write. | ||
1465 | */ | ||
1466 | [C(OP_READ)] = { | ||
1467 | [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, | ||
1468 | }, | ||
1469 | [C(OP_WRITE)] = { | ||
1470 | [C(RESULT_MISS)] = { 0x35, CNTR_ALL }, | ||
1471 | }, | ||
1472 | }, | ||
1473 | [C(ITLB)] = { | ||
1474 | [C(OP_READ)] = { | ||
1475 | [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, | ||
1476 | }, | ||
1477 | }, | ||
1478 | }; | ||
1479 | |||
1480 | static const struct mips_perf_event xlp_cache_map | ||
1481 | [PERF_COUNT_HW_CACHE_MAX] | ||
1482 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
1483 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
1484 | [C(L1D)] = { | ||
1485 | [C(OP_READ)] = { | ||
1486 | [C(RESULT_ACCESS)] = { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */ | ||
1487 | [C(RESULT_MISS)] = { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */ | ||
1488 | }, | ||
1489 | [C(OP_WRITE)] = { | ||
1490 | [C(RESULT_ACCESS)] = { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */ | ||
1491 | [C(RESULT_MISS)] = { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */ | ||
1492 | }, | ||
1493 | }, | ||
1494 | [C(L1I)] = { | ||
1495 | [C(OP_READ)] = { | ||
1496 | [C(RESULT_ACCESS)] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */ | ||
1497 | [C(RESULT_MISS)] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */ | ||
1498 | }, | ||
1499 | }, | ||
1500 | [C(LL)] = { | ||
1501 | [C(OP_READ)] = { | ||
1502 | [C(RESULT_ACCESS)] = { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */ | ||
1503 | [C(RESULT_MISS)] = { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */ | ||
1504 | }, | ||
1505 | [C(OP_WRITE)] = { | ||
1506 | [C(RESULT_ACCESS)] = { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */ | ||
1507 | [C(RESULT_MISS)] = { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */ | ||
1508 | }, | ||
1509 | }, | ||
1510 | [C(DTLB)] = { | ||
1511 | /* | ||
1512 | * Only general DTLB misses are counted, so use the same event | ||
1513 | * for read and write. | ||
1514 | */ | ||
1515 | [C(OP_READ)] = { | ||
1516 | [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ | ||
1517 | }, | ||
1518 | [C(OP_WRITE)] = { | ||
1519 | [C(RESULT_MISS)] = { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */ | ||
1520 | }, | ||
1521 | }, | ||
1522 | [C(ITLB)] = { | ||
1523 | [C(OP_READ)] = { | ||
1524 | [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ | ||
1525 | }, | ||
1526 | [C(OP_WRITE)] = { | ||
1527 | [C(RESULT_MISS)] = { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */ | ||
1528 | }, | ||
1529 | }, | ||
1530 | [C(BPU)] = { | ||
1531 | [C(OP_READ)] = { | ||
1532 | [C(RESULT_MISS)] = { 0x25, CNTR_ALL }, | ||
1533 | }, | ||
1534 | }, | ||
1535 | }; | ||
1536 | |||
1537 | static int __hw_perf_event_init(struct perf_event *event) | ||
1538 | { | ||
1539 | struct perf_event_attr *attr = &event->attr; | ||
1540 | struct hw_perf_event *hwc = &event->hw; | ||
1541 | const struct mips_perf_event *pev; | ||
1542 | int err; | ||
1543 | |||
1544 | /* Return the MIPS event descriptor for this generic perf event. */ | ||
1545 | if (PERF_TYPE_HARDWARE == event->attr.type) { | ||
1546 | if (event->attr.config >= PERF_COUNT_HW_MAX) | ||
1547 | return -EINVAL; | ||
1548 | pev = mipspmu_map_general_event(event->attr.config); | ||
1549 | } else if (PERF_TYPE_HW_CACHE == event->attr.type) { | ||
1550 | pev = mipspmu_map_cache_event(event->attr.config); | ||
1551 | } else if (PERF_TYPE_RAW == event->attr.type) { | ||
1552 | /* We are working on the global raw event. */ | ||
1553 | mutex_lock(&raw_event_mutex); | ||
1554 | pev = mipspmu.map_raw_event(event->attr.config); | ||
1555 | } else { | ||
1556 | /* The event type is not (yet) supported. */ | ||
1557 | return -EOPNOTSUPP; | ||
1558 | } | ||
1559 | |||
1560 | if (IS_ERR(pev)) { | ||
1561 | if (PERF_TYPE_RAW == event->attr.type) | ||
1562 | mutex_unlock(&raw_event_mutex); | ||
1563 | return PTR_ERR(pev); | ||
1564 | } | ||
1565 | |||
1566 | /* | ||
1567 | * We allow maximum flexibility in how each individual counter shared | ||
1568 | * by the single CPU operates (the mode exclusion and the range). | ||
1569 | */ | ||
1570 | hwc->config_base = MIPS_PERFCTRL_IE; | ||
1571 | |||
1572 | hwc->event_base = mipspmu_perf_event_encode(pev); | ||
1573 | if (PERF_TYPE_RAW == event->attr.type) | ||
1574 | mutex_unlock(&raw_event_mutex); | ||
1575 | |||
1576 | if (!attr->exclude_user) | ||
1577 | hwc->config_base |= MIPS_PERFCTRL_U; | ||
1578 | if (!attr->exclude_kernel) { | ||
1579 | hwc->config_base |= MIPS_PERFCTRL_K; | ||
1580 | /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */ | ||
1581 | hwc->config_base |= MIPS_PERFCTRL_EXL; | ||
1582 | } | ||
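/*
 * There is no separate hypervisor mode to exclude here; the
 * supervisor-mode bit stands in for perf's exclude_hv control.
 */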
1583 | if (!attr->exclude_hv) | ||
1584 | hwc->config_base |= MIPS_PERFCTRL_S; | ||
1585 | |||
1586 | hwc->config_base &= M_PERFCTL_CONFIG_MASK; | ||
1587 | /* | ||
1588 | * The event may belong to another CPU. We do not assign it a | ||
1589 | * local counter for now. | ||
1590 | */ | ||
1591 | hwc->idx = -1; | ||
1592 | hwc->config = 0; | ||
1593 | |||
1594 | if (!hwc->sample_period) { | ||
1595 | hwc->sample_period = mipspmu.max_period; | ||
1596 | hwc->last_period = hwc->sample_period; | ||
1597 | local64_set(&hwc->period_left, hwc->sample_period); | ||
1598 | } | ||
1599 | |||
1600 | err = 0; | ||
1601 | if (event->group_leader != event) | ||
1602 | err = validate_group(event); | ||
1603 | |||
1604 | event->destroy = hw_perf_event_destroy; | ||
1605 | |||
1606 | if (err) | ||
1607 | event->destroy(event); | ||
1608 | |||
1609 | return err; | ||
1610 | } | ||
1611 | |||
1612 | static void pause_local_counters(void) | ||
1613 | { | ||
1614 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
1615 | int ctr = mipspmu.num_counters; | ||
1616 | unsigned long flags; | ||
1617 | |||
1618 | local_irq_save(flags); | ||
1619 | do { | ||
1620 | ctr--; | ||
1621 | cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr); | ||
1622 | mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] & | ||
1623 | ~M_PERFCTL_COUNT_EVENT_WHENEVER); | ||
1624 | } while (ctr > 0); | ||
1625 | local_irq_restore(flags); | ||
1626 | } | ||
1627 | |||
1628 | static void resume_local_counters(void) | ||
1629 | { | ||
1630 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
1631 | int ctr = mipspmu.num_counters; | ||
1632 | |||
1633 | do { | ||
1634 | ctr--; | ||
1635 | mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]); | ||
1636 | } while (ctr > 0); | ||
1637 | } | ||
1638 | |||
1639 | static int mipsxx_pmu_handle_shared_irq(void) | ||
1640 | { | ||
1641 | struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events); | ||
1642 | struct perf_sample_data data; | ||
1643 | unsigned int counters = mipspmu.num_counters; | ||
1644 | u64 counter; | ||
1645 | int n, handled = IRQ_NONE; | ||
1646 | struct pt_regs *regs; | ||
1647 | |||
1648 | if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI)) | ||
1649 | return handled; | ||
1650 | /* | ||
1651 | * First pause the local counters, so that while we hold the lock | ||
1652 | * here the counters are all stopped. If the lock is taken due to | ||
1653 | * perf_disable(), the timer interrupt handler is simply delayed. | ||
1654 | * | ||
1655 | * See also mipsxx_pmu_start(). | ||
1656 | */ | ||
1657 | pause_local_counters(); | ||
1658 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | ||
1659 | read_lock(&pmuint_rwlock); | ||
1660 | #endif | ||
1661 | |||
1662 | regs = get_irq_regs(); | ||
1663 | |||
1664 | perf_sample_data_init(&data, 0, 0); | ||
1665 | |||
1666 | for (n = counters - 1; n >= 0; n--) { | ||
1667 | if (!test_bit(n, cpuc->used_mask)) | ||
1668 | continue; | ||
1669 | |||
1670 | counter = mipspmu.read_counter(n); | ||
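/* Only a counter whose top (overflow) bit is set has overflowed */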
1671 | if (!(counter & mipspmu.overflow)) | ||
1672 | continue; | ||
1673 | |||
1674 | handle_associated_event(cpuc, n, &data, regs); | ||
1675 | handled = IRQ_HANDLED; | ||
1676 | } | ||
1677 | |||
1678 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | ||
1679 | read_unlock(&pmuint_rwlock); | ||
1680 | #endif | ||
1681 | resume_local_counters(); | ||
1682 | |||
1683 | /* | ||
1684 | * Do all the work for the pending perf events. We can do this | ||
1685 | * in here because the performance counter interrupt is a regular | ||
1686 | * interrupt, not an NMI. | ||
1687 | */ | ||
1688 | if (handled == IRQ_HANDLED) | ||
1689 | irq_work_run(); | ||
1690 | |||
1691 | return handled; | ||
1692 | } | ||
1693 | |||
1694 | static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev) | ||
1695 | { | ||
1696 | return mipsxx_pmu_handle_shared_irq(); | ||
1697 | } | ||
1698 | |||
1699 | /* 24K */ | ||
1700 | #define IS_BOTH_COUNTERS_24K_EVENT(b) \ | ||
1701 | ((b) == 0 || (b) == 1 || (b) == 11) | ||
1702 | |||
1703 | /* 34K */ | ||
1704 | #define IS_BOTH_COUNTERS_34K_EVENT(b) \ | ||
1705 | ((b) == 0 || (b) == 1 || (b) == 11) | ||
1706 | #ifdef CONFIG_MIPS_MT_SMP | ||
1707 | #define IS_RANGE_P_34K_EVENT(r, b) \ | ||
1708 | ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \ | ||
1709 | (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \ | ||
1710 | (r) == 176 || ((b) >= 50 && (b) <= 55) || \ | ||
1711 | ((b) >= 64 && (b) <= 67)) | ||
1712 | #define IS_RANGE_V_34K_EVENT(r) ((r) == 47) | ||
1713 | #endif | ||
1714 | |||
1715 | /* 74K */ | ||
1716 | #define IS_BOTH_COUNTERS_74K_EVENT(b) \ | ||
1717 | ((b) == 0 || (b) == 1) | ||
1718 | |||
1719 | /* proAptiv */ | ||
1720 | #define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \ | ||
1721 | ((b) == 0 || (b) == 1) | ||
1722 | /* P5600 */ | ||
1723 | #define IS_BOTH_COUNTERS_P5600_EVENT(b) \ | ||
1724 | ((b) == 0 || (b) == 1) | ||
1725 | |||
1726 | /* 1004K */ | ||
1727 | #define IS_BOTH_COUNTERS_1004K_EVENT(b) \ | ||
1728 | ((b) == 0 || (b) == 1 || (b) == 11) | ||
1729 | #ifdef CONFIG_MIPS_MT_SMP | ||
1730 | #define IS_RANGE_P_1004K_EVENT(r, b) \ | ||
1731 | ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \ | ||
1732 | (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \ | ||
1733 | (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \ | ||
1734 | (r) == 188 || (b) == 61 || (b) == 62 || \ | ||
1735 | ((b) >= 64 && (b) <= 67)) | ||
1736 | #define IS_RANGE_V_1004K_EVENT(r) ((r) == 47) | ||
1737 | #endif | ||
1738 | |||
1739 | /* interAptiv */ | ||
1740 | #define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \ | ||
1741 | ((b) == 0 || (b) == 1 || (b) == 11) | ||
1742 | #ifdef CONFIG_MIPS_MT_SMP | ||
1743 | /* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */ | ||
1744 | #define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \ | ||
1745 | ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \ | ||
1746 | (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \ | ||
1747 | (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \ | ||
1748 | (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \ | ||
1749 | ((b) >= 64 && (b) <= 67)) | ||
1750 | #define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175) | ||
1751 | #endif | ||
1752 | |||
1753 | /* BMIPS5000 */ | ||
1754 | #define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \ | ||
1755 | ((b) == 0 || (b) == 1) | ||
1756 | |||
1758 | /* | ||
1759 | * For most cores the user can use raw events 0-255, where 0-127 select | ||
1760 | * events on the even counters and 128-255 select events on the odd | ||
1761 | * counters; bit 7 acts as the even/odd bank selector. For example, to | ||
1762 | * count Event Num 15 on an odd counter (as listed in the user manual), | ||
1763 | * add 128 to 15 and pass 143 (0x8f) as the event config. | ||
1764 | * | ||
1765 | * Some newer cores have even more events, in which case the user can | ||
1766 | * use raw events 0-511, where 0-255 select events on the even counters | ||
1767 | * and 256-511 select events on the odd counters, so bit 8 acts as the | ||
1768 | * even/odd bank selector. | ||
1769 | */ | ||
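/*
 * Illustrative userspace sketch (an assumption for clarity, not part of
 * this file): requesting raw event 15 on an odd counter, i.e. config
 * 0x8f = 128 + 15 as described above, via the perf_event_open syscall:
 *
 *	struct perf_event_attr attr = {
 *		.type	= PERF_TYPE_RAW,
 *		.size	= sizeof(attr),
 *		.config	= 0x8f,
 *	};
 *	int fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
 */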
1770 | static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config) | ||
1771 | { | ||
1772 | /* currently most cores have 7-bit event numbers */ | ||
1773 | int pmu_type; | ||
1774 | unsigned int raw_id = config & 0xff; | ||
1775 | unsigned int base_id = raw_id & 0x7f; | ||
1776 | |||
1777 | switch (current_cpu_type()) { | ||
1778 | case CPU_24K: | ||
1779 | if (IS_BOTH_COUNTERS_24K_EVENT(base_id)) | ||
1780 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1781 | else | ||
1782 | raw_event.cntr_mask = | ||
1783 | raw_id > 127 ? CNTR_ODD : CNTR_EVEN; | ||
1784 | #ifdef CONFIG_MIPS_MT_SMP | ||
1785 | /* | ||
1786 | * This actually does nothing: non-multithreading | ||
1787 | * CPUs never check or use the range. | ||
1788 | */ | ||
1789 | raw_event.range = P; | ||
1790 | #endif | ||
1791 | break; | ||
1792 | case CPU_34K: | ||
1793 | if (IS_BOTH_COUNTERS_34K_EVENT(base_id)) | ||
1794 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1795 | else | ||
1796 | raw_event.cntr_mask = | ||
1797 | raw_id > 127 ? CNTR_ODD : CNTR_EVEN; | ||
1798 | #ifdef CONFIG_MIPS_MT_SMP | ||
1799 | if (IS_RANGE_P_34K_EVENT(raw_id, base_id)) | ||
1800 | raw_event.range = P; | ||
1801 | else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id))) | ||
1802 | raw_event.range = V; | ||
1803 | else | ||
1804 | raw_event.range = T; | ||
1805 | #endif | ||
1806 | break; | ||
1807 | case CPU_74K: | ||
1808 | case CPU_1074K: | ||
1809 | if (IS_BOTH_COUNTERS_74K_EVENT(base_id)) | ||
1810 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1811 | else | ||
1812 | raw_event.cntr_mask = | ||
1813 | raw_id > 127 ? CNTR_ODD : CNTR_EVEN; | ||
1814 | #ifdef CONFIG_MIPS_MT_SMP | ||
1815 | raw_event.range = P; | ||
1816 | #endif | ||
1817 | break; | ||
1818 | case CPU_PROAPTIV: | ||
1819 | if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id)) | ||
1820 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1821 | else | ||
1822 | raw_event.cntr_mask = | ||
1823 | raw_id > 127 ? CNTR_ODD : CNTR_EVEN; | ||
1824 | #ifdef CONFIG_MIPS_MT_SMP | ||
1825 | raw_event.range = P; | ||
1826 | #endif | ||
1827 | break; | ||
1828 | case CPU_P5600: | ||
1829 | case CPU_P6600: | ||
1830 | /* 8-bit event numbers */ | ||
1831 | raw_id = config & 0x1ff; | ||
1832 | base_id = raw_id & 0xff; | ||
1833 | if (IS_BOTH_COUNTERS_P5600_EVENT(base_id)) | ||
1834 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1835 | else | ||
1836 | raw_event.cntr_mask = | ||
1837 | raw_id > 255 ? CNTR_ODD : CNTR_EVEN; | ||
1838 | #ifdef CONFIG_MIPS_MT_SMP | ||
1839 | raw_event.range = P; | ||
1840 | #endif | ||
1841 | break; | ||
1842 | case CPU_I6400: | ||
1843 | case CPU_I6500: | ||
1844 | /* 8-bit event numbers */ | ||
1845 | base_id = config & 0xff; | ||
1846 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1847 | break; | ||
1848 | case CPU_1004K: | ||
1849 | if (IS_BOTH_COUNTERS_1004K_EVENT(base_id)) | ||
1850 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1851 | else | ||
1852 | raw_event.cntr_mask = | ||
1853 | raw_id > 127 ? CNTR_ODD : CNTR_EVEN; | ||
1854 | #ifdef CONFIG_MIPS_MT_SMP | ||
1855 | if (IS_RANGE_P_1004K_EVENT(raw_id, base_id)) | ||
1856 | raw_event.range = P; | ||
1857 | else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id))) | ||
1858 | raw_event.range = V; | ||
1859 | else | ||
1860 | raw_event.range = T; | ||
1861 | #endif | ||
1862 | break; | ||
1863 | case CPU_INTERAPTIV: | ||
1864 | if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id)) | ||
1865 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1866 | else | ||
1867 | raw_event.cntr_mask = | ||
1868 | raw_id > 127 ? CNTR_ODD : CNTR_EVEN; | ||
1869 | #ifdef CONFIG_MIPS_MT_SMP | ||
1870 | if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id)) | ||
1871 | raw_event.range = P; | ||
1872 | else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id))) | ||
1873 | raw_event.range = V; | ||
1874 | else | ||
1875 | raw_event.range = T; | ||
1876 | #endif | ||
1877 | break; | ||
1878 | case CPU_BMIPS5000: | ||
1879 | if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id)) | ||
1880 | raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD; | ||
1881 | else | ||
1882 | raw_event.cntr_mask = | ||
1883 | raw_id > 127 ? CNTR_ODD : CNTR_EVEN; | ||
1884 | break; | ||
1885 | case CPU_LOONGSON64: | ||
1886 | pmu_type = get_loongson3_pmu_type(); | ||
1887 | |||
1888 | switch (pmu_type) { | ||
1889 | case LOONGSON_PMU_TYPE1: | ||
1890 | raw_event.cntr_mask = | ||
1891 | raw_id > 127 ? CNTR_ODD : CNTR_EVEN; | ||
1892 | break; | ||
1893 | case LOONGSON_PMU_TYPE2: | ||
1894 | base_id = config & 0x3ff; | ||
1895 | raw_event.cntr_mask = CNTR_ALL; | ||
1896 | |||
1897 | if ((base_id >= 1 && base_id < 28) || | ||
1898 | (base_id >= 64 && base_id < 90) || | ||
1899 | (base_id >= 128 && base_id < 164) || | ||
1900 | (base_id >= 192 && base_id < 200) || | ||
1901 | (base_id >= 256 && base_id < 275) || | ||
1902 | (base_id >= 320 && base_id < 361) || | ||
1903 | (base_id >= 384 && base_id < 574)) | ||
1904 | break; | ||
1905 | |||
1906 | return ERR_PTR(-EOPNOTSUPP); | ||
1907 | case LOONGSON_PMU_TYPE3: | ||
1908 | base_id = raw_id; | ||
1909 | raw_event.cntr_mask = CNTR_ALL; | ||
1910 | break; | ||
1911 | } | ||
1912 | break; | ||
1913 | } | ||
1914 | |||
1915 | raw_event.event_id = base_id; | ||
1916 | |||
1917 | return &raw_event; | ||
1918 | } | ||
1919 | |||
1920 | static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config) | ||
1921 | { | ||
1922 | unsigned int raw_id = config & 0xff; | ||
1923 | unsigned int base_id = raw_id & 0x7f; | ||
1924 | |||
1926 | raw_event.cntr_mask = CNTR_ALL; | ||
1927 | raw_event.event_id = base_id; | ||
1928 | |||
1929 | if (current_cpu_type() == CPU_CAVIUM_OCTEON2) { | ||
1930 | if (base_id > 0x42) | ||
1931 | return ERR_PTR(-EOPNOTSUPP); | ||
1932 | } else { | ||
1933 | if (base_id > 0x3a) | ||
1934 | return ERR_PTR(-EOPNOTSUPP); | ||
1935 | } | ||
1936 | |||
1937 | switch (base_id) { | ||
1938 | case 0x00: | ||
1939 | case 0x0f: | ||
1940 | case 0x1e: | ||
1941 | case 0x1f: | ||
1942 | case 0x2f: | ||
1943 | case 0x34: | ||
1944 | case 0x3b ... 0x3f: | ||
1945 | return ERR_PTR(-EOPNOTSUPP); | ||
1946 | default: | ||
1947 | break; | ||
1948 | } | ||
1949 | |||
1950 | return &raw_event; | ||
1951 | } | ||
1952 | |||
1953 | static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config) | ||
1954 | { | ||
1955 | unsigned int raw_id = config & 0xff; | ||
1956 | |||
1957 | /* Only 1-63 are defined */ | ||
1958 | if ((raw_id < 0x01) || (raw_id > 0x3f)) | ||
1959 | return ERR_PTR(-EOPNOTSUPP); | ||
1960 | |||
1961 | raw_event.cntr_mask = CNTR_ALL; | ||
1962 | raw_event.event_id = raw_id; | ||
1963 | |||
1964 | return &raw_event; | ||
1965 | } | ||
1966 | |||
1967 | static int __init | ||
1968 | init_hw_perf_events(void) | ||
1969 | { | ||
1970 | int counters, irq, pmu_type; | ||
1971 | |||
1972 | pr_info("Performance counters: "); | ||
1973 | |||
1974 | counters = n_counters(); | ||
1975 | if (counters == 0) { | ||
1976 | pr_cont("No available PMU.\n"); | ||
1977 | return -ENODEV; | ||
1978 | } | ||
1979 | |||
1980 | #ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS | ||
1981 | if (!cpu_has_mipsmt_pertccounters) | ||
1982 | counters = counters_total_to_per_cpu(counters); | ||
1983 | #endif | ||
1984 | |||
1985 | if (get_c0_perfcount_int) | ||
1986 | irq = get_c0_perfcount_int(); | ||
1987 | else if (cp0_perfcount_irq >= 0) | ||
1988 | irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; | ||
1989 | else | ||
1990 | irq = -1; | ||
1991 | |||
1992 | mipspmu.map_raw_event = mipsxx_pmu_map_raw_event; | ||
1993 | |||
1994 | switch (current_cpu_type()) { | ||
1995 | case CPU_24K: | ||
1996 | mipspmu.name = "mips/24K"; | ||
1997 | mipspmu.general_event_map = &mipsxxcore_event_map; | ||
1998 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | ||
1999 | break; | ||
2000 | case CPU_34K: | ||
2001 | mipspmu.name = "mips/34K"; | ||
2002 | mipspmu.general_event_map = &mipsxxcore_event_map; | ||
2003 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | ||
2004 | break; | ||
2005 | case CPU_74K: | ||
2006 | mipspmu.name = "mips/74K"; | ||
2007 | mipspmu.general_event_map = &mipsxxcore_event_map2; | ||
2008 | mipspmu.cache_event_map = &mipsxxcore_cache_map2; | ||
2009 | break; | ||
2010 | case CPU_PROAPTIV: | ||
2011 | mipspmu.name = "mips/proAptiv"; | ||
2012 | mipspmu.general_event_map = &mipsxxcore_event_map2; | ||
2013 | mipspmu.cache_event_map = &mipsxxcore_cache_map2; | ||
2014 | break; | ||
2015 | case CPU_P5600: | ||
2016 | mipspmu.name = "mips/P5600"; | ||
2017 | mipspmu.general_event_map = &mipsxxcore_event_map2; | ||
2018 | mipspmu.cache_event_map = &mipsxxcore_cache_map2; | ||
2019 | break; | ||
2020 | case CPU_P6600: | ||
2021 | mipspmu.name = "mips/P6600"; | ||
2022 | mipspmu.general_event_map = &mipsxxcore_event_map2; | ||
2023 | mipspmu.cache_event_map = &mipsxxcore_cache_map2; | ||
2024 | break; | ||
2025 | case CPU_I6400: | ||
2026 | mipspmu.name = "mips/I6400"; | ||
2027 | mipspmu.general_event_map = &i6x00_event_map; | ||
2028 | mipspmu.cache_event_map = &i6x00_cache_map; | ||
2029 | break; | ||
2030 | case CPU_I6500: | ||
2031 | mipspmu.name = "mips/I6500"; | ||
2032 | mipspmu.general_event_map = &i6x00_event_map; | ||
2033 | mipspmu.cache_event_map = &i6x00_cache_map; | ||
2034 | break; | ||
2035 | case CPU_1004K: | ||
2036 | mipspmu.name = "mips/1004K"; | ||
2037 | mipspmu.general_event_map = &mipsxxcore_event_map; | ||
2038 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | ||
2039 | break; | ||
2040 | case CPU_1074K: | ||
2041 | mipspmu.name = "mips/1074K"; | ||
2042 | mipspmu.general_event_map = &mipsxxcore_event_map; | ||
2043 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | ||
2044 | break; | ||
2045 | case CPU_INTERAPTIV: | ||
2046 | mipspmu.name = "mips/interAptiv"; | ||
2047 | mipspmu.general_event_map = &mipsxxcore_event_map; | ||
2048 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | ||
2049 | break; | ||
2050 | case CPU_LOONGSON32: | ||
2051 | mipspmu.name = "mips/loongson1"; | ||
2052 | mipspmu.general_event_map = &mipsxxcore_event_map; | ||
2053 | mipspmu.cache_event_map = &mipsxxcore_cache_map; | ||
2054 | break; | ||
2055 | case CPU_LOONGSON64: | ||
2056 | mipspmu.name = "mips/loongson3"; | ||
2057 | pmu_type = get_loongson3_pmu_type(); | ||
2058 | |||
2059 | switch (pmu_type) { | ||
2060 | case LOONGSON_PMU_TYPE1: | ||
2061 | counters = 2; | ||
2062 | mipspmu.general_event_map = &loongson3_event_map1; | ||
2063 | mipspmu.cache_event_map = &loongson3_cache_map1; | ||
2064 | break; | ||
2065 | case LOONGSON_PMU_TYPE2: | ||
2066 | counters = 4; | ||
2067 | mipspmu.general_event_map = &loongson3_event_map2; | ||
2068 | mipspmu.cache_event_map = &loongson3_cache_map2; | ||
2069 | break; | ||
2070 | case LOONGSON_PMU_TYPE3: | ||
2071 | counters = 4; | ||
2072 | mipspmu.general_event_map = &loongson3_event_map3; | ||
2073 | mipspmu.cache_event_map = &loongson3_cache_map3; | ||
2074 | break; | ||
2075 | } | ||
2076 | break; | ||
2077 | case CPU_CAVIUM_OCTEON: | ||
2078 | case CPU_CAVIUM_OCTEON_PLUS: | ||
2079 | case CPU_CAVIUM_OCTEON2: | ||
2080 | mipspmu.name = "octeon"; | ||
2081 | mipspmu.general_event_map = &octeon_event_map; | ||
2082 | mipspmu.cache_event_map = &octeon_cache_map; | ||
2083 | mipspmu.map_raw_event = octeon_pmu_map_raw_event; | ||
2084 | break; | ||
2085 | case CPU_BMIPS5000: | ||
2086 | mipspmu.name = "BMIPS5000"; | ||
2087 | mipspmu.general_event_map = &bmips5000_event_map; | ||
2088 | mipspmu.cache_event_map = &bmips5000_cache_map; | ||
2089 | break; | ||
2090 | case CPU_XLP: | ||
2091 | mipspmu.name = "xlp"; | ||
2092 | mipspmu.general_event_map = &xlp_event_map; | ||
2093 | mipspmu.cache_event_map = &xlp_cache_map; | ||
2094 | mipspmu.map_raw_event = xlp_pmu_map_raw_event; | ||
2095 | break; | ||
2096 | default: | ||
2097 | pr_cont("Either the hardware does not support performance " | ||
2098 | "counters, or support is not yet implemented.\n"); | ||
2099 | return -ENODEV; | ||
2100 | } | ||
2101 | |||
2102 | mipspmu.num_counters = counters; | ||
2103 | mipspmu.irq = irq; | ||
2104 | |||
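/*
 * MIPS performance counters raise their interrupt while the top bit of
 * the counter is set, so an N-bit counter holds only N-1 bits of valid
 * count: e.g. the 32-bit case below uses max_period = 2^31 - 1 with the
 * overflow flag at bit 31.
 */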
2105 | if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) { | ||
2106 | if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) { | ||
2107 | counter_bits = 48; | ||
2108 | mipspmu.max_period = (1ULL << 47) - 1; | ||
2109 | mipspmu.valid_count = (1ULL << 47) - 1; | ||
2110 | mipspmu.overflow = 1ULL << 47; | ||
2111 | } else { | ||
2112 | counter_bits = 64; | ||
2113 | mipspmu.max_period = (1ULL << 63) - 1; | ||
2114 | mipspmu.valid_count = (1ULL << 63) - 1; | ||
2115 | mipspmu.overflow = 1ULL << 63; | ||
2116 | } | ||
2117 | mipspmu.read_counter = mipsxx_pmu_read_counter_64; | ||
2118 | mipspmu.write_counter = mipsxx_pmu_write_counter_64; | ||
2119 | } else { | ||
2120 | counter_bits = 32; | ||
2121 | mipspmu.max_period = (1ULL << 31) - 1; | ||
2122 | mipspmu.valid_count = (1ULL << 31) - 1; | ||
2123 | mipspmu.overflow = 1ULL << 31; | ||
2124 | mipspmu.read_counter = mipsxx_pmu_read_counter; | ||
2125 | mipspmu.write_counter = mipsxx_pmu_write_counter; | ||
2126 | } | ||
2127 | |||
2128 | on_each_cpu(reset_counters, (void *)(long)counters, 1); | ||
2129 | |||
2130 | pr_cont("%s PMU enabled, %d %d-bit counters available to each " | ||
2131 | "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq, | ||
2132 | irq < 0 ? " (shared with timer interrupt)" : ""); | ||
2133 | |||
2134 | perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW); | ||
2135 | |||
2136 | return 0; | ||
2137 | } | ||
2138 | early_initcall(init_hw_perf_events); | ||
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c new file mode 100644 index 000000000..9bf60d7d4 --- /dev/null +++ b/arch/mips/kernel/pm-cps.c | |||
@@ -0,0 +1,738 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2014 Imagination Technologies | ||
4 | * Author: Paul Burton <paul.burton@mips.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/cpuhotplug.h> | ||
8 | #include <linux/init.h> | ||
9 | #include <linux/percpu.h> | ||
10 | #include <linux/slab.h> | ||
11 | #include <linux/suspend.h> | ||
12 | |||
13 | #include <asm/asm-offsets.h> | ||
14 | #include <asm/cacheflush.h> | ||
15 | #include <asm/cacheops.h> | ||
16 | #include <asm/idle.h> | ||
17 | #include <asm/mips-cps.h> | ||
18 | #include <asm/mipsmtregs.h> | ||
19 | #include <asm/pm.h> | ||
20 | #include <asm/pm-cps.h> | ||
21 | #include <asm/smp-cps.h> | ||
22 | #include <asm/uasm.h> | ||
23 | |||
24 | /* | ||
25 | * cps_nc_entry_fn - type of a generated non-coherent state entry function | ||
26 | * @online: the count of online coupled VPEs | ||
27 | * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count | ||
28 | * | ||
29 | * The code entering & exiting non-coherent states is generated at runtime | ||
30 | * using uasm, in order to ensure that the compiler cannot insert a stray | ||
31 | * memory access at an unfortunate time and to allow the generation of optimal | ||
32 | * core-specific code, particularly for cache routines. If coupled_coherence | ||
33 | * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state, | ||
34 | * the function returns the number of VPEs that were in the wait state when | ||
35 | * this VPE left it. It returns garbage if coupled_coherence is zero or this | ||
36 | * is not the entry function for CPS_PM_NC_WAIT. | ||
37 | */ | ||
38 | typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count); | ||
39 | |||
40 | /* | ||
41 | * The entry point of the generated non-coherent idle state entry/exit | ||
42 | * functions. Actually per-core rather than per-CPU. | ||
43 | */ | ||
44 | static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT], | ||
45 | nc_asm_enter); | ||
46 | |||
47 | /* Bitmap indicating which states are supported by the system */ | ||
48 | static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT); | ||
49 | |||
50 | /* | ||
51 | * Indicates the number of coupled VPEs ready to operate in a non-coherent | ||
52 | * state. Actually per-core rather than per-CPU. | ||
53 | */ | ||
54 | static DEFINE_PER_CPU_ALIGNED(u32*, ready_count); | ||
55 | |||
56 | /* Indicates online CPUs coupled with the current CPU */ | ||
57 | static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled); | ||
58 | |||
59 | /* | ||
60 | * Used to synchronize entry to deep idle states. Actually per-core rather | ||
61 | * than per-CPU. | ||
62 | */ | ||
63 | static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier); | ||
64 | |||
65 | /* Saved CPU state across the CPS_PM_POWER_GATED state */ | ||
66 | DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state); | ||
67 | |||
68 | /* A somewhat arbitrary number of labels & relocs for uasm */ | ||
69 | static struct uasm_label labels[32]; | ||
70 | static struct uasm_reloc relocs[32]; | ||
71 | |||
72 | enum mips_reg { | ||
73 | zero, at, v0, v1, a0, a1, a2, a3, | ||
74 | t0, t1, t2, t3, t4, t5, t6, t7, | ||
75 | s0, s1, s2, s3, s4, s5, s6, s7, | ||
76 | t8, t9, k0, k1, gp, sp, fp, ra, | ||
77 | }; | ||
78 | |||
79 | bool cps_pm_support_state(enum cps_pm_state state) | ||
80 | { | ||
81 | return test_bit(state, state_support); | ||
82 | } | ||
83 | |||
84 | static void coupled_barrier(atomic_t *a, unsigned online) | ||
85 | { | ||
86 | /* | ||
87 | * This function is effectively the same as | ||
88 | * cpuidle_coupled_parallel_barrier, which can't be used here since | ||
89 | * there's no cpuidle device. | ||
90 | */ | ||
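/*
 * The barrier runs in two phases: each VPE increments the counter on
 * arrival and spins until all 'online' VPEs have arrived, then
 * increments it again on the way out. The last VPE to leave (count ==
 * online * 2) resets the counter for reuse; the others spin until that
 * reset drops the count back below 'online'.
 */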
91 | |||
92 | if (!coupled_coherence) | ||
93 | return; | ||
94 | |||
95 | smp_mb__before_atomic(); | ||
96 | atomic_inc(a); | ||
97 | |||
98 | while (atomic_read(a) < online) | ||
99 | cpu_relax(); | ||
100 | |||
101 | if (atomic_inc_return(a) == online * 2) { | ||
102 | atomic_set(a, 0); | ||
103 | return; | ||
104 | } | ||
105 | |||
106 | while (atomic_read(a) > online) | ||
107 | cpu_relax(); | ||
108 | } | ||
109 | |||
110 | int cps_pm_enter_state(enum cps_pm_state state) | ||
111 | { | ||
112 | unsigned cpu = smp_processor_id(); | ||
113 | unsigned core = cpu_core(¤t_cpu_data); | ||
114 | unsigned online, left; | ||
115 | cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled); | ||
116 | u32 *core_ready_count, *nc_core_ready_count; | ||
117 | void *nc_addr; | ||
118 | cps_nc_entry_fn entry; | ||
119 | struct core_boot_config *core_cfg; | ||
120 | struct vpe_boot_config *vpe_cfg; | ||
121 | |||
122 | /* Check that there is an entry function for this state */ | ||
123 | entry = per_cpu(nc_asm_enter, core)[state]; | ||
124 | if (!entry) | ||
125 | return -EINVAL; | ||
126 | |||
127 | /* Calculate which coupled CPUs (VPEs) are online */ | ||
128 | #if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6) | ||
129 | if (cpu_online(cpu)) { | ||
130 | cpumask_and(coupled_mask, cpu_online_mask, | ||
131 | &cpu_sibling_map[cpu]); | ||
132 | online = cpumask_weight(coupled_mask); | ||
133 | cpumask_clear_cpu(cpu, coupled_mask); | ||
134 | } else | ||
135 | #endif | ||
136 | { | ||
137 | cpumask_clear(coupled_mask); | ||
138 | online = 1; | ||
139 | } | ||
140 | |||
141 | /* Setup the VPE to run mips_cps_pm_restore when started again */ | ||
142 | if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { | ||
143 | /* Power gating relies upon CPS SMP */ | ||
144 | if (!mips_cps_smp_in_use()) | ||
145 | return -EINVAL; | ||
146 | |||
147 | core_cfg = &mips_cps_core_bootcfg[core]; | ||
148 | vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(¤t_cpu_data)]; | ||
149 | vpe_cfg->pc = (unsigned long)mips_cps_pm_restore; | ||
150 | vpe_cfg->gp = (unsigned long)current_thread_info(); | ||
151 | vpe_cfg->sp = 0; | ||
152 | } | ||
153 | |||
154 | /* Indicate that this CPU might not be coherent */ | ||
155 | cpumask_clear_cpu(cpu, &cpu_coherent_mask); | ||
156 | smp_mb__after_atomic(); | ||
157 | |||
158 | /* Create a non-coherent mapping of the core ready_count */ | ||
159 | core_ready_count = per_cpu(ready_count, core); | ||
160 | nc_addr = kmap_noncoherent(virt_to_page(core_ready_count), | ||
161 | (unsigned long)core_ready_count); | ||
162 | nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK); | ||
163 | nc_core_ready_count = nc_addr; | ||
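/*
 * The generated entry code runs with coherence disabled, so ready_count
 * must be accessed through this non-coherent mapping for its updates to
 * remain visible to the coupled VPEs.
 */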
164 | |||
165 | /* Ensure ready_count is zero-initialised before the assembly runs */ | ||
166 | WRITE_ONCE(*nc_core_ready_count, 0); | ||
167 | coupled_barrier(&per_cpu(pm_barrier, core), online); | ||
168 | |||
169 | /* Run the generated entry code */ | ||
170 | left = entry(online, nc_core_ready_count); | ||
171 | |||
172 | /* Remove the non-coherent mapping of ready_count */ | ||
173 | kunmap_noncoherent(); | ||
174 | |||
175 | /* Indicate that this CPU is definitely coherent */ | ||
176 | cpumask_set_cpu(cpu, &cpu_coherent_mask); | ||
177 | |||
178 | /* | ||
179 | * If this VPE is the first to leave the non-coherent wait state then | ||
180 | * it needs to wake up any coupled VPEs still running their wait | ||
181 | * instruction so that they return to cpuidle, which can then complete | ||
182 | * coordination between the coupled VPEs & provide the governor with | ||
183 | * a chance to reflect on the length of time the VPEs were in the | ||
184 | * idle state. | ||
185 | */ | ||
186 | if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online)) | ||
187 | arch_send_call_function_ipi_mask(coupled_mask); | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl, | ||
193 | struct uasm_reloc **pr, | ||
194 | const struct cache_desc *cache, | ||
195 | unsigned op, int lbl) | ||
196 | { | ||
197 | unsigned cache_size = cache->ways << cache->waybit; | ||
198 | unsigned i; | ||
199 | const unsigned unroll_lines = 32; | ||
200 | |||
201 | /* If the cache isn't present this function has it easy */ | ||
202 | if (cache->flags & MIPS_CACHE_NOT_PRESENT) | ||
203 | return; | ||
204 | |||
205 | /* Load base address */ | ||
206 | UASM_i_LA(pp, t0, (long)CKSEG0); | ||
207 | |||
208 | /* Calculate end address */ | ||
209 | if (cache_size < 0x8000) | ||
210 | uasm_i_addiu(pp, t1, t0, cache_size); | ||
211 | else | ||
212 | UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size)); | ||
213 | |||
214 | /* Start of cache op loop */ | ||
215 | uasm_build_label(pl, *pp, lbl); | ||
216 | |||
217 | /* Generate the cache ops */ | ||
218 | for (i = 0; i < unroll_lines; i++) { | ||
219 | if (cpu_has_mips_r6) { | ||
220 | uasm_i_cache(pp, op, 0, t0); | ||
221 | uasm_i_addiu(pp, t0, t0, cache->linesz); | ||
222 | } else { | ||
223 | uasm_i_cache(pp, op, i * cache->linesz, t0); | ||
224 | } | ||
225 | } | ||
226 | |||
227 | if (!cpu_has_mips_r6) | ||
228 | /* Update the base address */ | ||
229 | uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz); | ||
230 | |||
231 | /* Loop if we haven't reached the end address yet */ | ||
232 | uasm_il_bne(pp, pr, t0, t1, lbl); | ||
233 | uasm_i_nop(pp); | ||
234 | } | ||
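/*
 * For a cache with 32-byte lines, the pre-R6 code generated above boils
 * down to roughly the following (illustrative only):
 *
 *	 la	t0, CKSEG0
 *	 la	t1, CKSEG0 + cache_size
 * 1:	 cache	op, 0x000(t0)
 *	 ...				# 32 unrolled cache ops
 *	 cache	op, 0x3e0(t0)
 *	 addiu	t0, t0, 32 * 32
 *	 bne	t0, t1, 1b
 *	  nop
 */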
235 | |||
236 | static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl, | ||
237 | struct uasm_reloc **pr, | ||
238 | const struct cpuinfo_mips *cpu_info, | ||
239 | int lbl) | ||
240 | { | ||
241 | unsigned i, fsb_size = 8; | ||
242 | unsigned num_loads = (fsb_size * 3) / 2; | ||
243 | unsigned line_stride = 2; | ||
244 | unsigned line_size = cpu_info->dcache.linesz; | ||
245 | unsigned perf_counter, perf_event; | ||
246 | unsigned revision = cpu_info->processor_id & PRID_REV_MASK; | ||
247 | |||
248 | /* | ||
249 | * Determine whether this CPU requires an FSB flush, and if so which | ||
250 | * performance counter/event reflect stalls due to a full FSB. | ||
251 | */ | ||
252 | switch (__get_cpu_type(cpu_info->cputype)) { | ||
253 | case CPU_INTERAPTIV: | ||
254 | perf_counter = 1; | ||
255 | perf_event = 51; | ||
256 | break; | ||
257 | |||
258 | case CPU_PROAPTIV: | ||
259 | /* Newer proAptiv cores don't require this workaround */ | ||
260 | if (revision >= PRID_REV_ENCODE_332(1, 1, 0)) | ||
261 | return 0; | ||
262 | |||
263 | /* On older ones it's unavailable */ | ||
264 | return -1; | ||
265 | |||
266 | default: | ||
267 | /* Assume that the CPU does not need this workaround */ | ||
268 | return 0; | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * Ensure that the fill/store buffer (FSB) is not holding the results | ||
273 | * of a prefetch, since if it is then the CPC sequencer may become | ||
274 | * stuck in the D3 (ClrBus) state whilst entering a low power state. | ||
275 | */ | ||
276 | |||
277 | /* Preserve perf counter setup */ | ||
278 | uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ | ||
279 | uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ | ||
280 | |||
281 | /* Set up the perf counter to count FSB-full pipeline stalls; the low 0xf bits (EXL|K|S|U) enable counting in all modes */ | ||
282 | uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf); | ||
283 | uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */ | ||
284 | uasm_i_ehb(pp); | ||
285 | uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */ | ||
286 | uasm_i_ehb(pp); | ||
287 | |||
288 | /* Base address for loads */ | ||
289 | UASM_i_LA(pp, t0, (long)CKSEG0); | ||
290 | |||
291 | /* Start of clear loop */ | ||
292 | uasm_build_label(pl, *pp, lbl); | ||
293 | |||
294 | /* Perform some loads to fill the FSB */ | ||
295 | for (i = 0; i < num_loads; i++) | ||
296 | uasm_i_lw(pp, zero, i * line_size * line_stride, t0); | ||
297 | |||
298 | /* | ||
299 | * Invalidate the new D-cache entries so that the cache will need | ||
300 | * refilling (via the FSB) if the loop is executed again. | ||
301 | */ | ||
302 | for (i = 0; i < num_loads; i++) { | ||
303 | uasm_i_cache(pp, Hit_Invalidate_D, | ||
304 | i * line_size * line_stride, t0); | ||
305 | uasm_i_cache(pp, Hit_Writeback_Inv_SD, | ||
306 | i * line_size * line_stride, t0); | ||
307 | } | ||
308 | |||
309 | /* Barrier ensuring previous cache invalidates are complete */ | ||
310 | uasm_i_sync(pp, __SYNC_full); | ||
311 | uasm_i_ehb(pp); | ||
312 | |||
313 | /* Check whether the pipeline stalled due to the FSB being full */ | ||
314 | uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */ | ||
315 | |||
316 | /* Loop if it didn't */ | ||
317 | uasm_il_beqz(pp, pr, t1, lbl); | ||
318 | uasm_i_nop(pp); | ||
319 | |||
320 | /* Restore perf counter 1. The count may well now be wrong... */ | ||
321 | uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */ | ||
322 | uasm_i_ehb(pp); | ||
323 | uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */ | ||
324 | uasm_i_ehb(pp); | ||
325 | |||
326 | return 0; | ||
327 | } | ||
328 | |||
329 | static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl, | ||
330 | struct uasm_reloc **pr, | ||
331 | unsigned r_addr, int lbl) | ||
332 | { | ||
333 | uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000)); | ||
334 | uasm_build_label(pl, *pp, lbl); | ||
335 | uasm_i_ll(pp, t1, 0, r_addr); | ||
336 | uasm_i_or(pp, t1, t1, t0); | ||
337 | uasm_i_sc(pp, t1, 0, r_addr); | ||
338 | uasm_il_beqz(pp, pr, t1, lbl); | ||
339 | uasm_i_nop(pp); | ||
340 | } | ||
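/*
 * The ll/sc retry loop generated above is the uasm equivalent of an
 * atomic "*r_addr |= 0x80000000": it sets the top bit of ready_count,
 * which waiting VPEs poll for (see lbl_poll_cont) before continuing.
 */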
341 | |||
342 | static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state) | ||
343 | { | ||
344 | struct uasm_label *l = labels; | ||
345 | struct uasm_reloc *r = relocs; | ||
346 | u32 *buf, *p; | ||
347 | const unsigned r_online = a0; | ||
348 | const unsigned r_nc_count = a1; | ||
349 | const unsigned r_pcohctl = t7; | ||
350 | const unsigned max_instrs = 256; | ||
351 | unsigned cpc_cmd; | ||
352 | int err; | ||
353 | enum { | ||
354 | lbl_incready = 1, | ||
355 | lbl_poll_cont, | ||
356 | lbl_secondary_hang, | ||
357 | lbl_disable_coherence, | ||
358 | lbl_flush_fsb, | ||
359 | lbl_invicache, | ||
360 | lbl_flushdcache, | ||
361 | lbl_hang, | ||
362 | lbl_set_cont, | ||
363 | lbl_secondary_cont, | ||
364 | lbl_decready, | ||
365 | }; | ||
366 | |||
367 | /* Allocate a buffer to hold the generated code */ | ||
368 | p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL); | ||
369 | if (!buf) | ||
370 | return NULL; | ||
371 | |||
372 | /* Clear labels & relocs ready for (re)use */ | ||
373 | memset(labels, 0, sizeof(labels)); | ||
374 | memset(relocs, 0, sizeof(relocs)); | ||
375 | |||
376 | if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) { | ||
377 | /* Power gating relies upon CPS SMP */ | ||
378 | if (!mips_cps_smp_in_use()) | ||
379 | goto out_err; | ||
380 | |||
381 | /* | ||
382 | * Save CPU state. Note the non-standard calling convention | ||
383 | * with the return address placed in v0 to avoid clobbering | ||
384 | * the ra register before it is saved. | ||
385 | */ | ||
386 | UASM_i_LA(&p, t0, (long)mips_cps_pm_save); | ||
387 | uasm_i_jalr(&p, v0, t0); | ||
388 | uasm_i_nop(&p); | ||
389 | } | ||
390 | |||
391 | /* | ||
392 | * Load addresses of required CM & CPC registers. This is done early | ||
393 | * because they're needed in both the enable & disable coherence steps | ||
394 | * but in the coupled case the enable step will only run on one VPE. | ||
395 | */ | ||
396 | UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence()); | ||
397 | |||
398 | if (coupled_coherence) { | ||
399 | /* Increment ready_count */ | ||
400 | uasm_i_sync(&p, __SYNC_mb); | ||
401 | uasm_build_label(&l, p, lbl_incready); | ||
402 | uasm_i_ll(&p, t1, 0, r_nc_count); | ||
403 | uasm_i_addiu(&p, t2, t1, 1); | ||
404 | uasm_i_sc(&p, t2, 0, r_nc_count); | ||
405 | uasm_il_beqz(&p, &r, t2, lbl_incready); | ||
406 | uasm_i_addiu(&p, t1, t1, 1); | ||
407 | |||
408 | /* Barrier ensuring all CPUs see the updated r_nc_count value */ | ||
409 | uasm_i_sync(&p, __SYNC_mb); | ||
410 | |||
411 | /* | ||
412 | * If this is the last VPE to become ready for non-coherence | ||
413 | * then it should branch below. | ||
414 | */ | ||
415 | uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence); | ||
416 | uasm_i_nop(&p); | ||
417 | |||
418 | if (state < CPS_PM_POWER_GATED) { | ||
419 | /* | ||
420 | * Otherwise this is not the last VPE to become ready | ||
421 | * for non-coherence. It needs to wait until coherence | ||
422 | * has been disabled before proceeding, which it will do | ||
423 | * by polling for the top bit of ready_count being set. | ||
424 | */ | ||
425 | uasm_i_addiu(&p, t1, zero, -1); | ||
426 | uasm_build_label(&l, p, lbl_poll_cont); | ||
427 | uasm_i_lw(&p, t0, 0, r_nc_count); | ||
428 | uasm_il_bltz(&p, &r, t0, lbl_secondary_cont); | ||
429 | uasm_i_ehb(&p); | ||
430 | if (cpu_has_mipsmt) | ||
431 | uasm_i_yield(&p, zero, t1); | ||
432 | uasm_il_b(&p, &r, lbl_poll_cont); | ||
433 | uasm_i_nop(&p); | ||
434 | } else { | ||
435 | /* | ||
436 | * The core will lose power & this VPE will not continue | ||
437 | * so it can simply halt here. | ||
438 | */ | ||
439 | if (cpu_has_mipsmt) { | ||
440 | /* Halt the VPE via C0 tchalt register */ | ||
441 | uasm_i_addiu(&p, t0, zero, TCHALT_H); | ||
442 | uasm_i_mtc0(&p, t0, 2, 4); | ||
443 | } else if (cpu_has_vp) { | ||
444 | /* Halt the VP via the CPC VP_STOP register */ | ||
445 | unsigned int vpe_id; | ||
446 | |||
447 | vpe_id = cpu_vpe_id(&cpu_data[cpu]); | ||
448 | uasm_i_addiu(&p, t0, zero, 1 << vpe_id); | ||
449 | UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop()); | ||
450 | uasm_i_sw(&p, t0, 0, t1); | ||
451 | } else { | ||
452 | BUG(); | ||
453 | } | ||
454 | uasm_build_label(&l, p, lbl_secondary_hang); | ||
455 | uasm_il_b(&p, &r, lbl_secondary_hang); | ||
456 | uasm_i_nop(&p); | ||
457 | } | ||
458 | } | ||
459 | |||
460 | /* | ||
461 | * This is the point of no return - this VPE will now proceed to | ||
462 | * disable coherence. At this point we *must* be sure that no other | ||
463 | * VPE within the core will interfere with the L1 dcache. | ||
464 | */ | ||
465 | uasm_build_label(&l, p, lbl_disable_coherence); | ||
466 | |||
467 | /* Invalidate the L1 icache */ | ||
468 | cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache, | ||
469 | Index_Invalidate_I, lbl_invicache); | ||
470 | |||
471 | /* Writeback & invalidate the L1 dcache */ | ||
472 | cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache, | ||
473 | Index_Writeback_Inv_D, lbl_flushdcache); | ||
474 | |||
475 | /* Barrier ensuring previous cache invalidates are complete */ | ||
476 | uasm_i_sync(&p, __SYNC_full); | ||
477 | uasm_i_ehb(&p); | ||
478 | |||
479 | if (mips_cm_revision() < CM_REV_CM3) { | ||
480 | /* | ||
481 | * Disable all but self interventions. The load from COHCTL is | ||
482 | * defined by the interAptiv & proAptiv SUMs as ensuring that the | ||
483 | * operation resulting from the preceding store is complete. | ||
484 | */ | ||
485 | uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu])); | ||
486 | uasm_i_sw(&p, t0, 0, r_pcohctl); | ||
487 | uasm_i_lw(&p, t0, 0, r_pcohctl); | ||
488 | |||
489 | /* Barrier to ensure write to coherence control is complete */ | ||
490 | uasm_i_sync(&p, __SYNC_full); | ||
491 | uasm_i_ehb(&p); | ||
492 | } | ||
493 | |||
494 | /* Disable coherence */ | ||
495 | uasm_i_sw(&p, zero, 0, r_pcohctl); | ||
496 | uasm_i_lw(&p, t0, 0, r_pcohctl); | ||
497 | |||
498 | if (state >= CPS_PM_CLOCK_GATED) { | ||
499 | err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu], | ||
500 | lbl_flush_fsb); | ||
501 | if (err) | ||
502 | goto out_err; | ||
503 | |||
504 | /* Determine the CPC command to issue */ | ||
505 | switch (state) { | ||
506 | case CPS_PM_CLOCK_GATED: | ||
507 | cpc_cmd = CPC_Cx_CMD_CLOCKOFF; | ||
508 | break; | ||
509 | case CPS_PM_POWER_GATED: | ||
510 | cpc_cmd = CPC_Cx_CMD_PWRDOWN; | ||
511 | break; | ||
512 | default: | ||
513 | BUG(); | ||
514 | goto out_err; | ||
515 | } | ||
516 | |||
517 | /* Issue the CPC command */ | ||
518 | UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd()); | ||
519 | uasm_i_addiu(&p, t1, zero, cpc_cmd); | ||
520 | uasm_i_sw(&p, t1, 0, t0); | ||
521 | |||
522 | if (state == CPS_PM_POWER_GATED) { | ||
523 | /* If anything goes wrong just hang */ | ||
524 | uasm_build_label(&l, p, lbl_hang); | ||
525 | uasm_il_b(&p, &r, lbl_hang); | ||
526 | uasm_i_nop(&p); | ||
527 | |||
528 | /* | ||
529 | * There's no point generating more code, the core is | ||
530 | * powered down & if powered back up will run from the | ||
531 | * reset vector not from here. | ||
532 | */ | ||
533 | goto gen_done; | ||
534 | } | ||
535 | |||
536 | /* Barrier to ensure write to CPC command is complete */ | ||
537 | uasm_i_sync(&p, __SYNC_full); | ||
538 | uasm_i_ehb(&p); | ||
539 | } | ||
540 | |||
541 | if (state == CPS_PM_NC_WAIT) { | ||
542 | /* | ||
543 | * At this point it is safe for all VPEs to proceed with | ||
544 | * execution. This VPE will set the top bit of ready_count | ||
545 | * to indicate to the other VPEs that they may continue. | ||
546 | */ | ||
547 | if (coupled_coherence) | ||
548 | cps_gen_set_top_bit(&p, &l, &r, r_nc_count, | ||
549 | lbl_set_cont); | ||
550 | |||
551 | /* | ||
552 | * VPEs which did not disable coherence will continue | ||
553 | * executing, after coherence has been disabled, from this | ||
554 | * point. | ||
555 | */ | ||
556 | uasm_build_label(&l, p, lbl_secondary_cont); | ||
557 | |||
558 | /* Now perform our wait */ | ||
559 | uasm_i_wait(&p, 0); | ||
560 | } | ||
561 | |||
562 | /* | ||
563 | * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs | ||
564 | * will run this. The first will actually re-enable coherence & the | ||
565 | * rest will just be performing a rather unusual nop. | ||
566 | */ | ||
567 | uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3 | ||
568 | ? CM_GCR_Cx_COHERENCE_COHDOMAINEN | ||
569 | : CM3_GCR_Cx_COHERENCE_COHEN); | ||
570 | |||
571 | uasm_i_sw(&p, t0, 0, r_pcohctl); | ||
572 | uasm_i_lw(&p, t0, 0, r_pcohctl); | ||
573 | |||
574 | /* Barrier to ensure write to coherence control is complete */ | ||
575 | uasm_i_sync(&p, __SYNC_full); | ||
576 | uasm_i_ehb(&p); | ||
577 | |||
578 | if (coupled_coherence && (state == CPS_PM_NC_WAIT)) { | ||
579 | /* Decrement ready_count */ | ||
580 | uasm_build_label(&l, p, lbl_decready); | ||
581 | uasm_i_sync(&p, __SYNC_mb); | ||
582 | uasm_i_ll(&p, t1, 0, r_nc_count); | ||
583 | uasm_i_addiu(&p, t2, t1, -1); | ||
584 | uasm_i_sc(&p, t2, 0, r_nc_count); | ||
585 | uasm_il_beqz(&p, &r, t2, lbl_decready); | ||
586 | uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1); | ||
587 | |||
588 | /* Barrier ensuring all CPUs see the updated r_nc_count value */ | ||
589 | uasm_i_sync(&p, __SYNC_mb); | ||
590 | } | ||
591 | |||
592 | if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) { | ||
593 | /* | ||
594 | * At this point it is safe for all VPEs to proceed with | ||
595 | * execution. This VPE will set the top bit of ready_count | ||
596 | * to indicate to the other VPEs that they may continue. | ||
597 | */ | ||
598 | cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont); | ||
599 | |||
600 | /* | ||
601 | * This core will be reliant upon another core sending a | ||
602 | * power-up command to the CPC in order to resume operation. | ||
603 | * Thus an arbitrary VPE can't trigger the core to leave the | ||
604 | * idle state, and the one that disables coherence might as well | ||
605 | * be the one to re-enable it. The rest will continue from here | ||
606 | * after that has been done. | ||
607 | */ | ||
608 | uasm_build_label(&l, p, lbl_secondary_cont); | ||
609 | |||
610 | /* Barrier ensuring all CPUs see the updated r_nc_count value */ | ||
611 | uasm_i_sync(&p, __SYNC_mb); | ||
612 | } | ||
613 | |||
614 | /* The core is coherent, time to return to C code */ | ||
615 | uasm_i_jr(&p, ra); | ||
616 | uasm_i_nop(&p); | ||
617 | |||
618 | gen_done: | ||
619 | /* Ensure the code didn't exceed the resources allocated for it */ | ||
620 | BUG_ON((p - buf) > max_instrs); | ||
621 | BUG_ON((l - labels) > ARRAY_SIZE(labels)); | ||
622 | BUG_ON((r - relocs) > ARRAY_SIZE(relocs)); | ||
623 | |||
624 | /* Patch branch offsets */ | ||
625 | uasm_resolve_relocs(relocs, labels); | ||
626 | |||
627 | /* Flush the icache */ | ||
628 | local_flush_icache_range((unsigned long)buf, (unsigned long)p); | ||
629 | |||
630 | return buf; | ||
631 | out_err: | ||
632 | kfree(buf); | ||
633 | return NULL; | ||
634 | } | ||
635 | |||
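/*
 * Editor's note: the buffer returned by cps_gen_entry_code() is entered via
 * a function pointer, so a0/a1 carry the values the generated code reads as
 * r_online/r_nc_count. A hedged sketch of such a call site follows; the
 * function-pointer type shown is assumed for illustration and may not match
 * the exact typedef used elsewhere in this file.
 */
static unsigned int enter_nc_sketch(void *entry_fn, unsigned int online,
				    u32 *nc_ready_count)
{
	unsigned int (*fn)(unsigned int, u32 *) = entry_fn;

	/* Returns once the VPE is coherent again (except for power
	 * gating, where the core resumes from the reset vector). */
	return fn(online, nc_ready_count);
}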
636 | static int cps_pm_online_cpu(unsigned int cpu) | ||
637 | { | ||
638 | enum cps_pm_state state; | ||
639 | unsigned core = cpu_core(&cpu_data[cpu]); | ||
640 | void *entry_fn, *core_rc; | ||
641 | |||
642 | for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) { | ||
643 | if (per_cpu(nc_asm_enter, core)[state]) | ||
644 | continue; | ||
645 | if (!test_bit(state, state_support)) | ||
646 | continue; | ||
647 | |||
648 | entry_fn = cps_gen_entry_code(cpu, state); | ||
649 | if (!entry_fn) { | ||
650 | pr_err("Failed to generate core %u state %u entry\n", | ||
651 | core, state); | ||
652 | clear_bit(state, state_support); | ||
653 | } | ||
654 | |||
655 | per_cpu(nc_asm_enter, core)[state] = entry_fn; | ||
656 | } | ||
657 | |||
658 | if (!per_cpu(ready_count, core)) { | ||
659 | core_rc = kmalloc(sizeof(u32), GFP_KERNEL); | ||
660 | if (!core_rc) { | ||
661 | pr_err("Failed to allocate core %u ready_count\n", core); | ||
662 | return -ENOMEM; | ||
663 | } | ||
664 | per_cpu(ready_count, core) = core_rc; | ||
665 | } | ||
666 | |||
667 | return 0; | ||
668 | } | ||
669 | |||
670 | static int cps_pm_power_notifier(struct notifier_block *this, | ||
671 | unsigned long event, void *ptr) | ||
672 | { | ||
673 | unsigned int stat; | ||
674 | |||
675 | switch (event) { | ||
676 | case PM_SUSPEND_PREPARE: | ||
677 | stat = read_cpc_cl_stat_conf(); | ||
678 | /* | ||
679 | * If we're attempting to suspend the system and power down all | ||
680 | * of the cores, the JTAG detect bit indicates that the CPC will | ||
681 | * instead put the cores into clock-off state. In this state | ||
682 | * a connected debugger can cause the CPU to attempt | ||
683 | * interactions with the powered down system. At best this will | ||
684 | * fail. At worst, it can hang the NoC, requiring a hard reset. | ||
685 | * To avoid this, just block system suspend if a JTAG probe | ||
686 | * is detected. | ||
687 | */ | ||
688 | if (stat & CPC_Cx_STAT_CONF_EJTAG_PROBE) { | ||
689 | pr_warn("JTAG probe is connected - abort suspend\n"); | ||
690 | return NOTIFY_BAD; | ||
691 | } | ||
692 | return NOTIFY_DONE; | ||
693 | default: | ||
694 | return NOTIFY_DONE; | ||
695 | } | ||
696 | } | ||
697 | |||
698 | static int __init cps_pm_init(void) | ||
699 | { | ||
700 | /* A CM is required for all non-coherent states */ | ||
701 | if (!mips_cm_present()) { | ||
702 | pr_warn("pm-cps: no CM, non-coherent states unavailable\n"); | ||
703 | return 0; | ||
704 | } | ||
705 | |||
706 | /* | ||
707 | * If interrupts were enabled whilst running a wait instruction on a | ||
708 | * non-coherent core then the VPE may end up processing interrupts | ||
709 | * whilst non-coherent. That would be bad. | ||
710 | */ | ||
711 | if (cpu_wait == r4k_wait_irqoff) | ||
712 | set_bit(CPS_PM_NC_WAIT, state_support); | ||
713 | else | ||
714 | pr_warn("pm-cps: non-coherent wait unavailable\n"); | ||
715 | |||
716 | /* Detect whether a CPC is present */ | ||
717 | if (mips_cpc_present()) { | ||
718 | /* Detect whether clock gating is implemented */ | ||
719 | if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL) | ||
720 | set_bit(CPS_PM_CLOCK_GATED, state_support); | ||
721 | else | ||
722 | pr_warn("pm-cps: CPC does not support clock gating\n"); | ||
723 | |||
724 | /* Power gating is available with CPS SMP & any CPC */ | ||
725 | if (mips_cps_smp_in_use()) | ||
726 | set_bit(CPS_PM_POWER_GATED, state_support); | ||
727 | else | ||
728 | pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n"); | ||
729 | } else { | ||
730 | pr_warn("pm-cps: no CPC, clock & power gating unavailable\n"); | ||
731 | } | ||
732 | |||
733 | pm_notifier(cps_pm_power_notifier, 0); | ||
734 | |||
735 | return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online", | ||
736 | cps_pm_online_cpu, NULL); | ||
737 | } | ||
738 | arch_initcall(cps_pm_init); | ||
diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c new file mode 100644 index 000000000..486ed2bf2 --- /dev/null +++ b/arch/mips/kernel/pm.c | |||
@@ -0,0 +1,95 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2014 Imagination Technologies Ltd. | ||
4 | * | ||
5 | * CPU PM notifiers for saving/restoring general CPU state. | ||
6 | */ | ||
7 | |||
8 | #include <linux/cpu_pm.h> | ||
9 | #include <linux/init.h> | ||
10 | |||
11 | #include <asm/dsp.h> | ||
12 | #include <asm/fpu.h> | ||
13 | #include <asm/mmu_context.h> | ||
14 | #include <asm/pm.h> | ||
15 | #include <asm/watch.h> | ||
16 | |||
17 | /* Used by PM helper macros in asm/pm.h */ | ||
18 | struct mips_static_suspend_state mips_static_suspend_state; | ||
19 | |||
20 | /** | ||
21 | * mips_cpu_save() - Save general CPU state. | ||
22 | * Ensures that general CPU context is saved, notably FPU and DSP. | ||
23 | */ | ||
24 | static int mips_cpu_save(void) | ||
25 | { | ||
26 | /* Save FPU state */ | ||
27 | lose_fpu(1); | ||
28 | |||
29 | /* Save DSP state */ | ||
30 | save_dsp(current); | ||
31 | |||
32 | return 0; | ||
33 | } | ||
34 | |||
35 | /** | ||
36 | * mips_cpu_restore() - Restore general CPU state. | ||
37 | * Restores important CPU context. | ||
38 | */ | ||
39 | static void mips_cpu_restore(void) | ||
40 | { | ||
41 | unsigned int cpu = smp_processor_id(); | ||
42 | |||
43 | /* Restore ASID */ | ||
44 | if (current->mm) | ||
45 | write_c0_entryhi(cpu_asid(cpu, current->mm)); | ||
46 | |||
47 | /* Restore DSP state */ | ||
48 | restore_dsp(current); | ||
49 | |||
50 | /* Restore UserLocal */ | ||
51 | if (cpu_has_userlocal) | ||
52 | write_c0_userlocal(current_thread_info()->tp_value); | ||
53 | |||
54 | /* Restore watch registers */ | ||
55 | __restore_watch(current); | ||
56 | } | ||
57 | |||
58 | /** | ||
59 | * mips_pm_notifier() - Notifier for preserving general CPU context. | ||
60 | * @self: Notifier block. | ||
61 | * @cmd: CPU PM event. | ||
62 | * @v: Private data (unused). | ||
63 | * | ||
64 | * This is called when a CPU power management event occurs, and is used to | ||
65 | * ensure that important CPU context is preserved across a CPU power down. | ||
66 | */ | ||
67 | static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd, | ||
68 | void *v) | ||
69 | { | ||
70 | int ret; | ||
71 | |||
72 | switch (cmd) { | ||
73 | case CPU_PM_ENTER: | ||
74 | ret = mips_cpu_save(); | ||
75 | if (ret) | ||
76 | return NOTIFY_STOP; | ||
77 | break; | ||
78 | case CPU_PM_ENTER_FAILED: | ||
79 | case CPU_PM_EXIT: | ||
80 | mips_cpu_restore(); | ||
81 | break; | ||
82 | } | ||
83 | |||
84 | return NOTIFY_OK; | ||
85 | } | ||
86 | |||
87 | static struct notifier_block mips_pm_notifier_block = { | ||
88 | .notifier_call = mips_pm_notifier, | ||
89 | }; | ||
90 | |||
91 | static int __init mips_pm_init(void) | ||
92 | { | ||
93 | return cpu_pm_register_notifier(&mips_pm_notifier_block); | ||
94 | } | ||
95 | arch_initcall(mips_pm_init); | ||
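/*
 * Editor's note: the notifier above only fires when a platform's low-power
 * path brackets the power-down with the generic CPU PM calls. A minimal
 * sketch of such a caller is shown below; enter_platform_sleep() is a
 * hypothetical hook, not a real API.
 */
static int platform_sleep_sketch(void)
{
	int ret;

	ret = cpu_pm_enter();		/* CPU_PM_ENTER -> mips_cpu_save() */
	if (ret)
		return ret;

	enter_platform_sleep();		/* hypothetical power-down */

	cpu_pm_exit();			/* CPU_PM_EXIT -> mips_cpu_restore() */
	return 0;
}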
diff --git a/arch/mips/kernel/probes-common.h b/arch/mips/kernel/probes-common.h new file mode 100644 index 000000000..73e1d5e95 --- /dev/null +++ b/arch/mips/kernel/probes-common.h | |||
@@ -0,0 +1,79 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-or-later */ | ||
2 | /* | ||
3 | * Copyright (C) 2016 Imagination Technologies | ||
4 | * Author: Marcin Nowakowski <marcin.nowakowski@mips.com> | ||
5 | */ | ||
6 | |||
7 | #ifndef __PROBES_COMMON_H | ||
8 | #define __PROBES_COMMON_H | ||
9 | |||
10 | #include <asm/inst.h> | ||
11 | |||
12 | int __insn_is_compact_branch(union mips_instruction insn); | ||
13 | |||
14 | static inline int __insn_has_delay_slot(const union mips_instruction insn) | ||
15 | { | ||
16 | switch (insn.i_format.opcode) { | ||
17 | /* | ||
18 | * jr and jalr are in r_format. | ||
19 | */ | ||
20 | case spec_op: | ||
21 | switch (insn.r_format.func) { | ||
22 | case jalr_op: | ||
23 | case jr_op: | ||
24 | return 1; | ||
25 | } | ||
26 | break; | ||
27 | |||
28 | /* | ||
29 | * This group contains: | ||
30 | * bltz_op, bgez_op, bltzl_op, bgezl_op, | ||
31 | * bltzal_op, bgezal_op, bltzall_op, bgezall_op. | ||
32 | */ | ||
33 | case bcond_op: | ||
34 | switch (insn.i_format.rt) { | ||
35 | case bltz_op: | ||
36 | case bltzl_op: | ||
37 | case bgez_op: | ||
38 | case bgezl_op: | ||
39 | case bltzal_op: | ||
40 | case bltzall_op: | ||
41 | case bgezal_op: | ||
42 | case bgezall_op: | ||
43 | case bposge32_op: | ||
44 | return 1; | ||
45 | } | ||
46 | break; | ||
47 | |||
48 | /* | ||
49 | * j & jal are unconditional j_format jumps; the rest are i_format branches. | ||
50 | */ | ||
51 | case jal_op: | ||
52 | case j_op: | ||
53 | case beq_op: | ||
54 | case beql_op: | ||
55 | case bne_op: | ||
56 | case bnel_op: | ||
57 | case blez_op: /* not really i_format */ | ||
58 | case blezl_op: | ||
59 | case bgtz_op: | ||
60 | case bgtzl_op: | ||
61 | return 1; | ||
62 | |||
63 | /* | ||
64 | * And now the FPA/cp1 branch instructions. | ||
65 | */ | ||
66 | case cop1_op: | ||
67 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
68 | case lwc2_op: /* This is bbit0 on Octeon */ | ||
69 | case ldc2_op: /* This is bbit032 on Octeon */ | ||
70 | case swc2_op: /* This is bbit1 on Octeon */ | ||
71 | case sdc2_op: /* This is bbit132 on Octeon */ | ||
72 | #endif | ||
73 | return 1; | ||
74 | } | ||
75 | |||
76 | return 0; | ||
77 | } | ||
78 | |||
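/*
 * Editor's note: a minimal sketch of how a probes implementation might use
 * this helper to reject unsafe probe addresses. Branches and jumps cannot
 * be single-stepped out of line because of their delay slots. Illustration
 * only; insn_is_probeable() is a hypothetical name.
 */
static inline int insn_is_probeable(union mips_instruction insn)
{
	return !__insn_has_delay_slot(insn) &&
	       !__insn_is_compact_branch(insn);
}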
79 | #endif /* __PROBES_COMMON_H */ | ||
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c new file mode 100644 index 000000000..33a02f381 --- /dev/null +++ b/arch/mips/kernel/proc.c | |||
@@ -0,0 +1,193 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Copyright (C) 1995, 1996, 2001 Ralf Baechle | ||
4 | * Copyright (C) 2001, 2004 MIPS Technologies, Inc. | ||
5 | * Copyright (C) 2004 Maciej W. Rozycki | ||
6 | */ | ||
7 | #include <linux/delay.h> | ||
8 | #include <linux/kernel.h> | ||
9 | #include <linux/sched.h> | ||
10 | #include <linux/seq_file.h> | ||
11 | #include <asm/bootinfo.h> | ||
12 | #include <asm/cpu.h> | ||
13 | #include <asm/cpu-features.h> | ||
14 | #include <asm/idle.h> | ||
15 | #include <asm/mipsregs.h> | ||
16 | #include <asm/processor.h> | ||
17 | #include <asm/prom.h> | ||
18 | |||
19 | unsigned int vced_count, vcei_count; | ||
20 | |||
21 | /* | ||
22 | * No lock; only written during early bootup by CPU 0. | ||
23 | */ | ||
24 | static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain); | ||
25 | |||
26 | int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb) | ||
27 | { | ||
28 | return raw_notifier_chain_register(&proc_cpuinfo_chain, nb); | ||
29 | } | ||
30 | |||
31 | int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v) | ||
32 | { | ||
33 | return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v); | ||
34 | } | ||
35 | |||
36 | static int show_cpuinfo(struct seq_file *m, void *v) | ||
37 | { | ||
38 | struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args; | ||
39 | unsigned long n = (unsigned long) v - 1; | ||
40 | unsigned int version = cpu_data[n].processor_id; | ||
41 | unsigned int fp_vers = cpu_data[n].fpu_id; | ||
42 | char fmt[64]; | ||
43 | int i; | ||
44 | |||
45 | #ifdef CONFIG_SMP | ||
46 | if (!cpu_online(n)) | ||
47 | return 0; | ||
48 | #endif | ||
49 | |||
50 | /* | ||
51 | * For the first processor also print the system type | ||
52 | */ | ||
53 | if (n == 0) { | ||
54 | seq_printf(m, "system type\t\t: %s\n", get_system_type()); | ||
55 | if (mips_get_machine_name()) | ||
56 | seq_printf(m, "machine\t\t\t: %s\n", | ||
57 | mips_get_machine_name()); | ||
58 | } | ||
59 | |||
60 | seq_printf(m, "processor\t\t: %ld\n", n); | ||
61 | sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n", | ||
62 | cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : ""); | ||
63 | seq_printf(m, fmt, __cpu_name[n], | ||
64 | (version >> 4) & 0x0f, version & 0x0f, | ||
65 | (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); | ||
66 | seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", | ||
67 | cpu_data[n].udelay_val / (500000/HZ), | ||
68 | (cpu_data[n].udelay_val / (5000/HZ)) % 100); | ||
69 | seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); | ||
70 | seq_printf(m, "microsecond timers\t: %s\n", | ||
71 | cpu_has_counter ? "yes" : "no"); | ||
72 | seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); | ||
73 | seq_printf(m, "extra interrupt vector\t: %s\n", | ||
74 | cpu_has_divec ? "yes" : "no"); | ||
75 | seq_printf(m, "hardware watchpoint\t: %s", | ||
76 | cpu_has_watch ? "yes, " : "no\n"); | ||
77 | if (cpu_has_watch) { | ||
78 | seq_printf(m, "count: %d, address/irw mask: [", | ||
79 | cpu_data[n].watch_reg_count); | ||
80 | for (i = 0; i < cpu_data[n].watch_reg_count; i++) | ||
81 | seq_printf(m, "%s0x%04x", i ? ", " : "", | ||
82 | cpu_data[n].watch_reg_masks[i]); | ||
83 | seq_printf(m, "]\n"); | ||
84 | } | ||
85 | |||
86 | seq_printf(m, "isa\t\t\t:"); | ||
87 | if (cpu_has_mips_1) | ||
88 | seq_printf(m, " mips1"); | ||
89 | if (cpu_has_mips_2) | ||
90 | seq_printf(m, "%s", " mips2"); | ||
91 | if (cpu_has_mips_3) | ||
92 | seq_printf(m, "%s", " mips3"); | ||
93 | if (cpu_has_mips_4) | ||
94 | seq_printf(m, "%s", " mips4"); | ||
95 | if (cpu_has_mips_5) | ||
96 | seq_printf(m, "%s", " mips5"); | ||
97 | if (cpu_has_mips32r1) | ||
98 | seq_printf(m, "%s", " mips32r1"); | ||
99 | if (cpu_has_mips32r2) | ||
100 | seq_printf(m, "%s", " mips32r2"); | ||
101 | if (cpu_has_mips32r5) | ||
102 | seq_printf(m, "%s", " mips32r5"); | ||
103 | if (cpu_has_mips32r6) | ||
104 | seq_printf(m, "%s", " mips32r6"); | ||
105 | if (cpu_has_mips64r1) | ||
106 | seq_printf(m, "%s", " mips64r1"); | ||
107 | if (cpu_has_mips64r2) | ||
108 | seq_printf(m, "%s", " mips64r2"); | ||
109 | if (cpu_has_mips64r5) | ||
110 | seq_printf(m, "%s", " mips64r5"); | ||
111 | if (cpu_has_mips64r6) | ||
112 | seq_printf(m, "%s", " mips64r6"); | ||
113 | seq_printf(m, "\n"); | ||
114 | |||
115 | seq_printf(m, "ASEs implemented\t:"); | ||
116 | if (cpu_has_mips16) seq_printf(m, "%s", " mips16"); | ||
117 | if (cpu_has_mips16e2) seq_printf(m, "%s", " mips16e2"); | ||
118 | if (cpu_has_mdmx) seq_printf(m, "%s", " mdmx"); | ||
119 | if (cpu_has_mips3d) seq_printf(m, "%s", " mips3d"); | ||
120 | if (cpu_has_smartmips) seq_printf(m, "%s", " smartmips"); | ||
121 | if (cpu_has_dsp) seq_printf(m, "%s", " dsp"); | ||
122 | if (cpu_has_dsp2) seq_printf(m, "%s", " dsp2"); | ||
123 | if (cpu_has_dsp3) seq_printf(m, "%s", " dsp3"); | ||
124 | if (cpu_has_mipsmt) seq_printf(m, "%s", " mt"); | ||
125 | if (cpu_has_mmips) seq_printf(m, "%s", " micromips"); | ||
126 | if (cpu_has_vz) seq_printf(m, "%s", " vz"); | ||
127 | if (cpu_has_msa) seq_printf(m, "%s", " msa"); | ||
128 | if (cpu_has_eva) seq_printf(m, "%s", " eva"); | ||
129 | if (cpu_has_htw) seq_printf(m, "%s", " htw"); | ||
130 | if (cpu_has_xpa) seq_printf(m, "%s", " xpa"); | ||
131 | if (cpu_has_loongson_mmi) seq_printf(m, "%s", " loongson-mmi"); | ||
132 | if (cpu_has_loongson_cam) seq_printf(m, "%s", " loongson-cam"); | ||
133 | if (cpu_has_loongson_ext) seq_printf(m, "%s", " loongson-ext"); | ||
134 | if (cpu_has_loongson_ext2) seq_printf(m, "%s", " loongson-ext2"); | ||
135 | seq_printf(m, "\n"); | ||
136 | |||
137 | if (cpu_has_mmips) { | ||
138 | seq_printf(m, "micromips kernel\t: %s\n", | ||
139 | (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no"); | ||
140 | } | ||
141 | seq_printf(m, "shadow register sets\t: %d\n", | ||
142 | cpu_data[n].srsets); | ||
143 | seq_printf(m, "kscratch registers\t: %d\n", | ||
144 | hweight8(cpu_data[n].kscratch_mask)); | ||
145 | seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package); | ||
146 | seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n])); | ||
147 | |||
148 | #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6) | ||
149 | if (cpu_has_mipsmt) | ||
150 | seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n])); | ||
151 | else if (cpu_has_vp) | ||
152 | seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n])); | ||
153 | #endif | ||
154 | |||
155 | sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", | ||
156 | cpu_has_vce ? "%u" : "not available"); | ||
157 | seq_printf(m, fmt, 'D', vced_count); | ||
158 | seq_printf(m, fmt, 'I', vcei_count); | ||
159 | |||
160 | proc_cpuinfo_notifier_args.m = m; | ||
161 | proc_cpuinfo_notifier_args.n = n; | ||
162 | |||
163 | raw_notifier_call_chain(&proc_cpuinfo_chain, 0, | ||
164 | &proc_cpuinfo_notifier_args); | ||
165 | |||
166 | seq_printf(m, "\n"); | ||
167 | |||
168 | return 0; | ||
169 | } | ||
170 | |||
171 | static void *c_start(struct seq_file *m, loff_t *pos) | ||
172 | { | ||
173 | unsigned long i = *pos; | ||
174 | |||
175 | return i < nr_cpu_ids ? (void *) (i + 1) : NULL; | ||
176 | } | ||
177 | |||
178 | static void *c_next(struct seq_file *m, void *v, loff_t *pos) | ||
179 | { | ||
180 | ++*pos; | ||
181 | return c_start(m, pos); | ||
182 | } | ||
183 | |||
184 | static void c_stop(struct seq_file *m, void *v) | ||
185 | { | ||
186 | } | ||
187 | |||
188 | const struct seq_operations cpuinfo_op = { | ||
189 | .start = c_start, | ||
190 | .next = c_next, | ||
191 | .stop = c_stop, | ||
192 | .show = show_cpuinfo, | ||
193 | }; | ||
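/*
 * Editor's note: for reference, show_cpuinfo() produces one block like the
 * following per online CPU (values below are purely illustrative):
 *
 *	system type		: MIPS Malta
 *	processor		: 0
 *	cpu model		: MIPS 24Kc V5.0 FPU V5.0
 *	BogoMIPS		: 265.42
 *	wait instruction	: yes
 *	microsecond timers	: yes
 *	tlb_entries		: 16
 *	isa			: mips1 mips2 mips32r1 mips32r2
 *	ASEs implemented	: mips16 dsp
 *	shadow register sets	: 1
 *	core			: 0
 */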
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c new file mode 100644 index 000000000..98ecaf6f3 --- /dev/null +++ b/arch/mips/kernel/process.c | |||
@@ -0,0 +1,896 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others. | ||
7 | * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org) | ||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
9 | * Copyright (C) 2004 Thiemo Seufer | ||
10 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
11 | */ | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/sched/debug.h> | ||
15 | #include <linux/sched/task.h> | ||
16 | #include <linux/sched/task_stack.h> | ||
17 | #include <linux/tick.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/stddef.h> | ||
21 | #include <linux/unistd.h> | ||
22 | #include <linux/export.h> | ||
23 | #include <linux/ptrace.h> | ||
24 | #include <linux/mman.h> | ||
25 | #include <linux/personality.h> | ||
26 | #include <linux/sys.h> | ||
27 | #include <linux/init.h> | ||
28 | #include <linux/completion.h> | ||
29 | #include <linux/kallsyms.h> | ||
30 | #include <linux/random.h> | ||
31 | #include <linux/prctl.h> | ||
32 | #include <linux/nmi.h> | ||
33 | #include <linux/cpu.h> | ||
34 | |||
35 | #include <asm/abi.h> | ||
36 | #include <asm/asm.h> | ||
37 | #include <asm/bootinfo.h> | ||
38 | #include <asm/cpu.h> | ||
39 | #include <asm/dsemul.h> | ||
40 | #include <asm/dsp.h> | ||
41 | #include <asm/fpu.h> | ||
42 | #include <asm/irq.h> | ||
43 | #include <asm/mips-cps.h> | ||
44 | #include <asm/msa.h> | ||
45 | #include <asm/mipsregs.h> | ||
46 | #include <asm/processor.h> | ||
47 | #include <asm/reg.h> | ||
48 | #include <linux/uaccess.h> | ||
49 | #include <asm/io.h> | ||
50 | #include <asm/elf.h> | ||
51 | #include <asm/isadep.h> | ||
52 | #include <asm/inst.h> | ||
53 | #include <asm/stacktrace.h> | ||
54 | #include <asm/irq_regs.h> | ||
55 | #include <asm/exec.h> | ||
56 | |||
57 | #ifdef CONFIG_HOTPLUG_CPU | ||
58 | void arch_cpu_idle_dead(void) | ||
59 | { | ||
60 | play_dead(); | ||
61 | } | ||
62 | #endif | ||
63 | |||
64 | asmlinkage void ret_from_fork(void); | ||
65 | asmlinkage void ret_from_kernel_thread(void); | ||
66 | |||
67 | void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp) | ||
68 | { | ||
69 | unsigned long status; | ||
70 | |||
71 | /* New thread loses kernel privileges. */ | ||
72 | status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK); | ||
73 | status |= KU_USER; | ||
74 | regs->cp0_status = status; | ||
75 | lose_fpu(0); | ||
76 | clear_thread_flag(TIF_MSA_CTX_LIVE); | ||
77 | clear_used_math(); | ||
78 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
79 | atomic_set(¤t->thread.bd_emu_frame, BD_EMUFRAME_NONE); | ||
80 | #endif | ||
81 | init_dsp(); | ||
82 | regs->cp0_epc = pc; | ||
83 | regs->regs[29] = sp; | ||
84 | } | ||
85 | |||
86 | void exit_thread(struct task_struct *tsk) | ||
87 | { | ||
88 | /* | ||
89 | * User threads may have allocated a delay slot emulation frame. | ||
90 | * If so, clean up that allocation. | ||
91 | */ | ||
92 | if (!(current->flags & PF_KTHREAD)) | ||
93 | dsemul_thread_cleanup(tsk); | ||
94 | } | ||
95 | |||
96 | int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) | ||
97 | { | ||
98 | /* | ||
99 | * Save any process state which is live in hardware registers to the | ||
100 | * parent context prior to duplication. This prevents the new child | ||
101 | * state becoming stale if the parent is preempted before copy_thread() | ||
102 | * gets a chance to save the parent's live hardware registers to the | ||
103 | * child context. | ||
104 | */ | ||
105 | preempt_disable(); | ||
106 | |||
107 | if (is_msa_enabled()) | ||
108 | save_msa(current); | ||
109 | else if (is_fpu_owner()) | ||
110 | _save_fp(current); | ||
111 | |||
112 | save_dsp(current); | ||
113 | |||
114 | preempt_enable(); | ||
115 | |||
116 | *dst = *src; | ||
117 | return 0; | ||
118 | } | ||
119 | |||
120 | /* | ||
121 | * Copy architecture-specific thread state | ||
122 | */ | ||
123 | int copy_thread(unsigned long clone_flags, unsigned long usp, | ||
124 | unsigned long kthread_arg, struct task_struct *p, | ||
125 | unsigned long tls) | ||
126 | { | ||
127 | struct thread_info *ti = task_thread_info(p); | ||
128 | struct pt_regs *childregs, *regs = current_pt_regs(); | ||
129 | unsigned long childksp; | ||
130 | |||
131 | childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32; | ||
132 | |||
133 | /* set up new TSS. */ | ||
134 | childregs = (struct pt_regs *) childksp - 1; | ||
135 | /* Put the stack after the struct pt_regs. */ | ||
136 | childksp = (unsigned long) childregs; | ||
137 | p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK; | ||
138 | if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) { | ||
139 | /* kernel thread */ | ||
140 | unsigned long status = p->thread.cp0_status; | ||
141 | memset(childregs, 0, sizeof(struct pt_regs)); | ||
142 | ti->addr_limit = KERNEL_DS; | ||
143 | p->thread.reg16 = usp; /* fn */ | ||
144 | p->thread.reg17 = kthread_arg; | ||
145 | p->thread.reg29 = childksp; | ||
146 | p->thread.reg31 = (unsigned long) ret_from_kernel_thread; | ||
147 | #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX) | ||
148 | status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) | | ||
149 | ((status & (ST0_KUC | ST0_IEC)) << 2); | ||
150 | #else | ||
151 | status |= ST0_EXL; | ||
152 | #endif | ||
153 | childregs->cp0_status = status; | ||
154 | return 0; | ||
155 | } | ||
156 | |||
157 | /* user thread */ | ||
158 | *childregs = *regs; | ||
159 | childregs->regs[7] = 0; /* Clear error flag */ | ||
160 | childregs->regs[2] = 0; /* Child gets zero as return value */ | ||
161 | if (usp) | ||
162 | childregs->regs[29] = usp; | ||
163 | ti->addr_limit = USER_DS; | ||
164 | |||
165 | p->thread.reg29 = (unsigned long) childregs; | ||
166 | p->thread.reg31 = (unsigned long) ret_from_fork; | ||
167 | |||
168 | /* | ||
169 | * New tasks lose permission to use the fpu. This accelerates context | ||
170 | * switching for most programs since they don't use the fpu. | ||
171 | */ | ||
172 | childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); | ||
173 | |||
174 | clear_tsk_thread_flag(p, TIF_USEDFPU); | ||
175 | clear_tsk_thread_flag(p, TIF_USEDMSA); | ||
176 | clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE); | ||
177 | |||
178 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
179 | clear_tsk_thread_flag(p, TIF_FPUBOUND); | ||
180 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
181 | |||
182 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
183 | atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE); | ||
184 | #endif | ||
185 | |||
186 | if (clone_flags & CLONE_SETTLS) | ||
187 | ti->tp_value = tls; | ||
188 | |||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | #ifdef CONFIG_STACKPROTECTOR | ||
193 | #include <linux/stackprotector.h> | ||
194 | unsigned long __stack_chk_guard __read_mostly; | ||
195 | EXPORT_SYMBOL(__stack_chk_guard); | ||
196 | #endif | ||
197 | |||
198 | struct mips_frame_info { | ||
199 | void *func; | ||
200 | unsigned long func_size; | ||
201 | int frame_size; | ||
202 | int pc_offset; | ||
203 | }; | ||
204 | |||
205 | #define J_TARGET(pc,target) \ | ||
206 | (((unsigned long)(pc) & 0xf0000000) | ((target) << 2)) | ||
207 | |||
208 | static inline int is_ra_save_ins(union mips_instruction *ip, int *poff) | ||
209 | { | ||
210 | #ifdef CONFIG_CPU_MICROMIPS | ||
211 | /* | ||
212 | * swsp ra,offset | ||
213 | * swm16 reglist,offset(sp) | ||
214 | * swm32 reglist,offset(sp) | ||
215 | * sw32 ra,offset(sp) | ||
216 | * jraddiusp - NOT SUPPORTED | ||
217 | * | ||
218 | * microMIPS is way more fun... | ||
219 | */ | ||
220 | if (mm_insn_16bit(ip->word >> 16)) { | ||
221 | switch (ip->mm16_r5_format.opcode) { | ||
222 | case mm_swsp16_op: | ||
223 | if (ip->mm16_r5_format.rt != 31) | ||
224 | return 0; | ||
225 | |||
226 | *poff = ip->mm16_r5_format.imm; | ||
227 | *poff = (*poff << 2) / sizeof(ulong); | ||
228 | return 1; | ||
229 | |||
230 | case mm_pool16c_op: | ||
231 | switch (ip->mm16_m_format.func) { | ||
232 | case mm_swm16_op: | ||
233 | *poff = ip->mm16_m_format.imm; | ||
234 | *poff += 1 + ip->mm16_m_format.rlist; | ||
235 | *poff = (*poff << 2) / sizeof(ulong); | ||
236 | return 1; | ||
237 | |||
238 | default: | ||
239 | return 0; | ||
240 | } | ||
241 | |||
242 | default: | ||
243 | return 0; | ||
244 | } | ||
245 | } | ||
246 | |||
247 | switch (ip->i_format.opcode) { | ||
248 | case mm_sw32_op: | ||
249 | if (ip->i_format.rs != 29) | ||
250 | return 0; | ||
251 | if (ip->i_format.rt != 31) | ||
252 | return 0; | ||
253 | |||
254 | *poff = ip->i_format.simmediate / sizeof(ulong); | ||
255 | return 1; | ||
256 | |||
257 | case mm_pool32b_op: | ||
258 | switch (ip->mm_m_format.func) { | ||
259 | case mm_swm32_func: | ||
260 | if (ip->mm_m_format.rd < 0x10) | ||
261 | return 0; | ||
262 | if (ip->mm_m_format.base != 29) | ||
263 | return 0; | ||
264 | |||
265 | *poff = ip->mm_m_format.simmediate; | ||
266 | *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32); | ||
267 | *poff /= sizeof(ulong); | ||
268 | return 1; | ||
269 | default: | ||
270 | return 0; | ||
271 | } | ||
272 | |||
273 | default: | ||
274 | return 0; | ||
275 | } | ||
276 | #else | ||
277 | /* sw / sd $ra, offset($sp) */ | ||
278 | if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) && | ||
279 | ip->i_format.rs == 29 && ip->i_format.rt == 31) { | ||
280 | *poff = ip->i_format.simmediate / sizeof(ulong); | ||
281 | return 1; | ||
282 | } | ||
283 | #ifdef CONFIG_CPU_LOONGSON64 | ||
284 | if ((ip->loongson3_lswc2_format.opcode == swc2_op) && | ||
285 | (ip->loongson3_lswc2_format.ls == 1) && | ||
286 | (ip->loongson3_lswc2_format.fr == 0) && | ||
287 | (ip->loongson3_lswc2_format.base == 29)) { | ||
288 | if (ip->loongson3_lswc2_format.rt == 31) { | ||
289 | *poff = ip->loongson3_lswc2_format.offset << 1; | ||
290 | return 1; | ||
291 | } | ||
292 | if (ip->loongson3_lswc2_format.rq == 31) { | ||
293 | *poff = (ip->loongson3_lswc2_format.offset << 1) + 1; | ||
294 | return 1; | ||
295 | } | ||
296 | } | ||
297 | #endif | ||
298 | return 0; | ||
299 | #endif | ||
300 | } | ||
301 | |||
302 | static inline int is_jump_ins(union mips_instruction *ip) | ||
303 | { | ||
304 | #ifdef CONFIG_CPU_MICROMIPS | ||
305 | /* | ||
306 | * jr16,jrc,jalr16,jalrs16 | ||
307 | * jal | ||
308 | * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb | ||
309 | * jraddiusp - NOT SUPPORTED | ||
310 | * | ||
311 | * microMIPS is kind of more fun... | ||
312 | */ | ||
313 | if (mm_insn_16bit(ip->word >> 16)) { | ||
314 | if ((ip->mm16_r5_format.opcode == mm_pool16c_op && | ||
315 | (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op)) | ||
316 | return 1; | ||
317 | return 0; | ||
318 | } | ||
319 | |||
320 | if (ip->j_format.opcode == mm_j32_op) | ||
321 | return 1; | ||
322 | if (ip->j_format.opcode == mm_jal32_op) | ||
323 | return 1; | ||
324 | if (ip->r_format.opcode != mm_pool32a_op || | ||
325 | ip->r_format.func != mm_pool32axf_op) | ||
326 | return 0; | ||
327 | return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op; | ||
328 | #else | ||
329 | if (ip->j_format.opcode == j_op) | ||
330 | return 1; | ||
331 | if (ip->j_format.opcode == jal_op) | ||
332 | return 1; | ||
333 | if (ip->r_format.opcode != spec_op) | ||
334 | return 0; | ||
335 | return ip->r_format.func == jalr_op || ip->r_format.func == jr_op; | ||
336 | #endif | ||
337 | } | ||
338 | |||
339 | static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size) | ||
340 | { | ||
341 | #ifdef CONFIG_CPU_MICROMIPS | ||
342 | unsigned short tmp; | ||
343 | |||
344 | /* | ||
345 | * addiusp -imm | ||
346 | * addius5 sp,-imm | ||
347 | * addiu32 sp,sp,-imm | ||
348 | * jraddiusp - NOT SUPPORTED | ||
349 | * | ||
350 | * microMIPS is not more fun... | ||
351 | */ | ||
352 | if (mm_insn_16bit(ip->word >> 16)) { | ||
353 | if (ip->mm16_r3_format.opcode == mm_pool16d_op && | ||
354 | ip->mm16_r3_format.simmediate & mm_addiusp_func) { | ||
355 | tmp = ip->mm_b0_format.simmediate >> 1; | ||
356 | tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100; | ||
357 | if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */ | ||
358 | tmp ^= 0x100; | ||
359 | *frame_size = -(signed short)(tmp << 2); | ||
360 | return 1; | ||
361 | } | ||
362 | if (ip->mm16_r5_format.opcode == mm_pool16d_op && | ||
363 | ip->mm16_r5_format.rt == 29) { | ||
364 | tmp = ip->mm16_r5_format.imm >> 1; | ||
365 | *frame_size = -(signed short)(tmp & 0xf); | ||
366 | return 1; | ||
367 | } | ||
368 | return 0; | ||
369 | } | ||
370 | |||
371 | if (ip->mm_i_format.opcode == mm_addiu32_op && | ||
372 | ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) { | ||
373 | *frame_size = -ip->i_format.simmediate; | ||
374 | return 1; | ||
375 | } | ||
376 | #else | ||
377 | /* addiu/daddiu sp,sp,-imm */ | ||
378 | if (ip->i_format.rs != 29 || ip->i_format.rt != 29) | ||
379 | return 0; | ||
380 | |||
381 | if (ip->i_format.opcode == addiu_op || | ||
382 | ip->i_format.opcode == daddiu_op) { | ||
383 | *frame_size = -ip->i_format.simmediate; | ||
384 | return 1; | ||
385 | } | ||
386 | #endif | ||
387 | return 0; | ||
388 | } | ||
389 | |||
390 | static int get_frame_info(struct mips_frame_info *info) | ||
391 | { | ||
392 | bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS); | ||
393 | union mips_instruction insn, *ip; | ||
394 | const unsigned int max_insns = 128; | ||
395 | unsigned int last_insn_size = 0; | ||
396 | unsigned int i; | ||
397 | bool saw_jump = false; | ||
398 | |||
399 | info->pc_offset = -1; | ||
400 | info->frame_size = 0; | ||
401 | |||
402 | ip = (void *)msk_isa16_mode((ulong)info->func); | ||
403 | if (!ip) | ||
404 | goto err; | ||
405 | |||
406 | for (i = 0; i < max_insns; i++) { | ||
407 | ip = (void *)ip + last_insn_size; | ||
408 | |||
409 | if (is_mmips && mm_insn_16bit(ip->halfword[0])) { | ||
410 | insn.word = ip->halfword[0] << 16; | ||
411 | last_insn_size = 2; | ||
412 | } else if (is_mmips) { | ||
413 | insn.word = ip->halfword[0] << 16 | ip->halfword[1]; | ||
414 | last_insn_size = 4; | ||
415 | } else { | ||
416 | insn.word = ip->word; | ||
417 | last_insn_size = 4; | ||
418 | } | ||
419 | |||
420 | if (!info->frame_size) { | ||
421 | is_sp_move_ins(&insn, &info->frame_size); | ||
422 | continue; | ||
423 | } else if (!saw_jump && is_jump_ins(ip)) { | ||
424 | /* | ||
425 | * If we see a jump instruction, we are finished | ||
426 | * with the frame save. | ||
427 | * | ||
428 | * Some functions can have a shortcut return at | ||
429 | * the beginning of the function, so don't start | ||
430 | * looking for jump instruction until we see the | ||
431 | * frame setup. | ||
432 | * | ||
433 | * The RA save instruction can get put into the | ||
434 | * delay slot of the jump instruction, so look | ||
435 | * at the next instruction, too. | ||
436 | */ | ||
437 | saw_jump = true; | ||
438 | continue; | ||
439 | } | ||
440 | if (info->pc_offset == -1 && | ||
441 | is_ra_save_ins(&insn, &info->pc_offset)) | ||
442 | break; | ||
443 | if (saw_jump) | ||
444 | break; | ||
445 | } | ||
446 | if (info->frame_size && info->pc_offset >= 0) /* nested */ | ||
447 | return 0; | ||
448 | if (info->pc_offset < 0) /* leaf */ | ||
449 | return 1; | ||
450 | /* prologue seems bogus... */ | ||
451 | err: | ||
452 | return -1; | ||
453 | } | ||
454 | |||
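/*
 * Editor's note: get_frame_info() above pattern-matches a conventional MIPS
 * prologue. For a typical non-leaf function it expects a sequence like the
 * following (illustrative assembly, classic MIPS encoding):
 *
 *	addiu	sp, sp, -32	# is_sp_move_ins(): frame_size = 32
 *	sw	ra, 28(sp)	# is_ra_save_ins(): pc_offset = 28/sizeof(long)
 *	...
 *	jr	ra		# is_jump_ins() ends the scan
 *
 * Leaf functions never save $ra, so the scan returns 1 for them and the
 * caller falls back to the current $ra value instead.
 */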
455 | static struct mips_frame_info schedule_mfi __read_mostly; | ||
456 | |||
457 | #ifdef CONFIG_KALLSYMS | ||
458 | static unsigned long get___schedule_addr(void) | ||
459 | { | ||
460 | return kallsyms_lookup_name("__schedule"); | ||
461 | } | ||
462 | #else | ||
463 | static unsigned long get___schedule_addr(void) | ||
464 | { | ||
465 | union mips_instruction *ip = (void *)schedule; | ||
466 | int max_insns = 8; | ||
467 | int i; | ||
468 | |||
469 | for (i = 0; i < max_insns; i++, ip++) { | ||
470 | if (ip->j_format.opcode == j_op) | ||
471 | return J_TARGET(ip, ip->j_format.target); | ||
472 | } | ||
473 | return 0; | ||
474 | } | ||
475 | #endif | ||
476 | |||
477 | static int __init frame_info_init(void) | ||
478 | { | ||
479 | unsigned long size = 0; | ||
480 | #ifdef CONFIG_KALLSYMS | ||
481 | unsigned long ofs; | ||
482 | #endif | ||
483 | unsigned long addr; | ||
484 | |||
485 | addr = get___schedule_addr(); | ||
486 | if (!addr) | ||
487 | addr = (unsigned long)schedule; | ||
488 | |||
489 | #ifdef CONFIG_KALLSYMS | ||
490 | kallsyms_lookup_size_offset(addr, &size, &ofs); | ||
491 | #endif | ||
492 | schedule_mfi.func = (void *)addr; | ||
493 | schedule_mfi.func_size = size; | ||
494 | |||
495 | get_frame_info(&schedule_mfi); | ||
496 | |||
497 | /* | ||
498 | * Without schedule() frame info, the results given by | ||
499 | * thread_saved_pc() and get_wchan() are not reliable. | ||
500 | */ | ||
501 | if (schedule_mfi.pc_offset < 0) | ||
502 | printk("Can't analyze schedule() prologue at %p\n", schedule); | ||
503 | |||
504 | return 0; | ||
505 | } | ||
506 | |||
507 | arch_initcall(frame_info_init); | ||
508 | |||
509 | /* | ||
510 | * Return saved PC of a blocked thread. | ||
511 | */ | ||
512 | static unsigned long thread_saved_pc(struct task_struct *tsk) | ||
513 | { | ||
514 | struct thread_struct *t = &tsk->thread; | ||
515 | |||
516 | /* Newborn processes are a special case */ | ||
517 | if (t->reg31 == (unsigned long) ret_from_fork) | ||
518 | return t->reg31; | ||
519 | if (schedule_mfi.pc_offset < 0) | ||
520 | return 0; | ||
521 | return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset]; | ||
522 | } | ||
523 | |||
524 | |||
525 | #ifdef CONFIG_KALLSYMS | ||
526 | /* generic stack unwinding function */ | ||
527 | unsigned long notrace unwind_stack_by_address(unsigned long stack_page, | ||
528 | unsigned long *sp, | ||
529 | unsigned long pc, | ||
530 | unsigned long *ra) | ||
531 | { | ||
532 | unsigned long low, high, irq_stack_high; | ||
533 | struct mips_frame_info info; | ||
534 | unsigned long size, ofs; | ||
535 | struct pt_regs *regs; | ||
536 | int leaf; | ||
537 | |||
538 | if (!stack_page) | ||
539 | return 0; | ||
540 | |||
541 | /* | ||
542 | * IRQ stacks start at IRQ_STACK_START | ||
543 | * task stacks at THREAD_SIZE - 32 | ||
544 | */ | ||
545 | low = stack_page; | ||
546 | if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) { | ||
547 | high = stack_page + IRQ_STACK_START; | ||
548 | irq_stack_high = high; | ||
549 | } else { | ||
550 | high = stack_page + THREAD_SIZE - 32; | ||
551 | irq_stack_high = 0; | ||
552 | } | ||
553 | |||
554 | /* | ||
555 | * If we reached the top of the interrupt stack, start unwinding | ||
556 | * the interrupted task stack. | ||
557 | */ | ||
558 | if (unlikely(*sp == irq_stack_high)) { | ||
559 | unsigned long task_sp = *(unsigned long *)*sp; | ||
560 | |||
561 | /* | ||
562 | * Check that the pointer saved in the IRQ stack head points to | ||
563 | * something within the stack of the current task | ||
564 | */ | ||
565 | if (!object_is_on_stack((void *)task_sp)) | ||
566 | return 0; | ||
567 | |||
568 | /* | ||
569 | * Follow the pointer to the task's kernel stack frame where the | ||
570 | * interrupted state was saved. | ||
571 | */ | ||
572 | regs = (struct pt_regs *)task_sp; | ||
573 | pc = regs->cp0_epc; | ||
574 | if (!user_mode(regs) && __kernel_text_address(pc)) { | ||
575 | *sp = regs->regs[29]; | ||
576 | *ra = regs->regs[31]; | ||
577 | return pc; | ||
578 | } | ||
579 | return 0; | ||
580 | } | ||
581 | if (!kallsyms_lookup_size_offset(pc, &size, &ofs)) | ||
582 | return 0; | ||
583 | /* | ||
584 | * Return ra if an exception occurred at the first instruction | ||
585 | */ | ||
586 | if (unlikely(ofs == 0)) { | ||
587 | pc = *ra; | ||
588 | *ra = 0; | ||
589 | return pc; | ||
590 | } | ||
591 | |||
592 | info.func = (void *)(pc - ofs); | ||
593 | info.func_size = ofs; /* analyze from start to ofs */ | ||
594 | leaf = get_frame_info(&info); | ||
595 | if (leaf < 0) | ||
596 | return 0; | ||
597 | |||
598 | if (*sp < low || *sp + info.frame_size > high) | ||
599 | return 0; | ||
600 | |||
601 | if (leaf) | ||
602 | /* | ||
603 | * In some extreme cases, get_frame_info() can | ||
604 | * wrongly consider a nested function to be a leaf | ||
605 | * one. In such cases, avoid always returning the | ||
606 | * same value. | ||
607 | */ | ||
608 | pc = pc != *ra ? *ra : 0; | ||
609 | else | ||
610 | pc = ((unsigned long *)(*sp))[info.pc_offset]; | ||
611 | |||
612 | *sp += info.frame_size; | ||
613 | *ra = 0; | ||
614 | return __kernel_text_address(pc) ? pc : 0; | ||
615 | } | ||
616 | EXPORT_SYMBOL(unwind_stack_by_address); | ||
617 | |||
618 | /* used by show_backtrace() */ | ||
619 | unsigned long unwind_stack(struct task_struct *task, unsigned long *sp, | ||
620 | unsigned long pc, unsigned long *ra) | ||
621 | { | ||
622 | unsigned long stack_page = 0; | ||
623 | int cpu; | ||
624 | |||
625 | for_each_possible_cpu(cpu) { | ||
626 | if (on_irq_stack(cpu, *sp)) { | ||
627 | stack_page = (unsigned long)irq_stack[cpu]; | ||
628 | break; | ||
629 | } | ||
630 | } | ||
631 | |||
632 | if (!stack_page) | ||
633 | stack_page = (unsigned long)task_stack_page(task); | ||
634 | |||
635 | return unwind_stack_by_address(stack_page, sp, pc, ra); | ||
636 | } | ||
637 | #endif | ||
638 | |||
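/*
 * Editor's note: callers drive unwind_stack() in a loop until it returns 0,
 * as show_backtrace() does. A minimal sketch (assumes CONFIG_KALLSYMS; the
 * printing is illustrative only):
 */
static void __maybe_unused backtrace_sketch(struct task_struct *task,
					    struct pt_regs *regs)
{
	unsigned long sp = regs->regs[29];
	unsigned long ra = regs->regs[31];
	unsigned long pc = regs->cp0_epc;

	while (pc) {
		printk(" [<%lx>]\n", pc);
		pc = unwind_stack(task, &sp, pc, &ra);
	}
}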
639 | /* | ||
640 | * get_wchan - a maintenance nightmare^W^Wpain in the ass ... | ||
641 | */ | ||
642 | unsigned long get_wchan(struct task_struct *task) | ||
643 | { | ||
644 | unsigned long pc = 0; | ||
645 | #ifdef CONFIG_KALLSYMS | ||
646 | unsigned long sp; | ||
647 | unsigned long ra = 0; | ||
648 | #endif | ||
649 | |||
650 | if (!task || task == current || task->state == TASK_RUNNING) | ||
651 | goto out; | ||
652 | if (!task_stack_page(task)) | ||
653 | goto out; | ||
654 | |||
655 | pc = thread_saved_pc(task); | ||
656 | |||
657 | #ifdef CONFIG_KALLSYMS | ||
658 | sp = task->thread.reg29 + schedule_mfi.frame_size; | ||
659 | |||
660 | while (in_sched_functions(pc)) | ||
661 | pc = unwind_stack(task, &sp, pc, &ra); | ||
662 | #endif | ||
663 | |||
664 | out: | ||
665 | return pc; | ||
666 | } | ||
667 | |||
668 | unsigned long mips_stack_top(void) | ||
669 | { | ||
670 | unsigned long top = TASK_SIZE & PAGE_MASK; | ||
671 | |||
672 | if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) { | ||
673 | /* One page for branch delay slot "emulation" */ | ||
674 | top -= PAGE_SIZE; | ||
675 | } | ||
676 | |||
677 | /* Space for the VDSO, data page & GIC user page */ | ||
678 | top -= PAGE_ALIGN(current->thread.abi->vdso->size); | ||
679 | top -= PAGE_SIZE; | ||
680 | top -= mips_gic_present() ? PAGE_SIZE : 0; | ||
681 | |||
682 | /* Space for cache colour alignment */ | ||
683 | if (cpu_has_dc_aliases) | ||
684 | top -= shm_align_mask + 1; | ||
685 | |||
686 | /* Space to randomize the VDSO base */ | ||
687 | if (current->flags & PF_RANDOMIZE) | ||
688 | top -= VDSO_RANDOMIZE_SIZE; | ||
689 | |||
690 | return top; | ||
691 | } | ||
692 | |||
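/*
 * Editor's note: a worked (illustrative) example of the arithmetic above,
 * assuming 4 KiB pages, a one-page VDSO image, a GIC present, no D-cache
 * aliases and randomization disabled:
 *
 *	top = (TASK_SIZE & PAGE_MASK)
 *	    - PAGE_SIZE		(branch delay slot emulation page)
 *	    - PAGE_SIZE		(VDSO image)
 *	    - PAGE_SIZE		(VDSO data page)
 *	    - PAGE_SIZE		(GIC user page)
 */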
693 | /* | ||
694 | * Don't forget that the stack pointer must be aligned on a 8 bytes | ||
695 | * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. | ||
696 | */ | ||
697 | unsigned long arch_align_stack(unsigned long sp) | ||
698 | { | ||
699 | if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) | ||
700 | sp -= get_random_int() & ~PAGE_MASK; | ||
701 | |||
702 | return sp & ALMASK; | ||
703 | } | ||
704 | |||
705 | static DEFINE_PER_CPU(call_single_data_t, backtrace_csd); | ||
706 | static struct cpumask backtrace_csd_busy; | ||
707 | |||
708 | static void handle_backtrace(void *info) | ||
709 | { | ||
710 | nmi_cpu_backtrace(get_irq_regs()); | ||
711 | cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy); | ||
712 | } | ||
713 | |||
714 | static void raise_backtrace(cpumask_t *mask) | ||
715 | { | ||
716 | call_single_data_t *csd; | ||
717 | int cpu; | ||
718 | |||
719 | for_each_cpu(cpu, mask) { | ||
720 | /* | ||
721 | * If we previously sent an IPI to the target CPU & it hasn't | ||
722 | * cleared its bit in the busy cpumask then it didn't handle | ||
723 | * our previous IPI & it's not safe for us to reuse the | ||
724 | * call_single_data_t. | ||
725 | */ | ||
726 | if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) { | ||
727 | pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n", | ||
728 | cpu); | ||
729 | continue; | ||
730 | } | ||
731 | |||
732 | csd = &per_cpu(backtrace_csd, cpu); | ||
733 | csd->func = handle_backtrace; | ||
734 | smp_call_function_single_async(cpu, csd); | ||
735 | } | ||
736 | } | ||
737 | |||
738 | void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self) | ||
739 | { | ||
740 | nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace); | ||
741 | } | ||
742 | |||
743 | int mips_get_process_fp_mode(struct task_struct *task) | ||
744 | { | ||
745 | int value = 0; | ||
746 | |||
747 | if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS)) | ||
748 | value |= PR_FP_MODE_FR; | ||
749 | if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS)) | ||
750 | value |= PR_FP_MODE_FRE; | ||
751 | |||
752 | return value; | ||
753 | } | ||
754 | |||
755 | static long prepare_for_fp_mode_switch(void *unused) | ||
756 | { | ||
757 | /* | ||
758 | * This is icky, but we use this to simply ensure that all CPUs have | ||
759 | * context switched, regardless of whether they were previously running | ||
760 | * kernel or user code. This ensures that no CPU that a mode-switching | ||
761 | * program may execute on keeps its FPU enabled (& in the old mode) | ||
762 | * throughout the mode switch. | ||
763 | */ | ||
764 | return 0; | ||
765 | } | ||
766 | |||
767 | int mips_set_process_fp_mode(struct task_struct *task, unsigned int value) | ||
768 | { | ||
769 | const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE; | ||
770 | struct task_struct *t; | ||
771 | struct cpumask process_cpus; | ||
772 | int cpu; | ||
773 | |||
774 | /* If nothing to change, return right away, successfully. */ | ||
775 | if (value == mips_get_process_fp_mode(task)) | ||
776 | return 0; | ||
777 | |||
778 | /* Only accept a mode change if 64-bit FP enabled for o32. */ | ||
779 | if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT)) | ||
780 | return -EOPNOTSUPP; | ||
781 | |||
782 | /* And only for o32 tasks. */ | ||
783 | if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS)) | ||
784 | return -EOPNOTSUPP; | ||
785 | |||
786 | /* Check the value is valid */ | ||
787 | if (value & ~known_bits) | ||
788 | return -EOPNOTSUPP; | ||
789 | |||
790 | /* Setting FRE without FR is not supported. */ | ||
791 | if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE) | ||
792 | return -EOPNOTSUPP; | ||
793 | |||
794 | /* Avoid inadvertently triggering emulation */ | ||
795 | if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu && | ||
796 | !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64)) | ||
797 | return -EOPNOTSUPP; | ||
798 | if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre) | ||
799 | return -EOPNOTSUPP; | ||
800 | |||
801 | /* FR = 0 not supported in MIPS R6 */ | ||
802 | if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6) | ||
803 | return -EOPNOTSUPP; | ||
804 | |||
805 | /* Indicate the new FP mode in each thread */ | ||
806 | for_each_thread(task, t) { | ||
807 | /* Update desired FP register width */ | ||
808 | if (value & PR_FP_MODE_FR) { | ||
809 | clear_tsk_thread_flag(t, TIF_32BIT_FPREGS); | ||
810 | } else { | ||
811 | set_tsk_thread_flag(t, TIF_32BIT_FPREGS); | ||
812 | clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE); | ||
813 | } | ||
814 | |||
815 | /* Update desired FP single layout */ | ||
816 | if (value & PR_FP_MODE_FRE) | ||
817 | set_tsk_thread_flag(t, TIF_HYBRID_FPREGS); | ||
818 | else | ||
819 | clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS); | ||
820 | } | ||
821 | |||
822 | /* | ||
823 | * We need to ensure that all threads in the process have switched mode | ||
824 | * before returning, in order to allow userland to not worry about | ||
825 | * races. We can do this by forcing all CPUs that any thread in the | ||
826 | * process may be running on to schedule something else - in this case | ||
827 | * prepare_for_fp_mode_switch(). | ||
828 | * | ||
829 | * We begin by generating a mask of all CPUs that any thread in the | ||
830 | * process may be running on. | ||
831 | */ | ||
832 | cpumask_clear(&process_cpus); | ||
833 | for_each_thread(task, t) | ||
834 | cpumask_set_cpu(task_cpu(t), &process_cpus); | ||
835 | |||
836 | /* | ||
837 | * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs. | ||
838 | * | ||
839 | * The CPUs may have rescheduled already since we switched mode or | ||
840 | * generated the cpumask, but that doesn't matter. If the task in this | ||
841 | * process is scheduled out then our scheduling | ||
842 | * prepare_for_fp_mode_switch() will simply be redundant. If it's | ||
843 | * scheduled in then it will already have picked up the new FP mode | ||
844 | * whilst doing so. | ||
845 | */ | ||
846 | get_online_cpus(); | ||
847 | for_each_cpu_and(cpu, &process_cpus, cpu_online_mask) | ||
848 | work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL); | ||
849 | put_online_cpus(); | ||
850 | |||
851 | return 0; | ||
852 | } | ||
853 | |||
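/*
 * Editor's note: userland reaches mips_set_process_fp_mode() through the
 * prctl() syscall. A minimal sketch of a caller requesting 64-bit FP
 * registers (userspace code, shown for illustration only):
 *
 *	#include <stdio.h>
 *	#include <sys/prctl.h>
 *
 *	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0) != 0)
 *		perror("PR_SET_FP_MODE");
 *	printf("fp mode: %d\n", prctl(PR_GET_FP_MODE, 0, 0, 0, 0));
 */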
854 | #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) | ||
855 | void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs) | ||
856 | { | ||
857 | unsigned int i; | ||
858 | |||
859 | for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) { | ||
860 | /* k0/k1 are copied as zero. */ | ||
861 | if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27) | ||
862 | uregs[i] = 0; | ||
863 | else | ||
864 | uregs[i] = regs->regs[i - MIPS32_EF_R0]; | ||
865 | } | ||
866 | |||
867 | uregs[MIPS32_EF_LO] = regs->lo; | ||
868 | uregs[MIPS32_EF_HI] = regs->hi; | ||
869 | uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc; | ||
870 | uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr; | ||
871 | uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status; | ||
872 | uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause; | ||
873 | } | ||
874 | #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ | ||
875 | |||
876 | #ifdef CONFIG_64BIT | ||
877 | void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs) | ||
878 | { | ||
879 | unsigned int i; | ||
880 | |||
881 | for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) { | ||
882 | /* k0/k1 are copied as zero. */ | ||
883 | if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27) | ||
884 | uregs[i] = 0; | ||
885 | else | ||
886 | uregs[i] = regs->regs[i - MIPS64_EF_R0]; | ||
887 | } | ||
888 | |||
889 | uregs[MIPS64_EF_LO] = regs->lo; | ||
890 | uregs[MIPS64_EF_HI] = regs->hi; | ||
891 | uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc; | ||
892 | uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr; | ||
893 | uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status; | ||
894 | uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause; | ||
895 | } | ||
896 | #endif /* CONFIG_64BIT */ | ||
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c new file mode 100644 index 000000000..6abebd57b --- /dev/null +++ b/arch/mips/kernel/prom.c | |||
@@ -0,0 +1,67 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * MIPS support for CONFIG_OF device tree support | ||
4 | * | ||
5 | * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com> | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/export.h> | ||
10 | #include <linux/errno.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/memblock.h> | ||
13 | #include <linux/debugfs.h> | ||
14 | #include <linux/of.h> | ||
15 | #include <linux/of_fdt.h> | ||
16 | #include <linux/of_platform.h> | ||
17 | |||
18 | #include <asm/bootinfo.h> | ||
19 | #include <asm/page.h> | ||
20 | #include <asm/prom.h> | ||
21 | |||
22 | static char mips_machine_name[64] = "Unknown"; | ||
23 | |||
24 | __init void mips_set_machine_name(const char *name) | ||
25 | { | ||
26 | if (name == NULL) | ||
27 | return; | ||
28 | |||
29 | strlcpy(mips_machine_name, name, sizeof(mips_machine_name)); | ||
30 | pr_info("MIPS: machine is %s\n", mips_get_machine_name()); | ||
31 | } | ||
32 | |||
33 | char *mips_get_machine_name(void) | ||
34 | { | ||
35 | return mips_machine_name; | ||
36 | } | ||
37 | |||
38 | #ifdef CONFIG_USE_OF | ||
39 | |||
40 | void __init __dt_setup_arch(void *bph) | ||
41 | { | ||
42 | if (!early_init_dt_scan(bph)) | ||
43 | return; | ||
44 | |||
45 | mips_set_machine_name(of_flat_dt_get_machine_name()); | ||
46 | } | ||
47 | |||
48 | int __init __dt_register_buses(const char *bus0, const char *bus1) | ||
49 | { | ||
50 | static struct of_device_id of_ids[3]; | ||
51 | |||
52 | if (!of_have_populated_dt()) | ||
53 | panic("device tree not present"); | ||
54 | |||
55 | strlcpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible)); | ||
56 | if (bus1) { | ||
57 | strlcpy(of_ids[1].compatible, bus1, | ||
58 | sizeof(of_ids[1].compatible)); | ||
59 | } | ||
60 | |||
61 | if (of_platform_populate(NULL, of_ids, NULL, NULL)) | ||
62 | panic("failed to populate DT"); | ||
63 | |||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | #endif | ||
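For context, a hedged sketch of how a board file typically consumes these helpers. plat_mem_setup() and __dtb_start are the conventional MIPS hooks; the device_initcall wrapper and the "simple-bus" compatible string are illustrative assumptions, not taken from this file.

#include <linux/init.h>
#include <asm/prom.h>

extern char __dtb_start[];          /* built-in DTB, when one is linked in */

void __init plat_mem_setup(void)
{
    /* Scan the FDT and record the machine name via the helpers above. */
    __dt_setup_arch(__dtb_start);
}

static int __init plat_of_setup(void)
{
    /* A second bus compatible string may be passed instead of NULL. */
    return __dt_register_buses("simple-bus", NULL);
}
device_initcall(plat_of_setup);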
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c new file mode 100644 index 000000000..db7c5be1d --- /dev/null +++ b/arch/mips/kernel/ptrace.c | |||
@@ -0,0 +1,1382 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 Ross Biro | ||
7 | * Copyright (C) Linus Torvalds | ||
8 | * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle | ||
9 | * Copyright (C) 1996 David S. Miller | ||
10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | ||
11 | * Copyright (C) 1999 MIPS Technologies, Inc. | ||
12 | * Copyright (C) 2000 Ulf Carlsson | ||
13 | * | ||
14 | * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit | ||
15 | * binaries. | ||
16 | */ | ||
17 | #include <linux/compiler.h> | ||
18 | #include <linux/context_tracking.h> | ||
19 | #include <linux/elf.h> | ||
20 | #include <linux/kernel.h> | ||
21 | #include <linux/sched.h> | ||
22 | #include <linux/sched/task_stack.h> | ||
23 | #include <linux/mm.h> | ||
24 | #include <linux/errno.h> | ||
25 | #include <linux/ptrace.h> | ||
26 | #include <linux/regset.h> | ||
27 | #include <linux/smp.h> | ||
28 | #include <linux/security.h> | ||
29 | #include <linux/stddef.h> | ||
30 | #include <linux/tracehook.h> | ||
31 | #include <linux/audit.h> | ||
32 | #include <linux/seccomp.h> | ||
33 | #include <linux/ftrace.h> | ||
34 | |||
35 | #include <asm/byteorder.h> | ||
36 | #include <asm/cpu.h> | ||
37 | #include <asm/cpu-info.h> | ||
38 | #include <asm/dsp.h> | ||
39 | #include <asm/fpu.h> | ||
40 | #include <asm/mipsregs.h> | ||
41 | #include <asm/mipsmtregs.h> | ||
42 | #include <asm/page.h> | ||
43 | #include <asm/processor.h> | ||
44 | #include <asm/syscall.h> | ||
45 | #include <linux/uaccess.h> | ||
46 | #include <asm/bootinfo.h> | ||
47 | #include <asm/reg.h> | ||
48 | |||
49 | #define CREATE_TRACE_POINTS | ||
50 | #include <trace/events/syscalls.h> | ||
51 | |||
52 | /* | ||
53 | * Called by kernel/ptrace.c when detaching. | ||
54 | * | ||
55 | * Make sure single step bits etc are not set. | ||
56 | */ | ||
57 | void ptrace_disable(struct task_struct *child) | ||
58 | { | ||
59 | /* Don't load the watchpoint registers for the ex-child. */ | ||
60 | clear_tsk_thread_flag(child, TIF_LOAD_WATCH); | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * Read a general register set. We always use the 64-bit format, even | ||
65 | * for 32-bit kernels and for 32-bit processes on a 64-bit kernel. | ||
66 | * Registers are sign extended to fill the available space. | ||
67 | */ | ||
68 | int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data) | ||
69 | { | ||
70 | struct pt_regs *regs; | ||
71 | int i; | ||
72 | |||
73 | if (!access_ok(data, 38 * 8)) | ||
74 | return -EIO; | ||
75 | |||
76 | regs = task_pt_regs(child); | ||
77 | |||
78 | for (i = 0; i < 32; i++) | ||
79 | __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]); | ||
80 | __put_user((long)regs->lo, (__s64 __user *)&data->lo); | ||
81 | __put_user((long)regs->hi, (__s64 __user *)&data->hi); | ||
82 | __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc); | ||
83 | __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr); | ||
84 | __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status); | ||
85 | __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause); | ||
86 | |||
87 | return 0; | ||
88 | } | ||
89 | |||
90 | /* | ||
91 | * Write a general register set. As for PTRACE_GETREGS, we always use | ||
92 | * the 64-bit format. On a 32-bit kernel only the lower order half | ||
93 | * (according to endianness) will be used. | ||
94 | */ | ||
95 | int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data) | ||
96 | { | ||
97 | struct pt_regs *regs; | ||
98 | int i; | ||
99 | |||
100 | if (!access_ok(data, 38 * 8)) | ||
101 | return -EIO; | ||
102 | |||
103 | regs = task_pt_regs(child); | ||
104 | |||
105 | for (i = 0; i < 32; i++) | ||
106 | __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]); | ||
107 | __get_user(regs->lo, (__s64 __user *)&data->lo); | ||
108 | __get_user(regs->hi, (__s64 __user *)&data->hi); | ||
109 | __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc); | ||
110 | |||
111 | /* badvaddr, status, and cause may not be written. */ | ||
112 | |||
113 | /* System call number may have been changed */ | ||
114 | mips_syscall_update_nr(child, regs); | ||
115 | |||
116 | return 0; | ||
117 | } | ||
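A hedged userspace sketch of the consumer side: fetching a stopped child's GPRs with PTRACE_GETREGS. The local struct mirrors the 38-doubleword layout checked by access_ok() above (r0-r31, lo, hi, epc, badvaddr, status, cause).

#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

struct mips_user_regs {             /* mirrors struct user_pt_regs */
    uint64_t regs[32];
    uint64_t lo, hi;
    uint64_t cp0_epc, cp0_badvaddr, cp0_status, cp0_cause;
};

int main(void)
{
    pid_t pid = fork();

    if (pid == 0) {
        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
        execlp("true", "true", (char *)NULL);
        _exit(127);
    }
    waitpid(pid, NULL, 0);          /* child stops at exec */

    struct mips_user_regs gp;
    if (ptrace(PTRACE_GETREGS, pid, NULL, &gp) == 0)
        printf("epc=%#llx sp=%#llx\n",
               (unsigned long long)gp.cp0_epc,
               (unsigned long long)gp.regs[29]);
    ptrace(PTRACE_DETACH, pid, NULL, NULL);
    return 0;
}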
118 | |||
119 | int ptrace_get_watch_regs(struct task_struct *child, | ||
120 | struct pt_watch_regs __user *addr) | ||
121 | { | ||
122 | enum pt_watch_style style; | ||
123 | int i; | ||
124 | |||
125 | if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) | ||
126 | return -EIO; | ||
127 | if (!access_ok(addr, sizeof(struct pt_watch_regs))) | ||
128 | return -EIO; | ||
129 | |||
130 | #ifdef CONFIG_32BIT | ||
131 | style = pt_watch_style_mips32; | ||
132 | #define WATCH_STYLE mips32 | ||
133 | #else | ||
134 | style = pt_watch_style_mips64; | ||
135 | #define WATCH_STYLE mips64 | ||
136 | #endif | ||
137 | |||
138 | __put_user(style, &addr->style); | ||
139 | __put_user(boot_cpu_data.watch_reg_use_cnt, | ||
140 | &addr->WATCH_STYLE.num_valid); | ||
141 | for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { | ||
142 | __put_user(child->thread.watch.mips3264.watchlo[i], | ||
143 | &addr->WATCH_STYLE.watchlo[i]); | ||
144 | __put_user(child->thread.watch.mips3264.watchhi[i] & | ||
145 | (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW), | ||
146 | &addr->WATCH_STYLE.watchhi[i]); | ||
147 | __put_user(boot_cpu_data.watch_reg_masks[i], | ||
148 | &addr->WATCH_STYLE.watch_masks[i]); | ||
149 | } | ||
150 | for (; i < 8; i++) { | ||
151 | __put_user(0, &addr->WATCH_STYLE.watchlo[i]); | ||
152 | __put_user(0, &addr->WATCH_STYLE.watchhi[i]); | ||
153 | __put_user(0, &addr->WATCH_STYLE.watch_masks[i]); | ||
154 | } | ||
155 | |||
156 | return 0; | ||
157 | } | ||
158 | |||
159 | int ptrace_set_watch_regs(struct task_struct *child, | ||
160 | struct pt_watch_regs __user *addr) | ||
161 | { | ||
162 | int i; | ||
163 | int watch_active = 0; | ||
164 | unsigned long lt[NUM_WATCH_REGS]; | ||
165 | u16 ht[NUM_WATCH_REGS]; | ||
166 | |||
167 | if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0) | ||
168 | return -EIO; | ||
169 | if (!access_ok(addr, sizeof(struct pt_watch_regs))) | ||
170 | return -EIO; | ||
171 | /* Check the values. */ | ||
172 | for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { | ||
173 | __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]); | ||
174 | #ifdef CONFIG_32BIT | ||
175 | if (lt[i] & __UA_LIMIT) | ||
176 | return -EINVAL; | ||
177 | #else | ||
178 | if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) { | ||
179 | if (lt[i] & 0xffffffff80000000UL) | ||
180 | return -EINVAL; | ||
181 | } else { | ||
182 | if (lt[i] & __UA_LIMIT) | ||
183 | return -EINVAL; | ||
184 | } | ||
185 | #endif | ||
186 | __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]); | ||
187 | if (ht[i] & ~MIPS_WATCHHI_MASK) | ||
188 | return -EINVAL; | ||
189 | } | ||
190 | /* Install them. */ | ||
191 | for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) { | ||
192 | if (lt[i] & MIPS_WATCHLO_IRW) | ||
193 | watch_active = 1; | ||
194 | child->thread.watch.mips3264.watchlo[i] = lt[i]; | ||
195 | /* The G bit is ORed in when the registers are loaded. */ | ||
196 | child->thread.watch.mips3264.watchhi[i] = ht[i]; | ||
197 | } | ||
198 | |||
199 | if (watch_active) | ||
200 | set_tsk_thread_flag(child, TIF_LOAD_WATCH); | ||
201 | else | ||
202 | clear_tsk_thread_flag(child, TIF_LOAD_WATCH); | ||
203 | |||
204 | return 0; | ||
205 | } | ||
206 | |||
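A hedged userspace sketch of arming one write watchpoint through PTRACE_SET_WATCH_REGS. Note the watch structure travels in the 'addr' slot of ptrace(), as arch_ptrace() below expects; the low IRW bits of watchlo select the trap type, with bit 0 being W (trap on stores).

#include <sys/ptrace.h>
#include <sys/types.h>
#include <asm/ptrace.h>             /* struct pt_watch_regs */

static int arm_write_watch(pid_t pid, unsigned long long watch_addr)
{
    struct pt_watch_regs wr;

    /* Read first so style, num_valid and the masks are populated. */
    if (ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL) < 0)
        return -1;

    if (wr.style == pt_watch_style_mips64) {
        wr.mips64.watchlo[0] = watch_addr | 1;  /* W: trap on stores */
        wr.mips64.watchhi[0] = 0;
    } else {
        wr.mips32.watchlo[0] = (unsigned int)watch_addr | 1;
        wr.mips32.watchhi[0] = 0;
    }
    return ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
}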
207 | /* regset get/set implementations */ | ||
208 | |||
209 | #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) | ||
210 | |||
211 | static int gpr32_get(struct task_struct *target, | ||
212 | const struct user_regset *regset, | ||
213 | struct membuf to) | ||
214 | { | ||
215 | struct pt_regs *regs = task_pt_regs(target); | ||
216 | u32 uregs[ELF_NGREG] = {}; | ||
217 | |||
218 | mips_dump_regs32(uregs, regs); | ||
219 | return membuf_write(&to, uregs, sizeof(uregs)); | ||
220 | } | ||
221 | |||
222 | static int gpr32_set(struct task_struct *target, | ||
223 | const struct user_regset *regset, | ||
224 | unsigned int pos, unsigned int count, | ||
225 | const void *kbuf, const void __user *ubuf) | ||
226 | { | ||
227 | struct pt_regs *regs = task_pt_regs(target); | ||
228 | u32 uregs[ELF_NGREG]; | ||
229 | unsigned start, num_regs, i; | ||
230 | int err; | ||
231 | |||
232 | start = pos / sizeof(u32); | ||
233 | num_regs = count / sizeof(u32); | ||
234 | |||
235 | if (start + num_regs > ELF_NGREG) | ||
236 | return -EIO; | ||
237 | |||
238 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, | ||
239 | sizeof(uregs)); | ||
240 | if (err) | ||
241 | return err; | ||
242 | |||
243 | for (i = start; i < num_regs; i++) { | ||
244 | /* | ||
245 | * Cast all values to signed here so that if this is a 64-bit | ||
246 | * kernel, the supplied 32-bit values will be sign extended. | ||
247 | */ | ||
248 | switch (i) { | ||
249 | case MIPS32_EF_R1 ... MIPS32_EF_R25: | ||
250 | /* k0/k1 are ignored. */ | ||
251 | case MIPS32_EF_R28 ... MIPS32_EF_R31: | ||
252 | regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i]; | ||
253 | break; | ||
254 | case MIPS32_EF_LO: | ||
255 | regs->lo = (s32)uregs[i]; | ||
256 | break; | ||
257 | case MIPS32_EF_HI: | ||
258 | regs->hi = (s32)uregs[i]; | ||
259 | break; | ||
260 | case MIPS32_EF_CP0_EPC: | ||
261 | regs->cp0_epc = (s32)uregs[i]; | ||
262 | break; | ||
263 | } | ||
264 | } | ||
265 | |||
266 | /* System call number may have been changed */ | ||
267 | mips_syscall_update_nr(target, regs); | ||
268 | |||
269 | return 0; | ||
270 | } | ||
271 | |||
272 | #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ | ||
273 | |||
274 | #ifdef CONFIG_64BIT | ||
275 | |||
276 | static int gpr64_get(struct task_struct *target, | ||
277 | const struct user_regset *regset, | ||
278 | struct membuf to) | ||
279 | { | ||
280 | struct pt_regs *regs = task_pt_regs(target); | ||
281 | u64 uregs[ELF_NGREG] = {}; | ||
282 | |||
283 | mips_dump_regs64(uregs, regs); | ||
284 | return membuf_write(&to, uregs, sizeof(uregs)); | ||
285 | } | ||
286 | |||
287 | static int gpr64_set(struct task_struct *target, | ||
288 | const struct user_regset *regset, | ||
289 | unsigned int pos, unsigned int count, | ||
290 | const void *kbuf, const void __user *ubuf) | ||
291 | { | ||
292 | struct pt_regs *regs = task_pt_regs(target); | ||
293 | u64 uregs[ELF_NGREG]; | ||
294 | unsigned start, num_regs, i; | ||
295 | int err; | ||
296 | |||
297 | start = pos / sizeof(u64); | ||
298 | num_regs = count / sizeof(u64); | ||
299 | |||
300 | if (start + num_regs > ELF_NGREG) | ||
301 | return -EIO; | ||
302 | |||
303 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0, | ||
304 | sizeof(uregs)); | ||
305 | if (err) | ||
306 | return err; | ||
307 | |||
308 | for (i = start; i < num_regs; i++) { | ||
309 | switch (i) { | ||
310 | case MIPS64_EF_R1 ... MIPS64_EF_R25: | ||
311 | /* k0/k1 are ignored. */ | ||
312 | case MIPS64_EF_R28 ... MIPS64_EF_R31: | ||
313 | regs->regs[i - MIPS64_EF_R0] = uregs[i]; | ||
314 | break; | ||
315 | case MIPS64_EF_LO: | ||
316 | regs->lo = uregs[i]; | ||
317 | break; | ||
318 | case MIPS64_EF_HI: | ||
319 | regs->hi = uregs[i]; | ||
320 | break; | ||
321 | case MIPS64_EF_CP0_EPC: | ||
322 | regs->cp0_epc = uregs[i]; | ||
323 | break; | ||
324 | } | ||
325 | } | ||
326 | |||
327 | /* System call number may have been changed */ | ||
328 | mips_syscall_update_nr(target, regs); | ||
329 | |||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | #endif /* CONFIG_64BIT */ | ||
334 | |||
335 | |||
336 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
337 | |||
338 | /* | ||
339 | * Poke at FCSR according to its mask. Set the Cause bits even | ||
340 | * if a corresponding Enable bit is set. This will be noticed at | ||
341 | * the time the thread is switched to and SIGFPE thrown accordingly. | ||
342 | */ | ||
343 | static void ptrace_setfcr31(struct task_struct *child, u32 value) | ||
344 | { | ||
345 | u32 fcr31; | ||
346 | u32 mask; | ||
347 | |||
348 | fcr31 = child->thread.fpu.fcr31; | ||
349 | mask = boot_cpu_data.fpu_msk31; | ||
350 | child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask); | ||
351 | } | ||
352 | |||
353 | int ptrace_getfpregs(struct task_struct *child, __u32 __user *data) | ||
354 | { | ||
355 | int i; | ||
356 | |||
357 | if (!access_ok(data, 33 * 8)) | ||
358 | return -EIO; | ||
359 | |||
360 | if (tsk_used_math(child)) { | ||
361 | union fpureg *fregs = get_fpu_regs(child); | ||
362 | for (i = 0; i < 32; i++) | ||
363 | __put_user(get_fpr64(&fregs[i], 0), | ||
364 | i + (__u64 __user *)data); | ||
365 | } else { | ||
366 | for (i = 0; i < 32; i++) | ||
367 | __put_user((__u64) -1, i + (__u64 __user *) data); | ||
368 | } | ||
369 | |||
370 | __put_user(child->thread.fpu.fcr31, data + 64); | ||
371 | __put_user(boot_cpu_data.fpu_id, data + 65); | ||
372 | |||
373 | return 0; | ||
374 | } | ||
375 | |||
376 | int ptrace_setfpregs(struct task_struct *child, __u32 __user *data) | ||
377 | { | ||
378 | union fpureg *fregs; | ||
379 | u64 fpr_val; | ||
380 | u32 value; | ||
381 | int i; | ||
382 | |||
383 | if (!access_ok(data, 33 * 8)) | ||
384 | return -EIO; | ||
385 | |||
386 | init_fp_ctx(child); | ||
387 | fregs = get_fpu_regs(child); | ||
388 | |||
389 | for (i = 0; i < 32; i++) { | ||
390 | __get_user(fpr_val, i + (__u64 __user *)data); | ||
391 | set_fpr64(&fregs[i], 0, fpr_val); | ||
392 | } | ||
393 | |||
394 | __get_user(value, data + 64); | ||
395 | ptrace_setfcr31(child, value); | ||
396 | |||
397 | /* FIR may not be written. */ | ||
398 | |||
399 | return 0; | ||
400 | } | ||
401 | |||
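A hedged userspace sketch matching the layout ptrace_getfpregs() writes above: 33 doublewords holding f0..f31, then FCSR and FIR as the two 32-bit halves of the final slot (word offsets 64 and 65 of the buffer).

#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>

static void show_fpu(pid_t pid)
{
    uint64_t buf[33];
    uint32_t *words = (uint32_t *)buf;

    if (ptrace(PTRACE_GETFPREGS, pid, NULL, buf) < 0) {
        perror("PTRACE_GETFPREGS");
        return;
    }
    printf("f0=%#llx fcsr=%#x fir=%#x\n",
           (unsigned long long)buf[0], words[64], words[65]);
}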
402 | /* | ||
403 | * Copy the floating-point context to the supplied NT_PRFPREG buffer, | ||
404 | * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots | ||
405 | * correspond 1:1 to buffer slots. Only general registers are copied. | ||
406 | */ | ||
407 | static void fpr_get_fpa(struct task_struct *target, | ||
408 | struct membuf *to) | ||
409 | { | ||
410 | membuf_write(to, &target->thread.fpu, | ||
411 | NUM_FPU_REGS * sizeof(elf_fpreg_t)); | ||
412 | } | ||
413 | |||
414 | /* | ||
415 | * Copy the floating-point context to the supplied NT_PRFPREG buffer, | ||
416 | * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's | ||
417 | * general register slots are copied to buffer slots. Only general | ||
418 | * registers are copied. | ||
419 | */ | ||
420 | static void fpr_get_msa(struct task_struct *target, struct membuf *to) | ||
421 | { | ||
422 | unsigned int i; | ||
423 | |||
424 | BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t)); | ||
425 | for (i = 0; i < NUM_FPU_REGS; i++) | ||
426 | membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0)); | ||
427 | } | ||
428 | |||
429 | /* | ||
430 | * Copy the floating-point context to the supplied NT_PRFPREG buffer. | ||
431 | * Choose the appropriate helper for general registers, and then copy | ||
432 | * the FCSR and FIR registers separately. | ||
433 | */ | ||
434 | static int fpr_get(struct task_struct *target, | ||
435 | const struct user_regset *regset, | ||
436 | struct membuf to) | ||
437 | { | ||
438 | if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) | ||
439 | fpr_get_fpa(target, &to); | ||
440 | else | ||
441 | fpr_get_msa(target, &to); | ||
442 | |||
443 | membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32)); | ||
444 | membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32)); | ||
445 | return 0; | ||
446 | } | ||
447 | |||
448 | /* | ||
449 | * Copy the supplied NT_PRFPREG buffer to the floating-point context, | ||
450 | * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP | ||
451 | * context's general register slots. Only general registers are copied. | ||
452 | */ | ||
453 | static int fpr_set_fpa(struct task_struct *target, | ||
454 | unsigned int *pos, unsigned int *count, | ||
455 | const void **kbuf, const void __user **ubuf) | ||
456 | { | ||
457 | return user_regset_copyin(pos, count, kbuf, ubuf, | ||
458 | &target->thread.fpu, | ||
459 | 0, NUM_FPU_REGS * sizeof(elf_fpreg_t)); | ||
460 | } | ||
461 | |||
462 | /* | ||
463 | * Copy the supplied NT_PRFPREG buffer to the floating-point context, | ||
464 | * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64 | ||
465 | * bits only of FP context's general register slots. Only general | ||
466 | * registers are copied. | ||
467 | */ | ||
468 | static int fpr_set_msa(struct task_struct *target, | ||
469 | unsigned int *pos, unsigned int *count, | ||
470 | const void **kbuf, const void __user **ubuf) | ||
471 | { | ||
472 | unsigned int i; | ||
473 | u64 fpr_val; | ||
474 | int err; | ||
475 | |||
476 | BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t)); | ||
477 | for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) { | ||
478 | err = user_regset_copyin(pos, count, kbuf, ubuf, | ||
479 | &fpr_val, i * sizeof(elf_fpreg_t), | ||
480 | (i + 1) * sizeof(elf_fpreg_t)); | ||
481 | if (err) | ||
482 | return err; | ||
483 | set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val); | ||
484 | } | ||
485 | |||
486 | return 0; | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * Copy the supplied NT_PRFPREG buffer to the floating-point context. | ||
491 | * Choose the appropriate helper for general registers, and then copy | ||
492 | * the FCSR register separately. Ignore the incoming FIR register | ||
493 | * contents though, as the register is read-only. | ||
494 | * | ||
495 | * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0', | ||
496 | * which is supposed to have been guaranteed by the kernel before | ||
497 | * calling us, e.g. in `ptrace_regset'. We enforce that requirement, | ||
498 | * so that we can safely avoid preinitializing temporaries for | ||
499 | * partial register writes. | ||
500 | */ | ||
501 | static int fpr_set(struct task_struct *target, | ||
502 | const struct user_regset *regset, | ||
503 | unsigned int pos, unsigned int count, | ||
504 | const void *kbuf, const void __user *ubuf) | ||
505 | { | ||
506 | const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t); | ||
507 | const int fir_pos = fcr31_pos + sizeof(u32); | ||
508 | u32 fcr31; | ||
509 | int err; | ||
510 | |||
511 | BUG_ON(count % sizeof(elf_fpreg_t)); | ||
512 | |||
513 | if (pos + count > sizeof(elf_fpregset_t)) | ||
514 | return -EIO; | ||
515 | |||
516 | init_fp_ctx(target); | ||
517 | |||
518 | if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t)) | ||
519 | err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf); | ||
520 | else | ||
521 | err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf); | ||
522 | if (err) | ||
523 | return err; | ||
524 | |||
525 | if (count > 0) { | ||
526 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
527 | &fcr31, | ||
528 | fcr31_pos, fcr31_pos + sizeof(u32)); | ||
529 | if (err) | ||
530 | return err; | ||
531 | |||
532 | ptrace_setfcr31(target, fcr31); | ||
533 | } | ||
534 | |||
535 | if (count > 0) | ||
536 | err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, | ||
537 | fir_pos, | ||
538 | fir_pos + sizeof(u32)); | ||
539 | |||
540 | return err; | ||
541 | } | ||
542 | |||
543 | /* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */ | ||
544 | static int fp_mode_get(struct task_struct *target, | ||
545 | const struct user_regset *regset, | ||
546 | struct membuf to) | ||
547 | { | ||
548 | return membuf_store(&to, (int)mips_get_process_fp_mode(target)); | ||
549 | } | ||
550 | |||
551 | /* | ||
552 | * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting. | ||
553 | * | ||
554 | * We optimize for the case where `count % sizeof(int) == 0', which | ||
555 | * is supposed to have been guaranteed by the kernel before calling | ||
556 | * us, e.g. in `ptrace_regset'. We enforce that requirement, so | ||
557 | * that we can safely avoid preinitializing temporaries for partial | ||
558 | * mode writes. | ||
559 | */ | ||
560 | static int fp_mode_set(struct task_struct *target, | ||
561 | const struct user_regset *regset, | ||
562 | unsigned int pos, unsigned int count, | ||
563 | const void *kbuf, const void __user *ubuf) | ||
564 | { | ||
565 | int fp_mode; | ||
566 | int err; | ||
567 | |||
568 | BUG_ON(count % sizeof(int)); | ||
569 | |||
570 | if (pos + count > sizeof(fp_mode)) | ||
571 | return -EIO; | ||
572 | |||
573 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0, | ||
574 | sizeof(fp_mode)); | ||
575 | if (err) | ||
576 | return err; | ||
577 | |||
578 | if (count > 0) | ||
579 | err = mips_set_process_fp_mode(target, fp_mode); | ||
580 | |||
581 | return err; | ||
582 | } | ||
583 | |||
584 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
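The same FP mode is visible to debuggers as the NT_MIPS_FP_MODE regset served by fp_mode_get() above. A hedged userspace sketch; NT_MIPS_FP_MODE is 0x801, with the fallback define covering older elf.h headers.

#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_MIPS_FP_MODE
#define NT_MIPS_FP_MODE 0x801
#endif

static int read_fp_mode(pid_t pid)
{
    int mode;
    struct iovec iov = { .iov_base = &mode, .iov_len = sizeof(mode) };

    if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_MIPS_FP_MODE, &iov) < 0)
        return -1;
    return mode;                    /* PR_FP_MODE_FR / PR_FP_MODE_FRE bits */
}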
585 | |||
586 | #ifdef CONFIG_CPU_HAS_MSA | ||
587 | |||
588 | struct msa_control_regs { | ||
589 | unsigned int fir; | ||
590 | unsigned int fcsr; | ||
591 | unsigned int msair; | ||
592 | unsigned int msacsr; | ||
593 | }; | ||
594 | |||
595 | static void copy_pad_fprs(struct task_struct *target, | ||
596 | const struct user_regset *regset, | ||
597 | struct membuf *to, | ||
598 | unsigned int live_sz) | ||
599 | { | ||
600 | int i, j; | ||
601 | unsigned long long fill = ~0ull; | ||
602 | unsigned int cp_sz, pad_sz; | ||
603 | |||
604 | cp_sz = min(regset->size, live_sz); | ||
605 | pad_sz = regset->size - cp_sz; | ||
606 | WARN_ON(pad_sz % sizeof(fill)); | ||
607 | |||
608 | for (i = 0; i < NUM_FPU_REGS; i++) { | ||
609 | membuf_write(to, &target->thread.fpu.fpr[i], cp_sz); | ||
610 | for (j = 0; j < (pad_sz / sizeof(fill)); j++) | ||
611 | membuf_store(to, fill); | ||
612 | } | ||
613 | } | ||
614 | |||
615 | static int msa_get(struct task_struct *target, | ||
616 | const struct user_regset *regset, | ||
617 | struct membuf to) | ||
618 | { | ||
619 | const unsigned int wr_size = NUM_FPU_REGS * regset->size; | ||
620 | const struct msa_control_regs ctrl_regs = { | ||
621 | .fir = boot_cpu_data.fpu_id, | ||
622 | .fcsr = target->thread.fpu.fcr31, | ||
623 | .msair = boot_cpu_data.msa_id, | ||
624 | .msacsr = target->thread.fpu.msacsr, | ||
625 | }; | ||
626 | |||
627 | if (!tsk_used_math(target)) { | ||
628 | /* The task hasn't used FP or MSA, fill with 0xff */ | ||
629 | copy_pad_fprs(target, regset, &to, 0); | ||
630 | } else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) { | ||
631 | /* Copy scalar FP context, fill the rest with 0xff */ | ||
632 | copy_pad_fprs(target, regset, &to, 8); | ||
633 | } else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) { | ||
634 | /* Trivially copy the vector registers */ | ||
635 | membuf_write(&to, &target->thread.fpu.fpr, wr_size); | ||
636 | } else { | ||
637 | /* Copy as much context as possible, fill the rest with 0xff */ | ||
638 | copy_pad_fprs(target, regset, &to, | ||
639 | sizeof(target->thread.fpu.fpr[0])); | ||
640 | } | ||
641 | |||
642 | return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs)); | ||
643 | } | ||
644 | |||
645 | static int msa_set(struct task_struct *target, | ||
646 | const struct user_regset *regset, | ||
647 | unsigned int pos, unsigned int count, | ||
648 | const void *kbuf, const void __user *ubuf) | ||
649 | { | ||
650 | const unsigned int wr_size = NUM_FPU_REGS * regset->size; | ||
651 | struct msa_control_regs ctrl_regs; | ||
652 | unsigned int cp_sz; | ||
653 | int i, err, start; | ||
654 | |||
655 | init_fp_ctx(target); | ||
656 | |||
657 | if (sizeof(target->thread.fpu.fpr[0]) == regset->size) { | ||
658 | /* Trivially copy the vector registers */ | ||
659 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
660 | &target->thread.fpu.fpr, | ||
661 | 0, wr_size); | ||
662 | } else { | ||
663 | /* Copy as much context as possible */ | ||
664 | cp_sz = min_t(unsigned int, regset->size, | ||
665 | sizeof(target->thread.fpu.fpr[0])); | ||
666 | |||
667 | i = start = err = 0; | ||
668 | for (; i < NUM_FPU_REGS; i++, start += regset->size) { | ||
669 | err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf, | ||
670 | &target->thread.fpu.fpr[i], | ||
671 | start, start + cp_sz); | ||
672 | } | ||
673 | } | ||
674 | |||
675 | if (!err) | ||
676 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs, | ||
677 | wr_size, wr_size + sizeof(ctrl_regs)); | ||
678 | if (!err) { | ||
679 | target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X; | ||
680 | target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF; | ||
681 | } | ||
682 | |||
683 | return err; | ||
684 | } | ||
685 | |||
686 | #endif /* CONFIG_CPU_HAS_MSA */ | ||
687 | |||
688 | #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) | ||
689 | |||
690 | /* | ||
691 | * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer. | ||
692 | */ | ||
693 | static int dsp32_get(struct task_struct *target, | ||
694 | const struct user_regset *regset, | ||
695 | struct membuf to) | ||
696 | { | ||
697 | u32 dspregs[NUM_DSP_REGS + 1]; | ||
698 | unsigned int i; | ||
699 | |||
700 | BUG_ON(to.left % sizeof(u32)); | ||
701 | |||
702 | if (!cpu_has_dsp) | ||
703 | return -EIO; | ||
704 | |||
705 | for (i = 0; i < NUM_DSP_REGS; i++) | ||
706 | dspregs[i] = target->thread.dsp.dspr[i]; | ||
707 | dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol; | ||
708 | return membuf_write(&to, dspregs, sizeof(dspregs)); | ||
709 | } | ||
710 | |||
711 | /* | ||
712 | * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context. | ||
713 | */ | ||
714 | static int dsp32_set(struct task_struct *target, | ||
715 | const struct user_regset *regset, | ||
716 | unsigned int pos, unsigned int count, | ||
717 | const void *kbuf, const void __user *ubuf) | ||
718 | { | ||
719 | unsigned int start, num_regs, i; | ||
720 | u32 dspregs[NUM_DSP_REGS + 1]; | ||
721 | int err; | ||
722 | |||
723 | BUG_ON(count % sizeof(u32)); | ||
724 | |||
725 | if (!cpu_has_dsp) | ||
726 | return -EIO; | ||
727 | |||
728 | start = pos / sizeof(u32); | ||
729 | num_regs = count / sizeof(u32); | ||
730 | |||
731 | if (start + num_regs > NUM_DSP_REGS + 1) | ||
732 | return -EIO; | ||
733 | |||
734 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0, | ||
735 | sizeof(dspregs)); | ||
736 | if (err) | ||
737 | return err; | ||
738 | |||
739 | for (i = start; i < num_regs; i++) | ||
740 | switch (i) { | ||
741 | case 0 ... NUM_DSP_REGS - 1: | ||
742 | target->thread.dsp.dspr[i] = (s32)dspregs[i]; | ||
743 | break; | ||
744 | case NUM_DSP_REGS: | ||
745 | target->thread.dsp.dspcontrol = (s32)dspregs[i]; | ||
746 | break; | ||
747 | } | ||
748 | |||
749 | return 0; | ||
750 | } | ||
751 | |||
752 | #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ | ||
753 | |||
754 | #ifdef CONFIG_64BIT | ||
755 | |||
756 | /* | ||
757 | * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer. | ||
758 | */ | ||
759 | static int dsp64_get(struct task_struct *target, | ||
760 | const struct user_regset *regset, | ||
761 | struct membuf to) | ||
762 | { | ||
763 | u64 dspregs[NUM_DSP_REGS + 1]; | ||
764 | unsigned int i; | ||
765 | |||
766 | BUG_ON(to.left % sizeof(u64)); | ||
767 | |||
768 | if (!cpu_has_dsp) | ||
769 | return -EIO; | ||
770 | |||
771 | for (i = 0; i < NUM_DSP_REGS; i++) | ||
772 | dspregs[i] = target->thread.dsp.dspr[i]; | ||
773 | dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol; | ||
774 | return membuf_write(&to, dspregs, sizeof(dspregs)); | ||
775 | } | ||
776 | |||
777 | /* | ||
778 | * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context. | ||
779 | */ | ||
780 | static int dsp64_set(struct task_struct *target, | ||
781 | const struct user_regset *regset, | ||
782 | unsigned int pos, unsigned int count, | ||
783 | const void *kbuf, const void __user *ubuf) | ||
784 | { | ||
785 | unsigned int start, num_regs, i; | ||
786 | u64 dspregs[NUM_DSP_REGS + 1]; | ||
787 | int err; | ||
788 | |||
789 | BUG_ON(count % sizeof(u64)); | ||
790 | |||
791 | if (!cpu_has_dsp) | ||
792 | return -EIO; | ||
793 | |||
794 | start = pos / sizeof(u64); | ||
795 | num_regs = count / sizeof(u64); | ||
796 | |||
797 | if (start + num_regs > NUM_DSP_REGS + 1) | ||
798 | return -EIO; | ||
799 | |||
800 | err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0, | ||
801 | sizeof(dspregs)); | ||
802 | if (err) | ||
803 | return err; | ||
804 | |||
805 | for (i = start; i < num_regs; i++) | ||
806 | switch (i) { | ||
807 | case 0 ... NUM_DSP_REGS - 1: | ||
808 | target->thread.dsp.dspr[i] = dspregs[i]; | ||
809 | break; | ||
810 | case NUM_DSP_REGS: | ||
811 | target->thread.dsp.dspcontrol = dspregs[i]; | ||
812 | break; | ||
813 | } | ||
814 | |||
815 | return 0; | ||
816 | } | ||
817 | |||
818 | #endif /* CONFIG_64BIT */ | ||
819 | |||
820 | /* | ||
821 | * Determine whether the DSP context is present. | ||
822 | */ | ||
823 | static int dsp_active(struct task_struct *target, | ||
824 | const struct user_regset *regset) | ||
825 | { | ||
826 | return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV; | ||
827 | } | ||
828 | |||
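A hedged userspace sketch of reading the six DSP accumulators plus DSPControl through the NT_MIPS_DSP regset served by dsp32_get()/dsp64_get(). Each slot is one native word, so 'unsigned long' matches both the 32-bit and 64-bit variants; NT_MIPS_DSP is 0x800, with the fallback define covering older elf.h headers.

#include <elf.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

#ifndef NT_MIPS_DSP
#define NT_MIPS_DSP 0x800
#endif

static long read_dsp(pid_t pid, unsigned long out[7])
{
    struct iovec iov = { .iov_base = out, .iov_len = 7 * sizeof(out[0]) };

    /* Fails when the CPU lacks the DSP ASE, as dsp_active() reports. */
    return ptrace(PTRACE_GETREGSET, pid, (void *)NT_MIPS_DSP, &iov);
}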
829 | enum mips_regset { | ||
830 | REGSET_GPR, | ||
831 | REGSET_DSP, | ||
832 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
833 | REGSET_FPR, | ||
834 | REGSET_FP_MODE, | ||
835 | #endif | ||
836 | #ifdef CONFIG_CPU_HAS_MSA | ||
837 | REGSET_MSA, | ||
838 | #endif | ||
839 | }; | ||
840 | |||
841 | struct pt_regs_offset { | ||
842 | const char *name; | ||
843 | int offset; | ||
844 | }; | ||
845 | |||
846 | #define REG_OFFSET_NAME(reg, r) { \ | ||
847 | .name = #reg, \ | ||
848 | .offset = offsetof(struct pt_regs, r) \ | ||
849 | } | ||
850 | |||
851 | #define REG_OFFSET_END { \ | ||
852 | .name = NULL, \ | ||
853 | .offset = 0 \ | ||
854 | } | ||
855 | |||
856 | static const struct pt_regs_offset regoffset_table[] = { | ||
857 | REG_OFFSET_NAME(r0, regs[0]), | ||
858 | REG_OFFSET_NAME(r1, regs[1]), | ||
859 | REG_OFFSET_NAME(r2, regs[2]), | ||
860 | REG_OFFSET_NAME(r3, regs[3]), | ||
861 | REG_OFFSET_NAME(r4, regs[4]), | ||
862 | REG_OFFSET_NAME(r5, regs[5]), | ||
863 | REG_OFFSET_NAME(r6, regs[6]), | ||
864 | REG_OFFSET_NAME(r7, regs[7]), | ||
865 | REG_OFFSET_NAME(r8, regs[8]), | ||
866 | REG_OFFSET_NAME(r9, regs[9]), | ||
867 | REG_OFFSET_NAME(r10, regs[10]), | ||
868 | REG_OFFSET_NAME(r11, regs[11]), | ||
869 | REG_OFFSET_NAME(r12, regs[12]), | ||
870 | REG_OFFSET_NAME(r13, regs[13]), | ||
871 | REG_OFFSET_NAME(r14, regs[14]), | ||
872 | REG_OFFSET_NAME(r15, regs[15]), | ||
873 | REG_OFFSET_NAME(r16, regs[16]), | ||
874 | REG_OFFSET_NAME(r17, regs[17]), | ||
875 | REG_OFFSET_NAME(r18, regs[18]), | ||
876 | REG_OFFSET_NAME(r19, regs[19]), | ||
877 | REG_OFFSET_NAME(r20, regs[20]), | ||
878 | REG_OFFSET_NAME(r21, regs[21]), | ||
879 | REG_OFFSET_NAME(r22, regs[22]), | ||
880 | REG_OFFSET_NAME(r23, regs[23]), | ||
881 | REG_OFFSET_NAME(r24, regs[24]), | ||
882 | REG_OFFSET_NAME(r25, regs[25]), | ||
883 | REG_OFFSET_NAME(r26, regs[26]), | ||
884 | REG_OFFSET_NAME(r27, regs[27]), | ||
885 | REG_OFFSET_NAME(r28, regs[28]), | ||
886 | REG_OFFSET_NAME(r29, regs[29]), | ||
887 | REG_OFFSET_NAME(r30, regs[30]), | ||
888 | REG_OFFSET_NAME(r31, regs[31]), | ||
889 | REG_OFFSET_NAME(c0_status, cp0_status), | ||
890 | REG_OFFSET_NAME(hi, hi), | ||
891 | REG_OFFSET_NAME(lo, lo), | ||
892 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | ||
893 | REG_OFFSET_NAME(acx, acx), | ||
894 | #endif | ||
895 | REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr), | ||
896 | REG_OFFSET_NAME(c0_cause, cp0_cause), | ||
897 | REG_OFFSET_NAME(c0_epc, cp0_epc), | ||
898 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
899 | REG_OFFSET_NAME(mpl0, mpl[0]), | ||
900 | REG_OFFSET_NAME(mpl1, mpl[1]), | ||
901 | REG_OFFSET_NAME(mpl2, mpl[2]), | ||
902 | REG_OFFSET_NAME(mtp0, mtp[0]), | ||
903 | REG_OFFSET_NAME(mtp1, mtp[1]), | ||
904 | REG_OFFSET_NAME(mtp2, mtp[2]), | ||
905 | #endif | ||
906 | REG_OFFSET_END, | ||
907 | }; | ||
908 | |||
909 | /** | ||
910 | * regs_query_register_offset() - query register offset from its name | ||
911 | * @name: the name of a register | ||
912 | * | ||
913 | * regs_query_register_offset() returns the offset of a register in struct | ||
914 | * pt_regs from its name. If the name is invalid, this returns -EINVAL. | ||
915 | */ | ||
916 | int regs_query_register_offset(const char *name) | ||
917 | { | ||
918 | const struct pt_regs_offset *roff; | ||
919 | for (roff = regoffset_table; roff->name != NULL; roff++) | ||
920 | if (!strcmp(roff->name, name)) | ||
921 | return roff->offset; | ||
922 | return -EINVAL; | ||
923 | } | ||
924 | |||
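A hedged in-kernel sketch of the intended pairing: regs_query_register_offset() combined with the generic regs_get_register() accessor from <asm/ptrace.h> turns a register name into its live value, much as kprobe event parsing does.

#include <asm/ptrace.h>

static unsigned long read_named_reg(struct pt_regs *regs, const char *name)
{
    int off = regs_query_register_offset(name);

    return off < 0 ? 0 : regs_get_register(regs, off);
}
/* e.g. read_named_reg(regs, "r4") yields argument register a0. */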
925 | #if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32) | ||
926 | |||
927 | static const struct user_regset mips_regsets[] = { | ||
928 | [REGSET_GPR] = { | ||
929 | .core_note_type = NT_PRSTATUS, | ||
930 | .n = ELF_NGREG, | ||
931 | .size = sizeof(unsigned int), | ||
932 | .align = sizeof(unsigned int), | ||
933 | .regset_get = gpr32_get, | ||
934 | .set = gpr32_set, | ||
935 | }, | ||
936 | [REGSET_DSP] = { | ||
937 | .core_note_type = NT_MIPS_DSP, | ||
938 | .n = NUM_DSP_REGS + 1, | ||
939 | .size = sizeof(u32), | ||
940 | .align = sizeof(u32), | ||
941 | .regset_get = dsp32_get, | ||
942 | .set = dsp32_set, | ||
943 | .active = dsp_active, | ||
944 | }, | ||
945 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
946 | [REGSET_FPR] = { | ||
947 | .core_note_type = NT_PRFPREG, | ||
948 | .n = ELF_NFPREG, | ||
949 | .size = sizeof(elf_fpreg_t), | ||
950 | .align = sizeof(elf_fpreg_t), | ||
951 | .regset_get = fpr_get, | ||
952 | .set = fpr_set, | ||
953 | }, | ||
954 | [REGSET_FP_MODE] = { | ||
955 | .core_note_type = NT_MIPS_FP_MODE, | ||
956 | .n = 1, | ||
957 | .size = sizeof(int), | ||
958 | .align = sizeof(int), | ||
959 | .regset_get = fp_mode_get, | ||
960 | .set = fp_mode_set, | ||
961 | }, | ||
962 | #endif | ||
963 | #ifdef CONFIG_CPU_HAS_MSA | ||
964 | [REGSET_MSA] = { | ||
965 | .core_note_type = NT_MIPS_MSA, | ||
966 | .n = NUM_FPU_REGS + 1, | ||
967 | .size = 16, | ||
968 | .align = 16, | ||
969 | .regset_get = msa_get, | ||
970 | .set = msa_set, | ||
971 | }, | ||
972 | #endif | ||
973 | }; | ||
974 | |||
975 | static const struct user_regset_view user_mips_view = { | ||
976 | .name = "mips", | ||
977 | .e_machine = ELF_ARCH, | ||
978 | .ei_osabi = ELF_OSABI, | ||
979 | .regsets = mips_regsets, | ||
980 | .n = ARRAY_SIZE(mips_regsets), | ||
981 | }; | ||
982 | |||
983 | #endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */ | ||
984 | |||
985 | #ifdef CONFIG_64BIT | ||
986 | |||
987 | static const struct user_regset mips64_regsets[] = { | ||
988 | [REGSET_GPR] = { | ||
989 | .core_note_type = NT_PRSTATUS, | ||
990 | .n = ELF_NGREG, | ||
991 | .size = sizeof(unsigned long), | ||
992 | .align = sizeof(unsigned long), | ||
993 | .regset_get = gpr64_get, | ||
994 | .set = gpr64_set, | ||
995 | }, | ||
996 | [REGSET_DSP] = { | ||
997 | .core_note_type = NT_MIPS_DSP, | ||
998 | .n = NUM_DSP_REGS + 1, | ||
999 | .size = sizeof(u64), | ||
1000 | .align = sizeof(u64), | ||
1001 | .regset_get = dsp64_get, | ||
1002 | .set = dsp64_set, | ||
1003 | .active = dsp_active, | ||
1004 | }, | ||
1005 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
1006 | [REGSET_FP_MODE] = { | ||
1007 | .core_note_type = NT_MIPS_FP_MODE, | ||
1008 | .n = 1, | ||
1009 | .size = sizeof(int), | ||
1010 | .align = sizeof(int), | ||
1011 | .regset_get = fp_mode_get, | ||
1012 | .set = fp_mode_set, | ||
1013 | }, | ||
1014 | [REGSET_FPR] = { | ||
1015 | .core_note_type = NT_PRFPREG, | ||
1016 | .n = ELF_NFPREG, | ||
1017 | .size = sizeof(elf_fpreg_t), | ||
1018 | .align = sizeof(elf_fpreg_t), | ||
1019 | .regset_get = fpr_get, | ||
1020 | .set = fpr_set, | ||
1021 | }, | ||
1022 | #endif | ||
1023 | #ifdef CONFIG_CPU_HAS_MSA | ||
1024 | [REGSET_MSA] = { | ||
1025 | .core_note_type = NT_MIPS_MSA, | ||
1026 | .n = NUM_FPU_REGS + 1, | ||
1027 | .size = 16, | ||
1028 | .align = 16, | ||
1029 | .regset_get = msa_get, | ||
1030 | .set = msa_set, | ||
1031 | }, | ||
1032 | #endif | ||
1033 | }; | ||
1034 | |||
1035 | static const struct user_regset_view user_mips64_view = { | ||
1036 | .name = "mips64", | ||
1037 | .e_machine = ELF_ARCH, | ||
1038 | .ei_osabi = ELF_OSABI, | ||
1039 | .regsets = mips64_regsets, | ||
1040 | .n = ARRAY_SIZE(mips64_regsets), | ||
1041 | }; | ||
1042 | |||
1043 | #ifdef CONFIG_MIPS32_N32 | ||
1044 | |||
1045 | static const struct user_regset_view user_mipsn32_view = { | ||
1046 | .name = "mipsn32", | ||
1047 | .e_flags = EF_MIPS_ABI2, | ||
1048 | .e_machine = ELF_ARCH, | ||
1049 | .ei_osabi = ELF_OSABI, | ||
1050 | .regsets = mips64_regsets, | ||
1051 | .n = ARRAY_SIZE(mips64_regsets), | ||
1052 | }; | ||
1053 | |||
1054 | #endif /* CONFIG_MIPS32_N32 */ | ||
1055 | |||
1056 | #endif /* CONFIG_64BIT */ | ||
1057 | |||
1058 | const struct user_regset_view *task_user_regset_view(struct task_struct *task) | ||
1059 | { | ||
1060 | #ifdef CONFIG_32BIT | ||
1061 | return &user_mips_view; | ||
1062 | #else | ||
1063 | #ifdef CONFIG_MIPS32_O32 | ||
1064 | if (test_tsk_thread_flag(task, TIF_32BIT_REGS)) | ||
1065 | return &user_mips_view; | ||
1066 | #endif | ||
1067 | #ifdef CONFIG_MIPS32_N32 | ||
1068 | if (test_tsk_thread_flag(task, TIF_32BIT_ADDR)) | ||
1069 | return &user_mipsn32_view; | ||
1070 | #endif | ||
1071 | return &user_mips64_view; | ||
1072 | #endif | ||
1073 | } | ||
1074 | |||
1075 | long arch_ptrace(struct task_struct *child, long request, | ||
1076 | unsigned long addr, unsigned long data) | ||
1077 | { | ||
1078 | int ret; | ||
1079 | void __user *addrp = (void __user *) addr; | ||
1080 | void __user *datavp = (void __user *) data; | ||
1081 | unsigned long __user *datalp = (void __user *) data; | ||
1082 | |||
1083 | switch (request) { | ||
1084 | /* when I and D space are separate, these will need to be fixed. */ | ||
1085 | case PTRACE_PEEKTEXT: /* read word at location addr. */ | ||
1086 | case PTRACE_PEEKDATA: | ||
1087 | ret = generic_ptrace_peekdata(child, addr, data); | ||
1088 | break; | ||
1089 | |||
1090 | /* Read the word at location addr in the USER area. */ | ||
1091 | case PTRACE_PEEKUSR: { | ||
1092 | struct pt_regs *regs; | ||
1093 | unsigned long tmp = 0; | ||
1094 | |||
1095 | regs = task_pt_regs(child); | ||
1096 | ret = 0; /* Default return value. */ | ||
1097 | |||
1098 | switch (addr) { | ||
1099 | case 0 ... 31: | ||
1100 | tmp = regs->regs[addr]; | ||
1101 | break; | ||
1102 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
1103 | case FPR_BASE ... FPR_BASE + 31: { | ||
1104 | union fpureg *fregs; | ||
1105 | |||
1106 | if (!tsk_used_math(child)) { | ||
1107 | /* FP not yet used */ | ||
1108 | tmp = -1; | ||
1109 | break; | ||
1110 | } | ||
1111 | fregs = get_fpu_regs(child); | ||
1112 | |||
1113 | #ifdef CONFIG_32BIT | ||
1114 | if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { | ||
1115 | /* | ||
1116 | * The odd registers are actually the high | ||
1117 | * order bits of the values stored in the even | ||
1118 | * registers. | ||
1119 | */ | ||
1120 | tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE], | ||
1121 | addr & 1); | ||
1122 | break; | ||
1123 | } | ||
1124 | #endif | ||
1125 | tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); | ||
1126 | break; | ||
1127 | } | ||
1128 | case FPC_CSR: | ||
1129 | tmp = child->thread.fpu.fcr31; | ||
1130 | break; | ||
1131 | case FPC_EIR: | ||
1132 | /* implementation / version register */ | ||
1133 | tmp = boot_cpu_data.fpu_id; | ||
1134 | break; | ||
1135 | #endif | ||
1136 | case PC: | ||
1137 | tmp = regs->cp0_epc; | ||
1138 | break; | ||
1139 | case CAUSE: | ||
1140 | tmp = regs->cp0_cause; | ||
1141 | break; | ||
1142 | case BADVADDR: | ||
1143 | tmp = regs->cp0_badvaddr; | ||
1144 | break; | ||
1145 | case MMHI: | ||
1146 | tmp = regs->hi; | ||
1147 | break; | ||
1148 | case MMLO: | ||
1149 | tmp = regs->lo; | ||
1150 | break; | ||
1151 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | ||
1152 | case ACX: | ||
1153 | tmp = regs->acx; | ||
1154 | break; | ||
1155 | #endif | ||
1156 | case DSP_BASE ... DSP_BASE + 5: { | ||
1157 | dspreg_t *dregs; | ||
1158 | |||
1159 | if (!cpu_has_dsp) { | ||
1160 | tmp = 0; | ||
1161 | ret = -EIO; | ||
1162 | goto out; | ||
1163 | } | ||
1164 | dregs = __get_dsp_regs(child); | ||
1165 | tmp = dregs[addr - DSP_BASE]; | ||
1166 | break; | ||
1167 | } | ||
1168 | case DSP_CONTROL: | ||
1169 | if (!cpu_has_dsp) { | ||
1170 | tmp = 0; | ||
1171 | ret = -EIO; | ||
1172 | goto out; | ||
1173 | } | ||
1174 | tmp = child->thread.dsp.dspcontrol; | ||
1175 | break; | ||
1176 | default: | ||
1177 | tmp = 0; | ||
1178 | ret = -EIO; | ||
1179 | goto out; | ||
1180 | } | ||
1181 | ret = put_user(tmp, datalp); | ||
1182 | break; | ||
1183 | } | ||
1184 | |||
1185 | /* when I and D space are separate, this will have to be fixed. */ | ||
1186 | case PTRACE_POKETEXT: /* write the word at location addr. */ | ||
1187 | case PTRACE_POKEDATA: | ||
1188 | ret = generic_ptrace_pokedata(child, addr, data); | ||
1189 | break; | ||
1190 | |||
1191 | case PTRACE_POKEUSR: { | ||
1192 | struct pt_regs *regs; | ||
1193 | ret = 0; | ||
1194 | regs = task_pt_regs(child); | ||
1195 | |||
1196 | switch (addr) { | ||
1197 | case 0 ... 31: | ||
1198 | regs->regs[addr] = data; | ||
1199 | /* System call number may have been changed */ | ||
1200 | if (addr == 2) | ||
1201 | mips_syscall_update_nr(child, regs); | ||
1202 | else if (addr == 4 && | ||
1203 | mips_syscall_is_indirect(child, regs)) | ||
1204 | mips_syscall_update_nr(child, regs); | ||
1205 | break; | ||
1206 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
1207 | case FPR_BASE ... FPR_BASE + 31: { | ||
1208 | union fpureg *fregs = get_fpu_regs(child); | ||
1209 | |||
1210 | init_fp_ctx(child); | ||
1211 | #ifdef CONFIG_32BIT | ||
1212 | if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { | ||
1213 | /* | ||
1214 | * The odd registers are actually the high | ||
1215 | * order bits of the values stored in the even | ||
1216 | * registers. | ||
1217 | */ | ||
1218 | set_fpr32(&fregs[(addr & ~1) - FPR_BASE], | ||
1219 | addr & 1, data); | ||
1220 | break; | ||
1221 | } | ||
1222 | #endif | ||
1223 | set_fpr64(&fregs[addr - FPR_BASE], 0, data); | ||
1224 | break; | ||
1225 | } | ||
1226 | case FPC_CSR: | ||
1227 | init_fp_ctx(child); | ||
1228 | ptrace_setfcr31(child, data); | ||
1229 | break; | ||
1230 | #endif | ||
1231 | case PC: | ||
1232 | regs->cp0_epc = data; | ||
1233 | break; | ||
1234 | case MMHI: | ||
1235 | regs->hi = data; | ||
1236 | break; | ||
1237 | case MMLO: | ||
1238 | regs->lo = data; | ||
1239 | break; | ||
1240 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | ||
1241 | case ACX: | ||
1242 | regs->acx = data; | ||
1243 | break; | ||
1244 | #endif | ||
1245 | case DSP_BASE ... DSP_BASE + 5: { | ||
1246 | dspreg_t *dregs; | ||
1247 | |||
1248 | if (!cpu_has_dsp) { | ||
1249 | ret = -EIO; | ||
1250 | break; | ||
1251 | } | ||
1252 | |||
1253 | dregs = __get_dsp_regs(child); | ||
1254 | dregs[addr - DSP_BASE] = data; | ||
1255 | break; | ||
1256 | } | ||
1257 | case DSP_CONTROL: | ||
1258 | if (!cpu_has_dsp) { | ||
1259 | ret = -EIO; | ||
1260 | break; | ||
1261 | } | ||
1262 | child->thread.dsp.dspcontrol = data; | ||
1263 | break; | ||
1264 | default: | ||
1265 | /* The rest are not allowed. */ | ||
1266 | ret = -EIO; | ||
1267 | break; | ||
1268 | } | ||
1269 | break; | ||
1270 | } | ||
1271 | |||
1272 | case PTRACE_GETREGS: | ||
1273 | ret = ptrace_getregs(child, datavp); | ||
1274 | break; | ||
1275 | |||
1276 | case PTRACE_SETREGS: | ||
1277 | ret = ptrace_setregs(child, datavp); | ||
1278 | break; | ||
1279 | |||
1280 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
1281 | case PTRACE_GETFPREGS: | ||
1282 | ret = ptrace_getfpregs(child, datavp); | ||
1283 | break; | ||
1284 | |||
1285 | case PTRACE_SETFPREGS: | ||
1286 | ret = ptrace_setfpregs(child, datavp); | ||
1287 | break; | ||
1288 | #endif | ||
1289 | case PTRACE_GET_THREAD_AREA: | ||
1290 | ret = put_user(task_thread_info(child)->tp_value, datalp); | ||
1291 | break; | ||
1292 | |||
1293 | case PTRACE_GET_WATCH_REGS: | ||
1294 | ret = ptrace_get_watch_regs(child, addrp); | ||
1295 | break; | ||
1296 | |||
1297 | case PTRACE_SET_WATCH_REGS: | ||
1298 | ret = ptrace_set_watch_regs(child, addrp); | ||
1299 | break; | ||
1300 | |||
1301 | default: | ||
1302 | ret = ptrace_request(child, request, addr, data); | ||
1303 | break; | ||
1304 | } | ||
1305 | out: | ||
1306 | return ret; | ||
1307 | } | ||
1308 | |||
1309 | /* | ||
1310 | * Notification of system call entry/exit | ||
1311 | * - triggered by current->work.syscall_trace | ||
1312 | */ | ||
1313 | asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall) | ||
1314 | { | ||
1315 | user_exit(); | ||
1316 | |||
1317 | current_thread_info()->syscall = syscall; | ||
1318 | |||
1319 | if (test_thread_flag(TIF_SYSCALL_TRACE)) { | ||
1320 | if (tracehook_report_syscall_entry(regs)) | ||
1321 | return -1; | ||
1322 | syscall = current_thread_info()->syscall; | ||
1323 | } | ||
1324 | |||
1325 | #ifdef CONFIG_SECCOMP | ||
1326 | if (unlikely(test_thread_flag(TIF_SECCOMP))) { | ||
1327 | int ret, i; | ||
1328 | struct seccomp_data sd; | ||
1329 | unsigned long args[6]; | ||
1330 | |||
1331 | sd.nr = syscall; | ||
1332 | sd.arch = syscall_get_arch(current); | ||
1333 | syscall_get_arguments(current, regs, args); | ||
1334 | for (i = 0; i < 6; i++) | ||
1335 | sd.args[i] = args[i]; | ||
1336 | sd.instruction_pointer = KSTK_EIP(current); | ||
1337 | |||
1338 | ret = __secure_computing(&sd); | ||
1339 | if (ret == -1) | ||
1340 | return ret; | ||
1341 | syscall = current_thread_info()->syscall; | ||
1342 | } | ||
1343 | #endif | ||
1344 | |||
1345 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
1346 | trace_sys_enter(regs, regs->regs[2]); | ||
1347 | |||
1348 | audit_syscall_entry(syscall, regs->regs[4], regs->regs[5], | ||
1349 | regs->regs[6], regs->regs[7]); | ||
1350 | |||
1351 | /* | ||
1352 | * Negative syscall numbers are mistaken for rejected syscalls, but | ||
1353 | * won't have had the return value set appropriately, so we do so now. | ||
1354 | */ | ||
1355 | if (syscall < 0) | ||
1356 | syscall_set_return_value(current, regs, -ENOSYS, 0); | ||
1357 | return syscall; | ||
1358 | } | ||
1359 | |||
1360 | /* | ||
1361 | * Notification of system call entry/exit | ||
1362 | * - triggered by current->work.syscall_trace | ||
1363 | */ | ||
1364 | asmlinkage void syscall_trace_leave(struct pt_regs *regs) | ||
1365 | { | ||
1366 | /* | ||
1367 | * We may come here right after calling schedule_user() | ||
1368 | * or do_notify_resume(), in which case we can be in RCU | ||
1369 | * user mode. | ||
1370 | */ | ||
1371 | user_exit(); | ||
1372 | |||
1373 | audit_syscall_exit(regs); | ||
1374 | |||
1375 | if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT))) | ||
1376 | trace_sys_exit(regs, regs_return_value(regs)); | ||
1377 | |||
1378 | if (test_thread_flag(TIF_SYSCALL_TRACE)) | ||
1379 | tracehook_report_syscall_exit(regs, 0); | ||
1380 | |||
1381 | user_enter(); | ||
1382 | } | ||
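A hedged userspace sketch of the strace-style loop these hooks serve. Every PTRACE_SYSCALL stop corresponds to a pass through syscall_trace_enter() or syscall_trace_leave(); register 2 (v0) holds the syscall number at entry and the return value at exit.

#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void)
{
    pid_t pid = fork();

    if (pid == 0) {
        ptrace(PTRACE_TRACEME, 0, NULL, NULL);
        execlp("ls", "ls", (char *)NULL);
        _exit(127);
    }

    int status;
    waitpid(pid, &status, 0);
    while (!WIFEXITED(status)) {
        ptrace(PTRACE_SYSCALL, pid, NULL, NULL); /* run to next stop */
        waitpid(pid, &status, 0);
        if (WIFSTOPPED(status)) {
            long v0 = ptrace(PTRACE_PEEKUSER, pid, (void *)2, NULL);
            fprintf(stderr, "stop: v0=%ld\n", v0);
        }
    }
    return 0;
}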
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c new file mode 100644 index 000000000..afcf27a87 --- /dev/null +++ b/arch/mips/kernel/ptrace32.c | |||
@@ -0,0 +1,317 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1992 Ross Biro | ||
7 | * Copyright (C) Linus Torvalds | ||
8 | * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle | ||
9 | * Copyright (C) 1996 David S. Miller | ||
10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | ||
11 | * Copyright (C) 1999 MIPS Technologies, Inc. | ||
12 | * Copyright (C) 2000 Ulf Carlsson | ||
13 | * | ||
14 | * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit | ||
15 | * binaries. | ||
16 | */ | ||
17 | #include <linux/compiler.h> | ||
18 | #include <linux/compat.h> | ||
19 | #include <linux/kernel.h> | ||
20 | #include <linux/sched.h> | ||
21 | #include <linux/sched/task_stack.h> | ||
22 | #include <linux/mm.h> | ||
23 | #include <linux/errno.h> | ||
24 | #include <linux/ptrace.h> | ||
25 | #include <linux/smp.h> | ||
26 | #include <linux/security.h> | ||
27 | |||
28 | #include <asm/cpu.h> | ||
29 | #include <asm/dsp.h> | ||
30 | #include <asm/fpu.h> | ||
31 | #include <asm/mipsregs.h> | ||
32 | #include <asm/mipsmtregs.h> | ||
33 | #include <asm/page.h> | ||
34 | #include <asm/reg.h> | ||
35 | #include <asm/syscall.h> | ||
36 | #include <linux/uaccess.h> | ||
37 | #include <asm/bootinfo.h> | ||
38 | |||
39 | /* | ||
40 | * Tracing a 32-bit process with a 64-bit strace and vice versa will not | ||
41 | * work. I don't know how to fix this. | ||
42 | */ | ||
43 | long compat_arch_ptrace(struct task_struct *child, compat_long_t request, | ||
44 | compat_ulong_t caddr, compat_ulong_t cdata) | ||
45 | { | ||
46 | int addr = caddr; | ||
47 | int data = cdata; | ||
48 | int ret; | ||
49 | |||
50 | switch (request) { | ||
51 | |||
52 | /* | ||
53 | * Read 4 bytes of the other process' storage | ||
54 | * data is a pointer specifying where the user wants the | ||
55 | * 4 bytes copied to | ||
56 | * addr is a pointer in the user's storage that contains an 8-byte | ||
57 | * address in the other process identifying the 4 bytes to be read | ||
58 | * (this is run in a 32-bit process looking at a 64-bit process) | ||
59 | * when I and D space are separate, these will need to be fixed. | ||
60 | */ | ||
61 | case PTRACE_PEEKTEXT_3264: | ||
62 | case PTRACE_PEEKDATA_3264: { | ||
63 | u32 tmp; | ||
64 | int copied; | ||
65 | u32 __user * addrOthers; | ||
66 | |||
67 | ret = -EIO; | ||
68 | |||
69 | /* Get the addr in the other process that we want to read */ | ||
70 | if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0) | ||
71 | break; | ||
72 | |||
73 | copied = ptrace_access_vm(child, (u64)addrOthers, &tmp, | ||
74 | sizeof(tmp), FOLL_FORCE); | ||
75 | if (copied != sizeof(tmp)) | ||
76 | break; | ||
77 | ret = put_user(tmp, (u32 __user *) (unsigned long) data); | ||
78 | break; | ||
79 | } | ||
80 | |||
81 | /* Read the word at location addr in the USER area. */ | ||
82 | case PTRACE_PEEKUSR: { | ||
83 | struct pt_regs *regs; | ||
84 | unsigned int tmp; | ||
85 | |||
86 | regs = task_pt_regs(child); | ||
87 | ret = 0; /* Default return value. */ | ||
88 | |||
89 | switch (addr) { | ||
90 | case 0 ... 31: | ||
91 | tmp = regs->regs[addr]; | ||
92 | break; | ||
93 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
94 | case FPR_BASE ... FPR_BASE + 31: { | ||
95 | union fpureg *fregs; | ||
96 | |||
97 | if (!tsk_used_math(child)) { | ||
98 | /* FP not yet used */ | ||
99 | tmp = -1; | ||
100 | break; | ||
101 | } | ||
102 | fregs = get_fpu_regs(child); | ||
103 | if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { | ||
104 | /* | ||
105 | * The odd registers are actually the high | ||
106 | * order bits of the values stored in the even | ||
107 | * registers. | ||
108 | */ | ||
109 | tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE], | ||
110 | addr & 1); | ||
111 | break; | ||
112 | } | ||
113 | tmp = get_fpr64(&fregs[addr - FPR_BASE], 0); | ||
114 | break; | ||
115 | } | ||
116 | case FPC_CSR: | ||
117 | tmp = child->thread.fpu.fcr31; | ||
118 | break; | ||
119 | case FPC_EIR: | ||
120 | /* implementation / version register */ | ||
121 | tmp = boot_cpu_data.fpu_id; | ||
122 | break; | ||
123 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
124 | case PC: | ||
125 | tmp = regs->cp0_epc; | ||
126 | break; | ||
127 | case CAUSE: | ||
128 | tmp = regs->cp0_cause; | ||
129 | break; | ||
130 | case BADVADDR: | ||
131 | tmp = regs->cp0_badvaddr; | ||
132 | break; | ||
133 | case MMHI: | ||
134 | tmp = regs->hi; | ||
135 | break; | ||
136 | case MMLO: | ||
137 | tmp = regs->lo; | ||
138 | break; | ||
139 | case DSP_BASE ... DSP_BASE + 5: { | ||
140 | dspreg_t *dregs; | ||
141 | |||
142 | if (!cpu_has_dsp) { | ||
143 | tmp = 0; | ||
144 | ret = -EIO; | ||
145 | goto out; | ||
146 | } | ||
147 | dregs = __get_dsp_regs(child); | ||
148 | tmp = dregs[addr - DSP_BASE]; | ||
149 | break; | ||
150 | } | ||
151 | case DSP_CONTROL: | ||
152 | if (!cpu_has_dsp) { | ||
153 | tmp = 0; | ||
154 | ret = -EIO; | ||
155 | goto out; | ||
156 | } | ||
157 | tmp = child->thread.dsp.dspcontrol; | ||
158 | break; | ||
159 | default: | ||
160 | tmp = 0; | ||
161 | ret = -EIO; | ||
162 | goto out; | ||
163 | } | ||
164 | ret = put_user(tmp, (unsigned __user *) (unsigned long) data); | ||
165 | break; | ||
166 | } | ||
167 | |||
168 | /* | ||
169 | * Write 4 bytes into the other process' storage | ||
170 | * data is the 4 bytes that the user wants written | ||
171 | * addr is a pointer in the user's storage that contains an | ||
172 | * 8-byte address in the other process where the 4 bytes | ||
173 | * are to be written | ||
174 | * (this is run in a 32-bit process looking at a 64-bit process) | ||
175 | * when I and D space are separate, these will need to be fixed. | ||
176 | */ | ||
177 | case PTRACE_POKETEXT_3264: | ||
178 | case PTRACE_POKEDATA_3264: { | ||
179 | u32 __user * addrOthers; | ||
180 | |||
181 | /* Get the addr in the other process that we want to write into */ | ||
182 | ret = -EIO; | ||
183 | if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0) | ||
184 | break; | ||
185 | ret = 0; | ||
186 | if (ptrace_access_vm(child, (u64)addrOthers, &data, | ||
187 | sizeof(data), | ||
188 | FOLL_FORCE | FOLL_WRITE) == sizeof(data)) | ||
189 | break; | ||
190 | ret = -EIO; | ||
191 | break; | ||
192 | } | ||
193 | |||
194 | case PTRACE_POKEUSR: { | ||
195 | struct pt_regs *regs; | ||
196 | ret = 0; | ||
197 | regs = task_pt_regs(child); | ||
198 | |||
199 | switch (addr) { | ||
200 | case 0 ... 31: | ||
201 | regs->regs[addr] = data; | ||
202 | /* System call number may have been changed */ | ||
203 | if (addr == 2) | ||
204 | mips_syscall_update_nr(child, regs); | ||
205 | else if (addr == 4 && | ||
206 | mips_syscall_is_indirect(child, regs)) | ||
207 | mips_syscall_update_nr(child, regs); | ||
208 | break; | ||
209 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
210 | case FPR_BASE ... FPR_BASE + 31: { | ||
211 | union fpureg *fregs = get_fpu_regs(child); | ||
212 | |||
213 | if (!tsk_used_math(child)) { | ||
214 | /* FP not yet used */ | ||
215 | memset(&child->thread.fpu, ~0, | ||
216 | sizeof(child->thread.fpu)); | ||
217 | child->thread.fpu.fcr31 = 0; | ||
218 | } | ||
219 | if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) { | ||
220 | /* | ||
221 | * The odd registers are actually the high | ||
222 | * order bits of the values stored in the even | ||
223 | * registers. | ||
224 | */ | ||
225 | set_fpr32(&fregs[(addr & ~1) - FPR_BASE], | ||
226 | addr & 1, data); | ||
227 | break; | ||
228 | } | ||
229 | set_fpr64(&fregs[addr - FPR_BASE], 0, data); | ||
230 | break; | ||
231 | } | ||
232 | case FPC_CSR: | ||
233 | child->thread.fpu.fcr31 = data; | ||
234 | break; | ||
235 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
236 | case PC: | ||
237 | regs->cp0_epc = data; | ||
238 | break; | ||
239 | case MMHI: | ||
240 | regs->hi = data; | ||
241 | break; | ||
242 | case MMLO: | ||
243 | regs->lo = data; | ||
244 | break; | ||
245 | case DSP_BASE ... DSP_BASE + 5: { | ||
246 | dspreg_t *dregs; | ||
247 | |||
248 | if (!cpu_has_dsp) { | ||
249 | ret = -EIO; | ||
250 | break; | ||
251 | } | ||
252 | |||
253 | dregs = __get_dsp_regs(child); | ||
254 | dregs[addr - DSP_BASE] = data; | ||
255 | break; | ||
256 | } | ||
257 | case DSP_CONTROL: | ||
258 | if (!cpu_has_dsp) { | ||
259 | ret = -EIO; | ||
260 | break; | ||
261 | } | ||
262 | child->thread.dsp.dspcontrol = data; | ||
263 | break; | ||
264 | default: | ||
265 | /* The rest are not allowed. */ | ||
266 | ret = -EIO; | ||
267 | break; | ||
268 | } | ||
269 | break; | ||
270 | } | ||
271 | |||
272 | case PTRACE_GETREGS: | ||
273 | ret = ptrace_getregs(child, | ||
274 | (struct user_pt_regs __user *) (__u64) data); | ||
275 | break; | ||
276 | |||
277 | case PTRACE_SETREGS: | ||
278 | ret = ptrace_setregs(child, | ||
279 | (struct user_pt_regs __user *) (__u64) data); | ||
280 | break; | ||
281 | |||
282 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
283 | case PTRACE_GETFPREGS: | ||
284 | ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data); | ||
285 | break; | ||
286 | |||
287 | case PTRACE_SETFPREGS: | ||
288 | ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data); | ||
289 | break; | ||
290 | #endif | ||
291 | case PTRACE_GET_THREAD_AREA: | ||
292 | ret = put_user(task_thread_info(child)->tp_value, | ||
293 | (unsigned int __user *) (unsigned long) data); | ||
294 | break; | ||
295 | |||
296 | case PTRACE_GET_THREAD_AREA_3264: | ||
297 | ret = put_user(task_thread_info(child)->tp_value, | ||
298 | (unsigned long __user *) (unsigned long) data); | ||
299 | break; | ||
300 | |||
301 | case PTRACE_GET_WATCH_REGS: | ||
302 | ret = ptrace_get_watch_regs(child, | ||
303 | (struct pt_watch_regs __user *) (unsigned long) addr); | ||
304 | break; | ||
305 | |||
306 | case PTRACE_SET_WATCH_REGS: | ||
307 | ret = ptrace_set_watch_regs(child, | ||
308 | (struct pt_watch_regs __user *) (unsigned long) addr); | ||
309 | break; | ||
310 | |||
311 | default: | ||
312 | ret = compat_ptrace_request(child, request, addr, data); | ||
313 | break; | ||
314 | } | ||
315 | out: | ||
316 | return ret; | ||
317 | } | ||
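The FPR_BASE cases above depend on the MIPS convention that, when a task uses 32-bit FP registers (TIF_32BIT_FPREGS set), odd-numbered registers alias the upper 32 bits of the storage slot of the preceding even register. Below is a minimal sketch of that mapping, assuming a little-endian layout within each 64-bit slot (the real get_fpr32/set_fpr32 helpers also handle the big-endian case):

#include <stdint.h>

/* One 64-bit slot per even/odd register pair, as in union fpureg. */
static uint64_t fpr_slot[16];

/* Read FPR n (0..31) as a 32-bit value under the aliasing rule. */
static uint32_t sketch_get_fpr32(unsigned int n)
{
	uint64_t v = fpr_slot[n / 2];		/* even register's slot */
	return (n & 1) ? (uint32_t)(v >> 32)	/* odd: high half */
		       : (uint32_t)v;		/* even: low half */
}

/* Write FPR n without disturbing its pair partner. */
static void sketch_set_fpr32(unsigned int n, uint32_t val)
{
	uint64_t *v = &fpr_slot[n / 2];
	if (n & 1)
		*v = (*v & 0xffffffffULL) | ((uint64_t)val << 32);
	else
		*v = (*v & ~0xffffffffULL) | val;
}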
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S new file mode 100644 index 000000000..cbf6db98c --- /dev/null +++ b/arch/mips/kernel/r2300_fpu.S | |||
@@ -0,0 +1,130 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1996, 1998 by Ralf Baechle | ||
7 | * | ||
8 | * Multi-arch abstraction and asm macros for easier reading: | ||
9 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) | ||
10 | * | ||
11 | * Further modifications to make this work: | ||
12 | * Copyright (c) 1998 Harald Koerfgen | ||
13 | */ | ||
14 | #include <asm/asm.h> | ||
15 | #include <asm/asmmacro.h> | ||
16 | #include <asm/errno.h> | ||
17 | #include <asm/export.h> | ||
18 | #include <asm/fpregdef.h> | ||
19 | #include <asm/mipsregs.h> | ||
20 | #include <asm/asm-offsets.h> | ||
21 | #include <asm/regdef.h> | ||
22 | |||
23 | #define EX(a,b) \ | ||
24 | 9: a,##b; \ | ||
25 | .section __ex_table,"a"; \ | ||
26 | PTR 9b,fault; \ | ||
27 | .previous | ||
28 | |||
29 | #define EX2(a,b) \ | ||
30 | 9: a,##b; \ | ||
31 | .section __ex_table,"a"; \ | ||
32 | PTR 9b,fault; \ | ||
33 | PTR 9b+4,fault; \ | ||
34 | .previous | ||
35 | |||
36 | .set mips1 | ||
37 | |||
38 | /* | ||
39 | * Save a thread's fp context. | ||
40 | */ | ||
41 | LEAF(_save_fp) | ||
42 | EXPORT_SYMBOL(_save_fp) | ||
43 | fpu_save_single a0, t1 # clobbers t1 | ||
44 | jr ra | ||
45 | END(_save_fp) | ||
46 | |||
47 | /* | ||
48 | * Restore a thread's fp context. | ||
49 | */ | ||
50 | LEAF(_restore_fp) | ||
51 | fpu_restore_single a0, t1 # clobbers t1 | ||
52 | jr ra | ||
53 | END(_restore_fp) | ||
54 | |||
55 | .set noreorder | ||
56 | |||
57 | /** | ||
58 | * _save_fp_context() - save FP context from the FPU | ||
59 | * @a0 - pointer to fpregs field of sigcontext | ||
60 | * @a1 - pointer to fpc_csr field of sigcontext | ||
61 | * | ||
62 | * Save FP context, including the 32 FP data registers and the FP | ||
63 | * control & status register, from the FPU to signal context. | ||
64 | */ | ||
65 | LEAF(_save_fp_context) | ||
66 | .set push | ||
67 | SET_HARDFLOAT | ||
68 | li v0, 0 # assume success | ||
69 | cfc1 t1, fcr31 | ||
70 | EX2(s.d $f0, 0(a0)) | ||
71 | EX2(s.d $f2, 16(a0)) | ||
72 | EX2(s.d $f4, 32(a0)) | ||
73 | EX2(s.d $f6, 48(a0)) | ||
74 | EX2(s.d $f8, 64(a0)) | ||
75 | EX2(s.d $f10, 80(a0)) | ||
76 | EX2(s.d $f12, 96(a0)) | ||
77 | EX2(s.d $f14, 112(a0)) | ||
78 | EX2(s.d $f16, 128(a0)) | ||
79 | EX2(s.d $f18, 144(a0)) | ||
80 | EX2(s.d $f20, 160(a0)) | ||
81 | EX2(s.d $f22, 176(a0)) | ||
82 | EX2(s.d $f24, 192(a0)) | ||
83 | EX2(s.d $f26, 208(a0)) | ||
84 | EX2(s.d $f28, 224(a0)) | ||
85 | EX2(s.d $f30, 240(a0)) | ||
86 | jr ra | ||
87 | EX(sw t1, (a1)) | ||
88 | .set pop | ||
89 | END(_save_fp_context) | ||
90 | |||
91 | /** | ||
92 | * _restore_fp_context() - restore FP context to the FPU | ||
93 | * @a0 - pointer to fpregs field of sigcontext | ||
94 | * @a1 - pointer to fpc_csr field of sigcontext | ||
95 | * | ||
96 | * Restore FP context, including the 32 FP data registers and the FP | ||
97 | * control & status register, from signal context to the FPU. | ||
98 | */ | ||
99 | LEAF(_restore_fp_context) | ||
100 | .set push | ||
101 | SET_HARDFLOAT | ||
102 | li v0, 0 # assume success | ||
103 | EX(lw t0, (a1)) | ||
104 | EX2(l.d $f0, 0(a0)) | ||
105 | EX2(l.d $f2, 16(a0)) | ||
106 | EX2(l.d $f4, 32(a0)) | ||
107 | EX2(l.d $f6, 48(a0)) | ||
108 | EX2(l.d $f8, 64(a0)) | ||
109 | EX2(l.d $f10, 80(a0)) | ||
110 | EX2(l.d $f12, 96(a0)) | ||
111 | EX2(l.d $f14, 112(a0)) | ||
112 | EX2(l.d $f16, 128(a0)) | ||
113 | EX2(l.d $f18, 144(a0)) | ||
114 | EX2(l.d $f20, 160(a0)) | ||
115 | EX2(l.d $f22, 176(a0)) | ||
116 | EX2(l.d $f24, 192(a0)) | ||
117 | EX2(l.d $f26, 208(a0)) | ||
118 | EX2(l.d $f28, 224(a0)) | ||
119 | EX2(l.d $f30, 240(a0)) | ||
120 | jr ra | ||
121 | ctc1 t0, fcr31 | ||
122 | .set pop | ||
123 | END(_restore_fp_context) | ||
124 | .set reorder | ||
125 | |||
126 | .type fault, @function | ||
127 | .ent fault | ||
128 | fault: li v0, -EFAULT | ||
129 | jr ra | ||
130 | .end fault | ||
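The EX/EX2 macros above emit one __ex_table record per possibly-faulting user access; on a fault, the exception handler looks the faulting PC up in that table and resumes at the registered fixup (here always the shared fault stub, which returns -EFAULT in v0). A rough C view of one record, assuming the classic two-pointer layout:

/* Sketch of one exception-table record as EX/EX2 lay it out:
 * a PTR to the instruction that may fault, followed by a PTR to
 * the fixup code to resume at on a fault.
 */
struct ex_table_entry_sketch {
	unsigned long insn;	/* address of the faulting load/store */
	unsigned long fixup;	/* address to continue at on a fault */
};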
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S new file mode 100644 index 000000000..71b1aafae --- /dev/null +++ b/arch/mips/kernel/r2300_switch.S | |||
@@ -0,0 +1,65 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | /* | ||
3 | * r2300_switch.S: R2300 specific task switching code. | ||
4 | * | ||
5 | * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle | ||
6 | * Copyright (C) 1994, 1995, 1996 by Andreas Busse | ||
7 | * | ||
8 | * Multi-cpu abstraction and macros for easier reading: | ||
9 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) | ||
10 | * | ||
11 | * Further modifications to make this work: | ||
12 | * Copyright (c) 1998-2000 Harald Koerfgen | ||
13 | */ | ||
14 | #include <asm/asm.h> | ||
15 | #include <asm/cachectl.h> | ||
16 | #include <asm/export.h> | ||
17 | #include <asm/fpregdef.h> | ||
18 | #include <asm/mipsregs.h> | ||
19 | #include <asm/asm-offsets.h> | ||
20 | #include <asm/regdef.h> | ||
21 | #include <asm/stackframe.h> | ||
22 | #include <asm/thread_info.h> | ||
23 | |||
24 | #include <asm/asmmacro.h> | ||
25 | |||
26 | .set mips1 | ||
27 | .align 5 | ||
28 | |||
29 | /* | ||
30 | * task_struct *resume(task_struct *prev, task_struct *next, | ||
31 | * struct thread_info *next_ti) | ||
32 | */ | ||
33 | LEAF(resume) | ||
34 | mfc0 t1, CP0_STATUS | ||
35 | sw t1, THREAD_STATUS(a0) | ||
36 | cpu_save_nonscratch a0 | ||
37 | sw ra, THREAD_REG31(a0) | ||
38 | |||
39 | #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) | ||
40 | PTR_LA t8, __stack_chk_guard | ||
41 | LONG_L t9, TASK_STACK_CANARY(a1) | ||
42 | LONG_S t9, 0(t8) | ||
43 | #endif | ||
44 | |||
45 | /* | ||
46 | * The order of restoring the registers takes care of the race | ||
47 | * updating $28, $29 and kernelsp without disabling ints. | ||
48 | */ | ||
49 | move $28, a2 | ||
50 | cpu_restore_nonscratch a1 | ||
51 | |||
52 | addiu t1, $28, _THREAD_SIZE - 32 | ||
53 | sw t1, kernelsp | ||
54 | |||
55 | mfc0 t1, CP0_STATUS /* Do we really need this? */ | ||
56 | li a3, 0xff01 | ||
57 | and t1, a3 | ||
58 | lw a2, THREAD_STATUS(a1) | ||
59 | nor a3, $0, a3 | ||
60 | and a2, a3 | ||
61 | or a2, t1 | ||
62 | mtc0 a2, CP0_STATUS | ||
63 | move v0, a0 | ||
64 | jr ra | ||
65 | END(resume) | ||
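The li/and/nor/or block at the end of resume() (here and again in the r4k variant later in this series) merges two CP0 Status values: the interrupt mask and enable bits of the currently live Status are kept, while everything else is taken from the incoming thread's saved Status. The same merge, sketched in C:

/* Mask 0xff01 covers IM7..IM0 (bits 15..8) and IE (bit 0). */
static unsigned int sketch_merge_status(unsigned int live, unsigned int saved)
{
	const unsigned int keep = 0xff01;

	return (saved & ~keep) | (live & keep);
}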
diff --git a/arch/mips/kernel/r4k-bugs64.c b/arch/mips/kernel/r4k-bugs64.c new file mode 100644 index 000000000..1ff19f1ea --- /dev/null +++ b/arch/mips/kernel/r4k-bugs64.c | |||
@@ -0,0 +1,322 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2003, 2004, 2007 Maciej W. Rozycki | ||
4 | */ | ||
5 | #include <linux/context_tracking.h> | ||
6 | #include <linux/init.h> | ||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/ptrace.h> | ||
9 | #include <linux/stddef.h> | ||
10 | |||
11 | #include <asm/bugs.h> | ||
12 | #include <asm/compiler.h> | ||
13 | #include <asm/cpu.h> | ||
14 | #include <asm/fpu.h> | ||
15 | #include <asm/mipsregs.h> | ||
16 | #include <asm/setup.h> | ||
17 | |||
18 | static char bug64hit[] __initdata = | ||
19 | "reliable operation impossible!\n%s"; | ||
20 | static char nowar[] __initdata = | ||
21 | "Please report to <linux-mips@linux-mips.org>."; | ||
22 | static char r4kwar[] __initdata = | ||
23 | "Enable CPU_R4000_WORKAROUNDS to rectify."; | ||
24 | static char daddiwar[] __initdata = | ||
25 | "Enable CPU_DADDI_WORKAROUNDS to rectify."; | ||
26 | |||
27 | static __always_inline __init | ||
28 | void align_mod(const int align, const int mod) | ||
29 | { | ||
30 | asm volatile( | ||
31 | ".set push\n\t" | ||
32 | ".set noreorder\n\t" | ||
33 | ".balign %0\n\t" | ||
34 | ".rept %1\n\t" | ||
35 | "nop\n\t" | ||
36 | ".endr\n\t" | ||
37 | ".set pop" | ||
38 | : | ||
39 | : "n"(align), "n"(mod)); | ||
40 | } | ||
41 | |||
42 | static __always_inline __init | ||
43 | void mult_sh_align_mod(long *v1, long *v2, long *w, | ||
44 | const int align, const int mod) | ||
45 | { | ||
46 | unsigned long flags; | ||
47 | int m1, m2; | ||
48 | long p, s, lv1, lv2, lw; | ||
49 | |||
50 | /* | ||
51 | * We want the multiply and the shift to be isolated from the | ||
52 | * rest of the code to disable gcc optimizations. Hence the | ||
53 | * asm statements that execute nothing, but make gcc not know | ||
54 | * what the values of m1, m2 and s are and what lv2 and p are | ||
55 | * used for. | ||
56 | */ | ||
57 | |||
58 | local_irq_save(flags); | ||
59 | /* | ||
60 | * The following code leads to a wrong result of the first | ||
61 | * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId | ||
62 | * 00000422 or 00000430, respectively). | ||
63 | * | ||
64 | * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and | ||
65 | * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for | ||
66 | * details. I got no permission to duplicate them here, | ||
67 | * sigh... --macro | ||
68 | */ | ||
69 | asm volatile( | ||
70 | "" | ||
71 | : "=r" (m1), "=r" (m2), "=r" (s) | ||
72 | : "0" (5), "1" (8), "2" (5)); | ||
73 | align_mod(align, mod); | ||
74 | /* | ||
75 | * The trailing nop is needed to fulfill the two-instruction | ||
76 | * requirement between reading hi/lo and starting a mult/div. | ||
77 | * Leaving it out may cause gas to insert a nop itself, breaking | ||
78 | * the desired alignment of the next chunk. | ||
79 | */ | ||
80 | asm volatile( | ||
81 | ".set push\n\t" | ||
82 | ".set noat\n\t" | ||
83 | ".set noreorder\n\t" | ||
84 | ".set nomacro\n\t" | ||
85 | "mult %2, %3\n\t" | ||
86 | "dsll32 %0, %4, %5\n\t" | ||
87 | "mflo $0\n\t" | ||
88 | "dsll32 %1, %4, %5\n\t" | ||
89 | "nop\n\t" | ||
90 | ".set pop" | ||
91 | : "=&r" (lv1), "=r" (lw) | ||
92 | : "r" (m1), "r" (m2), "r" (s), "I" (0) | ||
93 | : "hi", "lo", "$0"); | ||
94 | /* We have to use single integers for m1 and m2 and a double | ||
95 | * one for p to be sure that gcc's mulsidi3 RTL multiplication | ||
96 | * instruction has the workaround applied. Older versions of | ||
97 | * gcc have correct umulsi3 and mulsi3, but other | ||
98 | * multiplication variants lack the workaround. | ||
99 | */ | ||
100 | asm volatile( | ||
101 | "" | ||
102 | : "=r" (m1), "=r" (m2), "=r" (s) | ||
103 | : "0" (m1), "1" (m2), "2" (s)); | ||
104 | align_mod(align, mod); | ||
105 | p = m1 * m2; | ||
106 | lv2 = s << 32; | ||
107 | asm volatile( | ||
108 | "" | ||
109 | : "=r" (lv2) | ||
110 | : "0" (lv2), "r" (p)); | ||
111 | local_irq_restore(flags); | ||
112 | |||
113 | *v1 = lv1; | ||
114 | *v2 = lv2; | ||
115 | *w = lw; | ||
116 | } | ||
117 | |||
118 | static __always_inline __init void check_mult_sh(void) | ||
119 | { | ||
120 | long v1[8], v2[8], w[8]; | ||
121 | int bug, fix, i; | ||
122 | |||
123 | printk("Checking for the multiply/shift bug... "); | ||
124 | |||
125 | /* | ||
126 | * Testing discovered false negatives for certain code offsets | ||
127 | * into cache lines. Hence we test all possible offsets for | ||
128 | * the worst assumption of an R4000 I-cache line width of 32 | ||
129 | * bytes. | ||
130 | * | ||
131 | * We can't use a loop as alignment directives need to be | ||
132 | * immediates. | ||
133 | */ | ||
134 | mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0); | ||
135 | mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1); | ||
136 | mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2); | ||
137 | mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3); | ||
138 | mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4); | ||
139 | mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5); | ||
140 | mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6); | ||
141 | mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7); | ||
142 | |||
143 | bug = 0; | ||
144 | for (i = 0; i < 8; i++) | ||
145 | if (v1[i] != w[i]) | ||
146 | bug = 1; | ||
147 | |||
148 | if (bug == 0) { | ||
149 | pr_cont("no.\n"); | ||
150 | return; | ||
151 | } | ||
152 | |||
153 | pr_cont("yes, workaround... "); | ||
154 | |||
155 | fix = 1; | ||
156 | for (i = 0; i < 8; i++) | ||
157 | if (v2[i] != w[i]) | ||
158 | fix = 0; | ||
159 | |||
160 | if (fix == 1) { | ||
161 | pr_cont("yes.\n"); | ||
162 | return; | ||
163 | } | ||
164 | |||
165 | pr_cont("no.\n"); | ||
166 | panic(bug64hit, !R4000_WAR ? r4kwar : nowar); | ||
167 | } | ||
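To summarize the decision logic above: any mismatch between v1[i] (the dsll32 issued right after mult) and the reference w[i] means the erratum is present; if every v2[i] (the equivalent shift as generated by the compiler, which carries the workaround when CPU_R4000_WORKAROUNDS is enabled) matches w[i], the compiled-in workaround is effective and boot continues, otherwise the kernel panics.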
168 | |||
169 | static volatile int daddi_ov; | ||
170 | |||
171 | asmlinkage void __init do_daddi_ov(struct pt_regs *regs) | ||
172 | { | ||
173 | enum ctx_state prev_state; | ||
174 | |||
175 | prev_state = exception_enter(); | ||
176 | daddi_ov = 1; | ||
177 | regs->cp0_epc += 4; | ||
178 | exception_exit(prev_state); | ||
179 | } | ||
180 | |||
181 | static __init void check_daddi(void) | ||
182 | { | ||
183 | extern asmlinkage void handle_daddi_ov(void); | ||
184 | unsigned long flags; | ||
185 | void *handler; | ||
186 | long v, tmp; | ||
187 | |||
188 | printk("Checking for the daddi bug... "); | ||
189 | |||
190 | local_irq_save(flags); | ||
191 | handler = set_except_vector(EXCCODE_OV, handle_daddi_ov); | ||
192 | /* | ||
193 | * The following code fails to trigger an overflow exception | ||
194 | * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or | ||
195 | * 00000430, respectively). | ||
196 | * | ||
197 | * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and | ||
198 | * 3.0" by MIPS Technologies, Inc., erratum #23 for details. | ||
199 | * I got no permission to duplicate it here, sigh... --macro | ||
200 | */ | ||
201 | asm volatile( | ||
202 | ".set push\n\t" | ||
203 | ".set noat\n\t" | ||
204 | ".set noreorder\n\t" | ||
205 | ".set nomacro\n\t" | ||
206 | "addiu %1, $0, %2\n\t" | ||
207 | "dsrl %1, %1, 1\n\t" | ||
208 | #ifdef HAVE_AS_SET_DADDI | ||
209 | ".set daddi\n\t" | ||
210 | #endif | ||
211 | "daddi %0, %1, %3\n\t" | ||
212 | ".set pop" | ||
213 | : "=r" (v), "=&r" (tmp) | ||
214 | : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); | ||
215 | set_except_vector(EXCCODE_OV, handler); | ||
216 | local_irq_restore(flags); | ||
217 | |||
218 | if (daddi_ov) { | ||
219 | pr_cont("no.\n"); | ||
220 | return; | ||
221 | } | ||
222 | |||
223 | pr_cont("yes, workaround... "); | ||
224 | |||
225 | local_irq_save(flags); | ||
226 | handler = set_except_vector(EXCCODE_OV, handle_daddi_ov); | ||
227 | asm volatile( | ||
228 | "addiu %1, $0, %2\n\t" | ||
229 | "dsrl %1, %1, 1\n\t" | ||
230 | "daddi %0, %1, %3" | ||
231 | : "=r" (v), "=&r" (tmp) | ||
232 | : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); | ||
233 | set_except_vector(EXCCODE_OV, handler); | ||
234 | local_irq_restore(flags); | ||
235 | |||
236 | if (daddi_ov) { | ||
237 | pr_cont("yes.\n"); | ||
238 | return; | ||
239 | } | ||
240 | |||
241 | pr_cont("no.\n"); | ||
242 | panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); | ||
243 | } | ||
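Worked through, the probe values make the intent clear: addiu sign-extends 0xdb9a to 0xffffffffffffdb9a, the logical shift right by one yields 0x7fffffffffffedcd, and adding 0x1234 produces 0x8000000000000001, which crosses the 64-bit signed maximum. A correct daddi must therefore raise an overflow exception (caught by do_daddi_ov, which sets daddi_ov); a CPU with the erratum completes the addition silently, which is exactly what the check detects.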
244 | |||
245 | int daddiu_bug = -1; | ||
246 | |||
247 | static __init void check_daddiu(void) | ||
248 | { | ||
249 | long v, w, tmp; | ||
250 | |||
251 | printk("Checking for the daddiu bug... "); | ||
252 | |||
253 | /* | ||
254 | * The following code leads to a wrong result of daddiu when | ||
255 | * executed on R4400 rev. 1.0 (PRId 00000440). | ||
256 | * | ||
257 | * See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by | ||
258 | * MIPS Technologies, Inc., erratum #7 for details. | ||
259 | * | ||
260 | * According to "MIPS R4000PC/SC Errata, Processor Revision | ||
261 | * 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this | ||
262 | * problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and | ||
263 | * 00000430, respectively), too. Testing failed to trigger it | ||
264 | * so far. | ||
265 | * | ||
266 | * I got no permission to duplicate the errata here, sigh... | ||
267 | * --macro | ||
268 | */ | ||
269 | asm volatile( | ||
270 | ".set push\n\t" | ||
271 | ".set noat\n\t" | ||
272 | ".set noreorder\n\t" | ||
273 | ".set nomacro\n\t" | ||
274 | "addiu %2, $0, %3\n\t" | ||
275 | "dsrl %2, %2, 1\n\t" | ||
276 | #ifdef HAVE_AS_SET_DADDI | ||
277 | ".set daddi\n\t" | ||
278 | #endif | ||
279 | "daddiu %0, %2, %4\n\t" | ||
280 | "addiu %1, $0, %4\n\t" | ||
281 | "daddu %1, %2\n\t" | ||
282 | ".set pop" | ||
283 | : "=&r" (v), "=&r" (w), "=&r" (tmp) | ||
284 | : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); | ||
285 | |||
286 | daddiu_bug = v != w; | ||
287 | |||
288 | if (!daddiu_bug) { | ||
289 | pr_cont("no.\n"); | ||
290 | return; | ||
291 | } | ||
292 | |||
293 | pr_cont("yes, workaround... "); | ||
294 | |||
295 | asm volatile( | ||
296 | "addiu %2, $0, %3\n\t" | ||
297 | "dsrl %2, %2, 1\n\t" | ||
298 | "daddiu %0, %2, %4\n\t" | ||
299 | "addiu %1, $0, %4\n\t" | ||
300 | "daddu %1, %2" | ||
301 | : "=&r" (v), "=&r" (w), "=&r" (tmp) | ||
302 | : "I" (0xffffffffffffdb9aUL), "I" (0x1234)); | ||
303 | |||
304 | if (v == w) { | ||
305 | pr_cont("yes.\n"); | ||
306 | return; | ||
307 | } | ||
308 | |||
309 | pr_cont("no.\n"); | ||
310 | panic(bug64hit, !DADDI_WAR ? daddiwar : nowar); | ||
311 | } | ||
312 | |||
313 | void __init check_bugs64_early(void) | ||
314 | { | ||
315 | check_mult_sh(); | ||
316 | check_daddiu(); | ||
317 | } | ||
318 | |||
319 | void __init check_bugs64(void) | ||
320 | { | ||
321 | check_daddi(); | ||
322 | } | ||
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S new file mode 100644 index 000000000..b91e91106 --- /dev/null +++ b/arch/mips/kernel/r4k_fpu.S | |||
@@ -0,0 +1,417 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle | ||
7 | * | ||
8 | * Multi-arch abstraction and asm macros for easier reading: | ||
9 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) | ||
10 | * | ||
11 | * Carsten Langgaard, carstenl@mips.com | ||
12 | * Copyright (C) 2000 MIPS Technologies, Inc. | ||
13 | * Copyright (C) 1999, 2001 Silicon Graphics, Inc. | ||
14 | */ | ||
15 | #include <asm/asm.h> | ||
16 | #include <asm/asmmacro.h> | ||
17 | #include <asm/errno.h> | ||
18 | #include <asm/export.h> | ||
19 | #include <asm/fpregdef.h> | ||
20 | #include <asm/mipsregs.h> | ||
21 | #include <asm/asm-offsets.h> | ||
22 | #include <asm/regdef.h> | ||
23 | |||
24 | /* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */ | ||
25 | #undef fp | ||
26 | |||
27 | .macro EX insn, reg, src | ||
28 | .set push | ||
29 | SET_HARDFLOAT | ||
30 | .set nomacro | ||
31 | .ex\@: \insn \reg, \src | ||
32 | .set pop | ||
33 | .section __ex_table,"a" | ||
34 | PTR .ex\@, fault | ||
35 | .previous | ||
36 | .endm | ||
37 | |||
38 | /* | ||
39 | * Save a thread's fp context. | ||
40 | */ | ||
41 | LEAF(_save_fp) | ||
42 | EXPORT_SYMBOL(_save_fp) | ||
43 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ | ||
44 | defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) | ||
45 | mfc0 t0, CP0_STATUS | ||
46 | #endif | ||
47 | fpu_save_double a0 t0 t1 # clobbers t1 | ||
48 | jr ra | ||
49 | END(_save_fp) | ||
50 | |||
51 | /* | ||
52 | * Restore a thread's fp context. | ||
53 | */ | ||
54 | LEAF(_restore_fp) | ||
55 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ | ||
56 | defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) | ||
57 | mfc0 t0, CP0_STATUS | ||
58 | #endif | ||
59 | fpu_restore_double a0 t0 t1 # clobbers t1 | ||
60 | jr ra | ||
61 | END(_restore_fp) | ||
62 | |||
63 | #ifdef CONFIG_CPU_HAS_MSA | ||
64 | |||
65 | /* | ||
66 | * Save a thread's MSA vector context. | ||
67 | */ | ||
68 | LEAF(_save_msa) | ||
69 | EXPORT_SYMBOL(_save_msa) | ||
70 | msa_save_all a0 | ||
71 | jr ra | ||
72 | END(_save_msa) | ||
73 | |||
74 | /* | ||
75 | * Restore a thread's MSA vector context. | ||
76 | */ | ||
77 | LEAF(_restore_msa) | ||
78 | msa_restore_all a0 | ||
79 | jr ra | ||
80 | END(_restore_msa) | ||
81 | |||
82 | LEAF(_init_msa_upper) | ||
83 | msa_init_all_upper | ||
84 | jr ra | ||
85 | END(_init_msa_upper) | ||
86 | |||
87 | #endif | ||
88 | |||
89 | .set noreorder | ||
90 | |||
91 | /** | ||
92 | * _save_fp_context() - save FP context from the FPU | ||
93 | * @a0 - pointer to fpregs field of sigcontext | ||
94 | * @a1 - pointer to fpc_csr field of sigcontext | ||
95 | * | ||
96 | * Save FP context, including the 32 FP data registers and the FP | ||
97 | * control & status register, from the FPU to signal context. | ||
98 | */ | ||
99 | LEAF(_save_fp_context) | ||
100 | .set push | ||
101 | SET_HARDFLOAT | ||
102 | cfc1 t1, fcr31 | ||
103 | .set pop | ||
104 | |||
105 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ | ||
106 | defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) | ||
107 | .set push | ||
108 | SET_HARDFLOAT | ||
109 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) | ||
110 | .set mips32r2 | ||
111 | .set fp=64 | ||
112 | mfc0 t0, CP0_STATUS | ||
113 | sll t0, t0, 5 | ||
114 | bgez t0, 1f # skip storing odd if FR=0 | ||
115 | nop | ||
116 | #endif | ||
117 | /* Store the 16 odd double precision registers */ | ||
118 | EX sdc1 $f1, 8(a0) | ||
119 | EX sdc1 $f3, 24(a0) | ||
120 | EX sdc1 $f5, 40(a0) | ||
121 | EX sdc1 $f7, 56(a0) | ||
122 | EX sdc1 $f9, 72(a0) | ||
123 | EX sdc1 $f11, 88(a0) | ||
124 | EX sdc1 $f13, 104(a0) | ||
125 | EX sdc1 $f15, 120(a0) | ||
126 | EX sdc1 $f17, 136(a0) | ||
127 | EX sdc1 $f19, 152(a0) | ||
128 | EX sdc1 $f21, 168(a0) | ||
129 | EX sdc1 $f23, 184(a0) | ||
130 | EX sdc1 $f25, 200(a0) | ||
131 | EX sdc1 $f27, 216(a0) | ||
132 | EX sdc1 $f29, 232(a0) | ||
133 | EX sdc1 $f31, 248(a0) | ||
134 | 1: .set pop | ||
135 | #endif | ||
136 | |||
137 | .set push | ||
138 | SET_HARDFLOAT | ||
139 | /* Store the 16 even double precision registers */ | ||
140 | EX sdc1 $f0, 0(a0) | ||
141 | EX sdc1 $f2, 16(a0) | ||
142 | EX sdc1 $f4, 32(a0) | ||
143 | EX sdc1 $f6, 48(a0) | ||
144 | EX sdc1 $f8, 64(a0) | ||
145 | EX sdc1 $f10, 80(a0) | ||
146 | EX sdc1 $f12, 96(a0) | ||
147 | EX sdc1 $f14, 112(a0) | ||
148 | EX sdc1 $f16, 128(a0) | ||
149 | EX sdc1 $f18, 144(a0) | ||
150 | EX sdc1 $f20, 160(a0) | ||
151 | EX sdc1 $f22, 176(a0) | ||
152 | EX sdc1 $f24, 192(a0) | ||
153 | EX sdc1 $f26, 208(a0) | ||
154 | EX sdc1 $f28, 224(a0) | ||
155 | EX sdc1 $f30, 240(a0) | ||
156 | EX sw t1, 0(a1) | ||
157 | jr ra | ||
158 | li v0, 0 # success | ||
159 | .set pop | ||
160 | END(_save_fp_context) | ||
161 | |||
162 | /** | ||
163 | * _restore_fp_context() - restore FP context to the FPU | ||
164 | * @a0 - pointer to fpregs field of sigcontext | ||
165 | * @a1 - pointer to fpc_csr field of sigcontext | ||
166 | * | ||
167 | * Restore FP context, including the 32 FP data registers and the FP | ||
168 | * control & status register, from signal context to the FPU. | ||
169 | */ | ||
170 | LEAF(_restore_fp_context) | ||
171 | EX lw t1, 0(a1) | ||
172 | |||
173 | #if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \ | ||
174 | defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6) | ||
175 | .set push | ||
176 | SET_HARDFLOAT | ||
177 | #if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) | ||
178 | .set mips32r2 | ||
179 | .set fp=64 | ||
180 | mfc0 t0, CP0_STATUS | ||
181 | sll t0, t0, 5 | ||
182 | bgez t0, 1f # skip loading odd if FR=0 | ||
183 | nop | ||
184 | #endif | ||
185 | EX ldc1 $f1, 8(a0) | ||
186 | EX ldc1 $f3, 24(a0) | ||
187 | EX ldc1 $f5, 40(a0) | ||
188 | EX ldc1 $f7, 56(a0) | ||
189 | EX ldc1 $f9, 72(a0) | ||
190 | EX ldc1 $f11, 88(a0) | ||
191 | EX ldc1 $f13, 104(a0) | ||
192 | EX ldc1 $f15, 120(a0) | ||
193 | EX ldc1 $f17, 136(a0) | ||
194 | EX ldc1 $f19, 152(a0) | ||
195 | EX ldc1 $f21, 168(a0) | ||
196 | EX ldc1 $f23, 184(a0) | ||
197 | EX ldc1 $f25, 200(a0) | ||
198 | EX ldc1 $f27, 216(a0) | ||
199 | EX ldc1 $f29, 232(a0) | ||
200 | EX ldc1 $f31, 248(a0) | ||
201 | 1: .set pop | ||
202 | #endif | ||
203 | .set push | ||
204 | SET_HARDFLOAT | ||
205 | EX ldc1 $f0, 0(a0) | ||
206 | EX ldc1 $f2, 16(a0) | ||
207 | EX ldc1 $f4, 32(a0) | ||
208 | EX ldc1 $f6, 48(a0) | ||
209 | EX ldc1 $f8, 64(a0) | ||
210 | EX ldc1 $f10, 80(a0) | ||
211 | EX ldc1 $f12, 96(a0) | ||
212 | EX ldc1 $f14, 112(a0) | ||
213 | EX ldc1 $f16, 128(a0) | ||
214 | EX ldc1 $f18, 144(a0) | ||
215 | EX ldc1 $f20, 160(a0) | ||
216 | EX ldc1 $f22, 176(a0) | ||
217 | EX ldc1 $f24, 192(a0) | ||
218 | EX ldc1 $f26, 208(a0) | ||
219 | EX ldc1 $f28, 224(a0) | ||
220 | EX ldc1 $f30, 240(a0) | ||
221 | ctc1 t1, fcr31 | ||
222 | .set pop | ||
223 | jr ra | ||
224 | li v0, 0 # success | ||
225 | END(_restore_fp_context) | ||
226 | |||
227 | #ifdef CONFIG_CPU_HAS_MSA | ||
228 | |||
229 | .macro op_one_wr op, idx, base | ||
230 | .align 4 | ||
231 | \idx: \op \idx, 0, \base | ||
232 | jr ra | ||
233 | nop | ||
234 | .endm | ||
235 | |||
236 | .macro op_msa_wr name, op | ||
237 | LEAF(\name) | ||
238 | .set push | ||
239 | .set noreorder | ||
240 | sll t0, a0, 4 | ||
241 | PTR_LA t1, 0f | ||
242 | PTR_ADDU t0, t0, t1 | ||
243 | jr t0 | ||
244 | nop | ||
245 | op_one_wr \op, 0, a1 | ||
246 | op_one_wr \op, 1, a1 | ||
247 | op_one_wr \op, 2, a1 | ||
248 | op_one_wr \op, 3, a1 | ||
249 | op_one_wr \op, 4, a1 | ||
250 | op_one_wr \op, 5, a1 | ||
251 | op_one_wr \op, 6, a1 | ||
252 | op_one_wr \op, 7, a1 | ||
253 | op_one_wr \op, 8, a1 | ||
254 | op_one_wr \op, 9, a1 | ||
255 | op_one_wr \op, 10, a1 | ||
256 | op_one_wr \op, 11, a1 | ||
257 | op_one_wr \op, 12, a1 | ||
258 | op_one_wr \op, 13, a1 | ||
259 | op_one_wr \op, 14, a1 | ||
260 | op_one_wr \op, 15, a1 | ||
261 | op_one_wr \op, 16, a1 | ||
262 | op_one_wr \op, 17, a1 | ||
263 | op_one_wr \op, 18, a1 | ||
264 | op_one_wr \op, 19, a1 | ||
265 | op_one_wr \op, 20, a1 | ||
266 | op_one_wr \op, 21, a1 | ||
267 | op_one_wr \op, 22, a1 | ||
268 | op_one_wr \op, 23, a1 | ||
269 | op_one_wr \op, 24, a1 | ||
270 | op_one_wr \op, 25, a1 | ||
271 | op_one_wr \op, 26, a1 | ||
272 | op_one_wr \op, 27, a1 | ||
273 | op_one_wr \op, 28, a1 | ||
274 | op_one_wr \op, 29, a1 | ||
275 | op_one_wr \op, 30, a1 | ||
276 | op_one_wr \op, 31, a1 | ||
277 | .set pop | ||
278 | END(\name) | ||
279 | .endm | ||
280 | |||
281 | op_msa_wr read_msa_wr_b, st_b | ||
282 | op_msa_wr read_msa_wr_h, st_h | ||
283 | op_msa_wr read_msa_wr_w, st_w | ||
284 | op_msa_wr read_msa_wr_d, st_d | ||
285 | |||
286 | op_msa_wr write_msa_wr_b, ld_b | ||
287 | op_msa_wr write_msa_wr_h, ld_h | ||
288 | op_msa_wr write_msa_wr_w, ld_w | ||
289 | op_msa_wr write_msa_wr_d, ld_d | ||
290 | |||
291 | #endif /* CONFIG_CPU_HAS_MSA */ | ||
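The op_msa_wr macro above dispatches on the vector register index with a computed jump: each op_one_wr stanza is padded to 16 bytes by .align 4, so "sll t0, a0, 4" scales the index by the stanza size and the jr lands on the right entry. The equivalent address computation, sketched in C:

/* Sketch of the computed-jump dispatch in op_msa_wr: the target is
 * simply table base + 16 * register index, one aligned stanza each.
 */
static void *sketch_msa_entry(void *table_base, unsigned int wr)
{
	return (char *)table_base + 16u * wr;	/* wr in 0..31 */
}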
292 | |||
293 | #ifdef CONFIG_CPU_HAS_MSA | ||
294 | |||
295 | .macro save_msa_upper wr, off, base | ||
296 | .set push | ||
297 | .set noat | ||
298 | #ifdef CONFIG_64BIT | ||
299 | copy_s_d \wr, 1 | ||
300 | EX sd $1, \off(\base) | ||
301 | #elif defined(CONFIG_CPU_LITTLE_ENDIAN) | ||
302 | copy_s_w \wr, 2 | ||
303 | EX sw $1, \off(\base) | ||
304 | copy_s_w \wr, 3 | ||
305 | EX sw $1, (\off+4)(\base) | ||
306 | #else /* CONFIG_CPU_BIG_ENDIAN */ | ||
307 | copy_s_w \wr, 2 | ||
308 | EX sw $1, (\off+4)(\base) | ||
309 | copy_s_w \wr, 3 | ||
310 | EX sw $1, \off(\base) | ||
311 | #endif | ||
312 | .set pop | ||
313 | .endm | ||
314 | |||
315 | LEAF(_save_msa_all_upper) | ||
316 | save_msa_upper 0, 0x00, a0 | ||
317 | save_msa_upper 1, 0x08, a0 | ||
318 | save_msa_upper 2, 0x10, a0 | ||
319 | save_msa_upper 3, 0x18, a0 | ||
320 | save_msa_upper 4, 0x20, a0 | ||
321 | save_msa_upper 5, 0x28, a0 | ||
322 | save_msa_upper 6, 0x30, a0 | ||
323 | save_msa_upper 7, 0x38, a0 | ||
324 | save_msa_upper 8, 0x40, a0 | ||
325 | save_msa_upper 9, 0x48, a0 | ||
326 | save_msa_upper 10, 0x50, a0 | ||
327 | save_msa_upper 11, 0x58, a0 | ||
328 | save_msa_upper 12, 0x60, a0 | ||
329 | save_msa_upper 13, 0x68, a0 | ||
330 | save_msa_upper 14, 0x70, a0 | ||
331 | save_msa_upper 15, 0x78, a0 | ||
332 | save_msa_upper 16, 0x80, a0 | ||
333 | save_msa_upper 17, 0x88, a0 | ||
334 | save_msa_upper 18, 0x90, a0 | ||
335 | save_msa_upper 19, 0x98, a0 | ||
336 | save_msa_upper 20, 0xa0, a0 | ||
337 | save_msa_upper 21, 0xa8, a0 | ||
338 | save_msa_upper 22, 0xb0, a0 | ||
339 | save_msa_upper 23, 0xb8, a0 | ||
340 | save_msa_upper 24, 0xc0, a0 | ||
341 | save_msa_upper 25, 0xc8, a0 | ||
342 | save_msa_upper 26, 0xd0, a0 | ||
343 | save_msa_upper 27, 0xd8, a0 | ||
344 | save_msa_upper 28, 0xe0, a0 | ||
345 | save_msa_upper 29, 0xe8, a0 | ||
346 | save_msa_upper 30, 0xf0, a0 | ||
347 | save_msa_upper 31, 0xf8, a0 | ||
348 | jr ra | ||
349 | li v0, 0 | ||
350 | END(_save_msa_all_upper) | ||
351 | |||
352 | .macro restore_msa_upper wr, off, base | ||
353 | .set push | ||
354 | .set noat | ||
355 | #ifdef CONFIG_64BIT | ||
356 | EX ld $1, \off(\base) | ||
357 | insert_d \wr, 1 | ||
358 | #elif defined(CONFIG_CPU_LITTLE_ENDIAN) | ||
359 | EX lw $1, \off(\base) | ||
360 | insert_w \wr, 2 | ||
361 | EX lw $1, (\off+4)(\base) | ||
362 | insert_w \wr, 3 | ||
363 | #else /* CONFIG_CPU_BIG_ENDIAN */ | ||
364 | EX lw $1, (\off+4)(\base) | ||
365 | insert_w \wr, 2 | ||
366 | EX lw $1, \off(\base) | ||
367 | insert_w \wr, 3 | ||
368 | #endif | ||
369 | .set pop | ||
370 | .endm | ||
371 | |||
372 | LEAF(_restore_msa_all_upper) | ||
373 | restore_msa_upper 0, 0x00, a0 | ||
374 | restore_msa_upper 1, 0x08, a0 | ||
375 | restore_msa_upper 2, 0x10, a0 | ||
376 | restore_msa_upper 3, 0x18, a0 | ||
377 | restore_msa_upper 4, 0x20, a0 | ||
378 | restore_msa_upper 5, 0x28, a0 | ||
379 | restore_msa_upper 6, 0x30, a0 | ||
380 | restore_msa_upper 7, 0x38, a0 | ||
381 | restore_msa_upper 8, 0x40, a0 | ||
382 | restore_msa_upper 9, 0x48, a0 | ||
383 | restore_msa_upper 10, 0x50, a0 | ||
384 | restore_msa_upper 11, 0x58, a0 | ||
385 | restore_msa_upper 12, 0x60, a0 | ||
386 | restore_msa_upper 13, 0x68, a0 | ||
387 | restore_msa_upper 14, 0x70, a0 | ||
388 | restore_msa_upper 15, 0x78, a0 | ||
389 | restore_msa_upper 16, 0x80, a0 | ||
390 | restore_msa_upper 17, 0x88, a0 | ||
391 | restore_msa_upper 18, 0x90, a0 | ||
392 | restore_msa_upper 19, 0x98, a0 | ||
393 | restore_msa_upper 20, 0xa0, a0 | ||
394 | restore_msa_upper 21, 0xa8, a0 | ||
395 | restore_msa_upper 22, 0xb0, a0 | ||
396 | restore_msa_upper 23, 0xb8, a0 | ||
397 | restore_msa_upper 24, 0xc0, a0 | ||
398 | restore_msa_upper 25, 0xc8, a0 | ||
399 | restore_msa_upper 26, 0xd0, a0 | ||
400 | restore_msa_upper 27, 0xd8, a0 | ||
401 | restore_msa_upper 28, 0xe0, a0 | ||
402 | restore_msa_upper 29, 0xe8, a0 | ||
403 | restore_msa_upper 30, 0xf0, a0 | ||
404 | restore_msa_upper 31, 0xf8, a0 | ||
405 | jr ra | ||
406 | li v0, 0 | ||
407 | END(_restore_msa_all_upper) | ||
408 | |||
409 | #endif /* CONFIG_CPU_HAS_MSA */ | ||
410 | |||
411 | .set reorder | ||
412 | |||
413 | .type fault, @function | ||
414 | .ent fault | ||
415 | fault: li v0, -EFAULT # failure | ||
416 | jr ra | ||
417 | .end fault | ||
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S new file mode 100644 index 000000000..58232ae6c --- /dev/null +++ b/arch/mips/kernel/r4k_switch.S | |||
@@ -0,0 +1,59 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle | ||
7 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) | ||
8 | * Copyright (C) 1994, 1995, 1996, by Andreas Busse | ||
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | ||
10 | * Copyright (C) 2000 MIPS Technologies, Inc. | ||
11 | * written by Carsten Langgaard, carstenl@mips.com | ||
12 | */ | ||
13 | #include <asm/asm.h> | ||
14 | #include <asm/cachectl.h> | ||
15 | #include <asm/mipsregs.h> | ||
16 | #include <asm/asm-offsets.h> | ||
17 | #include <asm/regdef.h> | ||
18 | #include <asm/stackframe.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | |||
21 | #include <asm/asmmacro.h> | ||
22 | |||
23 | /* | ||
24 | * task_struct *resume(task_struct *prev, task_struct *next, | ||
25 | * struct thread_info *next_ti) | ||
26 | */ | ||
27 | .align 5 | ||
28 | LEAF(resume) | ||
29 | mfc0 t1, CP0_STATUS | ||
30 | LONG_S t1, THREAD_STATUS(a0) | ||
31 | cpu_save_nonscratch a0 | ||
32 | LONG_S ra, THREAD_REG31(a0) | ||
33 | |||
34 | #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP) | ||
35 | PTR_LA t8, __stack_chk_guard | ||
36 | LONG_L t9, TASK_STACK_CANARY(a1) | ||
37 | LONG_S t9, 0(t8) | ||
38 | #endif | ||
39 | |||
40 | /* | ||
41 | * The order of restoring the registers takes care of the race | ||
42 | * updating $28, $29 and kernelsp without disabling ints. | ||
43 | */ | ||
44 | move $28, a2 | ||
45 | cpu_restore_nonscratch a1 | ||
46 | |||
47 | PTR_ADDU t0, $28, _THREAD_SIZE - 32 | ||
48 | set_saved_sp t0, t1, t2 | ||
49 | mfc0 t1, CP0_STATUS /* Do we really need this? */ | ||
50 | li a3, 0xff01 | ||
51 | and t1, a3 | ||
52 | LONG_L a2, THREAD_STATUS(a1) | ||
53 | nor a3, $0, a3 | ||
54 | and a2, a3 | ||
55 | or a2, t1 | ||
56 | mtc0 a2, CP0_STATUS | ||
57 | move v0, a0 | ||
58 | jr ra | ||
59 | END(resume) | ||
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c new file mode 100644 index 000000000..dab8febb5 --- /dev/null +++ b/arch/mips/kernel/relocate.c | |||
@@ -0,0 +1,446 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Support for Kernel relocation at boot time | ||
7 | * | ||
8 | * Copyright (C) 2015, Imagination Technologies Ltd. | ||
9 | * Authors: Matt Redfearn (matt.redfearn@mips.com) | ||
10 | */ | ||
11 | #include <asm/bootinfo.h> | ||
12 | #include <asm/cacheflush.h> | ||
13 | #include <asm/fw/fw.h> | ||
14 | #include <asm/sections.h> | ||
15 | #include <asm/setup.h> | ||
16 | #include <asm/timex.h> | ||
17 | #include <linux/elf.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/libfdt.h> | ||
20 | #include <linux/of_fdt.h> | ||
21 | #include <linux/sched/task.h> | ||
22 | #include <linux/start_kernel.h> | ||
23 | #include <linux/string.h> | ||
24 | #include <linux/printk.h> | ||
25 | |||
26 | #define RELOCATED(x) ((void *)((long)x + offset)) | ||
27 | |||
28 | extern u32 _relocation_start[]; /* End kernel image / start relocation table */ | ||
29 | extern u32 _relocation_end[]; /* End relocation table */ | ||
30 | |||
31 | extern long __start___ex_table; /* Start exception table */ | ||
32 | extern long __stop___ex_table; /* End exception table */ | ||
33 | |||
34 | extern void __weak plat_fdt_relocated(void *new_location); | ||
35 | |||
36 | /* | ||
37 | * This function may be defined for a platform to perform any post-relocation | ||
38 | * fixup necessary. | ||
39 | * Return non-zero to abort relocation | ||
40 | */ | ||
41 | int __weak plat_post_relocation(long offset) | ||
42 | { | ||
43 | return 0; | ||
44 | } | ||
45 | |||
46 | static inline u32 __init get_synci_step(void) | ||
47 | { | ||
48 | u32 res; | ||
49 | |||
50 | __asm__("rdhwr %0, $1" : "=r" (res)); | ||
51 | |||
52 | return res; | ||
53 | } | ||
54 | |||
55 | static void __init sync_icache(void *kbase, unsigned long kernel_length) | ||
56 | { | ||
57 | void *kend = kbase + kernel_length; | ||
58 | u32 step = get_synci_step(); | ||
59 | |||
60 | do { | ||
61 | __asm__ __volatile__( | ||
62 | "synci 0(%0)" | ||
63 | : /* no output */ | ||
64 | : "r" (kbase)); | ||
65 | |||
66 | kbase += step; | ||
67 | } while (kbase < kend); | ||
68 | |||
69 | /* Completion barrier */ | ||
70 | __sync(); | ||
71 | } | ||
72 | |||
73 | static int __init apply_r_mips_64_rel(u32 *loc_orig, u32 *loc_new, long offset) | ||
74 | { | ||
75 | *(u64 *)loc_new += offset; | ||
76 | |||
77 | return 0; | ||
78 | } | ||
79 | |||
80 | static int __init apply_r_mips_32_rel(u32 *loc_orig, u32 *loc_new, long offset) | ||
81 | { | ||
82 | *loc_new += offset; | ||
83 | |||
84 | return 0; | ||
85 | } | ||
86 | |||
87 | static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset) | ||
88 | { | ||
89 | unsigned long target_addr = (*loc_orig) & 0x03ffffff; | ||
90 | |||
91 | if (offset % 4) { | ||
92 | pr_err("Dangerous R_MIPS_26 REL relocation\n"); | ||
93 | return -ENOEXEC; | ||
94 | } | ||
95 | |||
96 | /* Original target address */ | ||
97 | target_addr <<= 2; | ||
98 | target_addr += (unsigned long)loc_orig & ~0x03ffffff; | ||
99 | |||
100 | /* Get the new target address */ | ||
101 | target_addr += offset; | ||
102 | |||
103 | if ((target_addr & 0xf0000000) != ((unsigned long)loc_new & 0xf0000000)) { | ||
104 | pr_err("R_MIPS_26 REL relocation overflow\n"); | ||
105 | return -ENOEXEC; | ||
106 | } | ||
107 | |||
108 | target_addr -= (unsigned long)loc_new & ~0x03ffffff; | ||
109 | target_addr >>= 2; | ||
110 | |||
111 | *loc_new = (*loc_new & ~0x03ffffff) | (target_addr & 0x03ffffff); | ||
112 | |||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | |||
117 | static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset) | ||
118 | { | ||
119 | unsigned long insn = *loc_orig; | ||
120 | unsigned long target = (insn & 0xffff) << 16; /* high 16bits of target */ | ||
121 | |||
122 | target += offset; | ||
123 | |||
124 | *loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff); | ||
125 | return 0; | ||
126 | } | ||
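Note that apply_r_mips_hi16_rel() can skip the usual HI16/LO16 pairing: relocation offsets are constrained to multiples of 64 KiB (relocation_addr_valid() below rejects anything else), so the low 16 bits of the offset are always zero, the LO16 halves never change, and no carry into the high half can occur.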
127 | |||
128 | static int (*reloc_handlers_rel[]) (u32 *, u32 *, long) __initdata = { | ||
129 | [R_MIPS_64] = apply_r_mips_64_rel, | ||
130 | [R_MIPS_32] = apply_r_mips_32_rel, | ||
131 | [R_MIPS_26] = apply_r_mips_26_rel, | ||
132 | [R_MIPS_HI16] = apply_r_mips_hi16_rel, | ||
133 | }; | ||
134 | |||
135 | int __init do_relocations(void *kbase_old, void *kbase_new, long offset) | ||
136 | { | ||
137 | u32 *r; | ||
138 | u32 *loc_orig; | ||
139 | u32 *loc_new; | ||
140 | int type; | ||
141 | int res; | ||
142 | |||
143 | for (r = _relocation_start; r < _relocation_end; r++) { | ||
144 | /* Sentinel for last relocation */ | ||
145 | if (*r == 0) | ||
146 | break; | ||
147 | |||
148 | type = (*r >> 24) & 0xff; | ||
149 | loc_orig = kbase_old + ((*r & 0x00ffffff) << 2); | ||
150 | loc_new = RELOCATED(loc_orig); | ||
151 | |||
152 | if (reloc_handlers_rel[type] == NULL) { | ||
153 | /* Unsupported relocation */ | ||
154 | pr_err("Unhandled relocation type %d at 0x%pK\n", | ||
155 | type, loc_orig); | ||
156 | return -ENOEXEC; | ||
157 | } | ||
158 | |||
159 | res = reloc_handlers_rel[type](loc_orig, loc_new, offset); | ||
160 | if (res) | ||
161 | return res; | ||
162 | } | ||
163 | |||
164 | return 0; | ||
165 | } | ||
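As the loop above shows, each 32-bit record in the relocation table packs the relocation type into the top byte and a word index into the low 24 bits, so the byte offset of the target is the index shifted left by two. A small sketch of that decoding:

/* Decode one packed relocation record the way do_relocations() does. */
struct packed_reloc {
	unsigned int type;		/* R_MIPS_* relocation type */
	unsigned long byte_offset;	/* offset of the target word */
};

static struct packed_reloc sketch_decode_reloc(unsigned int r)
{
	struct packed_reloc pr = {
		.type = (r >> 24) & 0xff,
		.byte_offset = (unsigned long)(r & 0x00ffffff) << 2,
	};
	return pr;
}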
166 | |||
167 | /* | ||
168 | * The exception table is filled in by the relocs tool after vmlinux is linked. | ||
169 | * It must be relocated separately since there will not be any relocation | ||
170 | * information for it filled in by the linker. | ||
171 | */ | ||
172 | static int __init relocate_exception_table(long offset) | ||
173 | { | ||
174 | unsigned long *etable_start, *etable_end, *e; | ||
175 | |||
176 | etable_start = RELOCATED(&__start___ex_table); | ||
177 | etable_end = RELOCATED(&__stop___ex_table); | ||
178 | |||
179 | for (e = etable_start; e < etable_end; e++) | ||
180 | *e += offset; | ||
181 | |||
182 | return 0; | ||
183 | } | ||
184 | |||
185 | #ifdef CONFIG_RANDOMIZE_BASE | ||
186 | |||
187 | static inline __init unsigned long rotate_xor(unsigned long hash, | ||
188 | const void *area, size_t size) | ||
189 | { | ||
190 | const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash)); | ||
191 | size_t diff, i; | ||
192 | |||
193 | diff = (void *)ptr - area; | ||
194 | if (unlikely(size < diff + sizeof(hash))) | ||
195 | return hash; | ||
196 | |||
197 | size = ALIGN_DOWN(size - diff, sizeof(hash)); | ||
198 | |||
199 | for (i = 0; i < size / sizeof(hash); i++) { | ||
200 | /* Rotate by odd number of bits and XOR. */ | ||
201 | hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7); | ||
202 | hash ^= ptr[i]; | ||
203 | } | ||
204 | |||
205 | return hash; | ||
206 | } | ||
207 | |||
208 | static inline __init unsigned long get_random_boot(void) | ||
209 | { | ||
210 | unsigned long entropy = random_get_entropy(); | ||
211 | unsigned long hash = 0; | ||
212 | |||
213 | /* Attempt to create a simple but unpredictable starting entropy. */ | ||
214 | hash = rotate_xor(hash, linux_banner, strlen(linux_banner)); | ||
215 | |||
216 | /* Add in any runtime entropy we can get */ | ||
217 | hash = rotate_xor(hash, &entropy, sizeof(entropy)); | ||
218 | |||
219 | #if defined(CONFIG_USE_OF) | ||
220 | /* Get any additional entropy passed in device tree */ | ||
221 | if (initial_boot_params) { | ||
222 | int node, len; | ||
223 | u64 *prop; | ||
224 | |||
225 | node = fdt_path_offset(initial_boot_params, "/chosen"); | ||
226 | if (node >= 0) { | ||
227 | prop = fdt_getprop_w(initial_boot_params, node, | ||
228 | "kaslr-seed", &len); | ||
229 | if (prop && (len == sizeof(u64))) | ||
230 | hash = rotate_xor(hash, prop, sizeof(*prop)); | ||
231 | } | ||
232 | } | ||
233 | #endif /* CONFIG_USE_OF */ | ||
234 | |||
235 | return hash; | ||
236 | } | ||
237 | |||
238 | static inline __init bool kaslr_disabled(void) | ||
239 | { | ||
240 | char *str; | ||
241 | |||
242 | #if defined(CONFIG_CMDLINE_BOOL) | ||
243 | const char *builtin_cmdline = CONFIG_CMDLINE; | ||
244 | |||
245 | str = strstr(builtin_cmdline, "nokaslr"); | ||
246 | if (str == builtin_cmdline || | ||
247 | (str > builtin_cmdline && *(str - 1) == ' ')) | ||
248 | return true; | ||
249 | #endif | ||
250 | str = strstr(arcs_cmdline, "nokaslr"); | ||
251 | if (str == arcs_cmdline || (str > arcs_cmdline && *(str - 1) == ' ')) | ||
252 | return true; | ||
253 | |||
254 | return false; | ||
255 | } | ||
256 | |||
257 | static inline void __init *determine_relocation_address(void) | ||
258 | { | ||
259 | /* Choose a new address for the kernel */ | ||
260 | unsigned long kernel_length; | ||
261 | void *dest = &_text; | ||
262 | unsigned long offset; | ||
263 | |||
264 | if (kaslr_disabled()) | ||
265 | return dest; | ||
266 | |||
267 | kernel_length = (long)_end - (long)(&_text); | ||
268 | |||
269 | offset = get_random_boot() << 16; | ||
270 | offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1); | ||
271 | if (offset < kernel_length) | ||
272 | offset += ALIGN(kernel_length, 0xffff); | ||
273 | |||
274 | return RELOCATED(dest); | ||
275 | } | ||
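A worked example of the offset derivation above, as a standalone sketch. The hash value and the 64 MiB CONFIG_RANDOMIZE_BASE_MAX_OFFSET are assumptions chosen for illustration; shifting the hash left by 16 keeps the candidate address 64 KiB aligned, which relocation_addr_valid() later checks for.

#include <stdio.h>

int main(void)
{
	unsigned long hash = 0x12345678UL;	/* hypothetical hash value */
	unsigned long max  = 0x04000000UL;	/* assumed 64 MiB cap */
	unsigned long offset = (hash << 16) & (max - 1);

	printf("offset = 0x%lx\n", offset);	/* prints 0x2780000 */
	return 0;
}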
276 | |||
277 | #else | ||
278 | |||
279 | static inline void __init *determine_relocation_address(void) | ||
280 | { | ||
281 | /* | ||
282 | * Choose a new address for the kernel | ||
283 | * For now we'll hard code the destination | ||
284 | */ | ||
285 | return (void *)0xffffffff81000000; | ||
286 | } | ||
287 | |||
288 | #endif | ||
289 | |||
290 | static inline int __init relocation_addr_valid(void *loc_new) | ||
291 | { | ||
292 | if ((unsigned long)loc_new & 0x0000ffff) { | ||
293 | /* Inappropriately aligned new location */ | ||
294 | return 0; | ||
295 | } | ||
296 | if ((unsigned long)loc_new < (unsigned long)&_end) { | ||
297 | /* New location overlaps original kernel */ | ||
298 | return 0; | ||
299 | } | ||
300 | return 1; | ||
301 | } | ||
302 | |||
303 | void *__init relocate_kernel(void) | ||
304 | { | ||
305 | void *loc_new; | ||
306 | unsigned long kernel_length; | ||
307 | unsigned long bss_length; | ||
308 | long offset = 0; | ||
309 | int res = 1; | ||
310 | /* Default to original kernel entry point */ | ||
311 | void *kernel_entry = start_kernel; | ||
312 | void *fdt = NULL; | ||
313 | |||
314 | /* Get the command line */ | ||
315 | fw_init_cmdline(); | ||
316 | #if defined(CONFIG_USE_OF) | ||
317 | /* Deal with the device tree */ | ||
318 | fdt = plat_get_fdt(); | ||
319 | early_init_dt_scan(fdt); | ||
320 | if (boot_command_line[0]) { | ||
321 | /* Boot command line was passed in device tree */ | ||
322 | strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE); | ||
323 | } | ||
324 | #endif /* CONFIG_USE_OF */ | ||
325 | |||
326 | kernel_length = (long)(&_relocation_start) - (long)(&_text); | ||
327 | bss_length = (long)&__bss_stop - (long)&__bss_start; | ||
328 | |||
329 | loc_new = determine_relocation_address(); | ||
330 | |||
331 | /* Sanity check relocation address */ | ||
332 | if (relocation_addr_valid(loc_new)) | ||
333 | offset = (unsigned long)loc_new - (unsigned long)(&_text); | ||
334 | |||
335 | /* Reset the command line now so we don't end up with a duplicate */ | ||
336 | arcs_cmdline[0] = '\0'; | ||
337 | |||
338 | if (offset) { | ||
339 | void (*fdt_relocated_)(void *) = NULL; | ||
340 | #if defined(CONFIG_USE_OF) | ||
341 | unsigned long fdt_phys = virt_to_phys(fdt); | ||
342 | |||
343 | /* | ||
344 | * If built-in dtb is used then it will have been relocated | ||
345 | * during kernel _text relocation. If appended DTB is used | ||
346 | * then it will not be relocated, but it should remain | ||
347 | * intact in the original location. If dtb is loaded by | ||
348 | * the bootloader then it may need to be moved if it crosses | ||
349 | * the target memory area | ||
350 | */ | ||
351 | |||
352 | if (fdt_phys >= virt_to_phys(RELOCATED(&_text)) && | ||
353 | fdt_phys <= virt_to_phys(RELOCATED(&_end))) { | ||
354 | void *fdt_relocated = | ||
355 | RELOCATED(ALIGN((long)&_end, PAGE_SIZE)); | ||
356 | memcpy(fdt_relocated, fdt, fdt_totalsize(fdt)); | ||
357 | fdt = fdt_relocated; | ||
358 | fdt_relocated_ = RELOCATED(&plat_fdt_relocated); | ||
359 | } | ||
360 | #endif /* CONFIG_USE_OF */ | ||
361 | |||
362 | /* Copy the kernel to its new location */ | ||
363 | memcpy(loc_new, &_text, kernel_length); | ||
364 | |||
365 | /* Perform relocations on the new kernel */ | ||
366 | res = do_relocations(&_text, loc_new, offset); | ||
367 | if (res < 0) | ||
368 | goto out; | ||
369 | |||
370 | /* Sync the caches ready for execution of new kernel */ | ||
371 | sync_icache(loc_new, kernel_length); | ||
372 | |||
373 | res = relocate_exception_table(offset); | ||
374 | if (res < 0) | ||
375 | goto out; | ||
376 | |||
377 | /* | ||
378 | * The original .bss has already been cleared, and | ||
379 | * some variables, such as command line parameters, have been | ||
380 | * stored to it, so make a copy in the new location. | ||
381 | */ | ||
382 | memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length); | ||
383 | |||
384 | /* | ||
385 | * If fdt was stored outside of the kernel image and | ||
386 | * had to be moved then update platform's state data | ||
387 | * with the new fdt location | ||
388 | */ | ||
389 | if (fdt_relocated_) | ||
390 | fdt_relocated_(fdt); | ||
391 | |||
392 | /* | ||
393 | * Last chance for the platform to abort relocation. | ||
394 | * This may also be used by the platform to perform any | ||
395 | * initialisation required now that the new kernel is | ||
396 | * resident in memory and ready to be executed. | ||
397 | */ | ||
398 | if (plat_post_relocation(offset)) | ||
399 | goto out; | ||
400 | |||
401 | /* The current thread is now within the relocated image */ | ||
402 | __current_thread_info = RELOCATED(&init_thread_union); | ||
403 | |||
404 | /* Return the new kernel's entry point */ | ||
405 | kernel_entry = RELOCATED(start_kernel); | ||
406 | } | ||
407 | out: | ||
408 | return kernel_entry; | ||
409 | } | ||
410 | |||
411 | /* | ||
412 | * Show relocation information on panic. | ||
413 | */ | ||
414 | void show_kernel_relocation(const char *level) | ||
415 | { | ||
416 | unsigned long offset; | ||
417 | |||
418 | offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS); | ||
419 | |||
420 | if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) { | ||
421 | printk(level); | ||
422 | pr_cont("Kernel relocated by 0x%pK\n", (void *)offset); | ||
423 | pr_cont(" .text @ 0x%pK\n", _text); | ||
424 | pr_cont(" .data @ 0x%pK\n", _sdata); | ||
425 | pr_cont(" .bss @ 0x%pK\n", __bss_start); | ||
426 | } | ||
427 | } | ||
428 | |||
429 | static int kernel_location_notifier_fn(struct notifier_block *self, | ||
430 | unsigned long v, void *p) | ||
431 | { | ||
432 | show_kernel_relocation(KERN_EMERG); | ||
433 | return NOTIFY_DONE; | ||
434 | } | ||
435 | |||
436 | static struct notifier_block kernel_location_notifier = { | ||
437 | .notifier_call = kernel_location_notifier_fn | ||
438 | }; | ||
439 | |||
440 | static int __init register_kernel_offset_dumper(void) | ||
441 | { | ||
442 | atomic_notifier_chain_register(&panic_notifier_list, | ||
443 | &kernel_location_notifier); | ||
444 | return 0; | ||
445 | } | ||
446 | __initcall(register_kernel_offset_dumper); | ||
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S new file mode 100644 index 000000000..ac870893b --- /dev/null +++ b/arch/mips/kernel/relocate_kernel.S | |||
@@ -0,0 +1,190 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0-only */ | ||
2 | /* | ||
3 | * relocate_kernel.S for kexec | ||
4 | * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006 | ||
5 | */ | ||
6 | |||
7 | #include <asm/asm.h> | ||
8 | #include <asm/asmmacro.h> | ||
9 | #include <asm/regdef.h> | ||
10 | #include <asm/mipsregs.h> | ||
11 | #include <asm/stackframe.h> | ||
12 | #include <asm/addrspace.h> | ||
13 | |||
14 | LEAF(relocate_new_kernel) | ||
15 | PTR_L a0, arg0 | ||
16 | PTR_L a1, arg1 | ||
17 | PTR_L a2, arg2 | ||
18 | PTR_L a3, arg3 | ||
19 | |||
20 | PTR_L s0, kexec_indirection_page | ||
21 | PTR_L s1, kexec_start_address | ||
22 | |||
23 | process_entry: | ||
24 | PTR_L s2, (s0) | ||
25 | PTR_ADDIU s0, s0, SZREG | ||
26 | |||
27 | /* | ||
28 | * In case of a kdump/crash kernel, the indirection page is not | ||
29 | * populated as the kernel is directly copied to a reserved location | ||
30 | */ | ||
31 | beqz s2, done | ||
32 | |||
33 | /* destination page */ | ||
34 | and s3, s2, 0x1 | ||
35 | beq s3, zero, 1f | ||
36 | and s4, s2, ~0x1 /* store destination addr in s4 */ | ||
37 | b process_entry | ||
38 | |||
39 | 1: | ||
40 | /* indirection page, update s0 */ | ||
41 | and s3, s2, 0x2 | ||
42 | beq s3, zero, 1f | ||
43 | and s0, s2, ~0x2 | ||
44 | b process_entry | ||
45 | |||
46 | 1: | ||
47 | /* done page */ | ||
48 | and s3, s2, 0x4 | ||
49 | beq s3, zero, 1f | ||
50 | b done | ||
51 | 1: | ||
52 | /* source page */ | ||
53 | and s3, s2, 0x8 | ||
54 | beq s3, zero, process_entry | ||
55 | and s2, s2, ~0x8 | ||
56 | li s6, (1 << _PAGE_SHIFT) / SZREG | ||
57 | |||
58 | copy_word: | ||
59 | /* copy page word by word */ | ||
60 | REG_L s5, (s2) | ||
61 | REG_S s5, (s4) | ||
62 | PTR_ADDIU s4, s4, SZREG | ||
63 | PTR_ADDIU s2, s2, SZREG | ||
64 | LONG_ADDIU s6, s6, -1 | ||
65 | beq s6, zero, process_entry | ||
66 | b copy_word | ||
67 | b process_entry | ||
68 | |||
69 | done: | ||
70 | #ifdef CONFIG_SMP | ||
71 | /* Resetting kexec_flag signals the other CPUs that the kernel | ||
72 | has been moved to its new location. Note - we need the relocated | ||
73 | address of kexec_flag. */ | ||
74 | |||
75 | bal 1f | ||
76 | 1: move t1,ra; | ||
77 | PTR_LA t2,1b | ||
78 | PTR_LA t0,kexec_flag | ||
79 | PTR_SUB t0,t0,t2; | ||
80 | PTR_ADD t0,t1,t0; | ||
81 | LONG_S zero,(t0) | ||
82 | #endif | ||
83 | |||
84 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
85 | /* We need to flush I-cache before jumping to new kernel. | ||
86 | * Unfortunately, this code is cpu-specific. | ||
87 | */ | ||
88 | .set push | ||
89 | .set noreorder | ||
90 | syncw | ||
91 | syncw | ||
92 | synci 0($0) | ||
93 | .set pop | ||
94 | #else | ||
95 | sync | ||
96 | #endif | ||
97 | /* jump to kexec_start_address */ | ||
98 | j s1 | ||
99 | END(relocate_new_kernel) | ||
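The process_entry loop above walks the standard kexec indirection list, where the low bits of each entry tag what the remaining bits point at; the values match the generic IND_* flags from linux/kexec.h. Restated as C constants for reference:

/* Entry tags decoded by process_entry above (low bits of each entry). */
enum kexec_ind_flag_sketch {
	IND_DESTINATION_BIT = 0x1,	/* set the copy destination page */
	IND_INDIRECTION_BIT = 0x2,	/* switch to another entry page */
	IND_DONE_BIT        = 0x4,	/* end of the list */
	IND_SOURCE_BIT      = 0x8,	/* copy one page from this source */
};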
100 | |||
101 | #ifdef CONFIG_SMP | ||
102 | /* | ||
103 | * Other CPUs should wait until the code is relocated and | ||
104 | * then start at the entry point. | ||
105 | */ | ||
106 | LEAF(kexec_smp_wait) | ||
107 | PTR_L a0, s_arg0 | ||
108 | PTR_L a1, s_arg1 | ||
109 | PTR_L a2, s_arg2 | ||
110 | PTR_L a3, s_arg3 | ||
111 | PTR_L s1, kexec_start_address | ||
112 | |||
113 | /* The non-relocated addresses work for the args and kexec_start_address | ||
114 | * (the old kernel is not overwritten). But we need the relocated address of | ||
115 | * kexec_flag. | ||
116 | */ | ||
117 | |||
118 | bal 1f | ||
119 | 1: move t1,ra; | ||
120 | PTR_LA t2,1b | ||
121 | PTR_LA t0,kexec_flag | ||
122 | PTR_SUB t0,t0,t2; | ||
123 | PTR_ADD t0,t1,t0; | ||
124 | |||
125 | 1: LONG_L s0, (t0) | ||
126 | bne s0, zero,1b | ||
127 | |||
128 | #ifdef CONFIG_CPU_CAVIUM_OCTEON | ||
129 | .set push | ||
130 | .set noreorder | ||
131 | synci 0($0) | ||
132 | .set pop | ||
133 | #else | ||
134 | sync | ||
135 | #endif | ||
136 | j s1 | ||
137 | END(kexec_smp_wait) | ||
138 | #endif | ||
139 | |||
140 | #ifdef __mips64 | ||
141 | /* all PTRs must be aligned to 8 bytes in 64-bit mode */ | ||
142 | .align 3 | ||
143 | #endif | ||
144 | |||
145 | /* All parameters to new kernel are passed in registers a0-a3. | ||
146 | * kexec_args[0..3] are used to prepare register values. | ||
147 | */ | ||
148 | |||
149 | kexec_args: | ||
150 | EXPORT(kexec_args) | ||
151 | arg0: PTR 0x0 | ||
152 | arg1: PTR 0x0 | ||
153 | arg2: PTR 0x0 | ||
154 | arg3: PTR 0x0 | ||
155 | .size kexec_args,PTRSIZE*4 | ||
156 | |||
157 | #ifdef CONFIG_SMP | ||
158 | /* | ||
159 | * Secondary CPUs may have different kernel parameters in | ||
160 | * their registers a0-a3. secondary_kexec_args[0..3] are used | ||
161 | * to prepare register values. | ||
162 | */ | ||
163 | secondary_kexec_args: | ||
164 | EXPORT(secondary_kexec_args) | ||
165 | s_arg0: PTR 0x0 | ||
166 | s_arg1: PTR 0x0 | ||
167 | s_arg2: PTR 0x0 | ||
168 | s_arg3: PTR 0x0 | ||
169 | .size secondary_kexec_args,PTRSIZE*4 | ||
170 | kexec_flag: | ||
171 | LONG 0x1 | ||
172 | |||
173 | #endif | ||
174 | |||
175 | kexec_start_address: | ||
176 | EXPORT(kexec_start_address) | ||
177 | PTR 0x0 | ||
178 | .size kexec_start_address, PTRSIZE | ||
179 | |||
180 | kexec_indirection_page: | ||
181 | EXPORT(kexec_indirection_page) | ||
182 | PTR 0 | ||
183 | .size kexec_indirection_page, PTRSIZE | ||
184 | |||
185 | relocate_new_kernel_end: | ||
186 | |||
187 | relocate_new_kernel_size: | ||
188 | EXPORT(relocate_new_kernel_size) | ||
189 | PTR relocate_new_kernel_end - relocate_new_kernel | ||
190 | .size relocate_new_kernel_size, PTRSIZE | ||
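For readers tracing the process_entry/copy_word flow above, here is a minimal C sketch of the same indirection-list walk. The IND_* flag values mirror the bit tests in the assembly (0x1 destination, 0x2 indirection, 0x4 done, 0x8 source); the function and variable names are illustrative, not kernel code. The bal 1f / PTR_LA pair in the done path, incidentally, computes symbol + (runtime PC - link-time PC), i.e. the run-time address of kexec_flag after the code has been copied away from where it was linked.

    /* Illustrative C rendering of the relocate_new_kernel entry walk
     * above; not kernel code. Flag values match the assembly's bit tests. */
    #define IND_DESTINATION 0x1
    #define IND_INDIRECTION 0x2
    #define IND_DONE        0x4
    #define IND_SOURCE      0x8

    static void walk_kexec_list(unsigned long *entry, unsigned long page_size)
    {
        unsigned long *dest = 0;

        for (;;) {
            unsigned long e = *entry++;     /* s2 = *s0; s0 += SZREG */

            if (e & IND_DESTINATION) {      /* store destination in s4 */
                dest = (unsigned long *)(e & ~IND_DESTINATION);
            } else if (e & IND_INDIRECTION) {   /* new list: update s0 */
                entry = (unsigned long *)(e & ~IND_INDIRECTION);
            } else if (e & IND_DONE) {      /* branch to done */
                break;
            } else if (e & IND_SOURCE) {    /* the copy_word loop */
                unsigned long *src = (unsigned long *)(e & ~IND_SOURCE);
                unsigned long n = page_size / sizeof(unsigned long);

                while (n--)
                    *dest++ = *src++;
            }
        }
    }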
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c new file mode 100644 index 000000000..6288780b7 --- /dev/null +++ b/arch/mips/kernel/reset.c | |||
@@ -0,0 +1,125 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2001, 06 by Ralf Baechle (ralf@linux-mips.org) | ||
7 | * Copyright (C) 2001 MIPS Technologies, Inc. | ||
8 | */ | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/export.h> | ||
11 | #include <linux/pm.h> | ||
12 | #include <linux/types.h> | ||
13 | #include <linux/reboot.h> | ||
14 | #include <linux/delay.h> | ||
15 | |||
16 | #include <asm/compiler.h> | ||
17 | #include <asm/idle.h> | ||
18 | #include <asm/mipsregs.h> | ||
19 | #include <asm/reboot.h> | ||
20 | |||
21 | /* | ||
22 | * Urgs ... Too many MIPS machines to handle this in a generic way. | ||
23 | * So handle all using function pointers to machine specific | ||
24 | * functions. | ||
25 | */ | ||
26 | void (*_machine_restart)(char *command); | ||
27 | void (*_machine_halt)(void); | ||
28 | void (*pm_power_off)(void); | ||
29 | |||
30 | EXPORT_SYMBOL(pm_power_off); | ||
31 | |||
32 | static void machine_hang(void) | ||
33 | { | ||
34 | /* | ||
35 | * We're hanging the system so we don't want to be interrupted anymore. | ||
36 | * Any interrupt handlers that ran would at best be useless & at worst | ||
37 | * go awry because the system isn't in a functional state. | ||
38 | */ | ||
39 | local_irq_disable(); | ||
40 | |||
41 | /* | ||
42 | * Mask all interrupts, giving us a better chance of remaining in the | ||
43 | * low power wait state. | ||
44 | */ | ||
45 | clear_c0_status(ST0_IM); | ||
46 | |||
47 | while (true) { | ||
48 | if (cpu_has_mips_r) { | ||
49 | /* | ||
50 | * We know that the wait instruction is supported so | ||
51 | * make use of it directly, leaving interrupts | ||
52 | * disabled. | ||
53 | */ | ||
54 | asm volatile( | ||
55 | ".set push\n\t" | ||
56 | ".set " MIPS_ISA_ARCH_LEVEL "\n\t" | ||
57 | "wait\n\t" | ||
58 | ".set pop"); | ||
59 | } else if (cpu_wait) { | ||
60 | /* | ||
61 | * Try the cpu_wait() callback. This isn't ideal since | ||
62 | * it'll re-enable interrupts, but that ought to be | ||
63 | * harmless given that they're all masked. | ||
64 | */ | ||
65 | cpu_wait(); | ||
66 | local_irq_disable(); | ||
67 | } else { | ||
68 | /* | ||
69 | * We're going to burn some power running round the | ||
70 | * loop, but we don't really have a choice. This isn't | ||
71 | * a path we should expect to run for long during | ||
72 | * typical use anyway. | ||
73 | */ | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * In most modern MIPS CPUs interrupts will cause the wait | ||
78 | * instruction to graduate even when disabled, and in some | ||
79 | * cases even when masked. In order to prevent a timer | ||
80 | * interrupt from continuously taking us out of the low power | ||
81 | * wait state, we clear any pending timer interrupt here. | ||
82 | */ | ||
83 | if (cpu_has_counter) | ||
84 | write_c0_compare(0); | ||
85 | } | ||
86 | } | ||
87 | |||
88 | void machine_restart(char *command) | ||
89 | { | ||
90 | if (_machine_restart) | ||
91 | _machine_restart(command); | ||
92 | |||
93 | #ifdef CONFIG_SMP | ||
94 | preempt_disable(); | ||
95 | smp_send_stop(); | ||
96 | #endif | ||
97 | do_kernel_restart(command); | ||
98 | mdelay(1000); | ||
99 | pr_emerg("Reboot failed -- System halted\n"); | ||
100 | machine_hang(); | ||
101 | } | ||
102 | |||
103 | void machine_halt(void) | ||
104 | { | ||
105 | if (_machine_halt) | ||
106 | _machine_halt(); | ||
107 | |||
108 | #ifdef CONFIG_SMP | ||
109 | preempt_disable(); | ||
110 | smp_send_stop(); | ||
111 | #endif | ||
112 | machine_hang(); | ||
113 | } | ||
114 | |||
115 | void machine_power_off(void) | ||
116 | { | ||
117 | if (pm_power_off) | ||
118 | pm_power_off(); | ||
119 | |||
120 | #ifdef CONFIG_SMP | ||
121 | preempt_disable(); | ||
122 | smp_send_stop(); | ||
123 | #endif | ||
124 | machine_hang(); | ||
125 | } | ||
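The three function pointers above are the only machine-specific hook points; board code fills them in during platform setup rather than overriding machine_restart() and friends. A hedged sketch of what a board file typically does (the myboard_* names are hypothetical):

    #include <linux/init.h>
    #include <asm/reboot.h>

    /* Hypothetical board support: install machine-specific handlers so
     * the generic machine_restart()/machine_halt() paths above can
     * delegate to them. */
    static void myboard_restart(char *command)
    {
        /* e.g. kick a board reset register or watchdog here */
    }

    static void myboard_halt(void)
    {
        /* quiesce board hardware */
    }

    static int __init myboard_reboot_setup(void)
    {
        _machine_restart = myboard_restart;
        _machine_halt = myboard_halt;
        pm_power_off = myboard_halt;    /* board has no power-off line */
        return 0;
    }
    arch_initcall(myboard_reboot_setup);

If no hook is installed, the corresponding branch is skipped and the code falls through to machine_hang().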
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c new file mode 100644 index 000000000..d26dcc4b4 --- /dev/null +++ b/arch/mips/kernel/rtlx-cmp.c | |||
@@ -0,0 +1,122 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
8 | */ | ||
9 | #include <linux/device.h> | ||
10 | #include <linux/fs.h> | ||
11 | #include <linux/err.h> | ||
12 | #include <linux/wait.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/smp.h> | ||
15 | |||
16 | #include <asm/mips_mt.h> | ||
17 | #include <asm/vpe.h> | ||
18 | #include <asm/rtlx.h> | ||
19 | |||
20 | static int major; | ||
21 | |||
22 | static void rtlx_interrupt(void) | ||
23 | { | ||
24 | int i; | ||
25 | struct rtlx_info *info; | ||
26 | struct rtlx_info **p = vpe_get_shared(aprp_cpu_index()); | ||
27 | |||
28 | if (p == NULL || *p == NULL) | ||
29 | return; | ||
30 | |||
31 | info = *p; | ||
32 | |||
33 | if (info->ap_int_pending == 1 && smp_processor_id() == 0) { | ||
34 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
35 | wake_up(&channel_wqs[i].lx_queue); | ||
36 | wake_up(&channel_wqs[i].rt_queue); | ||
37 | } | ||
38 | info->ap_int_pending = 0; | ||
39 | } | ||
40 | } | ||
41 | |||
42 | void _interrupt_sp(void) | ||
43 | { | ||
44 | smp_send_reschedule(aprp_cpu_index()); | ||
45 | } | ||
46 | |||
47 | int __init rtlx_module_init(void) | ||
48 | { | ||
49 | struct device *dev; | ||
50 | int i, err; | ||
51 | |||
52 | if (!cpu_has_mipsmt) { | ||
53 | pr_warn("VPE loader: not a MIPS MT capable processor\n"); | ||
54 | return -ENODEV; | ||
55 | } | ||
56 | |||
57 | if (num_possible_cpus() - aprp_cpu_index() < 1) { | ||
58 | pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n" | ||
59 | "Pass maxcpus=<n> argument as kernel argument\n"); | ||
60 | |||
61 | return -ENODEV; | ||
62 | } | ||
63 | |||
64 | major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops); | ||
65 | if (major < 0) { | ||
66 | pr_err("rtlx_module_init: unable to register device\n"); | ||
67 | return major; | ||
68 | } | ||
69 | |||
70 | /* initialise the wait queues */ | ||
71 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
72 | init_waitqueue_head(&channel_wqs[i].rt_queue); | ||
73 | init_waitqueue_head(&channel_wqs[i].lx_queue); | ||
74 | atomic_set(&channel_wqs[i].in_open, 0); | ||
75 | mutex_init(&channel_wqs[i].mutex); | ||
76 | |||
77 | dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, | ||
78 | "%s%d", RTLX_MODULE_NAME, i); | ||
79 | if (IS_ERR(dev)) { | ||
80 | while (i--) | ||
81 | device_destroy(mt_class, MKDEV(major, i)); | ||
82 | |||
83 | err = PTR_ERR(dev); | ||
84 | goto out_chrdev; | ||
85 | } | ||
86 | } | ||
87 | |||
88 | /* set up notifiers */ | ||
89 | rtlx_notify.start = rtlx_starting; | ||
90 | rtlx_notify.stop = rtlx_stopping; | ||
91 | vpe_notify(aprp_cpu_index(), &rtlx_notify); | ||
92 | |||
93 | if (cpu_has_vint) { | ||
94 | aprp_hook = rtlx_interrupt; | ||
95 | } else { | ||
96 | pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); | ||
97 | err = -ENODEV; | ||
98 | goto out_class; | ||
99 | } | ||
100 | |||
101 | return 0; | ||
102 | |||
103 | out_class: | ||
104 | for (i = 0; i < RTLX_CHANNELS; i++) | ||
105 | device_destroy(mt_class, MKDEV(major, i)); | ||
106 | out_chrdev: | ||
107 | unregister_chrdev(major, RTLX_MODULE_NAME); | ||
108 | |||
109 | return err; | ||
110 | } | ||
111 | |||
112 | void __exit rtlx_module_exit(void) | ||
113 | { | ||
114 | int i; | ||
115 | |||
116 | for (i = 0; i < RTLX_CHANNELS; i++) | ||
117 | device_destroy(mt_class, MKDEV(major, i)); | ||
118 | |||
119 | unregister_chrdev(major, RTLX_MODULE_NAME); | ||
120 | |||
121 | aprp_hook = NULL; | ||
122 | } | ||
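The `while (i--) device_destroy(...)` construct in rtlx_module_init() above is the standard partial-failure unwind: undo exactly the iterations that succeeded, then unwind the earlier setup steps through the goto labels in reverse order. A generic sketch of the idiom (setup_one()/teardown_one() are illustrative stand-ins):

    /* Generic partial-failure unwind, as used by rtlx_module_init() above. */
    int setup_one(int i);       /* illustrative */
    void teardown_one(int i);   /* illustrative */

    int init_many(int count)
    {
        int i, err;

        for (i = 0; i < count; i++) {
            err = setup_one(i);
            if (err) {
                while (i--)     /* undo only what succeeded */
                    teardown_one(i);
                return err;
            }
        }
        return 0;
    }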
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c new file mode 100644 index 000000000..38c6925a1 --- /dev/null +++ b/arch/mips/kernel/rtlx-mt.c | |||
@@ -0,0 +1,147 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
8 | */ | ||
9 | #include <linux/device.h> | ||
10 | #include <linux/fs.h> | ||
11 | #include <linux/err.h> | ||
12 | #include <linux/wait.h> | ||
13 | #include <linux/sched.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/irq.h> | ||
16 | |||
17 | #include <asm/mips_mt.h> | ||
18 | #include <asm/vpe.h> | ||
19 | #include <asm/rtlx.h> | ||
20 | |||
21 | static int major; | ||
22 | |||
23 | static void rtlx_dispatch(void) | ||
24 | { | ||
25 | if (read_c0_cause() & read_c0_status() & C_SW0) | ||
26 | do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); | ||
27 | } | ||
28 | |||
29 | /* | ||
30 | * The interrupt handler may be called before rtlx_init has otherwise had | ||
31 | * a chance to run. | ||
32 | */ | ||
33 | static irqreturn_t rtlx_interrupt(int irq, void *dev_id) | ||
34 | { | ||
35 | unsigned int vpeflags; | ||
36 | unsigned long flags; | ||
37 | int i; | ||
38 | |||
39 | local_irq_save(flags); | ||
40 | vpeflags = dvpe(); | ||
41 | set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); | ||
42 | irq_enable_hazard(); | ||
43 | evpe(vpeflags); | ||
44 | local_irq_restore(flags); | ||
45 | |||
46 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
47 | wake_up(&channel_wqs[i].lx_queue); | ||
48 | wake_up(&channel_wqs[i].rt_queue); | ||
49 | } | ||
50 | |||
51 | return IRQ_HANDLED; | ||
52 | } | ||
53 | |||
54 | static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; | ||
55 | |||
56 | void _interrupt_sp(void) | ||
57 | { | ||
58 | unsigned long flags; | ||
59 | |||
60 | local_irq_save(flags); | ||
61 | dvpe(); | ||
62 | settc(1); | ||
63 | write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0); | ||
64 | evpe(EVPE_ENABLE); | ||
65 | local_irq_restore(flags); | ||
66 | } | ||
67 | |||
68 | int __init rtlx_module_init(void) | ||
69 | { | ||
70 | struct device *dev; | ||
71 | int i, err; | ||
72 | |||
73 | if (!cpu_has_mipsmt) { | ||
74 | pr_warn("VPE loader: not a MIPS MT capable processor\n"); | ||
75 | return -ENODEV; | ||
76 | } | ||
77 | |||
78 | if (aprp_cpu_index() == 0) { | ||
79 | pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n" | ||
80 | "Pass maxtcs=<n> argument as kernel argument\n"); | ||
81 | |||
82 | return -ENODEV; | ||
83 | } | ||
84 | |||
85 | major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops); | ||
86 | if (major < 0) { | ||
87 | pr_err("rtlx_module_init: unable to register device\n"); | ||
88 | return major; | ||
89 | } | ||
90 | |||
91 | /* initialise the wait queues */ | ||
92 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
93 | init_waitqueue_head(&channel_wqs[i].rt_queue); | ||
94 | init_waitqueue_head(&channel_wqs[i].lx_queue); | ||
95 | atomic_set(&channel_wqs[i].in_open, 0); | ||
96 | mutex_init(&channel_wqs[i].mutex); | ||
97 | |||
98 | dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, | ||
99 | "%s%d", RTLX_MODULE_NAME, i); | ||
100 | if (IS_ERR(dev)) { | ||
101 | while (i--) | ||
102 | device_destroy(mt_class, MKDEV(major, i)); | ||
103 | |||
104 | err = PTR_ERR(dev); | ||
105 | goto out_chrdev; | ||
106 | } | ||
107 | } | ||
108 | |||
109 | /* set up notifiers */ | ||
110 | rtlx_notify.start = rtlx_starting; | ||
111 | rtlx_notify.stop = rtlx_stopping; | ||
112 | vpe_notify(aprp_cpu_index(), &rtlx_notify); | ||
113 | |||
114 | if (cpu_has_vint) { | ||
115 | aprp_hook = rtlx_dispatch; | ||
116 | } else { | ||
117 | pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); | ||
118 | err = -ENODEV; | ||
119 | goto out_class; | ||
120 | } | ||
121 | |||
122 | err = request_irq(rtlx_irq_num, rtlx_interrupt, 0, "RTLX", rtlx); | ||
123 | if (err) | ||
124 | goto out_class; | ||
125 | |||
126 | return 0; | ||
127 | |||
128 | out_class: | ||
129 | for (i = 0; i < RTLX_CHANNELS; i++) | ||
130 | device_destroy(mt_class, MKDEV(major, i)); | ||
131 | out_chrdev: | ||
132 | unregister_chrdev(major, RTLX_MODULE_NAME); | ||
133 | |||
134 | return err; | ||
135 | } | ||
136 | |||
137 | void __exit rtlx_module_exit(void) | ||
138 | { | ||
139 | int i; | ||
140 | |||
141 | for (i = 0; i < RTLX_CHANNELS; i++) | ||
142 | device_destroy(mt_class, MKDEV(major, i)); | ||
143 | |||
144 | unregister_chrdev(major, RTLX_MODULE_NAME); | ||
145 | |||
146 | aprp_hook = NULL; | ||
147 | } | ||
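rtlx_dispatch() above ANDs Cause against Status before testing C_SW0: an interrupt is serviced only when it is both pending (a Cause.IP bit) and unmasked (the matching Status.IM bit). A tiny sketch of that test in isolation (the bit position is the architectural one for software interrupt 0):

    /* Pending-and-enabled test for MIPS software interrupt 0, as in
     * rtlx_dispatch() above. Cause.IP0 and Status.IM0 share bit 8. */
    static int sw0_should_dispatch(unsigned int cause, unsigned int status)
    {
        const unsigned int C_SW0 = 1u << 8;

        return (cause & status & C_SW0) != 0;
    }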
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c new file mode 100644 index 000000000..18c509c59 --- /dev/null +++ b/arch/mips/kernel/rtlx.c | |||
@@ -0,0 +1,409 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org) | ||
8 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/syscalls.h> | ||
13 | #include <linux/moduleloader.h> | ||
14 | #include <linux/atomic.h> | ||
15 | #include <linux/sched/signal.h> | ||
16 | |||
17 | #include <asm/mipsmtregs.h> | ||
18 | #include <asm/mips_mt.h> | ||
19 | #include <asm/processor.h> | ||
20 | #include <asm/rtlx.h> | ||
21 | #include <asm/setup.h> | ||
22 | #include <asm/vpe.h> | ||
23 | |||
24 | static int sp_stopping; | ||
25 | struct rtlx_info *rtlx; | ||
26 | struct chan_waitqueues channel_wqs[RTLX_CHANNELS]; | ||
27 | struct vpe_notifications rtlx_notify; | ||
28 | void (*aprp_hook)(void) = NULL; | ||
29 | EXPORT_SYMBOL(aprp_hook); | ||
30 | |||
31 | static void __used dump_rtlx(void) | ||
32 | { | ||
33 | int i; | ||
34 | |||
35 | pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state); | ||
36 | |||
37 | for (i = 0; i < RTLX_CHANNELS; i++) { | ||
38 | struct rtlx_channel *chan = &rtlx->channel[i]; | ||
39 | |||
40 | pr_info(" rt_state %d lx_state %d buffer_size %d\n", | ||
41 | chan->rt_state, chan->lx_state, chan->buffer_size); | ||
42 | |||
43 | pr_info(" rt_read %d rt_write %d\n", | ||
44 | chan->rt_read, chan->rt_write); | ||
45 | |||
46 | pr_info(" lx_read %d lx_write %d\n", | ||
47 | chan->lx_read, chan->lx_write); | ||
48 | |||
49 | pr_info(" rt_buffer <%s>\n", chan->rt_buffer); | ||
50 | pr_info(" lx_buffer <%s>\n", chan->lx_buffer); | ||
51 | } | ||
52 | } | ||
53 | |||
54 | /* Called once we have the address of the shared structure from the SP side. */ | ||
55 | static int rtlx_init(struct rtlx_info *rtlxi) | ||
56 | { | ||
57 | if (rtlxi->id != RTLX_ID) { | ||
58 | pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id); | ||
59 | return -ENOEXEC; | ||
60 | } | ||
61 | |||
62 | rtlx = rtlxi; | ||
63 | |||
64 | return 0; | ||
65 | } | ||
66 | |||
67 | /* notifications */ | ||
68 | void rtlx_starting(int vpe) | ||
69 | { | ||
70 | int i; | ||
71 | sp_stopping = 0; | ||
72 | |||
73 | /* force a reload of rtlx */ | ||
74 | rtlx = NULL; | ||
75 | |||
76 | /* wake up any sleeping rtlx_open() callers */ | ||
77 | for (i = 0; i < RTLX_CHANNELS; i++) | ||
78 | wake_up_interruptible(&channel_wqs[i].lx_queue); | ||
79 | } | ||
80 | |||
81 | void rtlx_stopping(int vpe) | ||
82 | { | ||
83 | int i; | ||
84 | |||
85 | sp_stopping = 1; | ||
86 | for (i = 0; i < RTLX_CHANNELS; i++) | ||
87 | wake_up_interruptible(&channel_wqs[i].lx_queue); | ||
88 | } | ||
89 | |||
90 | |||
91 | int rtlx_open(int index, int can_sleep) | ||
92 | { | ||
93 | struct rtlx_info **p; | ||
94 | struct rtlx_channel *chan; | ||
95 | enum rtlx_state state; | ||
96 | int ret = 0; | ||
97 | |||
98 | if (index >= RTLX_CHANNELS) { | ||
99 | pr_debug("rtlx_open index out of range\n"); | ||
100 | return -ENOSYS; | ||
101 | } | ||
102 | |||
103 | if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { | ||
104 | pr_debug("rtlx_open channel %d already opened\n", index); | ||
105 | ret = -EBUSY; | ||
106 | goto out_fail; | ||
107 | } | ||
108 | |||
109 | if (rtlx == NULL) { | ||
110 | p = vpe_get_shared(aprp_cpu_index()); | ||
111 | if (p == NULL) { | ||
112 | if (can_sleep) { | ||
113 | ret = __wait_event_interruptible( | ||
114 | channel_wqs[index].lx_queue, | ||
115 | (p = vpe_get_shared(aprp_cpu_index()))); | ||
116 | if (ret) | ||
117 | goto out_fail; | ||
118 | } else { | ||
119 | pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n"); | ||
120 | ret = -ENOSYS; | ||
121 | goto out_fail; | ||
122 | } | ||
123 | } | ||
124 | |||
125 | smp_rmb(); | ||
126 | if (*p == NULL) { | ||
127 | if (can_sleep) { | ||
128 | DEFINE_WAIT(wait); | ||
129 | |||
130 | for (;;) { | ||
131 | prepare_to_wait( | ||
132 | &channel_wqs[index].lx_queue, | ||
133 | &wait, TASK_INTERRUPTIBLE); | ||
134 | smp_rmb(); | ||
135 | if (*p != NULL) | ||
136 | break; | ||
137 | if (!signal_pending(current)) { | ||
138 | schedule(); | ||
139 | continue; | ||
140 | } | ||
141 | ret = -ERESTARTSYS; | ||
142 | goto out_fail; | ||
143 | } | ||
144 | finish_wait(&channel_wqs[index].lx_queue, | ||
145 | &wait); | ||
146 | } else { | ||
147 | pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n"); | ||
148 | ret = -ENOSYS; | ||
149 | goto out_fail; | ||
150 | } | ||
151 | } | ||
152 | |||
153 | if ((unsigned int)*p < KSEG0) { | ||
154 | pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n", | ||
155 | (int)*p); | ||
156 | ret = -ENOSYS; | ||
157 | goto out_fail; | ||
158 | } | ||
159 | |||
160 | ret = rtlx_init(*p); | ||
161 | if (ret < 0) | ||
162 | goto out_ret; | ||
163 | } | ||
164 | |||
165 | chan = &rtlx->channel[index]; | ||
166 | |||
167 | state = xchg(&chan->lx_state, RTLX_STATE_OPENED); | ||
168 | if (state == RTLX_STATE_OPENED) { | ||
169 | ret = -EBUSY; | ||
170 | goto out_fail; | ||
171 | } | ||
172 | |||
173 | out_fail: | ||
174 | smp_mb(); | ||
175 | atomic_dec(&channel_wqs[index].in_open); | ||
176 | smp_mb(); | ||
177 | |||
178 | out_ret: | ||
179 | return ret; | ||
180 | } | ||
181 | |||
182 | int rtlx_release(int index) | ||
183 | { | ||
184 | if (rtlx == NULL) { | ||
185 | pr_err("rtlx_release() with null rtlx\n"); | ||
186 | return 0; | ||
187 | } | ||
188 | rtlx->channel[index].lx_state = RTLX_STATE_UNUSED; | ||
189 | return 0; | ||
190 | } | ||
191 | |||
192 | unsigned int rtlx_read_poll(int index, int can_sleep) | ||
193 | { | ||
194 | struct rtlx_channel *chan; | ||
195 | |||
196 | if (rtlx == NULL) | ||
197 | return 0; | ||
198 | |||
199 | chan = &rtlx->channel[index]; | ||
200 | |||
201 | /* data available to read? */ | ||
202 | if (chan->lx_read == chan->lx_write) { | ||
203 | if (can_sleep) { | ||
204 | int ret = __wait_event_interruptible( | ||
205 | channel_wqs[index].lx_queue, | ||
206 | (chan->lx_read != chan->lx_write) || | ||
207 | sp_stopping); | ||
208 | if (ret) | ||
209 | return ret; | ||
210 | |||
211 | if (sp_stopping) | ||
212 | return 0; | ||
213 | } else | ||
214 | return 0; | ||
215 | } | ||
216 | |||
217 | return (chan->lx_write + chan->buffer_size - chan->lx_read) | ||
218 | % chan->buffer_size; | ||
219 | } | ||
220 | |||
221 | static inline int write_spacefree(int read, int write, int size) | ||
222 | { | ||
223 | if (read == write) { | ||
224 | /* | ||
225 | * Never fill the buffer completely; that way the indexes are | ||
226 | * equal if and only if the buffer is empty, and unequal | ||
227 | * whenever data is available. */ | ||
228 | return size - 1; | ||
229 | } | ||
230 | |||
231 | return ((read + size - write) % size) - 1; | ||
232 | } | ||
233 | |||
234 | unsigned int rtlx_write_poll(int index) | ||
235 | { | ||
236 | struct rtlx_channel *chan = &rtlx->channel[index]; | ||
237 | |||
238 | return write_spacefree(chan->rt_read, chan->rt_write, | ||
239 | chan->buffer_size); | ||
240 | } | ||
241 | |||
242 | ssize_t rtlx_read(int index, void __user *buff, size_t count) | ||
243 | { | ||
244 | size_t lx_write, fl = 0L; | ||
245 | struct rtlx_channel *lx; | ||
246 | unsigned long failed; | ||
247 | |||
248 | if (rtlx == NULL) | ||
249 | return -ENOSYS; | ||
250 | |||
251 | lx = &rtlx->channel[index]; | ||
252 | |||
253 | mutex_lock(&channel_wqs[index].mutex); | ||
254 | smp_rmb(); | ||
255 | lx_write = lx->lx_write; | ||
256 | |||
257 | /* find out how much in total */ | ||
258 | count = min(count, | ||
259 | (size_t)(lx_write + lx->buffer_size - lx->lx_read) | ||
260 | % lx->buffer_size); | ||
261 | |||
262 | /* then how much from the read pointer onwards */ | ||
263 | fl = min(count, (size_t)lx->buffer_size - lx->lx_read); | ||
264 | |||
265 | failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); | ||
266 | if (failed) | ||
267 | goto out; | ||
268 | |||
269 | /* and if there is anything left at the beginning of the buffer */ | ||
270 | if (count - fl) | ||
271 | failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl); | ||
272 | |||
273 | out: | ||
274 | count -= failed; | ||
275 | |||
276 | smp_wmb(); | ||
277 | lx->lx_read = (lx->lx_read + count) % lx->buffer_size; | ||
278 | smp_wmb(); | ||
279 | mutex_unlock(&channel_wqs[index].mutex); | ||
280 | |||
281 | return count; | ||
282 | } | ||
283 | |||
284 | ssize_t rtlx_write(int index, const void __user *buffer, size_t count) | ||
285 | { | ||
286 | struct rtlx_channel *rt; | ||
287 | unsigned long failed; | ||
288 | size_t rt_read; | ||
289 | size_t fl; | ||
290 | |||
291 | if (rtlx == NULL) | ||
292 | return -ENOSYS; | ||
293 | |||
294 | rt = &rtlx->channel[index]; | ||
295 | |||
296 | mutex_lock(&channel_wqs[index].mutex); | ||
297 | smp_rmb(); | ||
298 | rt_read = rt->rt_read; | ||
299 | |||
300 | /* total number of bytes to copy */ | ||
301 | count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write, | ||
302 | rt->buffer_size)); | ||
303 | |||
304 | /* first bit from write pointer to the end of the buffer, or count */ | ||
305 | fl = min(count, (size_t) rt->buffer_size - rt->rt_write); | ||
306 | |||
307 | failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); | ||
308 | if (failed) | ||
309 | goto out; | ||
310 | |||
311 | /* if there's any left copy to the beginning of the buffer */ | ||
312 | if (count - fl) | ||
313 | failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); | ||
314 | |||
315 | out: | ||
316 | count -= failed; | ||
317 | |||
318 | smp_wmb(); | ||
319 | rt->rt_write = (rt->rt_write + count) % rt->buffer_size; | ||
320 | smp_wmb(); | ||
321 | mutex_unlock(&channel_wqs[index].mutex); | ||
322 | |||
323 | _interrupt_sp(); | ||
324 | |||
325 | return count; | ||
326 | } | ||
327 | |||
328 | |||
329 | static int file_open(struct inode *inode, struct file *filp) | ||
330 | { | ||
331 | return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1); | ||
332 | } | ||
333 | |||
334 | static int file_release(struct inode *inode, struct file *filp) | ||
335 | { | ||
336 | return rtlx_release(iminor(inode)); | ||
337 | } | ||
338 | |||
339 | static __poll_t file_poll(struct file *file, poll_table *wait) | ||
340 | { | ||
341 | int minor = iminor(file_inode(file)); | ||
342 | __poll_t mask = 0; | ||
343 | |||
344 | poll_wait(file, &channel_wqs[minor].rt_queue, wait); | ||
345 | poll_wait(file, &channel_wqs[minor].lx_queue, wait); | ||
346 | |||
347 | if (rtlx == NULL) | ||
348 | return 0; | ||
349 | |||
350 | /* data available to read? */ | ||
351 | if (rtlx_read_poll(minor, 0)) | ||
352 | mask |= EPOLLIN | EPOLLRDNORM; | ||
353 | |||
354 | /* space to write */ | ||
355 | if (rtlx_write_poll(minor)) | ||
356 | mask |= EPOLLOUT | EPOLLWRNORM; | ||
357 | |||
358 | return mask; | ||
359 | } | ||
360 | |||
361 | static ssize_t file_read(struct file *file, char __user *buffer, size_t count, | ||
362 | loff_t *ppos) | ||
363 | { | ||
364 | int minor = iminor(file_inode(file)); | ||
365 | |||
366 | /* data available? */ | ||
367 | if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1)) | ||
368 | return 0; /* -EAGAIN makes 'cat' whine */ | ||
369 | |||
370 | return rtlx_read(minor, buffer, count); | ||
371 | } | ||
372 | |||
373 | static ssize_t file_write(struct file *file, const char __user *buffer, | ||
374 | size_t count, loff_t *ppos) | ||
375 | { | ||
376 | int minor = iminor(file_inode(file)); | ||
377 | |||
378 | /* any space left... */ | ||
379 | if (!rtlx_write_poll(minor)) { | ||
380 | int ret; | ||
381 | |||
382 | if (file->f_flags & O_NONBLOCK) | ||
383 | return -EAGAIN; | ||
384 | |||
385 | ret = __wait_event_interruptible(channel_wqs[minor].rt_queue, | ||
386 | rtlx_write_poll(minor)); | ||
387 | if (ret) | ||
388 | return ret; | ||
389 | } | ||
390 | |||
391 | return rtlx_write(minor, buffer, count); | ||
392 | } | ||
393 | |||
394 | const struct file_operations rtlx_fops = { | ||
395 | .owner = THIS_MODULE, | ||
396 | .open = file_open, | ||
397 | .release = file_release, | ||
398 | .write = file_write, | ||
399 | .read = file_read, | ||
400 | .poll = file_poll, | ||
401 | .llseek = noop_llseek, | ||
402 | }; | ||
403 | |||
404 | module_init(rtlx_module_init); | ||
405 | module_exit(rtlx_module_exit); | ||
406 | |||
407 | MODULE_DESCRIPTION("MIPS RTLX"); | ||
408 | MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); | ||
409 | MODULE_LICENSE("GPL"); | ||
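The index arithmetic running through rtlx_read(), rtlx_write() and write_spacefree() above is the classic one-slot-open ring buffer: read == write means empty, and one byte is always sacrificed so that a full buffer never looks empty. A standalone sketch with a quick self-check (names are illustrative):

    #include <assert.h>

    /* One-slot-open ring buffer arithmetic, as in write_spacefree() and
     * rtlx_read_poll() above; illustrative, not kernel code. */
    static int space_free(int read, int write, int size)
    {
        if (read == write)
            return size - 1;                /* empty: all but one slot */
        return ((read + size - write) % size) - 1;
    }

    static int bytes_available(int read, int write, int size)
    {
        return (write + size - read) % size;
    }

    int main(void)
    {
        /* 16-byte buffer, writer 5 bytes ahead of the reader */
        assert(bytes_available(3, 8, 16) == 5);
        assert(space_free(3, 8, 16) == 10); /* 16 - 5 - 1 */
        assert(space_free(8, 8, 16) == 15); /* empty */
        return 0;
    }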
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S new file mode 100644 index 000000000..b449b6866 --- /dev/null +++ b/arch/mips/kernel/scall32-o32.S | |||
@@ -0,0 +1,225 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org> | ||
7 | * Copyright (C) 2001 MIPS Technologies, Inc. | ||
8 | * Copyright (C) 2004 Thiemo Seufer | ||
9 | * Copyright (C) 2014 Imagination Technologies Ltd. | ||
10 | */ | ||
11 | #include <linux/errno.h> | ||
12 | #include <asm/asm.h> | ||
13 | #include <asm/asmmacro.h> | ||
14 | #include <asm/irqflags.h> | ||
15 | #include <asm/mipsregs.h> | ||
16 | #include <asm/regdef.h> | ||
17 | #include <asm/stackframe.h> | ||
18 | #include <asm/isadep.h> | ||
19 | #include <asm/sysmips.h> | ||
20 | #include <asm/thread_info.h> | ||
21 | #include <asm/unistd.h> | ||
22 | #include <asm/war.h> | ||
23 | #include <asm/asm-offsets.h> | ||
24 | |||
25 | .align 5 | ||
26 | NESTED(handle_sys, PT_SIZE, sp) | ||
27 | .set noat | ||
28 | SAVE_SOME | ||
29 | TRACE_IRQS_ON_RELOAD | ||
30 | STI | ||
31 | .set at | ||
32 | |||
33 | lw t1, PT_EPC(sp) # skip syscall on return | ||
34 | |||
35 | addiu t1, 4 # skip to next instruction | ||
36 | sw t1, PT_EPC(sp) | ||
37 | |||
38 | sw a3, PT_R26(sp) # save a3 for syscall restarting | ||
39 | |||
40 | /* | ||
41 | * More than four arguments. Try to deal with it by copying the | ||
42 | * stack arguments from the user stack to the kernel stack. | ||
43 | * This Sucks (TM). | ||
44 | */ | ||
45 | lw t0, PT_R29(sp) # get old user stack pointer | ||
46 | |||
47 | /* | ||
48 | * We intentionally keep the kernel stack a little below the top of | ||
49 | * userspace so we don't have to do a slower byte-accurate check here. | ||
50 | */ | ||
51 | lw t5, TI_ADDR_LIMIT($28) | ||
52 | addu t4, t0, 32 | ||
53 | and t5, t4 | ||
54 | bltz t5, bad_stack # -> sp is bad | ||
55 | |||
56 | /* | ||
58 | * OK, copy the args from the user stack to the kernel stack. | ||
58 | */ | ||
59 | |||
60 | .set push | ||
61 | .set noreorder | ||
62 | .set nomacro | ||
63 | |||
64 | load_a4: user_lw(t5, 16(t0)) # argument #5 from usp | ||
65 | load_a5: user_lw(t6, 20(t0)) # argument #6 from usp | ||
66 | load_a6: user_lw(t7, 24(t0)) # argument #7 from usp | ||
67 | load_a7: user_lw(t8, 28(t0)) # argument #8 from usp | ||
68 | loads_done: | ||
69 | |||
70 | sw t5, 16(sp) # argument #5 to ksp | ||
71 | sw t6, 20(sp) # argument #6 to ksp | ||
72 | sw t7, 24(sp) # argument #7 to ksp | ||
73 | sw t8, 28(sp) # argument #8 to ksp | ||
74 | .set pop | ||
75 | |||
76 | .section __ex_table,"a" | ||
77 | PTR load_a4, bad_stack_a4 | ||
78 | PTR load_a5, bad_stack_a5 | ||
79 | PTR load_a6, bad_stack_a6 | ||
80 | PTR load_a7, bad_stack_a7 | ||
81 | .previous | ||
82 | |||
83 | lw t0, TI_FLAGS($28) # syscall tracing enabled? | ||
84 | li t1, _TIF_WORK_SYSCALL_ENTRY | ||
85 | and t0, t1 | ||
86 | bnez t0, syscall_trace_entry # -> yes | ||
87 | syscall_common: | ||
88 | subu v0, v0, __NR_O32_Linux # check syscall number | ||
89 | sltiu t0, v0, __NR_O32_Linux_syscalls | ||
90 | beqz t0, illegal_syscall | ||
91 | |||
92 | sll t0, v0, 2 | ||
93 | la t1, sys_call_table | ||
94 | addu t1, t0 | ||
95 | lw t2, (t1) # syscall routine | ||
96 | |||
97 | beqz t2, illegal_syscall | ||
98 | |||
99 | jalr t2 # Do The Real Thing (TM) | ||
100 | |||
101 | li t0, -EMAXERRNO - 1 # error? | ||
102 | sltu t0, t0, v0 | ||
103 | sw t0, PT_R7(sp) # set error flag | ||
104 | beqz t0, 1f | ||
105 | |||
106 | lw t1, PT_R2(sp) # syscall number | ||
107 | negu v0 # error | ||
108 | sw t1, PT_R0(sp) # save it for syscall restarting | ||
109 | 1: sw v0, PT_R2(sp) # result | ||
110 | |||
111 | o32_syscall_exit: | ||
112 | j syscall_exit_partial | ||
113 | |||
114 | /* ------------------------------------------------------------------------ */ | ||
115 | |||
116 | syscall_trace_entry: | ||
117 | SAVE_STATIC | ||
118 | move a0, sp | ||
119 | |||
120 | /* | ||
121 | * The syscall number is in v0 unless we called syscall(__NR_###), | ||
122 | * in which case the real syscall number is in a0 | ||
123 | */ | ||
124 | move a1, v0 | ||
125 | subu t2, v0, __NR_O32_Linux | ||
126 | bnez t2, 1f /* __NR_syscall at offset 0 */ | ||
127 | lw a1, PT_R4(sp) | ||
128 | |||
129 | 1: jal syscall_trace_enter | ||
130 | |||
131 | bltz v0, 1f # seccomp failed? Skip syscall | ||
132 | |||
133 | RESTORE_STATIC | ||
134 | lw v0, PT_R2(sp) # Restore syscall (maybe modified) | ||
135 | lw a0, PT_R4(sp) # Restore argument registers | ||
136 | lw a1, PT_R5(sp) | ||
137 | lw a2, PT_R6(sp) | ||
138 | lw a3, PT_R7(sp) | ||
139 | j syscall_common | ||
140 | |||
141 | 1: j syscall_exit | ||
142 | |||
143 | /* ------------------------------------------------------------------------ */ | ||
144 | |||
145 | /* | ||
146 | * Our open-coded access area sanity test for the stack pointer | ||
147 | * failed. We probably should handle this case a bit more drastically. | ||
148 | */ | ||
149 | bad_stack: | ||
150 | li v0, EFAULT | ||
151 | sw v0, PT_R2(sp) | ||
152 | li t0, 1 # set error flag | ||
153 | sw t0, PT_R7(sp) | ||
154 | j o32_syscall_exit | ||
155 | |||
156 | bad_stack_a4: | ||
157 | li t5, 0 | ||
158 | b load_a5 | ||
159 | |||
160 | bad_stack_a5: | ||
161 | li t6, 0 | ||
162 | b load_a6 | ||
163 | |||
164 | bad_stack_a6: | ||
165 | li t7, 0 | ||
166 | b load_a7 | ||
167 | |||
168 | bad_stack_a7: | ||
169 | li t8, 0 | ||
170 | b loads_done | ||
171 | |||
172 | /* | ||
173 | * The system call does not exist in this kernel | ||
174 | */ | ||
175 | illegal_syscall: | ||
176 | li v0, ENOSYS # error | ||
177 | sw v0, PT_R2(sp) | ||
178 | li t0, 1 # set error flag | ||
179 | sw t0, PT_R7(sp) | ||
180 | j o32_syscall_exit | ||
181 | END(handle_sys) | ||
182 | |||
183 | LEAF(sys_syscall) | ||
184 | subu t0, a0, __NR_O32_Linux # check syscall number | ||
185 | sltiu v0, t0, __NR_O32_Linux_syscalls | ||
186 | beqz t0, einval # do not recurse | ||
187 | sll t1, t0, 2 | ||
188 | beqz v0, einval | ||
189 | lw t2, sys_call_table(t1) # syscall routine | ||
190 | |||
191 | move a0, a1 # shift argument registers | ||
192 | move a1, a2 | ||
193 | move a2, a3 | ||
194 | lw a3, 16(sp) | ||
195 | lw t4, 20(sp) | ||
196 | lw t5, 24(sp) | ||
197 | lw t6, 28(sp) | ||
198 | sw t4, 16(sp) | ||
199 | sw t5, 20(sp) | ||
200 | sw t6, 24(sp) | ||
201 | jr t2 | ||
202 | /* Unreached */ | ||
203 | |||
204 | einval: li v0, -ENOSYS | ||
205 | jr ra | ||
206 | END(sys_syscall) | ||
207 | |||
208 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
209 | /* | ||
210 | * For FPU affinity scheduling on MIPS MT processors, we need to | ||
211 | * intercept sys_sched_xxxaffinity() calls until we get a proper hook | ||
212 | * in kernel/sched/core.c. Since this is considered temporary, we only | ||
213 | * support these hooks for the 32-bit kernel - there is no MIPS64 MT | ||
214 | * processor at the moment. | ||
215 | */ | ||
216 | #define sys_sched_setaffinity mipsmt_sys_sched_setaffinity | ||
217 | #define sys_sched_getaffinity mipsmt_sys_sched_getaffinity | ||
218 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
219 | |||
220 | #define __SYSCALL(nr, entry) PTR entry | ||
221 | .align 2 | ||
222 | .type sys_call_table, @object | ||
223 | EXPORT(sys_call_table) | ||
224 | #include <asm/syscall_table_32_o32.h> | ||
225 | #undef __SYSCALL | ||
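The error-flag sequence in handle_sys above (`sltu t0, t0, v0` followed by `sw t0, PT_R7(sp)`) is the kernel half of the MIPS syscall ABI: the result returns in v0, and a3 ($7) is non-zero when v0 holds a positive errno. A hedged sketch of the matching userspace side, in the style of a raw libc wrapper (illustrative, not the actual libc code; a conservative caller-saved clobber list is used):

    #include <errno.h>

    /* Illustrative raw o32 syscall wrapper: the kernel returns the
     * result in v0 and sets a3 ($7) non-zero when v0 holds a positive
     * errno, exactly the flag handle_sys stores to PT_R7 above. */
    static long raw_syscall1(long nr, long arg0)
    {
        register long v0 __asm__("$2") = nr;
        register long a0 __asm__("$4") = arg0;
        register long a3 __asm__("$7");

        __asm__ volatile("syscall"
                         : "+r"(v0), "=r"(a3)
                         : "r"(a0)
                         : "$1", "$3", "$8", "$9", "$10", "$11", "$12",
                           "$13", "$14", "$15", "$24", "$25",
                           "hi", "lo", "memory");

        if (a3) {                   /* error flag set by the kernel */
            errno = v0;
            return -1;
        }
        return v0;
    }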
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S new file mode 100644 index 000000000..35d8c86b1 --- /dev/null +++ b/arch/mips/kernel/scall64-n32.S | |||
@@ -0,0 +1,108 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01 by Ralf Baechle | ||
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
8 | * Copyright (C) 2001 MIPS Technologies, Inc. | ||
9 | */ | ||
10 | #include <linux/errno.h> | ||
11 | #include <asm/asm.h> | ||
12 | #include <asm/asmmacro.h> | ||
13 | #include <asm/irqflags.h> | ||
14 | #include <asm/mipsregs.h> | ||
15 | #include <asm/regdef.h> | ||
16 | #include <asm/stackframe.h> | ||
17 | #include <asm/thread_info.h> | ||
18 | #include <asm/unistd.h> | ||
19 | |||
20 | #ifndef CONFIG_MIPS32_O32 | ||
21 | /* No O32, so define handle_sys here */ | ||
22 | #define handle_sysn32 handle_sys | ||
23 | #endif | ||
24 | |||
25 | .align 5 | ||
26 | NESTED(handle_sysn32, PT_SIZE, sp) | ||
27 | #ifndef CONFIG_MIPS32_O32 | ||
28 | .set noat | ||
29 | SAVE_SOME | ||
30 | TRACE_IRQS_ON_RELOAD | ||
31 | STI | ||
32 | .set at | ||
33 | #endif | ||
34 | |||
35 | dsubu t0, v0, __NR_N32_Linux # check syscall number | ||
36 | sltiu t0, t0, __NR_N32_Linux_syscalls | ||
37 | |||
38 | #ifndef CONFIG_MIPS32_O32 | ||
39 | ld t1, PT_EPC(sp) # skip syscall on return | ||
40 | daddiu t1, 4 # skip to next instruction | ||
41 | sd t1, PT_EPC(sp) | ||
42 | #endif | ||
43 | beqz t0, not_n32_scall | ||
44 | |||
45 | sd a3, PT_R26(sp) # save a3 for syscall restarting | ||
46 | |||
47 | li t1, _TIF_WORK_SYSCALL_ENTRY | ||
48 | LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? | ||
49 | and t0, t1, t0 | ||
50 | bnez t0, n32_syscall_trace_entry | ||
51 | |||
52 | syscall_common: | ||
53 | dsll t0, v0, 3 # offset into table | ||
54 | ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0) | ||
55 | |||
56 | jalr t2 # Do The Real Thing (TM) | ||
57 | |||
58 | li t0, -EMAXERRNO - 1 # error? | ||
59 | sltu t0, t0, v0 | ||
60 | sd t0, PT_R7(sp) # set error flag | ||
61 | beqz t0, 1f | ||
62 | |||
63 | ld t1, PT_R2(sp) # syscall number | ||
64 | dnegu v0 # error | ||
65 | sd t1, PT_R0(sp) # save it for syscall restarting | ||
66 | 1: sd v0, PT_R2(sp) # result | ||
67 | |||
68 | j syscall_exit_partial | ||
69 | |||
70 | /* ------------------------------------------------------------------------ */ | ||
71 | |||
72 | n32_syscall_trace_entry: | ||
73 | SAVE_STATIC | ||
74 | move a0, sp | ||
75 | move a1, v0 | ||
76 | jal syscall_trace_enter | ||
77 | |||
78 | bltz v0, 1f # seccomp failed? Skip syscall | ||
79 | |||
80 | RESTORE_STATIC | ||
81 | ld v0, PT_R2(sp) # Restore syscall (maybe modified) | ||
82 | ld a0, PT_R4(sp) # Restore argument registers | ||
83 | ld a1, PT_R5(sp) | ||
84 | ld a2, PT_R6(sp) | ||
85 | ld a3, PT_R7(sp) | ||
86 | ld a4, PT_R8(sp) | ||
87 | ld a5, PT_R9(sp) | ||
88 | |||
89 | dsubu t2, v0, __NR_N32_Linux # check (new) syscall number | ||
90 | sltiu t0, t2, __NR_N32_Linux_syscalls | ||
91 | beqz t0, not_n32_scall | ||
92 | |||
93 | j syscall_common | ||
94 | |||
95 | 1: j syscall_exit | ||
96 | |||
97 | not_n32_scall: | ||
98 | /* This is not an n32 compatibility syscall; pass it on to | ||
99 | the n64 syscall handlers. */ | ||
100 | j handle_sys64 | ||
101 | |||
102 | END(handle_sysn32) | ||
103 | |||
104 | #define __SYSCALL(nr, entry) PTR entry | ||
105 | .type sysn32_call_table, @object | ||
106 | EXPORT(sysn32_call_table) | ||
107 | #include <asm/syscall_table_64_n32.h> | ||
108 | #undef __SYSCALL | ||
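The load above, `ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)`, folds the syscall base into the load displacement instead of subtracting it from the number first: t0 already holds nr * 8, so the biased displacement lands on table[nr - base]. In C terms (the helper name is illustrative; __NR_N32_Linux is 6000):

    /* Equivalent of the biased table load in handle_sysn32 above:
     * t0 = nr * 8, and the base*8 bias is folded into the displacement. */
    extern long (*sysn32_call_table[])(void);
    #define __NR_N32_Linux 6000     /* first n32 syscall number */

    static long (*entry_for(unsigned long nr))(void)
    {
        /* dsll t0, v0, 3; ld t2, (sysn32_call_table - base*8)(t0) */
        return sysn32_call_table[nr - __NR_N32_Linux];
    }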
diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S new file mode 100644 index 000000000..23b2e2b16 --- /dev/null +++ b/arch/mips/kernel/scall64-n64.S | |||
@@ -0,0 +1,117 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle | ||
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
8 | * Copyright (C) 2001 MIPS Technologies, Inc. | ||
9 | */ | ||
10 | #include <linux/errno.h> | ||
11 | #include <asm/asm.h> | ||
12 | #include <asm/asmmacro.h> | ||
13 | #include <asm/irqflags.h> | ||
14 | #include <asm/mipsregs.h> | ||
15 | #include <asm/regdef.h> | ||
16 | #include <asm/stackframe.h> | ||
17 | #include <asm/asm-offsets.h> | ||
18 | #include <asm/sysmips.h> | ||
19 | #include <asm/thread_info.h> | ||
20 | #include <asm/unistd.h> | ||
21 | #include <asm/war.h> | ||
22 | |||
23 | #ifndef CONFIG_BINFMT_ELF32 | ||
24 | /* Neither O32 nor N32, so define handle_sys here */ | ||
25 | #define handle_sys64 handle_sys | ||
26 | #endif | ||
27 | |||
28 | .align 5 | ||
29 | NESTED(handle_sys64, PT_SIZE, sp) | ||
30 | #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) | ||
31 | /* | ||
32 | * When 32-bit compatibility is configured scall_o32.S | ||
33 | * already did this. | ||
34 | */ | ||
35 | .set noat | ||
36 | SAVE_SOME | ||
37 | TRACE_IRQS_ON_RELOAD | ||
38 | STI | ||
39 | .set at | ||
40 | #endif | ||
41 | |||
42 | #if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32) | ||
43 | ld t1, PT_EPC(sp) # skip syscall on return | ||
44 | daddiu t1, 4 # skip to next instruction | ||
45 | sd t1, PT_EPC(sp) | ||
46 | #endif | ||
47 | |||
48 | sd a3, PT_R26(sp) # save a3 for syscall restarting | ||
49 | |||
50 | li t1, _TIF_WORK_SYSCALL_ENTRY | ||
51 | LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? | ||
52 | and t0, t1, t0 | ||
53 | bnez t0, syscall_trace_entry | ||
54 | |||
55 | syscall_common: | ||
56 | dsubu t2, v0, __NR_64_Linux | ||
57 | sltiu t0, t2, __NR_64_Linux_syscalls | ||
58 | beqz t0, illegal_syscall | ||
59 | |||
60 | dsll t0, t2, 3 # offset into table | ||
61 | dla t2, sys_call_table | ||
62 | daddu t0, t2, t0 | ||
63 | ld t2, (t0) # syscall routine | ||
64 | beqz t2, illegal_syscall | ||
65 | |||
66 | jalr t2 # Do The Real Thing (TM) | ||
67 | |||
68 | li t0, -EMAXERRNO - 1 # error? | ||
69 | sltu t0, t0, v0 | ||
70 | sd t0, PT_R7(sp) # set error flag | ||
71 | beqz t0, 1f | ||
72 | |||
73 | ld t1, PT_R2(sp) # syscall number | ||
74 | dnegu v0 # error | ||
75 | sd t1, PT_R0(sp) # save it for syscall restarting | ||
76 | 1: sd v0, PT_R2(sp) # result | ||
77 | |||
78 | n64_syscall_exit: | ||
79 | j syscall_exit_partial | ||
80 | |||
81 | /* ------------------------------------------------------------------------ */ | ||
82 | |||
83 | syscall_trace_entry: | ||
84 | SAVE_STATIC | ||
85 | move a0, sp | ||
86 | move a1, v0 | ||
87 | jal syscall_trace_enter | ||
88 | |||
89 | bltz v0, 1f # seccomp failed? Skip syscall | ||
90 | |||
91 | RESTORE_STATIC | ||
92 | ld v0, PT_R2(sp) # Restore syscall (maybe modified) | ||
93 | ld a0, PT_R4(sp) # Restore argument registers | ||
94 | ld a1, PT_R5(sp) | ||
95 | ld a2, PT_R6(sp) | ||
96 | ld a3, PT_R7(sp) | ||
97 | ld a4, PT_R8(sp) | ||
98 | ld a5, PT_R9(sp) | ||
99 | j syscall_common | ||
100 | |||
101 | 1: j syscall_exit | ||
102 | |||
103 | illegal_syscall: | ||
104 | /* This also isn't a 64-bit syscall; throw an error. */ | ||
105 | li v0, ENOSYS # error | ||
106 | sd v0, PT_R2(sp) | ||
107 | li t0, 1 # set error flag | ||
108 | sd t0, PT_R7(sp) | ||
109 | j n64_syscall_exit | ||
110 | END(handle_sys64) | ||
111 | |||
112 | #define __SYSCALL(nr, entry) PTR entry | ||
113 | .align 3 | ||
114 | .type sys_call_table, @object | ||
115 | EXPORT(sys_call_table) | ||
116 | #include <asm/syscall_table_64_n64.h> | ||
117 | #undef __SYSCALL | ||
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S new file mode 100644 index 000000000..50c9a57e0 --- /dev/null +++ b/arch/mips/kernel/scall64-o32.S | |||
@@ -0,0 +1,221 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1995 - 2000, 2001 by Ralf Baechle | ||
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
8 | * Copyright (C) 2001 MIPS Technologies, Inc. | ||
9 | * Copyright (C) 2004 Thiemo Seufer | ||
10 | * | ||
11 | * Hairy: the userspace application uses a different argument-passing | ||
12 | * convention than the kernel, so we have to translate things from o32 | ||
13 | * to ABI64 calling convention. 64-bit syscalls are also processed | ||
14 | * here for now. | ||
15 | */ | ||
16 | #include <linux/errno.h> | ||
17 | #include <asm/asm.h> | ||
18 | #include <asm/asmmacro.h> | ||
19 | #include <asm/irqflags.h> | ||
20 | #include <asm/mipsregs.h> | ||
21 | #include <asm/regdef.h> | ||
22 | #include <asm/stackframe.h> | ||
23 | #include <asm/thread_info.h> | ||
24 | #include <asm/unistd.h> | ||
25 | #include <asm/sysmips.h> | ||
26 | |||
27 | .align 5 | ||
28 | NESTED(handle_sys, PT_SIZE, sp) | ||
29 | .set noat | ||
30 | SAVE_SOME | ||
31 | TRACE_IRQS_ON_RELOAD | ||
32 | STI | ||
33 | .set at | ||
34 | ld t1, PT_EPC(sp) # skip syscall on return | ||
35 | |||
36 | dsubu t0, v0, __NR_O32_Linux # check syscall number | ||
37 | sltiu t0, t0, __NR_O32_Linux_syscalls | ||
38 | daddiu t1, 4 # skip to next instruction | ||
39 | sd t1, PT_EPC(sp) | ||
40 | beqz t0, not_o32_scall | ||
41 | #if 0 | ||
42 | SAVE_ALL | ||
43 | move a1, v0 | ||
44 | ASM_PRINT("Scall %ld\n") | ||
45 | RESTORE_ALL | ||
46 | #endif | ||
47 | |||
48 | /* We don't want to stumble over broken sign extensions from | ||
49 | userland. O32 never uses the upper half. */ | ||
50 | sll a0, a0, 0 | ||
51 | sll a1, a1, 0 | ||
52 | sll a2, a2, 0 | ||
53 | sll a3, a3, 0 | ||
54 | |||
55 | sd a3, PT_R26(sp) # save a3 for syscall restarting | ||
56 | |||
57 | /* | ||
58 | * More than four arguments. Try to deal with it by copying the | ||
59 | * stack arguments from the user stack to the kernel stack. | ||
60 | * This Sucks (TM). | ||
61 | * | ||
62 | * We intentionally keep the kernel stack a little below the top of | ||
63 | * userspace so we don't have to do a slower byte-accurate check here. | ||
64 | */ | ||
65 | ld t0, PT_R29(sp) # get old user stack pointer | ||
66 | daddu t1, t0, 32 | ||
67 | bltz t1, bad_stack | ||
68 | |||
69 | load_a4: lw a4, 16(t0) # argument #5 from usp | ||
70 | load_a5: lw a5, 20(t0) # argument #6 from usp | ||
71 | load_a6: lw a6, 24(t0) # argument #7 from usp | ||
72 | load_a7: lw a7, 28(t0) # argument #8 from usp | ||
73 | loads_done: | ||
74 | |||
75 | .section __ex_table,"a" | ||
76 | PTR load_a4, bad_stack_a4 | ||
77 | PTR load_a5, bad_stack_a5 | ||
78 | PTR load_a6, bad_stack_a6 | ||
79 | PTR load_a7, bad_stack_a7 | ||
80 | .previous | ||
81 | |||
82 | li t1, _TIF_WORK_SYSCALL_ENTRY | ||
83 | LONG_L t0, TI_FLAGS($28) # syscall tracing enabled? | ||
84 | and t0, t1, t0 | ||
85 | bnez t0, trace_a_syscall | ||
86 | |||
87 | syscall_common: | ||
88 | dsll t0, v0, 3 # offset into table | ||
89 | ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0) | ||
90 | |||
91 | jalr t2 # Do The Real Thing (TM) | ||
92 | |||
93 | li t0, -EMAXERRNO - 1 # error? | ||
94 | sltu t0, t0, v0 | ||
95 | sd t0, PT_R7(sp) # set error flag | ||
96 | beqz t0, 1f | ||
97 | |||
98 | ld t1, PT_R2(sp) # syscall number | ||
99 | dnegu v0 # error | ||
100 | sd t1, PT_R0(sp) # save it for syscall restarting | ||
101 | 1: sd v0, PT_R2(sp) # result | ||
102 | |||
103 | o32_syscall_exit: | ||
104 | j syscall_exit_partial | ||
105 | |||
106 | /* ------------------------------------------------------------------------ */ | ||
107 | |||
108 | trace_a_syscall: | ||
109 | SAVE_STATIC | ||
110 | sd a4, PT_R8(sp) # Save argument registers | ||
111 | sd a5, PT_R9(sp) | ||
112 | sd a6, PT_R10(sp) | ||
113 | sd a7, PT_R11(sp) # For indirect syscalls | ||
114 | |||
115 | move a0, sp | ||
116 | /* | ||
117 | * The absolute syscall number is in v0 unless we called syscall(__NR_###), | ||
118 | * in which case the real syscall number is in a0. | ||
119 | * note: NR_syscall is the first O32 syscall, but the macro is | ||
120 | * only defined when compiling with -mabi=32 (CONFIG_32BIT); | ||
121 | * therefore __NR_O32_Linux (4000) is used | ||
122 | */ | ||
123 | .set push | ||
124 | .set reorder | ||
125 | subu t1, v0, __NR_O32_Linux | ||
126 | move a1, v0 | ||
127 | bnez t1, 1f /* __NR_syscall at offset 0 */ | ||
128 | ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */ | ||
129 | .set pop | ||
130 | |||
131 | 1: jal syscall_trace_enter | ||
132 | |||
133 | bltz v0, 1f # seccomp failed? Skip syscall | ||
134 | |||
135 | RESTORE_STATIC | ||
136 | ld v0, PT_R2(sp) # Restore syscall (maybe modified) | ||
137 | ld a0, PT_R4(sp) # Restore argument registers | ||
138 | ld a1, PT_R5(sp) | ||
139 | ld a2, PT_R6(sp) | ||
140 | ld a3, PT_R7(sp) | ||
141 | ld a4, PT_R8(sp) | ||
142 | ld a5, PT_R9(sp) | ||
143 | ld a6, PT_R10(sp) | ||
144 | ld a7, PT_R11(sp) # For indirect syscalls | ||
145 | |||
146 | dsubu t0, v0, __NR_O32_Linux # check (new) syscall number | ||
147 | sltiu t0, t0, __NR_O32_Linux_syscalls | ||
148 | beqz t0, not_o32_scall | ||
149 | |||
150 | j syscall_common | ||
151 | |||
152 | 1: j syscall_exit | ||
153 | |||
154 | /* ------------------------------------------------------------------------ */ | ||
155 | |||
156 | /* | ||
157 | * The stack pointer for a call with more than 4 arguments is bad. | ||
158 | */ | ||
159 | bad_stack: | ||
160 | li v0, EFAULT | ||
161 | sd v0, PT_R2(sp) | ||
162 | li t0, 1 # set error flag | ||
163 | sd t0, PT_R7(sp) | ||
164 | j o32_syscall_exit | ||
165 | |||
166 | bad_stack_a4: | ||
167 | li a4, 0 | ||
168 | b load_a5 | ||
169 | |||
170 | bad_stack_a5: | ||
171 | li a5, 0 | ||
172 | b load_a6 | ||
173 | |||
174 | bad_stack_a6: | ||
175 | li a6, 0 | ||
176 | b load_a7 | ||
177 | |||
178 | bad_stack_a7: | ||
179 | li a7, 0 | ||
180 | b loads_done | ||
181 | |||
182 | not_o32_scall: | ||
183 | /* | ||
184 | * This is not an o32 compatibility syscall, pass it on | ||
185 | * to the 64-bit syscall handlers. | ||
186 | */ | ||
187 | #ifdef CONFIG_MIPS32_N32 | ||
188 | j handle_sysn32 | ||
189 | #else | ||
190 | j handle_sys64 | ||
191 | #endif | ||
192 | END(handle_sys) | ||
193 | |||
194 | LEAF(sys32_syscall) | ||
195 | subu t0, a0, __NR_O32_Linux # check syscall number | ||
196 | sltiu v0, t0, __NR_O32_Linux_syscalls | ||
197 | beqz t0, einval # do not recurse | ||
198 | dsll t1, t0, 3 | ||
199 | beqz v0, einval | ||
200 | ld t2, sys32_call_table(t1) # syscall routine | ||
201 | |||
202 | move a0, a1 # shift argument registers | ||
203 | move a1, a2 | ||
204 | move a2, a3 | ||
205 | move a3, a4 | ||
206 | move a4, a5 | ||
207 | move a5, a6 | ||
208 | move a6, a7 | ||
209 | jr t2 | ||
210 | /* Unreached */ | ||
211 | |||
212 | einval: li v0, -ENOSYS | ||
213 | jr ra | ||
214 | END(sys32_syscall) | ||
215 | |||
216 | #define __SYSCALL(nr, entry) PTR entry | ||
217 | .align 3 | ||
218 | .type sys32_call_table,@object | ||
219 | EXPORT(sys32_call_table) | ||
220 | #include <asm/syscall_table_64_o32.h> | ||
221 | #undef __SYSCALL | ||
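A note on the `sll a0, a0, 0` run earlier in this handler: on MIPS64, a 32-bit shift always produces a sign-extended 32-bit result, so shifting by zero is the canonical way to canonicalize an o32 argument register. The equivalent C (helper name illustrative):

    #include <stdint.h>

    /* What `sll reg, reg, 0` achieves on MIPS64: truncate to 32 bits
     * and sign-extend back, discarding stale upper-half bits left by
     * userland. */
    static inline int64_t normalize_o32_arg(int64_t reg)
    {
        return (int64_t)(int32_t)(uint32_t)reg;
    }

This matters because, as the comment above the shifts notes, o32 userland makes no guarantees about the upper 32 bits of an argument register.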
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c new file mode 100644 index 000000000..0a9bd7b09 --- /dev/null +++ b/arch/mips/kernel/segment.c | |||
@@ -0,0 +1,104 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
7 | */ | ||
8 | |||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/debugfs.h> | ||
11 | #include <linux/seq_file.h> | ||
12 | #include <asm/cpu.h> | ||
13 | #include <asm/debug.h> | ||
14 | #include <asm/mipsregs.h> | ||
15 | |||
16 | static void build_segment_config(char *str, unsigned int cfg) | ||
17 | { | ||
18 | unsigned int am; | ||
19 | static const char * const am_str[] = { | ||
20 | "UK", "MK", "MSK", "MUSK", "MUSUK", "USK", | ||
21 | "RSRVD", "UUSK"}; | ||
22 | |||
23 | /* Segment access mode. */ | ||
24 | am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT; | ||
25 | str += sprintf(str, "%-5s", am_str[am]); | ||
26 | |||
27 | /* | ||
28 | * Access modes MK, MSK and MUSK are mapped segments. Therefore | ||
29 | * there is no direct physical address mapping unless it becomes | ||
30 | * unmapped uncached at error level due to EU. | ||
31 | */ | ||
32 | if ((am == 0) || (am > 3) || (cfg & MIPS_SEGCFG_EU)) | ||
33 | str += sprintf(str, " %03lx", | ||
34 | ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT)); | ||
35 | else | ||
36 | str += sprintf(str, " UND"); | ||
37 | |||
38 | if ((am == 0) || (am > 3)) | ||
39 | str += sprintf(str, " %01ld", | ||
40 | ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT)); | ||
41 | else | ||
42 | str += sprintf(str, " U"); | ||
43 | |||
44 | /* Exception configuration. */ | ||
45 | str += sprintf(str, " %01ld\n", | ||
46 | ((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT)); | ||
47 | } | ||
48 | |||
49 | static int show_segments(struct seq_file *m, void *v) | ||
50 | { | ||
51 | unsigned int segcfg; | ||
52 | char str[42]; | ||
53 | |||
54 | seq_puts(m, "Segment Virtual Size Access Mode Physical Caching EU\n"); | ||
55 | seq_puts(m, "------- ------- ---- ----------- -------- ------- --\n"); | ||
56 | |||
57 | segcfg = read_c0_segctl0(); | ||
58 | build_segment_config(str, segcfg); | ||
59 | seq_printf(m, " 0 e0000000 512M %s", str); | ||
60 | |||
61 | segcfg >>= 16; | ||
62 | build_segment_config(str, segcfg); | ||
63 | seq_printf(m, " 1 c0000000 512M %s", str); | ||
64 | |||
65 | segcfg = read_c0_segctl1(); | ||
66 | build_segment_config(str, segcfg); | ||
67 | seq_printf(m, " 2 a0000000 512M %s", str); | ||
68 | |||
69 | segcfg >>= 16; | ||
70 | build_segment_config(str, segcfg); | ||
71 | seq_printf(m, " 3 80000000 512M %s", str); | ||
72 | |||
73 | segcfg = read_c0_segctl2(); | ||
74 | build_segment_config(str, segcfg); | ||
75 | seq_printf(m, " 4 40000000 1G %s", str); | ||
76 | |||
77 | segcfg >>= 16; | ||
78 | build_segment_config(str, segcfg); | ||
79 | seq_printf(m, " 5 00000000 1G %s\n", str); | ||
80 | |||
81 | return 0; | ||
82 | } | ||
83 | |||
84 | static int segments_open(struct inode *inode, struct file *file) | ||
85 | { | ||
86 | return single_open(file, show_segments, NULL); | ||
87 | } | ||
88 | |||
89 | static const struct file_operations segments_fops = { | ||
90 | .open = segments_open, | ||
91 | .read = seq_read, | ||
92 | .llseek = seq_lseek, | ||
93 | .release = single_release, | ||
94 | }; | ||
95 | |||
96 | static int __init segments_info(void) | ||
97 | { | ||
98 | if (cpu_has_segments) | ||
99 | debugfs_create_file("segments", S_IRUGO, mips_debugfs_dir, NULL, | ||
100 | &segments_fops); | ||
101 | return 0; | ||
102 | } | ||
103 | |||
104 | device_initcall(segments_info); | ||
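Once cpu_has_segments holds, the file appears under the MIPS debugfs directory. A small userspace sketch that dumps it (assuming debugfs is mounted at the conventional /sys/kernel/debug):

    #include <stdio.h>

    /* Dump the segments file created above; the debugfs mount point is
     * the conventional one and may differ on your system. */
    int main(void)
    {
        char line[128];
        FILE *f = fopen("/sys/kernel/debug/mips/segments", "r");

        if (!f) {
            perror("segments");
            return 1;
        }
        while (fgets(line, sizeof(line), f))
            fputs(line, stdout);
        fclose(f);
        return 0;
    }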
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c new file mode 100644 index 000000000..9d11f68a9 --- /dev/null +++ b/arch/mips/kernel/setup.c | |||
@@ -0,0 +1,830 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1995 Linus Torvalds | ||
7 | * Copyright (C) 1995 Waldorf Electronics | ||
8 | * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle | ||
9 | * Copyright (C) 1996 Stoned Elipot | ||
10 | * Copyright (C) 1999 Silicon Graphics, Inc. | ||
11 | * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki | ||
12 | */ | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/ioport.h> | ||
15 | #include <linux/export.h> | ||
16 | #include <linux/screen_info.h> | ||
17 | #include <linux/memblock.h> | ||
18 | #include <linux/initrd.h> | ||
19 | #include <linux/root_dev.h> | ||
20 | #include <linux/highmem.h> | ||
21 | #include <linux/console.h> | ||
22 | #include <linux/pfn.h> | ||
23 | #include <linux/debugfs.h> | ||
24 | #include <linux/kexec.h> | ||
25 | #include <linux/sizes.h> | ||
26 | #include <linux/device.h> | ||
27 | #include <linux/dma-map-ops.h> | ||
28 | #include <linux/decompress/generic.h> | ||
29 | #include <linux/of_fdt.h> | ||
30 | #include <linux/of_reserved_mem.h> | ||
31 | #include <linux/dmi.h> | ||
32 | |||
33 | #include <asm/addrspace.h> | ||
34 | #include <asm/bootinfo.h> | ||
35 | #include <asm/bugs.h> | ||
36 | #include <asm/cache.h> | ||
37 | #include <asm/cdmm.h> | ||
38 | #include <asm/cpu.h> | ||
39 | #include <asm/debug.h> | ||
40 | #include <asm/dma-coherence.h> | ||
41 | #include <asm/sections.h> | ||
42 | #include <asm/setup.h> | ||
43 | #include <asm/smp-ops.h> | ||
44 | #include <asm/prom.h> | ||
45 | |||
46 | #ifdef CONFIG_MIPS_ELF_APPENDED_DTB | ||
47 | const char __section(".appended_dtb") __appended_dtb[0x100000]; | ||
48 | #endif /* CONFIG_MIPS_ELF_APPENDED_DTB */ | ||
49 | |||
50 | struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly; | ||
51 | |||
52 | EXPORT_SYMBOL(cpu_data); | ||
53 | |||
54 | #ifdef CONFIG_VT | ||
55 | struct screen_info screen_info; | ||
56 | #endif | ||
57 | |||
58 | /* | ||
59 | * Setup information | ||
60 | * | ||
61 | * These are initialized so they are in the .data section | ||
62 | */ | ||
63 | unsigned long mips_machtype __read_mostly = MACH_UNKNOWN; | ||
64 | |||
65 | EXPORT_SYMBOL(mips_machtype); | ||
66 | |||
67 | static char __initdata command_line[COMMAND_LINE_SIZE]; | ||
68 | char __initdata arcs_cmdline[COMMAND_LINE_SIZE]; | ||
69 | |||
70 | #ifdef CONFIG_CMDLINE_BOOL | ||
71 | static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE; | ||
72 | #else | ||
73 | static const char builtin_cmdline[] __initconst = ""; | ||
74 | #endif | ||
75 | |||
76 | /* | ||
77 | * mips_io_port_base is the beginning of the address space to which x86-style | ||
78 | * I/O ports are mapped. | ||
79 | */ | ||
80 | unsigned long mips_io_port_base = -1; | ||
81 | EXPORT_SYMBOL(mips_io_port_base); | ||
82 | |||
83 | static struct resource code_resource = { .name = "Kernel code", }; | ||
84 | static struct resource data_resource = { .name = "Kernel data", }; | ||
85 | static struct resource bss_resource = { .name = "Kernel bss", }; | ||
86 | |||
87 | static void *detect_magic __initdata = detect_memory_region; | ||
88 | |||
89 | #ifdef CONFIG_MIPS_AUTO_PFN_OFFSET | ||
90 | unsigned long ARCH_PFN_OFFSET; | ||
91 | EXPORT_SYMBOL(ARCH_PFN_OFFSET); | ||
92 | #endif | ||
93 | |||
94 | void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max) | ||
95 | { | ||
96 | void *dm = &detect_magic; | ||
97 | phys_addr_t size; | ||
98 | |||
99 | for (size = sz_min; size < sz_max; size <<= 1) { | ||
100 | if (!memcmp(dm, dm + size, sizeof(detect_magic))) | ||
101 | break; | ||
102 | } | ||
103 | |||
104 | pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n", | ||
105 | ((unsigned long long) size) / SZ_1M, | ||
106 | (unsigned long long) start, | ||
107 | ((unsigned long long) sz_min) / SZ_1M, | ||
108 | ((unsigned long long) sz_max) / SZ_1M); | ||
109 | |||
110 | memblock_add(start, size); | ||
111 | } | ||
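The probe above exploits address aliasing: on boards whose RAM is only partially decoded, the contents of detect_magic reappear at start + size once size reaches the installed memory size, so doubling size until the magic is seen again finds the real amount of RAM. A minimal userspace sketch of the same doubling search, simulating the wrap-around with a modulo (the buffer sizes and magic value are illustrative, not from the kernel):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MIN_SZ 4u
    #define MAX_SZ 64u

    static uint8_t ram[MAX_SZ];     /* pretend this is the CPU's view of RAM */

    /* Partially decoded RAM of real_size bytes: high address bits ignored. */
    static uint8_t read_ram(unsigned int real_size, unsigned int addr)
    {
        return ram[addr % real_size];
    }

    int main(void)
    {
        const unsigned int real_size = 16;      /* the "installed" RAM */
        const uint32_t magic = 0xdeadbeef;
        unsigned int size, i;

        memcpy(ram, &magic, sizeof(magic));     /* plant the detect_magic */

        for (size = MIN_SZ; size < MAX_SZ; size <<= 1) {
            uint8_t probe[sizeof(magic)];

            for (i = 0; i < sizeof(magic); i++)
                probe[i] = read_ram(real_size, size + i);
            /* The magic reappearing at offset `size` means the address
             * wrapped, i.e. `size` has reached the real RAM size. */
            if (!memcmp(probe, &magic, sizeof(magic)))
                break;
        }
        printf("detected %u bytes (real %u)\n", size, real_size);
        return 0;
    }

This prints "detected 16 bytes (real 16)", mirroring how the kernel loop breaks at the first size whose probe aliases back onto detect_magic.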
112 | |||
113 | /* | ||
114 | * Manage initrd | ||
115 | */ | ||
116 | #ifdef CONFIG_BLK_DEV_INITRD | ||
117 | |||
118 | static int __init rd_start_early(char *p) | ||
119 | { | ||
120 | unsigned long start = memparse(p, &p); | ||
121 | |||
122 | #ifdef CONFIG_64BIT | ||
123 | /* Guess if the sign extension was forgotten by bootloader */ | ||
124 | if (start < XKPHYS) | ||
125 | start = (int)start; | ||
126 | #endif | ||
127 | initrd_start = start; | ||
128 | initrd_end += start; | ||
129 | return 0; | ||
130 | } | ||
131 | early_param("rd_start", rd_start_early); | ||
132 | |||
133 | static int __init rd_size_early(char *p) | ||
134 | { | ||
135 | initrd_end += memparse(p, &p); | ||
136 | return 0; | ||
137 | } | ||
138 | early_param("rd_size", rd_size_early); | ||
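Both parameters go through memparse(), so the usual size suffixes work; a hypothetical bootloader might pass `rd_start=0x88000000 rd_size=8M` (addresses illustrative). A minimal userspace sketch of the K/M/G suffix handling that memparse() performs (the real helper also accepts T/P/E suffixes):

    #include <stdio.h>
    #include <stdlib.h>

    /* Sketch of kernel memparse(): a number with an optional K/M/G suffix;
     * *retp ends up pointing just past whatever was consumed. */
    static unsigned long long memparse_sketch(const char *p, char **retp)
    {
        unsigned long long v = strtoull(p, retp, 0); /* base 0: 0x.. ok */

        switch (**retp) {
        case 'G': case 'g': (*retp)++; return v << 30;
        case 'M': case 'm': (*retp)++; return v << 20;
        case 'K': case 'k': (*retp)++; return v << 10;
        }
        return v;
    }

    int main(void)
    {
        char *end;

        printf("%llu\n", memparse_sketch("8M", &end));         /* 8388608 */
        printf("%llu\n", memparse_sketch("0x88000000", &end)); /* 2281701376 */
        return 0;
    }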
139 | |||
140 | /* Returns the next free PFN after the initrd. */ | ||
141 | static unsigned long __init init_initrd(void) | ||
142 | { | ||
143 | unsigned long end; | ||
144 | |||
145 | /* | ||
146 | * Board specific code or the command line parser should have | ||
147 | * already set up initrd_start and initrd_end. In that case, | ||
148 | * perform sanity checks and use them if all looks good. | ||
149 | */ | ||
150 | if (!initrd_start || initrd_end <= initrd_start) | ||
151 | goto disable; | ||
152 | |||
153 | if (initrd_start & ~PAGE_MASK) { | ||
154 | pr_err("initrd start must be page aligned\n"); | ||
155 | goto disable; | ||
156 | } | ||
157 | if (initrd_start < PAGE_OFFSET) { | ||
158 | pr_err("initrd start < PAGE_OFFSET\n"); | ||
159 | goto disable; | ||
160 | } | ||
161 | |||
162 | /* | ||
163 | * Sanitize the initrd addresses. For example, firmware | ||
164 | * can't guess whether it needs to pass them as 64-bit | ||
165 | * values when the kernel has been built pure 32-bit. | ||
166 | * We also need to switch from KSEG0 to XKPHYS | ||
167 | * addresses now, so the code below can safely use __pa(). | ||
168 | */ | ||
169 | end = __pa(initrd_end); | ||
170 | initrd_end = (unsigned long)__va(end); | ||
171 | initrd_start = (unsigned long)__va(__pa(initrd_start)); | ||
172 | |||
173 | ROOT_DEV = Root_RAM0; | ||
174 | return PFN_UP(end); | ||
175 | disable: | ||
176 | initrd_start = 0; | ||
177 | initrd_end = 0; | ||
178 | return 0; | ||
179 | } | ||
180 | |||
181 | /* In some configurations (e.g. a big endian bootloader with a little | ||
182 | endian kernel), the initrd might appear byte-swapped. Try to detect | ||
183 | this and byte-swap it if needed. */ | ||
184 | static void __init maybe_bswap_initrd(void) | ||
185 | { | ||
186 | #if defined(CONFIG_CPU_CAVIUM_OCTEON) | ||
187 | u64 buf; | ||
188 | |||
189 | /* Check for CPIO signature */ | ||
190 | if (!memcmp((void *)initrd_start, "070701", 6)) | ||
191 | return; | ||
192 | |||
193 | /* Check for compressed initrd */ | ||
194 | if (decompress_method((unsigned char *)initrd_start, 8, NULL)) | ||
195 | return; | ||
196 | |||
197 | /* Try again with a byte swapped header */ | ||
198 | buf = swab64p((u64 *)initrd_start); | ||
199 | if (!memcmp(&buf, "070701", 6) || | ||
200 | decompress_method((unsigned char *)(&buf), 8, NULL)) { | ||
201 | unsigned long i; | ||
202 | |||
203 | pr_info("Byteswapped initrd detected\n"); | ||
204 | for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8) | ||
205 | swab64s((u64 *)i); | ||
206 | } | ||
207 | #endif | ||
208 | } | ||
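A standalone illustration of the check above: a new-ASCII cpio archive begins with the bytes "070701", so an initrd written by an opposite-endian bootloader only matches the magic after a 64-bit byte swap (the swab64 helper here is a local stand-in for the kernel's, and the compressed-image branch is omitted):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    static uint64_t swab64_sketch(uint64_t v)   /* reverse the 8 bytes */
    {
        uint64_t r = 0;
        int i;

        for (i = 0; i < 8; i++)
            r = (r << 8) | ((v >> (8 * i)) & 0xff);
        return r;
    }

    int main(void)
    {
        uint64_t hdr, fixed;

        /* "07070100" as laid out by an opposite-endian bootloader */
        memcpy(&hdr, "00107070", 8);
        fixed = swab64_sketch(hdr);

        printf("as-is:   %s\n", memcmp(&hdr, "070701", 6) ? "no" : "match");
        printf("swapped: %s\n", memcmp(&fixed, "070701", 6) ? "no" : "match");
        return 0;
    }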
209 | |||
210 | static void __init finalize_initrd(void) | ||
211 | { | ||
212 | unsigned long size = initrd_end - initrd_start; | ||
213 | |||
214 | if (size == 0) { | ||
215 | printk(KERN_INFO "Initrd not found or empty"); | ||
216 | goto disable; | ||
217 | } | ||
218 | if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) { | ||
219 | printk(KERN_ERR "Initrd extends beyond end of memory"); | ||
220 | goto disable; | ||
221 | } | ||
222 | |||
223 | maybe_bswap_initrd(); | ||
224 | |||
225 | memblock_reserve(__pa(initrd_start), size); | ||
226 | initrd_below_start_ok = 1; | ||
227 | |||
228 | pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n", | ||
229 | initrd_start, size); | ||
230 | return; | ||
231 | disable: | ||
232 | printk(KERN_CONT " - disabling initrd\n"); | ||
233 | initrd_start = 0; | ||
234 | initrd_end = 0; | ||
235 | } | ||
236 | |||
237 | #else /* !CONFIG_BLK_DEV_INITRD */ | ||
238 | |||
239 | static unsigned long __init init_initrd(void) | ||
240 | { | ||
241 | return 0; | ||
242 | } | ||
243 | |||
244 | #define finalize_initrd() do {} while (0) | ||
245 | |||
246 | #endif | ||
247 | |||
248 | /* | ||
249 | * Initialize the bootmem allocator. It also setup initrd related data | ||
250 | * if needed. | ||
251 | */ | ||
252 | #if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA)) | ||
253 | |||
254 | static void __init bootmem_init(void) | ||
255 | { | ||
256 | init_initrd(); | ||
257 | finalize_initrd(); | ||
258 | } | ||
259 | |||
260 | #else /* !CONFIG_SGI_IP27 */ | ||
261 | |||
262 | static void __init bootmem_init(void) | ||
263 | { | ||
264 | phys_addr_t ramstart, ramend; | ||
265 | unsigned long start, end; | ||
266 | int i; | ||
267 | |||
268 | ramstart = memblock_start_of_DRAM(); | ||
269 | ramend = memblock_end_of_DRAM(); | ||
270 | |||
271 | /* | ||
272 | * Sanity check any INITRD first. We don't take it into account | ||
273 | * for bootmem setup initially, relying on the end of kernel code | ||
274 | * as our memory range starting point. Once bootmem is initialized we | ||
275 | * will reserve the area used for the initrd. | ||
276 | */ | ||
277 | init_initrd(); | ||
278 | |||
279 | /* Reserve memory occupied by kernel. */ | ||
280 | memblock_reserve(__pa_symbol(&_text), | ||
281 | __pa_symbol(&_end) - __pa_symbol(&_text)); | ||
282 | |||
283 | /* max_low_pfn is not a number of pages but the end pfn of low mem */ | ||
284 | |||
285 | #ifdef CONFIG_MIPS_AUTO_PFN_OFFSET | ||
286 | ARCH_PFN_OFFSET = PFN_UP(ramstart); | ||
287 | #else | ||
288 | /* | ||
289 | * Reserve any memory between the start of RAM and PHYS_OFFSET | ||
290 | */ | ||
291 | if (ramstart > PHYS_OFFSET) | ||
292 | memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET); | ||
293 | |||
294 | if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) { | ||
295 | pr_info("Wasting %lu bytes for tracking %lu unused pages\n", | ||
296 | (unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)), | ||
297 | (unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET)); | ||
298 | } | ||
299 | #endif | ||
300 | |||
301 | min_low_pfn = ARCH_PFN_OFFSET; | ||
302 | max_pfn = PFN_DOWN(ramend); | ||
303 | for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { | ||
304 | /* | ||
305 | * Skip highmem here so we get an accurate max_low_pfn if low | ||
306 | * memory stops short of high memory. | ||
307 | * If the region overlaps HIGHMEM_START, end is clipped so | ||
308 | * max_pfn excludes the highmem portion. | ||
309 | */ | ||
310 | if (start >= PFN_DOWN(HIGHMEM_START)) | ||
311 | continue; | ||
312 | if (end > PFN_DOWN(HIGHMEM_START)) | ||
313 | end = PFN_DOWN(HIGHMEM_START); | ||
314 | if (end > max_low_pfn) | ||
315 | max_low_pfn = end; | ||
316 | } | ||
317 | |||
318 | if (min_low_pfn >= max_low_pfn) | ||
319 | panic("Incorrect memory mapping !!!"); | ||
320 | |||
321 | if (max_pfn > PFN_DOWN(HIGHMEM_START)) { | ||
322 | #ifdef CONFIG_HIGHMEM | ||
323 | highstart_pfn = PFN_DOWN(HIGHMEM_START); | ||
324 | highend_pfn = max_pfn; | ||
325 | #else | ||
326 | max_low_pfn = PFN_DOWN(HIGHMEM_START); | ||
327 | max_pfn = max_low_pfn; | ||
328 | #endif | ||
329 | } | ||
330 | |||
331 | /* | ||
332 | * Reserve initrd memory if needed. | ||
333 | */ | ||
334 | finalize_initrd(); | ||
335 | } | ||
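To make the low/high split concrete, here is a userspace sketch with made-up numbers (4 KiB pages, HIGHMEM_START at 512 MiB, one 1 GiB RAM range at physical address 0; all values illustrative):

    #include <stdio.h>

    #define PAGE_SHIFT 12
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
    #define HIGHMEM_START 0x20000000UL  /* 512 MiB, illustrative */

    int main(void)
    {
        unsigned long start = PFN_DOWN(0x00000000UL);
        unsigned long end = PFN_DOWN(0x40000000UL); /* 1 GiB of RAM */
        unsigned long max_low_pfn = 0, max_pfn = end;

        if (start < PFN_DOWN(HIGHMEM_START)) {      /* not purely highmem */
            unsigned long e = end;

            if (e > PFN_DOWN(HIGHMEM_START))
                e = PFN_DOWN(HIGHMEM_START);        /* clip at highmem */
            if (e > max_low_pfn)
                max_low_pfn = e;
        }
        /* Without CONFIG_HIGHMEM, max_pfn would then be clipped back to
         * max_low_pfn and the upper 512 MiB simply dropped. */
        printf("max_low_pfn=%lu max_pfn=%lu high pages=%lu\n",
               max_low_pfn, max_pfn, max_pfn - max_low_pfn);
        return 0;
    }

This prints max_low_pfn=131072 and max_pfn=262144: the lower 512 MiB is usable low memory and the remaining 131072 pages are highmem.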
336 | |||
337 | #endif /* CONFIG_SGI_IP27 */ | ||
338 | |||
339 | static int usermem __initdata; | ||
340 | |||
341 | static int __init early_parse_mem(char *p) | ||
342 | { | ||
343 | phys_addr_t start, size; | ||
344 | |||
345 | /* | ||
346 | * If a user specifies memory size, we | ||
347 | * blow away any automatically generated | ||
348 | * size. | ||
349 | */ | ||
350 | if (usermem == 0) { | ||
351 | usermem = 1; | ||
352 | memblock_remove(memblock_start_of_DRAM(), | ||
353 | memblock_end_of_DRAM() - memblock_start_of_DRAM()); | ||
354 | } | ||
355 | start = 0; | ||
356 | size = memparse(p, &p); | ||
357 | if (*p == '@') | ||
358 | start = memparse(p + 1, &p); | ||
359 | |||
360 | memblock_add(start, size); | ||
361 | |||
362 | return 0; | ||
363 | } | ||
364 | early_param("mem", early_parse_mem); | ||
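For example, a hypothetical `mem=256M@0x10000000` first wipes the firmware-provided map (the one-shot usermem latch above) and then registers exactly 256 MiB at physical 256 MiB; a second `mem=` argument on the same command line adds a further range without wiping again.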
365 | |||
366 | static int __init early_parse_memmap(char *p) | ||
367 | { | ||
368 | char *oldp; | ||
369 | u64 start_at, mem_size; | ||
370 | |||
371 | if (!p) | ||
372 | return -EINVAL; | ||
373 | |||
374 | if (!strncmp(p, "exactmap", 8)) { | ||
375 | pr_err("\"memmap=exactmap\" invalid on MIPS\n"); | ||
376 | return 0; | ||
377 | } | ||
378 | |||
379 | oldp = p; | ||
380 | mem_size = memparse(p, &p); | ||
381 | if (p == oldp) | ||
382 | return -EINVAL; | ||
383 | |||
384 | if (*p == '@') { | ||
385 | start_at = memparse(p+1, &p); | ||
386 | memblock_add(start_at, mem_size); | ||
387 | } else if (*p == '#') { | ||
388 | pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n"); | ||
389 | return -EINVAL; | ||
390 | } else if (*p == '$') { | ||
391 | start_at = memparse(p+1, &p); | ||
392 | memblock_add(start_at, mem_size); | ||
393 | memblock_reserve(start_at, mem_size); | ||
394 | } else { | ||
395 | pr_err("\"memmap\" invalid format!\n"); | ||
396 | return -EINVAL; | ||
397 | } | ||
398 | |||
399 | if (*p == '\0') { | ||
400 | usermem = 1; | ||
401 | return 0; | ||
402 | } else | ||
403 | return -EINVAL; | ||
404 | } | ||
405 | early_param("memmap", early_parse_memmap); | ||
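So, illustratively: `memmap=16M@0x1000000` adds 16 MiB of RAM at 16 MiB; `memmap=16M$0x1000000` adds the same range but immediately reserves it, which is useful for punching a hole the kernel will not allocate from; the ACPI form `memmap=nn#ss` and `memmap=exactmap` are both rejected on MIPS.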
406 | |||
407 | #ifdef CONFIG_PROC_VMCORE | ||
408 | static unsigned long setup_elfcorehdr, setup_elfcorehdr_size; | ||
409 | static int __init early_parse_elfcorehdr(char *p) | ||
410 | { | ||
411 | phys_addr_t start, end; | ||
412 | u64 i; | ||
413 | |||
414 | setup_elfcorehdr = memparse(p, &p); | ||
415 | |||
416 | for_each_mem_range(i, &start, &end) { | ||
417 | if (setup_elfcorehdr >= start && setup_elfcorehdr < end) { | ||
418 | /* | ||
419 | * Reserve from the elf core header to the end of | ||
420 | * the memory segment; that should all be memory | ||
421 | * reserved for kdump. | ||
422 | */ | ||
423 | setup_elfcorehdr_size = end - setup_elfcorehdr; | ||
424 | break; | ||
425 | } | ||
426 | } | ||
427 | /* | ||
428 | * If we don't find it in the memory map, then we shouldn't | ||
429 | * have to worry about it, as the new kernel won't use it. | ||
430 | */ | ||
431 | return 0; | ||
432 | } | ||
433 | early_param("elfcorehdr", early_parse_elfcorehdr); | ||
434 | #endif | ||
435 | |||
436 | #ifdef CONFIG_KEXEC | ||
437 | |||
438 | /* 64M alignment for crash kernel regions */ | ||
439 | #define CRASH_ALIGN SZ_64M | ||
440 | #define CRASH_ADDR_MAX SZ_512M | ||
441 | |||
442 | static void __init mips_parse_crashkernel(void) | ||
443 | { | ||
444 | unsigned long long total_mem; | ||
445 | unsigned long long crash_size, crash_base; | ||
446 | int ret; | ||
447 | |||
448 | total_mem = memblock_phys_mem_size(); | ||
449 | ret = parse_crashkernel(boot_command_line, total_mem, | ||
450 | &crash_size, &crash_base); | ||
451 | if (ret != 0 || crash_size <= 0) | ||
452 | return; | ||
453 | |||
454 | if (crash_base <= 0) { | ||
455 | crash_base = memblock_find_in_range(CRASH_ALIGN, CRASH_ADDR_MAX, | ||
456 | crash_size, CRASH_ALIGN); | ||
457 | if (!crash_base) { | ||
458 | pr_warn("crashkernel reservation failed - No suitable area found.\n"); | ||
459 | return; | ||
460 | } | ||
461 | } else { | ||
462 | unsigned long long start; | ||
463 | |||
464 | start = memblock_find_in_range(crash_base, crash_base + crash_size, | ||
465 | crash_size, 1); | ||
466 | if (start != crash_base) { | ||
467 | pr_warn("Invalid memory region reserved for crash kernel\n"); | ||
468 | return; | ||
469 | } | ||
470 | } | ||
471 | |||
472 | crashk_res.start = crash_base; | ||
473 | crashk_res.end = crash_base + crash_size - 1; | ||
474 | } | ||
475 | |||
476 | static void __init request_crashkernel(struct resource *res) | ||
477 | { | ||
478 | int ret; | ||
479 | |||
480 | if (crashk_res.start == crashk_res.end) | ||
481 | return; | ||
482 | |||
483 | ret = request_resource(res, &crashk_res); | ||
484 | if (!ret) | ||
485 | pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n", | ||
486 | (unsigned long)(resource_size(&crashk_res) >> 20), | ||
487 | (unsigned long)(crashk_res.start >> 20)); | ||
488 | } | ||
489 | #else /* !defined(CONFIG_KEXEC) */ | ||
490 | static void __init mips_parse_crashkernel(void) | ||
491 | { | ||
492 | } | ||
493 | |||
494 | static void __init request_crashkernel(struct resource *res) | ||
495 | { | ||
496 | } | ||
497 | #endif /* !defined(CONFIG_KEXEC) */ | ||
498 | |||
499 | static void __init check_kernel_sections_mem(void) | ||
500 | { | ||
501 | phys_addr_t start = __pa_symbol(&_text); | ||
502 | phys_addr_t size = __pa_symbol(&_end) - start; | ||
503 | |||
504 | if (!memblock_is_region_memory(start, size)) { | ||
505 | pr_info("Kernel sections are not in the memory maps\n"); | ||
506 | memblock_add(start, size); | ||
507 | } | ||
508 | } | ||
509 | |||
510 | static void __init bootcmdline_append(const char *s, size_t max) | ||
511 | { | ||
512 | if (!s[0] || !max) | ||
513 | return; | ||
514 | |||
515 | if (boot_command_line[0]) | ||
516 | strlcat(boot_command_line, " ", COMMAND_LINE_SIZE); | ||
517 | |||
518 | strlcat(boot_command_line, s, max); | ||
519 | } | ||
520 | |||
521 | #ifdef CONFIG_OF_EARLY_FLATTREE | ||
522 | |||
523 | static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname, | ||
524 | int depth, void *data) | ||
525 | { | ||
526 | bool *dt_bootargs = data; | ||
527 | const char *p; | ||
528 | int l; | ||
529 | |||
530 | if (depth != 1 || !data || | ||
531 | (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0)) | ||
532 | return 0; | ||
533 | |||
534 | p = of_get_flat_dt_prop(node, "bootargs", &l); | ||
535 | if (p != NULL && l > 0) { | ||
536 | bootcmdline_append(p, min(l, COMMAND_LINE_SIZE)); | ||
537 | *dt_bootargs = true; | ||
538 | } | ||
539 | |||
540 | return 1; | ||
541 | } | ||
542 | |||
543 | #endif /* CONFIG_OF_EARLY_FLATTREE */ | ||
544 | |||
545 | static void __init bootcmdline_init(void) | ||
546 | { | ||
547 | bool dt_bootargs = false; | ||
548 | |||
549 | /* | ||
550 | * If CMDLINE_OVERRIDE is enabled then initializing the command line is | ||
551 | * trivial - we simply use the built-in command line unconditionally & | ||
552 | * unmodified. | ||
553 | */ | ||
554 | if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) { | ||
555 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); | ||
556 | return; | ||
557 | } | ||
558 | |||
559 | /* | ||
560 | * If the user specified a built-in command line & | ||
561 | * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is | ||
562 | * prepended to arguments from the bootloader or DT so we'll copy them | ||
563 | * to the start of boot_command_line here. Otherwise, empty | ||
564 | * boot_command_line to undo anything early_init_dt_scan_chosen() did. | ||
565 | */ | ||
566 | if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)) | ||
567 | strlcpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE); | ||
568 | else | ||
569 | boot_command_line[0] = 0; | ||
570 | |||
571 | #ifdef CONFIG_OF_EARLY_FLATTREE | ||
572 | /* | ||
573 | * If we're configured to take boot arguments from DT, look for those | ||
574 | * now. | ||
575 | */ | ||
576 | if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) || | ||
577 | IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)) | ||
578 | of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs); | ||
579 | #endif | ||
580 | |||
581 | /* | ||
582 | * If we didn't get any arguments from DT (regardless of whether that's | ||
583 | * because we weren't configured to look for them, or because we looked | ||
584 | * & found none) then we'll take arguments from the bootloader. | ||
585 | * plat_mem_setup() should have filled arcs_cmdline with arguments from | ||
586 | * the bootloader. | ||
587 | */ | ||
588 | if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs) | ||
589 | bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE); | ||
590 | |||
591 | /* | ||
592 | * If the user specified a built-in command line & we didn't already | ||
593 | * prepend it, we append it to boot_command_line here. | ||
594 | */ | ||
595 | if (IS_ENABLED(CONFIG_CMDLINE_BOOL) && | ||
596 | !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)) | ||
597 | bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE); | ||
598 | } | ||
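A worked example with illustrative values: take CONFIG_MIPS_CMDLINE_FROM_DTB set, a DT /chosen bootargs of "console=ttyS0,115200", a bootloader-supplied arcs_cmdline of "mem=64M", and CONFIG_CMDLINE="debug" with CMDLINE_BOOL but neither EXTEND option. boot_command_line is emptied, the DT arguments are appended, arcs_cmdline is skipped because DT supplied arguments, and the built-in line is appended last, giving "console=ttyS0,115200 debug".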
599 | |||
600 | /* | ||
601 | * arch_mem_init - initialize memory management subsystem | ||
602 | * | ||
603 | * o plat_mem_setup() detects the memory configuration and will record detected | ||
604 | * memory areas using memblock_add. | ||
605 | * | ||
606 | * At this stage the memory configuration of the system is known to the | ||
607 | * kernel, but the generic memory management system is still entirely uninitialized. | ||
608 | * | ||
609 | * o bootmem_init() | ||
610 | * o sparse_init() | ||
611 | * o paging_init() | ||
612 | * o dma_contiguous_reserve() | ||
613 | * | ||
614 | * At this stage the bootmem allocator is ready to use. | ||
615 | * | ||
616 | * NOTE: historically plat_mem_setup did the entire platform initialization. | ||
617 | * This was rather impractical because it meant plat_mem_setup had to | ||
618 | * get by without any kind of memory allocator. To keep old code from | ||
619 | * breaking, plat_setup was simply renamed to plat_mem_setup and a second platform | ||
620 | * initialization hook for anything else was introduced. | ||
621 | */ | ||
622 | static void __init arch_mem_init(char **cmdline_p) | ||
623 | { | ||
624 | /* call board setup routine */ | ||
625 | plat_mem_setup(); | ||
626 | memblock_set_bottom_up(true); | ||
627 | |||
628 | bootcmdline_init(); | ||
629 | strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE); | ||
630 | *cmdline_p = command_line; | ||
631 | |||
632 | parse_early_param(); | ||
633 | |||
634 | if (usermem) | ||
635 | pr_info("User-defined physical RAM map overwrite\n"); | ||
636 | |||
637 | check_kernel_sections_mem(); | ||
638 | |||
639 | early_init_fdt_reserve_self(); | ||
640 | early_init_fdt_scan_reserved_mem(); | ||
641 | |||
642 | #ifndef CONFIG_NUMA | ||
643 | memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0); | ||
644 | #endif | ||
645 | bootmem_init(); | ||
646 | |||
647 | /* | ||
648 | * Prevent memblock from allocating high memory. | ||
649 | * This cannot be done before max_low_pfn is detected, so up | ||
650 | * to this point it is only possible to reserve physical memory | ||
651 | * with memblock_reserve; memblock_alloc* can be used | ||
652 | * only after this point. | ||
653 | */ | ||
654 | memblock_set_current_limit(PFN_PHYS(max_low_pfn)); | ||
655 | |||
656 | #ifdef CONFIG_PROC_VMCORE | ||
657 | if (setup_elfcorehdr && setup_elfcorehdr_size) { | ||
658 | printk(KERN_INFO "kdump reserved memory at 0x%lx, size 0x%lx\n", | ||
659 | setup_elfcorehdr, setup_elfcorehdr_size); | ||
660 | memblock_reserve(setup_elfcorehdr, setup_elfcorehdr_size); | ||
661 | } | ||
662 | #endif | ||
663 | |||
664 | mips_parse_crashkernel(); | ||
665 | #ifdef CONFIG_KEXEC | ||
666 | if (crashk_res.start != crashk_res.end) | ||
667 | memblock_reserve(crashk_res.start, resource_size(&crashk_res)); | ||
668 | #endif | ||
669 | device_tree_init(); | ||
670 | |||
671 | /* | ||
672 | * To reduce the possibility of a kernel panic when we fail to | ||
673 | * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate | ||
674 | * as little low memory as possible before plat_swiotlb_setup(), so | ||
675 | * make sparse_init() use top-down allocation. | ||
676 | */ | ||
677 | memblock_set_bottom_up(false); | ||
678 | sparse_init(); | ||
679 | memblock_set_bottom_up(true); | ||
680 | |||
681 | plat_swiotlb_setup(); | ||
682 | |||
683 | dma_contiguous_reserve(PFN_PHYS(max_low_pfn)); | ||
684 | |||
685 | /* Reserve for hibernation. */ | ||
686 | memblock_reserve(__pa_symbol(&__nosave_begin), | ||
687 | __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin)); | ||
688 | |||
689 | fdt_init_reserved_mem(); | ||
690 | |||
691 | memblock_dump_all(); | ||
692 | |||
693 | early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn)); | ||
694 | } | ||
695 | |||
696 | static void __init resource_init(void) | ||
697 | { | ||
698 | phys_addr_t start, end; | ||
699 | u64 i; | ||
700 | |||
701 | if (UNCAC_BASE != IO_BASE) | ||
702 | return; | ||
703 | |||
704 | code_resource.start = __pa_symbol(&_text); | ||
705 | code_resource.end = __pa_symbol(&_etext) - 1; | ||
706 | data_resource.start = __pa_symbol(&_etext); | ||
707 | data_resource.end = __pa_symbol(&_edata) - 1; | ||
708 | bss_resource.start = __pa_symbol(&__bss_start); | ||
709 | bss_resource.end = __pa_symbol(&__bss_stop) - 1; | ||
710 | |||
711 | for_each_mem_range(i, &start, &end) { | ||
712 | struct resource *res; | ||
713 | |||
714 | res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES); | ||
715 | if (!res) | ||
716 | panic("%s: Failed to allocate %zu bytes\n", __func__, | ||
717 | sizeof(struct resource)); | ||
718 | |||
719 | res->start = start; | ||
720 | /* | ||
721 | * In memblock, end points to the first byte after the | ||
722 | * range while in resources, end points to the last byte in | ||
723 | * the range. | ||
724 | */ | ||
725 | res->end = end - 1; | ||
726 | res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY; | ||
727 | res->name = "System RAM"; | ||
728 | |||
729 | request_resource(&iomem_resource, res); | ||
730 | |||
731 | /* | ||
732 | * We don't know which RAM region contains kernel data, | ||
733 | * so we try it repeatedly and let the resource manager | ||
734 | * test it. | ||
735 | */ | ||
736 | request_resource(res, &code_resource); | ||
737 | request_resource(res, &data_resource); | ||
738 | request_resource(res, &bss_resource); | ||
739 | request_crashkernel(res); | ||
740 | } | ||
741 | } | ||
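The net effect is what shows up in /proc/iomem; a plausible, purely illustrative excerpt for a board with a single RAM range:

    00000000-0fffffff : System RAM
      00100000-0089ffff : Kernel code
      008a0000-00b3ffff : Kernel data
      00c00000-00c7ffff : Kernel bss

The nested entries come from the repeated request_resource() calls above: each kernel section is offered to every RAM resource and sticks to whichever one actually contains it.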
742 | |||
743 | #ifdef CONFIG_SMP | ||
744 | static void __init prefill_possible_map(void) | ||
745 | { | ||
746 | int i, possible = num_possible_cpus(); | ||
747 | |||
748 | if (possible > nr_cpu_ids) | ||
749 | possible = nr_cpu_ids; | ||
750 | |||
751 | for (i = 0; i < possible; i++) | ||
752 | set_cpu_possible(i, true); | ||
753 | for (; i < NR_CPUS; i++) | ||
754 | set_cpu_possible(i, false); | ||
755 | |||
756 | nr_cpu_ids = possible; | ||
757 | } | ||
758 | #else | ||
759 | static inline void prefill_possible_map(void) {} | ||
760 | #endif | ||
761 | |||
762 | void __init setup_arch(char **cmdline_p) | ||
763 | { | ||
764 | cpu_probe(); | ||
765 | mips_cm_probe(); | ||
766 | prom_init(); | ||
767 | |||
768 | setup_early_fdc_console(); | ||
769 | #ifdef CONFIG_EARLY_PRINTK | ||
770 | setup_early_printk(); | ||
771 | #endif | ||
772 | cpu_report(); | ||
773 | check_bugs_early(); | ||
774 | |||
775 | #if defined(CONFIG_VT) | ||
776 | #if defined(CONFIG_VGA_CONSOLE) | ||
777 | conswitchp = &vga_con; | ||
778 | #endif | ||
779 | #endif | ||
780 | |||
781 | arch_mem_init(cmdline_p); | ||
782 | dmi_setup(); | ||
783 | |||
784 | resource_init(); | ||
785 | plat_smp_setup(); | ||
786 | prefill_possible_map(); | ||
787 | |||
788 | cpu_cache_init(); | ||
789 | paging_init(); | ||
790 | } | ||
791 | |||
792 | unsigned long kernelsp[NR_CPUS]; | ||
793 | unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3; | ||
794 | |||
795 | #ifdef CONFIG_USE_OF | ||
796 | unsigned long fw_passed_dtb; | ||
797 | #endif | ||
798 | |||
799 | #ifdef CONFIG_DEBUG_FS | ||
800 | struct dentry *mips_debugfs_dir; | ||
801 | static int __init debugfs_mips(void) | ||
802 | { | ||
803 | mips_debugfs_dir = debugfs_create_dir("mips", NULL); | ||
804 | return 0; | ||
805 | } | ||
806 | arch_initcall(debugfs_mips); | ||
807 | #endif | ||
808 | |||
809 | #ifdef CONFIG_DMA_MAYBE_COHERENT | ||
810 | /* User defined DMA coherency from command line. */ | ||
811 | enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT; | ||
812 | EXPORT_SYMBOL_GPL(coherentio); | ||
813 | int hw_coherentio; /* Actual hardware supported DMA coherency setting. */ | ||
814 | |||
815 | static int __init setcoherentio(char *str) | ||
816 | { | ||
817 | coherentio = IO_COHERENCE_ENABLED; | ||
818 | pr_info("Hardware DMA cache coherency (command line)\n"); | ||
819 | return 0; | ||
820 | } | ||
821 | early_param("coherentio", setcoherentio); | ||
822 | |||
823 | static int __init setnocoherentio(char *str) | ||
824 | { | ||
825 | coherentio = IO_COHERENCE_DISABLED; | ||
826 | pr_info("Software DMA cache coherency (command line)\n"); | ||
827 | return 0; | ||
828 | } | ||
829 | early_param("nocoherentio", setnocoherentio); | ||
830 | #endif | ||
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h new file mode 100644 index 000000000..f50d48435 --- /dev/null +++ b/arch/mips/kernel/signal-common.h | |||
@@ -0,0 +1,43 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
7 | * Copyright (C) 1994 - 2000 Ralf Baechle | ||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
9 | */ | ||
10 | |||
11 | #ifndef __SIGNAL_COMMON_H | ||
12 | #define __SIGNAL_COMMON_H | ||
13 | |||
14 | /* #define DEBUG_SIG */ | ||
15 | |||
16 | #ifdef DEBUG_SIG | ||
17 | # define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args) | ||
18 | #else | ||
19 | # define DEBUGP(fmt, args...) | ||
20 | #endif | ||
21 | |||
22 | /* | ||
23 | * Determine which stack to use. | ||
24 | */ | ||
25 | extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, | ||
26 | size_t frame_size); | ||
27 | /* Check and clear pending FPU exceptions in saved CSR */ | ||
28 | extern int fpcsr_pending(unsigned int __user *fpcsr); | ||
29 | |||
30 | /* Make sure we will not lose FPU ownership */ | ||
31 | #define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); }) | ||
32 | #define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); }) | ||
33 | |||
34 | /* Assembly functions to move context to/from the FPU */ | ||
35 | extern asmlinkage int | ||
36 | _save_fp_context(void __user *fpregs, void __user *csr); | ||
37 | extern asmlinkage int | ||
38 | _restore_fp_context(void __user *fpregs, void __user *csr); | ||
39 | |||
40 | extern asmlinkage int _save_msa_all_upper(void __user *buf); | ||
41 | extern asmlinkage int _restore_msa_all_upper(void __user *buf); | ||
42 | |||
43 | #endif /* __SIGNAL_COMMON_H */ | ||
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c new file mode 100644 index 000000000..f1e985109 --- /dev/null +++ b/arch/mips/kernel/signal.c | |||
@@ -0,0 +1,962 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
7 | * Copyright (C) 1994 - 2000 Ralf Baechle | ||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
9 | * Copyright (C) 2014, Imagination Technologies Ltd. | ||
10 | */ | ||
11 | #include <linux/cache.h> | ||
12 | #include <linux/context_tracking.h> | ||
13 | #include <linux/irqflags.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/personality.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/kernel.h> | ||
19 | #include <linux/signal.h> | ||
20 | #include <linux/errno.h> | ||
21 | #include <linux/wait.h> | ||
22 | #include <linux/ptrace.h> | ||
23 | #include <linux/unistd.h> | ||
24 | #include <linux/uprobes.h> | ||
25 | #include <linux/compiler.h> | ||
26 | #include <linux/syscalls.h> | ||
27 | #include <linux/uaccess.h> | ||
28 | #include <linux/tracehook.h> | ||
29 | |||
30 | #include <asm/abi.h> | ||
31 | #include <asm/asm.h> | ||
32 | #include <linux/bitops.h> | ||
33 | #include <asm/cacheflush.h> | ||
34 | #include <asm/fpu.h> | ||
35 | #include <asm/sim.h> | ||
36 | #include <asm/ucontext.h> | ||
37 | #include <asm/cpu-features.h> | ||
38 | #include <asm/war.h> | ||
39 | #include <asm/dsp.h> | ||
40 | #include <asm/inst.h> | ||
41 | #include <asm/msa.h> | ||
42 | |||
43 | #include "signal-common.h" | ||
44 | |||
45 | static int (*save_fp_context)(void __user *sc); | ||
46 | static int (*restore_fp_context)(void __user *sc); | ||
47 | |||
48 | struct sigframe { | ||
49 | u32 sf_ass[4]; /* argument save space for o32 */ | ||
50 | u32 sf_pad[2]; /* Was: signal trampoline */ | ||
51 | |||
52 | /* Matches struct ucontext from its uc_mcontext field onwards */ | ||
53 | struct sigcontext sf_sc; | ||
54 | sigset_t sf_mask; | ||
55 | unsigned long long sf_extcontext[]; | ||
56 | }; | ||
57 | |||
58 | struct rt_sigframe { | ||
59 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
60 | u32 rs_pad[2]; /* Was: signal trampoline */ | ||
61 | struct siginfo rs_info; | ||
62 | struct ucontext rs_uc; | ||
63 | }; | ||
64 | |||
65 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
66 | |||
67 | /* | ||
68 | * Copy the thread's saved context to/from a signal context presumed to be | ||
69 | * on the user stack, and therefore accessed with the appropriate uaccess.h macros. | ||
70 | */ | ||
71 | static int copy_fp_to_sigcontext(void __user *sc) | ||
72 | { | ||
73 | struct mips_abi *abi = current->thread.abi; | ||
74 | uint64_t __user *fpregs = sc + abi->off_sc_fpregs; | ||
75 | uint32_t __user *csr = sc + abi->off_sc_fpc_csr; | ||
76 | int i; | ||
77 | int err = 0; | ||
78 | int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1; | ||
79 | |||
80 | for (i = 0; i < NUM_FPU_REGS; i += inc) { | ||
81 | err |= | ||
82 | __put_user(get_fpr64(¤t->thread.fpu.fpr[i], 0), | ||
83 | &fpregs[i]); | ||
84 | } | ||
85 | err |= __put_user(current->thread.fpu.fcr31, csr); | ||
86 | |||
87 | return err; | ||
88 | } | ||
89 | |||
90 | static int copy_fp_from_sigcontext(void __user *sc) | ||
91 | { | ||
92 | struct mips_abi *abi = current->thread.abi; | ||
93 | uint64_t __user *fpregs = sc + abi->off_sc_fpregs; | ||
94 | uint32_t __user *csr = sc + abi->off_sc_fpc_csr; | ||
95 | int i; | ||
96 | int err = 0; | ||
97 | int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1; | ||
98 | u64 fpr_val; | ||
99 | |||
100 | for (i = 0; i < NUM_FPU_REGS; i += inc) { | ||
101 | err |= __get_user(fpr_val, &fpregs[i]); | ||
102 | set_fpr64(¤t->thread.fpu.fpr[i], 0, fpr_val); | ||
103 | } | ||
104 | err |= __get_user(current->thread.fpu.fcr31, csr); | ||
105 | |||
106 | return err; | ||
107 | } | ||
108 | |||
109 | #else /* !CONFIG_MIPS_FP_SUPPORT */ | ||
110 | |||
111 | static int copy_fp_to_sigcontext(void __user *sc) | ||
112 | { | ||
113 | return 0; | ||
114 | } | ||
115 | |||
116 | static int copy_fp_from_sigcontext(void __user *sc) | ||
117 | { | ||
118 | return 0; | ||
119 | } | ||
120 | |||
121 | #endif /* !CONFIG_MIPS_FP_SUPPORT */ | ||
122 | |||
123 | /* | ||
124 | * Wrappers for the assembly _{save,restore}_fp_context functions. | ||
125 | */ | ||
126 | static int save_hw_fp_context(void __user *sc) | ||
127 | { | ||
128 | struct mips_abi *abi = current->thread.abi; | ||
129 | uint64_t __user *fpregs = sc + abi->off_sc_fpregs; | ||
130 | uint32_t __user *csr = sc + abi->off_sc_fpc_csr; | ||
131 | |||
132 | return _save_fp_context(fpregs, csr); | ||
133 | } | ||
134 | |||
135 | static int restore_hw_fp_context(void __user *sc) | ||
136 | { | ||
137 | struct mips_abi *abi = current->thread.abi; | ||
138 | uint64_t __user *fpregs = sc + abi->off_sc_fpregs; | ||
139 | uint32_t __user *csr = sc + abi->off_sc_fpc_csr; | ||
140 | |||
141 | return _restore_fp_context(fpregs, csr); | ||
142 | } | ||
143 | |||
144 | /* | ||
145 | * Extended context handling. | ||
146 | */ | ||
147 | |||
148 | static inline void __user *sc_to_extcontext(void __user *sc) | ||
149 | { | ||
150 | struct ucontext __user *uc; | ||
151 | |||
152 | /* | ||
153 | * We can just pretend the sigcontext is always embedded in a struct | ||
154 | * ucontext here, because the offset from sigcontext to extended | ||
155 | * context is the same in the struct sigframe case. | ||
156 | */ | ||
157 | uc = container_of(sc, struct ucontext, uc_mcontext); | ||
158 | return &uc->uc_extcontext; | ||
159 | } | ||
160 | |||
161 | #ifdef CONFIG_CPU_HAS_MSA | ||
162 | |||
163 | static int save_msa_extcontext(void __user *buf) | ||
164 | { | ||
165 | struct msa_extcontext __user *msa = buf; | ||
166 | uint64_t val; | ||
167 | int i, err; | ||
168 | |||
169 | if (!thread_msa_context_live()) | ||
170 | return 0; | ||
171 | |||
172 | /* | ||
173 | * Ensure that we can't lose the live MSA context between checking | ||
174 | * for it & writing it to memory. | ||
175 | */ | ||
176 | preempt_disable(); | ||
177 | |||
178 | if (is_msa_enabled()) { | ||
179 | /* | ||
180 | * There are no EVA versions of the vector register load/store | ||
181 | * instructions, so MSA context has to be saved to kernel memory | ||
182 | * and then copied to user memory. The save to kernel memory | ||
183 | * should already have been done when handling scalar FP | ||
184 | * context. | ||
185 | */ | ||
186 | BUG_ON(IS_ENABLED(CONFIG_EVA)); | ||
187 | |||
188 | err = __put_user(read_msa_csr(), &msa->csr); | ||
189 | err |= _save_msa_all_upper(&msa->wr); | ||
190 | |||
191 | preempt_enable(); | ||
192 | } else { | ||
193 | preempt_enable(); | ||
194 | |||
195 | err = __put_user(current->thread.fpu.msacsr, &msa->csr); | ||
196 | |||
197 | for (i = 0; i < NUM_FPU_REGS; i++) { | ||
198 | val = get_fpr64(¤t->thread.fpu.fpr[i], 1); | ||
199 | err |= __put_user(val, &msa->wr[i]); | ||
200 | } | ||
201 | } | ||
202 | |||
203 | err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic); | ||
204 | err |= __put_user(sizeof(*msa), &msa->ext.size); | ||
205 | |||
206 | return err ? -EFAULT : sizeof(*msa); | ||
207 | } | ||
208 | |||
209 | static int restore_msa_extcontext(void __user *buf, unsigned int size) | ||
210 | { | ||
211 | struct msa_extcontext __user *msa = buf; | ||
212 | unsigned long long val; | ||
213 | unsigned int csr; | ||
214 | int i, err; | ||
215 | |||
216 | if (size != sizeof(*msa)) | ||
217 | return -EINVAL; | ||
218 | |||
219 | err = get_user(csr, &msa->csr); | ||
220 | if (err) | ||
221 | return err; | ||
222 | |||
223 | preempt_disable(); | ||
224 | |||
225 | if (is_msa_enabled()) { | ||
226 | /* | ||
227 | * There are no EVA versions of the vector register load/store | ||
228 | * instructions, so MSA context has to be copied to kernel | ||
229 | * memory and later loaded to registers. The same is true of | ||
230 | * scalar FP context, so FPU & MSA should have already been | ||
231 | * disabled whilst handling scalar FP context. | ||
232 | */ | ||
233 | BUG_ON(IS_ENABLED(CONFIG_EVA)); | ||
234 | |||
235 | write_msa_csr(csr); | ||
236 | err |= _restore_msa_all_upper(&msa->wr); | ||
237 | preempt_enable(); | ||
238 | } else { | ||
239 | preempt_enable(); | ||
240 | |||
241 | current->thread.fpu.msacsr = csr; | ||
242 | |||
243 | for (i = 0; i < NUM_FPU_REGS; i++) { | ||
244 | err |= __get_user(val, &msa->wr[i]); | ||
245 | set_fpr64(¤t->thread.fpu.fpr[i], 1, val); | ||
246 | } | ||
247 | } | ||
248 | |||
249 | return err; | ||
250 | } | ||
251 | |||
252 | #else /* !CONFIG_CPU_HAS_MSA */ | ||
253 | |||
254 | static int save_msa_extcontext(void __user *buf) | ||
255 | { | ||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static int restore_msa_extcontext(void __user *buf, unsigned int size) | ||
260 | { | ||
261 | return SIGSYS; | ||
262 | } | ||
263 | |||
264 | #endif /* !CONFIG_CPU_HAS_MSA */ | ||
265 | |||
266 | static int save_extcontext(void __user *buf) | ||
267 | { | ||
268 | int sz; | ||
269 | |||
270 | sz = save_msa_extcontext(buf); | ||
271 | if (sz < 0) | ||
272 | return sz; | ||
273 | buf += sz; | ||
274 | |||
275 | /* If no context was saved then trivially return */ | ||
276 | if (!sz) | ||
277 | return 0; | ||
278 | |||
279 | /* Write the end marker */ | ||
280 | if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf)) | ||
281 | return -EFAULT; | ||
282 | |||
283 | sz += sizeof(((struct extcontext *)NULL)->magic); | ||
284 | return sz; | ||
285 | } | ||
286 | |||
287 | static int restore_extcontext(void __user *buf) | ||
288 | { | ||
289 | struct extcontext ext; | ||
290 | int err; | ||
291 | |||
292 | while (1) { | ||
293 | err = __get_user(ext.magic, (unsigned int *)buf); | ||
294 | if (err) | ||
295 | return err; | ||
296 | |||
297 | if (ext.magic == END_EXTCONTEXT_MAGIC) | ||
298 | return 0; | ||
299 | |||
300 | err = __get_user(ext.size, (unsigned int *)(buf | ||
301 | + offsetof(struct extcontext, size))); | ||
302 | if (err) | ||
303 | return err; | ||
304 | |||
305 | switch (ext.magic) { | ||
306 | case MSA_EXTCONTEXT_MAGIC: | ||
307 | err = restore_msa_extcontext(buf, ext.size); | ||
308 | break; | ||
309 | |||
310 | default: | ||
311 | err = -EINVAL; | ||
312 | break; | ||
313 | } | ||
314 | |||
315 | if (err) | ||
316 | return err; | ||
317 | |||
318 | buf += ext.size; | ||
319 | } | ||
320 | } | ||
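The extended context area is a simple sequence of {magic, size} records terminated by an end marker, which is what the loop above walks. A userspace sketch of the same walk (the magic values mimic the kernel's "xMSA"/"xEND" constants but are stated here as assumptions):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    #define MSA_MAGIC 0x784d5341u   /* "xMSA" -- assumed value */
    #define END_MAGIC 0x78454e44u   /* "xEND" -- assumed value */

    struct ext_hdr { uint32_t magic, size; };   /* size includes header */

    static void walk(const uint8_t *buf)
    {
        struct ext_hdr h;

        for (;;) {
            memcpy(&h, buf, sizeof(h));
            if (h.magic == END_MAGIC)
                return;
            printf("record %#x, %u bytes\n", h.magic, h.size);
            buf += h.size;      /* advance over the whole record */
        }
    }

    int main(void)
    {
        uint8_t buf[64] = { 0 };
        struct ext_hdr msa = { MSA_MAGIC, 16 }, end = { END_MAGIC, 0 };

        memcpy(buf, &msa, sizeof(msa));         /* one 16-byte record */
        memcpy(buf + 16, &end, sizeof(end));    /* terminator */
        walk(buf);
        return 0;
    }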
321 | |||
322 | /* | ||
323 | * Helper routines | ||
324 | */ | ||
325 | int protected_save_fp_context(void __user *sc) | ||
326 | { | ||
327 | struct mips_abi *abi = current->thread.abi; | ||
328 | uint64_t __user *fpregs = sc + abi->off_sc_fpregs; | ||
329 | uint32_t __user *csr = sc + abi->off_sc_fpc_csr; | ||
330 | uint32_t __user *used_math = sc + abi->off_sc_used_math; | ||
331 | unsigned int used, ext_sz; | ||
332 | int err; | ||
333 | |||
334 | used = used_math() ? USED_FP : 0; | ||
335 | if (!used) | ||
336 | goto fp_done; | ||
337 | |||
338 | if (!test_thread_flag(TIF_32BIT_FPREGS)) | ||
339 | used |= USED_FR1; | ||
340 | if (test_thread_flag(TIF_HYBRID_FPREGS)) | ||
341 | used |= USED_HYBRID_FPRS; | ||
342 | |||
343 | /* | ||
344 | * EVA does not have userland equivalents of ldc1 or sdc1, so | ||
345 | * save to the kernel FP context & copy that to userland below. | ||
346 | */ | ||
347 | if (IS_ENABLED(CONFIG_EVA)) | ||
348 | lose_fpu(1); | ||
349 | |||
350 | while (1) { | ||
351 | lock_fpu_owner(); | ||
352 | if (is_fpu_owner()) { | ||
353 | err = save_fp_context(sc); | ||
354 | unlock_fpu_owner(); | ||
355 | } else { | ||
356 | unlock_fpu_owner(); | ||
357 | err = copy_fp_to_sigcontext(sc); | ||
358 | } | ||
359 | if (likely(!err)) | ||
360 | break; | ||
361 | /* touch the sigcontext and try again */ | ||
362 | err = __put_user(0, &fpregs[0]) | | ||
363 | __put_user(0, &fpregs[31]) | | ||
364 | __put_user(0, csr); | ||
365 | if (err) | ||
366 | return err; /* really bad sigcontext */ | ||
367 | } | ||
368 | |||
369 | fp_done: | ||
370 | ext_sz = err = save_extcontext(sc_to_extcontext(sc)); | ||
371 | if (err < 0) | ||
372 | return err; | ||
373 | used |= ext_sz ? USED_EXTCONTEXT : 0; | ||
374 | |||
375 | return __put_user(used, used_math); | ||
376 | } | ||
377 | |||
378 | int protected_restore_fp_context(void __user *sc) | ||
379 | { | ||
380 | struct mips_abi *abi = current->thread.abi; | ||
381 | uint64_t __user *fpregs = sc + abi->off_sc_fpregs; | ||
382 | uint32_t __user *csr = sc + abi->off_sc_fpc_csr; | ||
383 | uint32_t __user *used_math = sc + abi->off_sc_used_math; | ||
384 | unsigned int used; | ||
385 | int err, sig = 0, tmp __maybe_unused; | ||
386 | |||
387 | err = __get_user(used, used_math); | ||
388 | conditional_used_math(used & USED_FP); | ||
389 | |||
390 | /* | ||
391 | * The signal handler may have used the FPU; give it up if the program | ||
392 | * doesn't want it following sigreturn. | ||
393 | */ | ||
394 | if (err || !(used & USED_FP)) | ||
395 | lose_fpu(0); | ||
396 | if (err) | ||
397 | return err; | ||
398 | if (!(used & USED_FP)) | ||
399 | goto fp_done; | ||
400 | |||
401 | err = sig = fpcsr_pending(csr); | ||
402 | if (err < 0) | ||
403 | return err; | ||
404 | |||
405 | /* | ||
406 | * EVA does not have userland equivalents of ldc1 or sdc1, so we | ||
407 | * disable the FPU here such that the code below simply copies to | ||
408 | * the kernel FP context. | ||
409 | */ | ||
410 | if (IS_ENABLED(CONFIG_EVA)) | ||
411 | lose_fpu(0); | ||
412 | |||
413 | while (1) { | ||
414 | lock_fpu_owner(); | ||
415 | if (is_fpu_owner()) { | ||
416 | err = restore_fp_context(sc); | ||
417 | unlock_fpu_owner(); | ||
418 | } else { | ||
419 | unlock_fpu_owner(); | ||
420 | err = copy_fp_from_sigcontext(sc); | ||
421 | } | ||
422 | if (likely(!err)) | ||
423 | break; | ||
424 | /* touch the sigcontext and try again */ | ||
425 | err = __get_user(tmp, &fpregs[0]) | | ||
426 | __get_user(tmp, &fpregs[31]) | | ||
427 | __get_user(tmp, csr); | ||
428 | if (err) | ||
429 | break; /* really bad sigcontext */ | ||
430 | } | ||
431 | |||
432 | fp_done: | ||
433 | if (!err && (used & USED_EXTCONTEXT)) | ||
434 | err = restore_extcontext(sc_to_extcontext(sc)); | ||
435 | |||
436 | return err ?: sig; | ||
437 | } | ||
438 | |||
439 | int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | ||
440 | { | ||
441 | int err = 0; | ||
442 | int i; | ||
443 | |||
444 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | ||
445 | |||
446 | err |= __put_user(0, &sc->sc_regs[0]); | ||
447 | for (i = 1; i < 32; i++) | ||
448 | err |= __put_user(regs->regs[i], &sc->sc_regs[i]); | ||
449 | |||
450 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | ||
451 | err |= __put_user(regs->acx, &sc->sc_acx); | ||
452 | #endif | ||
453 | err |= __put_user(regs->hi, &sc->sc_mdhi); | ||
454 | err |= __put_user(regs->lo, &sc->sc_mdlo); | ||
455 | if (cpu_has_dsp) { | ||
456 | err |= __put_user(mfhi1(), &sc->sc_hi1); | ||
457 | err |= __put_user(mflo1(), &sc->sc_lo1); | ||
458 | err |= __put_user(mfhi2(), &sc->sc_hi2); | ||
459 | err |= __put_user(mflo2(), &sc->sc_lo2); | ||
460 | err |= __put_user(mfhi3(), &sc->sc_hi3); | ||
461 | err |= __put_user(mflo3(), &sc->sc_lo3); | ||
462 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | ||
463 | } | ||
464 | |||
465 | |||
466 | /* | ||
467 | * Save FPU state to signal context. Signal handler | ||
468 | * will "inherit" current FPU state. | ||
469 | */ | ||
470 | err |= protected_save_fp_context(sc); | ||
471 | |||
472 | return err; | ||
473 | } | ||
474 | |||
475 | static size_t extcontext_max_size(void) | ||
476 | { | ||
477 | size_t sz = 0; | ||
478 | |||
479 | /* | ||
480 | * The assumption here is that between this point & the point at which | ||
481 | * the extended context is saved the size of the context should only | ||
482 | * ever be able to shrink (if the task is preempted), but never grow. | ||
483 | * That is, what this function returns is an upper bound on the size of | ||
484 | * the extended context for the current task at the current time. | ||
485 | */ | ||
486 | |||
487 | if (thread_msa_context_live()) | ||
488 | sz += sizeof(struct msa_extcontext); | ||
489 | |||
490 | /* If any context is saved then we'll append the end marker */ | ||
491 | if (sz) | ||
492 | sz += sizeof(((struct extcontext *)NULL)->magic); | ||
493 | |||
494 | return sz; | ||
495 | } | ||
496 | |||
497 | int fpcsr_pending(unsigned int __user *fpcsr) | ||
498 | { | ||
499 | int err, sig = 0; | ||
500 | unsigned int csr, enabled; | ||
501 | |||
502 | err = __get_user(csr, fpcsr); | ||
503 | enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5); | ||
504 | /* | ||
505 | * If the signal handler set some FPU exception cause bits, | ||
506 | * clear them and send SIGFPE. | ||
507 | */ | ||
508 | if (csr & enabled) { | ||
509 | csr &= ~enabled; | ||
510 | err |= __put_user(csr, fpcsr); | ||
511 | sig = SIGFPE; | ||
512 | } | ||
513 | return err ?: sig; | ||
514 | } | ||
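The mask arithmetic relies on the MIPS FCSR layout: the enable bits (7-11) sit five bits below their matching cause bits (12-16), so `(csr & FPU_CSR_ALL_E) << 5` lines the enables up with the causes, and FPU_CSR_UNI_X (bit 17) is the unimplemented-operation cause, which has no enable bit and always traps. A worked standalone example using divide-by-zero:

    #include <stdio.h>

    /* MIPS FCSR fields, per the architecture manual */
    #define FPU_CSR_UNI_X (1u << 17)    /* cause E: unimplemented op */
    #define FPU_CSR_ALL_E (0x1fu << 7)  /* enables: V Z O U I */

    int main(void)
    {
        /* The handler left divide-by-zero enabled (bit 10) and its
         * cause bit (bit 15) pending in the saved CSR. */
        unsigned int csr = (1u << 10) | (1u << 15);
        unsigned int enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);

        if (csr & enabled) {
            csr &= ~enabled;    /* clear the pending cause bits */
            printf("SIGFPE; csr now %#x\n", csr);   /* enable kept */
        }
        return 0;
    }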
515 | |||
516 | int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc) | ||
517 | { | ||
518 | unsigned long treg; | ||
519 | int err = 0; | ||
520 | int i; | ||
521 | |||
522 | /* Always make any pending restarted system calls return -EINTR */ | ||
523 | current->restart_block.fn = do_no_restart_syscall; | ||
524 | |||
525 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); | ||
526 | |||
527 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | ||
528 | err |= __get_user(regs->acx, &sc->sc_acx); | ||
529 | #endif | ||
530 | err |= __get_user(regs->hi, &sc->sc_mdhi); | ||
531 | err |= __get_user(regs->lo, &sc->sc_mdlo); | ||
532 | if (cpu_has_dsp) { | ||
533 | err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); | ||
534 | err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); | ||
535 | err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); | ||
536 | err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); | ||
537 | err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); | ||
538 | err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); | ||
539 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
540 | } | ||
541 | |||
542 | for (i = 1; i < 32; i++) | ||
543 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); | ||
544 | |||
545 | return err ?: protected_restore_fp_context(sc); | ||
546 | } | ||
547 | |||
548 | #ifdef CONFIG_WAR_ICACHE_REFILLS | ||
549 | #define SIGMASK ~(cpu_icache_line_size()-1) | ||
550 | #else | ||
551 | #define SIGMASK ALMASK | ||
552 | #endif | ||
553 | |||
554 | void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs, | ||
555 | size_t frame_size) | ||
556 | { | ||
557 | unsigned long sp; | ||
558 | |||
559 | /* Leave space for potential extended context */ | ||
560 | frame_size += extcontext_max_size(); | ||
561 | |||
562 | /* Default to using normal stack */ | ||
563 | sp = regs->regs[29]; | ||
564 | |||
565 | /* | ||
566 | * The FPU emulator may have its own trampoline active just | ||
567 | * above the user stack, 16 bytes before the next lowest | ||
568 | * 16-byte boundary. Try to avoid trashing it. | ||
569 | */ | ||
570 | sp -= 32; | ||
571 | |||
572 | sp = sigsp(sp, ksig); | ||
573 | |||
574 | return (void __user *)((sp - frame_size) & SIGMASK); | ||
575 | } | ||
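The closing expression rounds the frame down to the required alignment (ALMASK from the ABI, or a whole icache line when the refill workaround is in effect). A worked sketch with illustrative numbers, assuming 16-byte stack alignment:

    #include <stdio.h>

    int main(void)
    {
        unsigned long sp = 0x7fff6b58UL;    /* user $29, illustrative */
        unsigned long frame_size = 0x260;   /* sigframe + extcontext */
        unsigned long mask = ~0xfUL;        /* assumed 16-byte ALMASK */
        unsigned long frame;

        sp -= 32;   /* step over a possible FPU-emulator trampoline */
        frame = (sp - frame_size) & mask;

        printf("frame at %#lx (16-byte aligned, below sp)\n", frame);
        return 0;
    }

which prints 0x7fff68d0.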
576 | |||
577 | /* | ||
578 | * Atomically swap in the new signal mask, and wait for a signal. | ||
579 | */ | ||
580 | |||
581 | #ifdef CONFIG_TRAD_SIGNALS | ||
582 | SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset) | ||
583 | { | ||
584 | return sys_rt_sigsuspend(uset, sizeof(sigset_t)); | ||
585 | } | ||
586 | #endif | ||
587 | |||
588 | #ifdef CONFIG_TRAD_SIGNALS | ||
589 | SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act, | ||
590 | struct sigaction __user *, oact) | ||
591 | { | ||
592 | struct k_sigaction new_ka, old_ka; | ||
593 | int ret; | ||
594 | int err = 0; | ||
595 | |||
596 | if (act) { | ||
597 | old_sigset_t mask; | ||
598 | |||
599 | if (!access_ok(act, sizeof(*act))) | ||
600 | return -EFAULT; | ||
601 | err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler); | ||
602 | err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
603 | err |= __get_user(mask, &act->sa_mask.sig[0]); | ||
604 | if (err) | ||
605 | return -EFAULT; | ||
606 | |||
607 | siginitset(&new_ka.sa.sa_mask, mask); | ||
608 | } | ||
609 | |||
610 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
611 | |||
612 | if (!ret && oact) { | ||
613 | if (!access_ok(oact, sizeof(*oact))) | ||
614 | return -EFAULT; | ||
615 | err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
616 | err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler); | ||
617 | err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); | ||
618 | err |= __put_user(0, &oact->sa_mask.sig[1]); | ||
619 | err |= __put_user(0, &oact->sa_mask.sig[2]); | ||
620 | err |= __put_user(0, &oact->sa_mask.sig[3]); | ||
621 | if (err) | ||
622 | return -EFAULT; | ||
623 | } | ||
624 | |||
625 | return ret; | ||
626 | } | ||
627 | #endif | ||
628 | |||
629 | #ifdef CONFIG_TRAD_SIGNALS | ||
630 | asmlinkage void sys_sigreturn(void) | ||
631 | { | ||
632 | struct sigframe __user *frame; | ||
633 | struct pt_regs *regs; | ||
634 | sigset_t blocked; | ||
635 | int sig; | ||
636 | |||
637 | regs = current_pt_regs(); | ||
638 | frame = (struct sigframe __user *)regs->regs[29]; | ||
639 | if (!access_ok(frame, sizeof(*frame))) | ||
640 | goto badframe; | ||
641 | if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked))) | ||
642 | goto badframe; | ||
643 | |||
644 | set_current_blocked(&blocked); | ||
645 | |||
646 | sig = restore_sigcontext(regs, &frame->sf_sc); | ||
647 | if (sig < 0) | ||
648 | goto badframe; | ||
649 | else if (sig) | ||
650 | force_sig(sig); | ||
651 | |||
652 | /* | ||
653 | * Don't let your children do this ... | ||
654 | */ | ||
655 | __asm__ __volatile__( | ||
656 | "move\t$29, %0\n\t" | ||
657 | "j\tsyscall_exit" | ||
658 | : /* no outputs */ | ||
659 | : "r" (regs)); | ||
660 | /* Unreached */ | ||
661 | |||
662 | badframe: | ||
663 | force_sig(SIGSEGV); | ||
664 | } | ||
665 | #endif /* CONFIG_TRAD_SIGNALS */ | ||
666 | |||
667 | asmlinkage void sys_rt_sigreturn(void) | ||
668 | { | ||
669 | struct rt_sigframe __user *frame; | ||
670 | struct pt_regs *regs; | ||
671 | sigset_t set; | ||
672 | int sig; | ||
673 | |||
674 | regs = current_pt_regs(); | ||
675 | frame = (struct rt_sigframe __user *)regs->regs[29]; | ||
676 | if (!access_ok(frame, sizeof(*frame))) | ||
677 | goto badframe; | ||
678 | if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set))) | ||
679 | goto badframe; | ||
680 | |||
681 | set_current_blocked(&set); | ||
682 | |||
683 | sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); | ||
684 | if (sig < 0) | ||
685 | goto badframe; | ||
686 | else if (sig) | ||
687 | force_sig(sig); | ||
688 | |||
689 | if (restore_altstack(&frame->rs_uc.uc_stack)) | ||
690 | goto badframe; | ||
691 | |||
692 | /* | ||
693 | * Don't let your children do this ... | ||
694 | */ | ||
695 | __asm__ __volatile__( | ||
696 | "move\t$29, %0\n\t" | ||
697 | "j\tsyscall_exit" | ||
698 | : /* no outputs */ | ||
699 | : "r" (regs)); | ||
700 | /* Unreached */ | ||
701 | |||
702 | badframe: | ||
703 | force_sig(SIGSEGV); | ||
704 | } | ||
705 | |||
706 | #ifdef CONFIG_TRAD_SIGNALS | ||
707 | static int setup_frame(void *sig_return, struct ksignal *ksig, | ||
708 | struct pt_regs *regs, sigset_t *set) | ||
709 | { | ||
710 | struct sigframe __user *frame; | ||
711 | int err = 0; | ||
712 | |||
713 | frame = get_sigframe(ksig, regs, sizeof(*frame)); | ||
714 | if (!access_ok(frame, sizeof (*frame))) | ||
715 | return -EFAULT; | ||
716 | |||
717 | err |= setup_sigcontext(regs, &frame->sf_sc); | ||
718 | err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set)); | ||
719 | if (err) | ||
720 | return -EFAULT; | ||
721 | |||
722 | /* | ||
723 | * Arguments to signal handler: | ||
724 | * | ||
725 | * a0 = signal number | ||
726 | * a1 = 0 (should be cause) | ||
727 | * a2 = pointer to struct sigcontext | ||
728 | * | ||
729 | * $25 and c0_epc point to the signal handler, $29 points to the | ||
730 | * struct sigframe. | ||
731 | */ | ||
732 | regs->regs[ 4] = ksig->sig; | ||
733 | regs->regs[ 5] = 0; | ||
734 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; | ||
735 | regs->regs[29] = (unsigned long) frame; | ||
736 | regs->regs[31] = (unsigned long) sig_return; | ||
737 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; | ||
738 | |||
739 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | ||
740 | current->comm, current->pid, | ||
741 | frame, regs->cp0_epc, regs->regs[31]); | ||
742 | return 0; | ||
743 | } | ||
744 | #endif | ||
745 | |||
746 | static int setup_rt_frame(void *sig_return, struct ksignal *ksig, | ||
747 | struct pt_regs *regs, sigset_t *set) | ||
748 | { | ||
749 | struct rt_sigframe __user *frame; | ||
750 | int err = 0; | ||
751 | |||
752 | frame = get_sigframe(ksig, regs, sizeof(*frame)); | ||
753 | if (!access_ok(frame, sizeof (*frame))) | ||
754 | return -EFAULT; | ||
755 | |||
756 | /* Create siginfo. */ | ||
757 | err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info); | ||
758 | |||
759 | /* Create the ucontext. */ | ||
760 | err |= __put_user(0, &frame->rs_uc.uc_flags); | ||
761 | err |= __put_user(NULL, &frame->rs_uc.uc_link); | ||
762 | err |= __save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]); | ||
763 | err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); | ||
764 | err |= __copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)); | ||
765 | |||
766 | if (err) | ||
767 | return -EFAULT; | ||
768 | |||
769 | /* | ||
770 | * Arguments to signal handler: | ||
771 | * | ||
772 | * a0 = signal number | ||
773 | * a1 = 0 (should be cause) | ||
774 | * a2 = pointer to ucontext | ||
775 | * | ||
776 | * $25 and c0_epc point to the signal handler, $29 points to | ||
777 | * the struct rt_sigframe. | ||
778 | */ | ||
779 | regs->regs[ 4] = ksig->sig; | ||
780 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | ||
781 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | ||
782 | regs->regs[29] = (unsigned long) frame; | ||
783 | regs->regs[31] = (unsigned long) sig_return; | ||
784 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; | ||
785 | |||
786 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | ||
787 | current->comm, current->pid, | ||
788 | frame, regs->cp0_epc, regs->regs[31]); | ||
789 | |||
790 | return 0; | ||
791 | } | ||
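Seen from userland, the three registers loaded above are exactly the arguments of a SA_SIGINFO handler; a minimal user program exercising this setup_rt_frame() path:

    #include <signal.h>
    #include <unistd.h>

    static void handler(int sig, siginfo_t *info, void *uc)
    {
        /* sig arrives in a0, info in a1, and uc (the ucontext whose
         * uc_mcontext matches rs_uc above) in a2. */
        (void)sig; (void)info; (void)uc;
        write(STDOUT_FILENO, "caught\n", 7);
    }

    int main(void)
    {
        struct sigaction sa = { 0 };

        sa.sa_sigaction = handler;
        sa.sa_flags = SA_SIGINFO;   /* selects the rt frame */
        sigaction(SIGUSR1, &sa, NULL);
        raise(SIGUSR1);
        return 0;
    }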
792 | |||
793 | struct mips_abi mips_abi = { | ||
794 | #ifdef CONFIG_TRAD_SIGNALS | ||
795 | .setup_frame = setup_frame, | ||
796 | #endif | ||
797 | .setup_rt_frame = setup_rt_frame, | ||
798 | .restart = __NR_restart_syscall, | ||
799 | |||
800 | .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs), | ||
801 | .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr), | ||
802 | .off_sc_used_math = offsetof(struct sigcontext, sc_used_math), | ||
803 | |||
804 | .vdso = &vdso_image, | ||
805 | }; | ||
806 | |||
807 | static void handle_signal(struct ksignal *ksig, struct pt_regs *regs) | ||
808 | { | ||
809 | sigset_t *oldset = sigmask_to_save(); | ||
810 | int ret; | ||
811 | struct mips_abi *abi = current->thread.abi; | ||
812 | void *vdso = current->mm->context.vdso; | ||
813 | |||
814 | /* | ||
815 | * If we were emulating a delay slot instruction, exit that frame such | ||
816 | * that addresses in the sigframe are as expected for userland and we | ||
817 | * don't have a problem if we reuse the thread's frame for an | ||
818 | * instruction within the signal handler. | ||
819 | */ | ||
820 | dsemul_thread_rollback(regs); | ||
821 | |||
822 | if (regs->regs[0]) { | ||
823 | switch(regs->regs[2]) { | ||
824 | case ERESTART_RESTARTBLOCK: | ||
825 | case ERESTARTNOHAND: | ||
826 | regs->regs[2] = EINTR; | ||
827 | break; | ||
828 | case ERESTARTSYS: | ||
829 | if (!(ksig->ka.sa.sa_flags & SA_RESTART)) { | ||
830 | regs->regs[2] = EINTR; | ||
831 | break; | ||
832 | } | ||
833 | fallthrough; | ||
834 | case ERESTARTNOINTR: | ||
835 | regs->regs[7] = regs->regs[26]; | ||
836 | regs->regs[2] = regs->regs[0]; | ||
837 | regs->cp0_epc -= 4; | ||
838 | } | ||
839 | |||
840 | regs->regs[0] = 0; /* Don't deal with this again. */ | ||
841 | } | ||
842 | |||
843 | rseq_signal_deliver(ksig, regs); | ||
844 | |||
845 | if (sig_uses_siginfo(&ksig->ka, abi)) | ||
846 | ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn, | ||
847 | ksig, regs, oldset); | ||
848 | else | ||
849 | ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn, | ||
850 | ksig, regs, oldset); | ||
851 | |||
852 | signal_setup_done(ret, ksig, 0); | ||
853 | } | ||
854 | |||
855 | static void do_signal(struct pt_regs *regs) | ||
856 | { | ||
857 | struct ksignal ksig; | ||
858 | |||
859 | if (get_signal(&ksig)) { | ||
860 | /* Whee! Actually deliver the signal. */ | ||
861 | handle_signal(&ksig, regs); | ||
862 | return; | ||
863 | } | ||
864 | |||
865 | if (regs->regs[0]) { | ||
866 | switch (regs->regs[2]) { | ||
867 | case ERESTARTNOHAND: | ||
868 | case ERESTARTSYS: | ||
869 | case ERESTARTNOINTR: | ||
870 | regs->regs[2] = regs->regs[0]; | ||
871 | regs->regs[7] = regs->regs[26]; | ||
872 | regs->cp0_epc -= 4; | ||
873 | break; | ||
874 | |||
875 | case ERESTART_RESTARTBLOCK: | ||
876 | regs->regs[2] = current->thread.abi->restart; | ||
877 | regs->regs[7] = regs->regs[26]; | ||
878 | regs->cp0_epc -= 4; | ||
879 | break; | ||
880 | } | ||
881 | regs->regs[0] = 0; /* Don't deal with this again. */ | ||
882 | } | ||
883 | |||
884 | /* | ||
885 | * If there's no signal to deliver, we just put the saved sigmask | ||
886 | * back | ||
887 | */ | ||
888 | restore_saved_sigmask(); | ||
889 | } | ||
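Concretely: if a read() is interrupted by a signal whose handler was installed without SA_RESTART, regs[2] (v0) still holds ERESTARTSYS on the way out of the kernel, and handle_signal() rewrites it to EINTR so userland sees the call fail. If no handler is pending, the code above instead reloads v0 with the original syscall number saved in regs[0], restores the a3 syscall-error flag from its saved copy in regs[26], and steps cp0_epc back four bytes so the syscall instruction itself re-executes.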
890 | |||
891 | /* | ||
892 | * notification of userspace execution resumption | ||
893 | * - triggered by the TIF_WORK_MASK flags | ||
894 | */ | ||
895 | asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused, | ||
896 | __u32 thread_info_flags) | ||
897 | { | ||
898 | local_irq_enable(); | ||
899 | |||
900 | user_exit(); | ||
901 | |||
902 | if (thread_info_flags & _TIF_UPROBE) | ||
903 | uprobe_notify_resume(regs); | ||
904 | |||
905 | /* deal with pending signal delivery */ | ||
906 | if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) | ||
907 | do_signal(regs); | ||
908 | |||
909 | if (thread_info_flags & _TIF_NOTIFY_RESUME) { | ||
910 | tracehook_notify_resume(regs); | ||
911 | rseq_handle_notify_resume(NULL, regs); | ||
912 | } | ||
913 | |||
914 | user_enter(); | ||
915 | } | ||
916 | |||
917 | #if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT) | ||
918 | static int smp_save_fp_context(void __user *sc) | ||
919 | { | ||
920 | return raw_cpu_has_fpu | ||
921 | ? save_hw_fp_context(sc) | ||
922 | : copy_fp_to_sigcontext(sc); | ||
923 | } | ||
924 | |||
925 | static int smp_restore_fp_context(void __user *sc) | ||
926 | { | ||
927 | return raw_cpu_has_fpu | ||
928 | ? restore_hw_fp_context(sc) | ||
929 | : copy_fp_from_sigcontext(sc); | ||
930 | } | ||
931 | #endif | ||
932 | |||
933 | static int signal_setup(void) | ||
934 | { | ||
935 | /* | ||
936 | * The offset from sigcontext to extended context should be the same | ||
937 | * regardless of the type of signal, such that userland can always know | ||
938 | * where to look if it wishes to find the extended context structures. | ||
939 | */ | ||
940 | BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) - | ||
941 | offsetof(struct sigframe, sf_sc)) != | ||
942 | (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) - | ||
943 | offsetof(struct rt_sigframe, rs_uc.uc_mcontext))); | ||
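/*
 * Standalone sketch of the same compile-time technique, with hypothetical
 * structs: BUILD_BUG_ON() is essentially a kernel-flavoured _Static_assert,
 * turning the layout invariant into a build failure if it is ever broken.
 */
#include <stddef.h>

struct frame_a { int hdr; long ctx; long ext; };		/* hypothetical */
struct frame_b { int hdr; int pad; long ctx; long ext; };	/* hypothetical */

_Static_assert(offsetof(struct frame_a, ext) - offsetof(struct frame_a, ctx) ==
	       offsetof(struct frame_b, ext) - offsetof(struct frame_b, ctx),
	       "extended context must follow the context at the same offset");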
944 | |||
945 | #if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT) | ||
946 | /* For now just do the cpu_has_fpu check when the functions are invoked */ | ||
947 | save_fp_context = smp_save_fp_context; | ||
948 | restore_fp_context = smp_restore_fp_context; | ||
949 | #else | ||
950 | if (cpu_has_fpu) { | ||
951 | save_fp_context = save_hw_fp_context; | ||
952 | restore_fp_context = restore_hw_fp_context; | ||
953 | } else { | ||
954 | save_fp_context = copy_fp_to_sigcontext; | ||
955 | restore_fp_context = copy_fp_from_sigcontext; | ||
956 | } | ||
957 | #endif /* CONFIG_SMP */ | ||
958 | |||
959 | return 0; | ||
960 | } | ||
961 | |||
962 | arch_initcall(signal_setup); | ||
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c new file mode 100644 index 000000000..59b896543 --- /dev/null +++ b/arch/mips/kernel/signal32.c | |||
@@ -0,0 +1,78 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
7 | * Copyright (C) 1994 - 2000, 2006 Ralf Baechle | ||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
9 | * Copyright (C) 2016, Imagination Technologies Ltd. | ||
10 | */ | ||
11 | #include <linux/compat.h> | ||
12 | #include <linux/compiler.h> | ||
13 | #include <linux/errno.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/signal.h> | ||
16 | #include <linux/syscalls.h> | ||
17 | |||
18 | #include <asm/compat-signal.h> | ||
19 | #include <linux/uaccess.h> | ||
20 | #include <asm/unistd.h> | ||
21 | |||
22 | #include "signal-common.h" | ||
23 | |||
24 | /* 32-bit compatibility types */ | ||
25 | |||
26 | typedef unsigned int __sighandler32_t; | ||
27 | typedef void (*vfptr_t)(void); | ||
28 | |||
29 | /* | ||
30 | * Atomically swap in the new signal mask, and wait for a signal. | ||
31 | */ | ||
32 | |||
33 | asmlinkage int sys32_sigsuspend(compat_sigset_t __user *uset) | ||
34 | { | ||
35 | return compat_sys_rt_sigsuspend(uset, sizeof(compat_sigset_t)); | ||
36 | } | ||
37 | |||
38 | SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *, act, | ||
39 | struct compat_sigaction __user *, oact) | ||
40 | { | ||
41 | struct k_sigaction new_ka, old_ka; | ||
42 | int ret; | ||
43 | int err = 0; | ||
44 | |||
45 | if (act) { | ||
46 | old_sigset_t mask; | ||
47 | s32 handler; | ||
48 | |||
49 | if (!access_ok(act, sizeof(*act))) | ||
50 | return -EFAULT; | ||
51 | err |= __get_user(handler, &act->sa_handler); | ||
52 | new_ka.sa.sa_handler = (void __user *)(s64)handler; | ||
53 | err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags); | ||
54 | err |= __get_user(mask, &act->sa_mask.sig[0]); | ||
55 | if (err) | ||
56 | return -EFAULT; | ||
57 | |||
58 | siginitset(&new_ka.sa.sa_mask, mask); | ||
59 | } | ||
60 | |||
61 | ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL); | ||
62 | |||
63 | if (!ret && oact) { | ||
64 | if (!access_ok(oact, sizeof(*oact))) | ||
65 | return -EFAULT; | ||
66 | err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags); | ||
67 | err |= __put_user((u32)(u64)old_ka.sa.sa_handler, | ||
68 | &oact->sa_handler); | ||
69 | err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig); | ||
70 | err |= __put_user(0, &oact->sa_mask.sig[1]); | ||
71 | err |= __put_user(0, &oact->sa_mask.sig[2]); | ||
72 | err |= __put_user(0, &oact->sa_mask.sig[3]); | ||
73 | if (err) | ||
74 | return -EFAULT; | ||
75 | } | ||
76 | |||
77 | return ret; | ||
78 | } | ||
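/*
 * Illustrative sketch (not part of this file): the (s64)handler cast in
 * sys_32_sigaction above relies on plain C sign extension, so a 32-bit
 * compat handler address round-trips to the equivalent 64-bit user
 * pointer. The addresses below are hypothetical.
 */
#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	int32_t handler32 = 0x1fb4a12c;	/* typical low compat text address */
	printf("0x%" PRIx64 "\n", (uint64_t)(int64_t)handler32);
	/* -> 0x1fb4a12c: values with bit 31 clear are unchanged */

	handler32 = (int32_t)0x80001000u;
	printf("0x%" PRIx64 "\n", (uint64_t)(int64_t)handler32);
	/* -> 0xffffffff80001000: bit 31 set sign-extends into the upper half */
	return 0;
}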
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c new file mode 100644 index 000000000..7bd00fad6 --- /dev/null +++ b/arch/mips/kernel/signal_n32.c | |||
@@ -0,0 +1,149 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2003 Broadcom Corporation | ||
4 | */ | ||
5 | #include <linux/cache.h> | ||
6 | #include <linux/sched.h> | ||
7 | #include <linux/mm.h> | ||
8 | #include <linux/smp.h> | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/signal.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/wait.h> | ||
13 | #include <linux/ptrace.h> | ||
14 | #include <linux/unistd.h> | ||
15 | #include <linux/compat.h> | ||
16 | #include <linux/bitops.h> | ||
17 | |||
18 | #include <asm/abi.h> | ||
19 | #include <asm/asm.h> | ||
20 | #include <asm/cacheflush.h> | ||
21 | #include <asm/compat-signal.h> | ||
22 | #include <asm/sim.h> | ||
23 | #include <linux/uaccess.h> | ||
24 | #include <asm/ucontext.h> | ||
25 | #include <asm/fpu.h> | ||
26 | #include <asm/cpu-features.h> | ||
27 | #include <asm/war.h> | ||
28 | |||
29 | #include "signal-common.h" | ||
30 | |||
31 | /* | ||
32 | * Including <asm/unistd.h> would give us the 64-bit syscall numbers ... | ||
33 | */ | ||
34 | #define __NR_N32_restart_syscall 6214 | ||
35 | |||
36 | extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *); | ||
37 | extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *); | ||
38 | |||
39 | struct ucontextn32 { | ||
40 | u32 uc_flags; | ||
41 | s32 uc_link; | ||
42 | compat_stack_t uc_stack; | ||
43 | struct sigcontext uc_mcontext; | ||
44 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | ||
45 | }; | ||
46 | |||
47 | struct rt_sigframe_n32 { | ||
48 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
49 | u32 rs_pad[2]; /* Was: signal trampoline */ | ||
50 | struct compat_siginfo rs_info; | ||
51 | struct ucontextn32 rs_uc; | ||
52 | }; | ||
53 | |||
54 | asmlinkage void sysn32_rt_sigreturn(void) | ||
55 | { | ||
56 | struct rt_sigframe_n32 __user *frame; | ||
57 | struct pt_regs *regs; | ||
58 | sigset_t set; | ||
59 | int sig; | ||
60 | |||
61 | regs = current_pt_regs(); | ||
62 | frame = (struct rt_sigframe_n32 __user *)regs->regs[29]; | ||
63 | if (!access_ok(frame, sizeof(*frame))) | ||
64 | goto badframe; | ||
65 | if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) | ||
66 | goto badframe; | ||
67 | |||
68 | set_current_blocked(&set); | ||
69 | |||
70 | sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext); | ||
71 | if (sig < 0) | ||
72 | goto badframe; | ||
73 | else if (sig) | ||
74 | force_sig(sig); | ||
75 | |||
76 | if (compat_restore_altstack(&frame->rs_uc.uc_stack)) | ||
77 | goto badframe; | ||
78 | |||
79 | /* | ||
80 | * Don't let your children do this ... | ||
81 | */ | ||
82 | __asm__ __volatile__( | ||
83 | "move\t$29, %0\n\t" | ||
84 | "j\tsyscall_exit" | ||
85 | : /* no outputs */ | ||
86 | : "r" (regs)); | ||
87 | /* Unreached */ | ||
88 | |||
89 | badframe: | ||
90 | force_sig(SIGSEGV); | ||
91 | } | ||
92 | |||
93 | static int setup_rt_frame_n32(void *sig_return, struct ksignal *ksig, | ||
94 | struct pt_regs *regs, sigset_t *set) | ||
95 | { | ||
96 | struct rt_sigframe_n32 __user *frame; | ||
97 | int err = 0; | ||
98 | |||
99 | frame = get_sigframe(ksig, regs, sizeof(*frame)); | ||
100 | if (!access_ok(frame, sizeof (*frame))) | ||
101 | return -EFAULT; | ||
102 | |||
103 | /* Create siginfo. */ | ||
104 | err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); | ||
105 | |||
106 | /* Create the ucontext. */ | ||
107 | err |= __put_user(0, &frame->rs_uc.uc_flags); | ||
108 | err |= __put_user(0, &frame->rs_uc.uc_link); | ||
109 | err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]); | ||
110 | err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext); | ||
111 | err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); | ||
112 | |||
113 | if (err) | ||
114 | return -EFAULT; | ||
115 | |||
116 | /* | ||
117 | * Arguments to signal handler: | ||
118 | * | ||
119 | * a0 = signal number | ||
120 | * a1 = 0 (should be cause) | ||
121 | * a2 = pointer to ucontext | ||
122 | * | ||
123 | * $25 and c0_epc point to the signal handler, $29 points to | ||
124 | * the struct rt_sigframe. | ||
125 | */ | ||
126 | regs->regs[ 4] = ksig->sig; | ||
127 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | ||
128 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | ||
129 | regs->regs[29] = (unsigned long) frame; | ||
130 | regs->regs[31] = (unsigned long) sig_return; | ||
131 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; | ||
132 | |||
133 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | ||
134 | current->comm, current->pid, | ||
135 | frame, regs->cp0_epc, regs->regs[31]); | ||
136 | |||
137 | return 0; | ||
138 | } | ||
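/*
 * Userspace view of the register setup above (illustrative, not part of
 * this file): a SA_SIGINFO handler receives exactly the three values put
 * in a0/a1/a2 -- the signal number, a siginfo_t pointer (&rs_info) and a
 * ucontext pointer (&rs_uc).
 */
#include <signal.h>
#include <stdio.h>
#include <string.h>
#include <ucontext.h>

static void handler(int sig, siginfo_t *info, void *uc_void)
{
	ucontext_t *uc = uc_void;	/* a2: the ucontext on the signal frame */

	printf("sig=%d si_code=%d ss_sp=%p\n",
	       sig, info->si_code, uc->uc_stack.ss_sp);
}

int main(void)
{
	struct sigaction sa;

	memset(&sa, 0, sizeof(sa));
	sa.sa_sigaction = handler;
	sa.sa_flags = SA_SIGINFO;
	sigaction(SIGUSR1, &sa, NULL);
	raise(SIGUSR1);
	return 0;
}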
139 | |||
140 | struct mips_abi mips_abi_n32 = { | ||
141 | .setup_rt_frame = setup_rt_frame_n32, | ||
142 | .restart = __NR_N32_restart_syscall, | ||
143 | |||
144 | .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs), | ||
145 | .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr), | ||
146 | .off_sc_used_math = offsetof(struct sigcontext, sc_used_math), | ||
147 | |||
148 | .vdso = &vdso_image_n32, | ||
149 | }; | ||
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c new file mode 100644 index 000000000..299a7a28c --- /dev/null +++ b/arch/mips/kernel/signal_o32.c | |||
@@ -0,0 +1,290 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1991, 1992 Linus Torvalds | ||
7 | * Copyright (C) 1994 - 2000, 2006 Ralf Baechle | ||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
9 | * Copyright (C) 2016, Imagination Technologies Ltd. | ||
10 | */ | ||
11 | #include <linux/compiler.h> | ||
12 | #include <linux/errno.h> | ||
13 | #include <linux/signal.h> | ||
14 | #include <linux/sched/signal.h> | ||
15 | #include <linux/uaccess.h> | ||
16 | |||
17 | #include <asm/abi.h> | ||
18 | #include <asm/compat-signal.h> | ||
19 | #include <asm/dsp.h> | ||
20 | #include <asm/sim.h> | ||
21 | #include <asm/unistd.h> | ||
22 | |||
23 | #include "signal-common.h" | ||
24 | |||
25 | /* | ||
27 | * Including <asm/unistd.h> would give us the 64-bit syscall numbers ... | ||
27 | */ | ||
28 | #define __NR_O32_restart_syscall 4253 | ||
29 | |||
30 | struct sigframe32 { | ||
31 | u32 sf_ass[4]; /* argument save space for o32 */ | ||
32 | u32 sf_pad[2]; /* Was: signal trampoline */ | ||
33 | struct sigcontext32 sf_sc; | ||
34 | compat_sigset_t sf_mask; | ||
35 | }; | ||
36 | |||
37 | struct ucontext32 { | ||
38 | u32 uc_flags; | ||
39 | s32 uc_link; | ||
40 | compat_stack_t uc_stack; | ||
41 | struct sigcontext32 uc_mcontext; | ||
42 | compat_sigset_t uc_sigmask; /* mask last for extensibility */ | ||
43 | }; | ||
44 | |||
45 | struct rt_sigframe32 { | ||
46 | u32 rs_ass[4]; /* argument save space for o32 */ | ||
47 | u32 rs_pad[2]; /* Was: signal trampoline */ | ||
48 | compat_siginfo_t rs_info; | ||
49 | struct ucontext32 rs_uc; | ||
50 | }; | ||
51 | |||
52 | static int setup_sigcontext32(struct pt_regs *regs, | ||
53 | struct sigcontext32 __user *sc) | ||
54 | { | ||
55 | int err = 0; | ||
56 | int i; | ||
57 | |||
58 | err |= __put_user(regs->cp0_epc, &sc->sc_pc); | ||
59 | |||
60 | err |= __put_user(0, &sc->sc_regs[0]); | ||
61 | for (i = 1; i < 32; i++) | ||
62 | err |= __put_user(regs->regs[i], &sc->sc_regs[i]); | ||
63 | |||
64 | err |= __put_user(regs->hi, &sc->sc_mdhi); | ||
65 | err |= __put_user(regs->lo, &sc->sc_mdlo); | ||
66 | if (cpu_has_dsp) { | ||
67 | err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp); | ||
68 | err |= __put_user(mfhi1(), &sc->sc_hi1); | ||
69 | err |= __put_user(mflo1(), &sc->sc_lo1); | ||
70 | err |= __put_user(mfhi2(), &sc->sc_hi2); | ||
71 | err |= __put_user(mflo2(), &sc->sc_lo2); | ||
72 | err |= __put_user(mfhi3(), &sc->sc_hi3); | ||
73 | err |= __put_user(mflo3(), &sc->sc_lo3); | ||
74 | } | ||
75 | |||
76 | /* | ||
77 | * Save FPU state to signal context. Signal handler | ||
78 | * will "inherit" current FPU state. | ||
79 | */ | ||
80 | err |= protected_save_fp_context(sc); | ||
81 | |||
82 | return err; | ||
83 | } | ||
84 | |||
85 | static int restore_sigcontext32(struct pt_regs *regs, | ||
86 | struct sigcontext32 __user *sc) | ||
87 | { | ||
88 | int err = 0; | ||
89 | s32 treg; | ||
90 | int i; | ||
91 | |||
92 | /* Always make any pending restarted system calls return -EINTR */ | ||
93 | current->restart_block.fn = do_no_restart_syscall; | ||
94 | |||
95 | err |= __get_user(regs->cp0_epc, &sc->sc_pc); | ||
96 | err |= __get_user(regs->hi, &sc->sc_mdhi); | ||
97 | err |= __get_user(regs->lo, &sc->sc_mdlo); | ||
98 | if (cpu_has_dsp) { | ||
99 | err |= __get_user(treg, &sc->sc_hi1); mthi1(treg); | ||
100 | err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg); | ||
101 | err |= __get_user(treg, &sc->sc_hi2); mthi2(treg); | ||
102 | err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg); | ||
103 | err |= __get_user(treg, &sc->sc_hi3); mthi3(treg); | ||
104 | err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg); | ||
105 | err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK); | ||
106 | } | ||
107 | |||
108 | for (i = 1; i < 32; i++) | ||
109 | err |= __get_user(regs->regs[i], &sc->sc_regs[i]); | ||
110 | |||
111 | return err ?: protected_restore_fp_context(sc); | ||
112 | } | ||
113 | |||
114 | static int setup_frame_32(void *sig_return, struct ksignal *ksig, | ||
115 | struct pt_regs *regs, sigset_t *set) | ||
116 | { | ||
117 | struct sigframe32 __user *frame; | ||
118 | int err = 0; | ||
119 | |||
120 | frame = get_sigframe(ksig, regs, sizeof(*frame)); | ||
121 | if (!access_ok(frame, sizeof (*frame))) | ||
122 | return -EFAULT; | ||
123 | |||
124 | err |= setup_sigcontext32(regs, &frame->sf_sc); | ||
125 | err |= __copy_conv_sigset_to_user(&frame->sf_mask, set); | ||
126 | |||
127 | if (err) | ||
128 | return -EFAULT; | ||
129 | |||
130 | /* | ||
131 | * Arguments to signal handler: | ||
132 | * | ||
133 | * a0 = signal number | ||
134 | * a1 = 0 (should be cause) | ||
135 | * a2 = pointer to struct sigcontext | ||
136 | * | ||
137 | * $25 and c0_epc point to the signal handler, $29 points to the | ||
138 | * struct sigframe. | ||
139 | */ | ||
140 | regs->regs[ 4] = ksig->sig; | ||
141 | regs->regs[ 5] = 0; | ||
142 | regs->regs[ 6] = (unsigned long) &frame->sf_sc; | ||
143 | regs->regs[29] = (unsigned long) frame; | ||
144 | regs->regs[31] = (unsigned long) sig_return; | ||
145 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; | ||
146 | |||
147 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | ||
148 | current->comm, current->pid, | ||
149 | frame, regs->cp0_epc, regs->regs[31]); | ||
150 | |||
151 | return 0; | ||
152 | } | ||
153 | |||
154 | asmlinkage void sys32_rt_sigreturn(void) | ||
155 | { | ||
156 | struct rt_sigframe32 __user *frame; | ||
157 | struct pt_regs *regs; | ||
158 | sigset_t set; | ||
159 | int sig; | ||
160 | |||
161 | regs = current_pt_regs(); | ||
162 | frame = (struct rt_sigframe32 __user *)regs->regs[29]; | ||
163 | if (!access_ok(frame, sizeof(*frame))) | ||
164 | goto badframe; | ||
165 | if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask)) | ||
166 | goto badframe; | ||
167 | |||
168 | set_current_blocked(&set); | ||
169 | |||
170 | sig = restore_sigcontext32(regs, &frame->rs_uc.uc_mcontext); | ||
171 | if (sig < 0) | ||
172 | goto badframe; | ||
173 | else if (sig) | ||
174 | force_sig(sig); | ||
175 | |||
176 | if (compat_restore_altstack(&frame->rs_uc.uc_stack)) | ||
177 | goto badframe; | ||
178 | |||
179 | /* | ||
180 | * Don't let your children do this ... | ||
181 | */ | ||
182 | __asm__ __volatile__( | ||
183 | "move\t$29, %0\n\t" | ||
184 | "j\tsyscall_exit" | ||
185 | : /* no outputs */ | ||
186 | : "r" (regs)); | ||
187 | /* Unreached */ | ||
188 | |||
189 | badframe: | ||
190 | force_sig(SIGSEGV); | ||
191 | } | ||
192 | |||
193 | static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig, | ||
194 | struct pt_regs *regs, sigset_t *set) | ||
195 | { | ||
196 | struct rt_sigframe32 __user *frame; | ||
197 | int err = 0; | ||
198 | |||
199 | frame = get_sigframe(ksig, regs, sizeof(*frame)); | ||
200 | if (!access_ok(frame, sizeof (*frame))) | ||
201 | return -EFAULT; | ||
202 | |||
203 | /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */ | ||
204 | err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info); | ||
205 | |||
206 | /* Create the ucontext. */ | ||
207 | err |= __put_user(0, &frame->rs_uc.uc_flags); | ||
208 | err |= __put_user(0, &frame->rs_uc.uc_link); | ||
209 | err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]); | ||
210 | err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext); | ||
211 | err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set); | ||
212 | |||
213 | if (err) | ||
214 | return -EFAULT; | ||
215 | |||
216 | /* | ||
217 | * Arguments to signal handler: | ||
218 | * | ||
219 | * a0 = signal number | ||
220 | * a1 = 0 (should be cause) | ||
221 | * a2 = pointer to ucontext | ||
222 | * | ||
223 | * $25 and c0_epc point to the signal handler, $29 points to | ||
224 | * the struct rt_sigframe32. | ||
225 | */ | ||
226 | regs->regs[ 4] = ksig->sig; | ||
227 | regs->regs[ 5] = (unsigned long) &frame->rs_info; | ||
228 | regs->regs[ 6] = (unsigned long) &frame->rs_uc; | ||
229 | regs->regs[29] = (unsigned long) frame; | ||
230 | regs->regs[31] = (unsigned long) sig_return; | ||
231 | regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler; | ||
232 | |||
233 | DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n", | ||
234 | current->comm, current->pid, | ||
235 | frame, regs->cp0_epc, regs->regs[31]); | ||
236 | |||
237 | return 0; | ||
238 | } | ||
239 | |||
240 | /* | ||
241 | * o32 compatibility on 64-bit kernels, without DSP ASE | ||
242 | */ | ||
243 | struct mips_abi mips_abi_32 = { | ||
244 | .setup_frame = setup_frame_32, | ||
245 | .setup_rt_frame = setup_rt_frame_32, | ||
246 | .restart = __NR_O32_restart_syscall, | ||
247 | |||
248 | .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs), | ||
249 | .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr), | ||
250 | .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math), | ||
251 | |||
252 | .vdso = &vdso_image_o32, | ||
253 | }; | ||
254 | |||
255 | |||
256 | asmlinkage void sys32_sigreturn(void) | ||
257 | { | ||
258 | struct sigframe32 __user *frame; | ||
259 | struct pt_regs *regs; | ||
260 | sigset_t blocked; | ||
261 | int sig; | ||
262 | |||
263 | regs = current_pt_regs(); | ||
264 | frame = (struct sigframe32 __user *)regs->regs[29]; | ||
265 | if (!access_ok(frame, sizeof(*frame))) | ||
266 | goto badframe; | ||
267 | if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask)) | ||
268 | goto badframe; | ||
269 | |||
270 | set_current_blocked(&blocked); | ||
271 | |||
272 | sig = restore_sigcontext32(regs, &frame->sf_sc); | ||
273 | if (sig < 0) | ||
274 | goto badframe; | ||
275 | else if (sig) | ||
276 | force_sig(sig); | ||
277 | |||
278 | /* | ||
279 | * Don't let your children do this ... | ||
280 | */ | ||
281 | __asm__ __volatile__( | ||
282 | "move\t$29, %0\n\t" | ||
283 | "j\tsyscall_exit" | ||
284 | : /* no outputs */ | ||
285 | : "r" (regs)); | ||
286 | /* Unreached */ | ||
287 | |||
288 | badframe: | ||
289 | force_sig(SIGSEGV); | ||
290 | } | ||
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c new file mode 100644 index 000000000..1dbfb5aad --- /dev/null +++ b/arch/mips/kernel/smp-bmips.c | |||
@@ -0,0 +1,667 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com) | ||
7 | * | ||
8 | * SMP support for BMIPS | ||
9 | */ | ||
10 | |||
11 | #include <linux/init.h> | ||
12 | #include <linux/sched.h> | ||
13 | #include <linux/sched/hotplug.h> | ||
14 | #include <linux/sched/task_stack.h> | ||
15 | #include <linux/mm.h> | ||
16 | #include <linux/delay.h> | ||
17 | #include <linux/smp.h> | ||
18 | #include <linux/interrupt.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/cpu.h> | ||
21 | #include <linux/cpumask.h> | ||
22 | #include <linux/reboot.h> | ||
23 | #include <linux/io.h> | ||
24 | #include <linux/compiler.h> | ||
25 | #include <linux/linkage.h> | ||
26 | #include <linux/bug.h> | ||
27 | #include <linux/kernel.h> | ||
28 | #include <linux/kexec.h> | ||
29 | |||
30 | #include <asm/time.h> | ||
31 | #include <asm/processor.h> | ||
32 | #include <asm/bootinfo.h> | ||
33 | #include <asm/cacheflush.h> | ||
34 | #include <asm/tlbflush.h> | ||
35 | #include <asm/mipsregs.h> | ||
36 | #include <asm/bmips.h> | ||
37 | #include <asm/traps.h> | ||
38 | #include <asm/barrier.h> | ||
39 | #include <asm/cpu-features.h> | ||
40 | |||
41 | static int __maybe_unused max_cpus = 1; | ||
42 | |||
43 | /* these may be configured by the platform code */ | ||
44 | int bmips_smp_enabled = 1; | ||
45 | int bmips_cpu_offset; | ||
46 | cpumask_t bmips_booted_mask; | ||
47 | unsigned long bmips_tp1_irqs = IE_IRQ1; | ||
48 | |||
49 | #define RESET_FROM_KSEG0 0x80080800 | ||
50 | #define RESET_FROM_KSEG1 0xa0080800 | ||
51 | |||
52 | static void bmips_set_reset_vec(int cpu, u32 val); | ||
53 | |||
54 | #ifdef CONFIG_SMP | ||
55 | |||
56 | /* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */ | ||
57 | unsigned long bmips_smp_boot_sp; | ||
58 | unsigned long bmips_smp_boot_gp; | ||
59 | |||
60 | static void bmips43xx_send_ipi_single(int cpu, unsigned int action); | ||
61 | static void bmips5000_send_ipi_single(int cpu, unsigned int action); | ||
62 | static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id); | ||
63 | static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id); | ||
64 | |||
65 | /* SW interrupts 0,1 are used for interprocessor signaling */ | ||
66 | #define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0) | ||
67 | #define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1) | ||
68 | |||
69 | #define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift)) | ||
70 | #define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8)) | ||
71 | #define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8)) | ||
72 | #define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0)) | ||
73 | |||
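/*
 * Worked example of the encodings above (standalone sketch that mirrors
 * the macros, assuming bmips_cpu_offset == 0): the 0x2000/0x3000 prefix
 * selects clear vs. set, bit 9 the target thread, bit 8 the IPI number.
 */
#include <stdio.h>

#define EX_CPUNUM(cpu, shift)	 ((cpu) << (shift))
#define EX_ACTION_CLR(cpu, ipi)	 (0x2000 | EX_CPUNUM(cpu, 9) | ((ipi) << 8))
#define EX_ACTION_SET(cpu, ipi)	 (0x3000 | EX_CPUNUM(cpu, 9) | ((ipi) << 8))

int main(void)
{
	printf("SET_IPI(0,0) = 0x%04x\n", EX_ACTION_SET(0, 0));	/* 0x3000 */
	printf("SET_IPI(1,1) = 0x%04x\n", EX_ACTION_SET(1, 1));	/* 0x3300 */
	printf("CLR_IPI(1,0) = 0x%04x\n", EX_ACTION_CLR(1, 0));	/* 0x2200 */
	return 0;
}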
74 | static void __init bmips_smp_setup(void) | ||
75 | { | ||
76 | int i, cpu = 1, boot_cpu = 0; | ||
77 | int cpu_hw_intr; | ||
78 | |||
79 | switch (current_cpu_type()) { | ||
80 | case CPU_BMIPS4350: | ||
81 | case CPU_BMIPS4380: | ||
82 | /* arbitration priority */ | ||
83 | clear_c0_brcm_cmt_ctrl(0x30); | ||
84 | |||
85 | /* NBK and weak order flags */ | ||
86 | set_c0_brcm_config_0(0x30000); | ||
87 | |||
88 | /* Find out if we are running on TP0 or TP1 */ | ||
89 | boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31)); | ||
90 | |||
91 | /* | ||
92 | * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other | ||
93 | * thread | ||
94 | * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output | ||
95 | * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output | ||
96 | */ | ||
97 | if (boot_cpu == 0) | ||
98 | cpu_hw_intr = 0x02; | ||
99 | else | ||
100 | cpu_hw_intr = 0x1d; | ||
101 | |||
102 | change_c0_brcm_cmt_intr(0xf8018000, | ||
103 | (cpu_hw_intr << 27) | (0x03 << 15)); | ||
104 | |||
105 | /* single core, 2 threads (2 pipelines) */ | ||
106 | max_cpus = 2; | ||
107 | |||
108 | break; | ||
109 | case CPU_BMIPS5000: | ||
110 | /* enable raceless SW interrupts */ | ||
111 | set_c0_brcm_config(0x03 << 22); | ||
112 | |||
113 | /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */ | ||
114 | change_c0_brcm_mode(0x1f << 27, 0x02 << 27); | ||
115 | |||
116 | /* N cores, 2 threads per core */ | ||
117 | max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1; | ||
118 | |||
119 | /* clear any pending SW interrupts */ | ||
120 | for (i = 0; i < max_cpus; i++) { | ||
121 | write_c0_brcm_action(ACTION_CLR_IPI(i, 0)); | ||
122 | write_c0_brcm_action(ACTION_CLR_IPI(i, 1)); | ||
123 | } | ||
124 | |||
125 | break; | ||
126 | default: | ||
127 | max_cpus = 1; | ||
128 | } | ||
129 | |||
130 | if (!bmips_smp_enabled) | ||
131 | max_cpus = 1; | ||
132 | |||
133 | /* this can be overridden by the BSP */ | ||
134 | if (!board_ebase_setup) | ||
135 | board_ebase_setup = &bmips_ebase_setup; | ||
136 | |||
137 | __cpu_number_map[boot_cpu] = 0; | ||
138 | __cpu_logical_map[0] = boot_cpu; | ||
139 | |||
140 | for (i = 0; i < max_cpus; i++) { | ||
141 | if (i != boot_cpu) { | ||
142 | __cpu_number_map[i] = cpu; | ||
143 | __cpu_logical_map[cpu] = i; | ||
144 | cpu++; | ||
145 | } | ||
146 | set_cpu_possible(i, 1); | ||
147 | set_cpu_present(i, 1); | ||
148 | } | ||
149 | } | ||
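/*
 * Worked example of the mapping loop above (sketch, not part of this
 * file): logical CPU0 must always be the boot thread, so when the kernel
 * comes up on TP1 the physical/logical tables end up swapped.
 */
#include <stdio.h>

int main(void)
{
	int number_map[2], logical_map[2];
	int boot_cpu = 1, max_cpus = 2, cpu = 1;	/* booted on TP1 */

	number_map[boot_cpu] = 0;
	logical_map[0] = boot_cpu;
	for (int i = 0; i < max_cpus; i++)
		if (i != boot_cpu) {
			number_map[i] = cpu;
			logical_map[cpu] = i;
			cpu++;
		}
	/* prints: number_map = { 1, 0 }  logical_map = { 1, 0 } */
	printf("number_map = { %d, %d }  logical_map = { %d, %d }\n",
	       number_map[0], number_map[1], logical_map[0], logical_map[1]);
	return 0;
}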
150 | |||
151 | /* | ||
152 | * IPI IRQ setup - runs on CPU0 | ||
153 | */ | ||
154 | static void bmips_prepare_cpus(unsigned int max_cpus) | ||
155 | { | ||
156 | irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id); | ||
157 | |||
158 | switch (current_cpu_type()) { | ||
159 | case CPU_BMIPS4350: | ||
160 | case CPU_BMIPS4380: | ||
161 | bmips_ipi_interrupt = bmips43xx_ipi_interrupt; | ||
162 | break; | ||
163 | case CPU_BMIPS5000: | ||
164 | bmips_ipi_interrupt = bmips5000_ipi_interrupt; | ||
165 | break; | ||
166 | default: | ||
167 | return; | ||
168 | } | ||
169 | |||
170 | if (request_irq(IPI0_IRQ, bmips_ipi_interrupt, | ||
171 | IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL)) | ||
172 | panic("Can't request IPI0 interrupt"); | ||
173 | if (request_irq(IPI1_IRQ, bmips_ipi_interrupt, | ||
174 | IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL)) | ||
175 | panic("Can't request IPI1 interrupt"); | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Tell the hardware to boot CPUx - runs on CPU0 | ||
180 | */ | ||
181 | static int bmips_boot_secondary(int cpu, struct task_struct *idle) | ||
182 | { | ||
183 | bmips_smp_boot_sp = __KSTK_TOS(idle); | ||
184 | bmips_smp_boot_gp = (unsigned long)task_thread_info(idle); | ||
185 | mb(); | ||
186 | |||
187 | /* | ||
188 | * Initial boot sequence for secondary CPU: | ||
189 | * bmips_reset_nmi_vec @ a000_0000 -> | ||
190 | * bmips_smp_entry -> | ||
191 | * plat_wired_tlb_setup (cached function call; optional) -> | ||
192 | * start_secondary (cached jump) | ||
193 | * | ||
194 | * Warm restart sequence: | ||
195 | * play_dead WAIT loop -> | ||
196 | * bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC -> | ||
197 | * eret to play_dead -> | ||
198 | * bmips_secondary_reentry -> | ||
199 | * start_secondary | ||
200 | */ | ||
201 | |||
202 | pr_info("SMP: Booting CPU%d...\n", cpu); | ||
203 | |||
204 | if (cpumask_test_cpu(cpu, &bmips_booted_mask)) { | ||
205 | /* kseg1 might not exist if this CPU enabled XKS01 */ | ||
206 | bmips_set_reset_vec(cpu, RESET_FROM_KSEG0); | ||
207 | |||
208 | switch (current_cpu_type()) { | ||
209 | case CPU_BMIPS4350: | ||
210 | case CPU_BMIPS4380: | ||
211 | bmips43xx_send_ipi_single(cpu, 0); | ||
212 | break; | ||
213 | case CPU_BMIPS5000: | ||
214 | bmips5000_send_ipi_single(cpu, 0); | ||
215 | break; | ||
216 | } | ||
217 | } else { | ||
218 | bmips_set_reset_vec(cpu, RESET_FROM_KSEG1); | ||
219 | |||
220 | switch (current_cpu_type()) { | ||
221 | case CPU_BMIPS4350: | ||
222 | case CPU_BMIPS4380: | ||
223 | /* Reset slave TP1 if booting from TP0 */ | ||
224 | if (cpu_logical_map(cpu) == 1) | ||
225 | set_c0_brcm_cmt_ctrl(0x01); | ||
226 | break; | ||
227 | case CPU_BMIPS5000: | ||
228 | write_c0_brcm_action(ACTION_BOOT_THREAD(cpu)); | ||
229 | break; | ||
230 | } | ||
231 | cpumask_set_cpu(cpu, &bmips_booted_mask); | ||
232 | } | ||
233 | |||
234 | return 0; | ||
235 | } | ||
236 | |||
237 | /* | ||
238 | * Early setup - runs on secondary CPU after cache probe | ||
239 | */ | ||
240 | static void bmips_init_secondary(void) | ||
241 | { | ||
242 | bmips_cpu_setup(); | ||
243 | |||
244 | switch (current_cpu_type()) { | ||
245 | case CPU_BMIPS4350: | ||
246 | case CPU_BMIPS4380: | ||
247 | clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0); | ||
248 | break; | ||
249 | case CPU_BMIPS5000: | ||
250 | write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0)); | ||
251 | cpu_set_core(¤t_cpu_data, (read_c0_brcm_config() >> 25) & 3); | ||
252 | break; | ||
253 | } | ||
254 | } | ||
255 | |||
256 | /* | ||
257 | * Late setup - runs on secondary CPU before entering the idle loop | ||
258 | */ | ||
259 | static void bmips_smp_finish(void) | ||
260 | { | ||
261 | pr_info("SMP: CPU%d is running\n", smp_processor_id()); | ||
262 | |||
263 | /* make sure there won't be a timer interrupt for a little while */ | ||
264 | write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ); | ||
265 | |||
266 | irq_enable_hazard(); | ||
267 | set_c0_status(IE_SW0 | IE_SW1 | bmips_tp1_irqs | IE_IRQ5 | ST0_IE); | ||
268 | irq_enable_hazard(); | ||
269 | } | ||
270 | |||
271 | /* | ||
272 | * BMIPS5000 raceless IPIs | ||
273 | * | ||
274 | * Each CPU has two inbound SW IRQs which are independent of all other CPUs. | ||
275 | * IPI0 is used for SMP_RESCHEDULE_YOURSELF | ||
276 | * IPI1 is used for SMP_CALL_FUNCTION | ||
277 | */ | ||
278 | |||
279 | static void bmips5000_send_ipi_single(int cpu, unsigned int action) | ||
280 | { | ||
281 | write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION)); | ||
282 | } | ||
283 | |||
284 | static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id) | ||
285 | { | ||
286 | int action = irq - IPI0_IRQ; | ||
287 | |||
288 | write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action)); | ||
289 | |||
290 | if (action == 0) | ||
291 | scheduler_ipi(); | ||
292 | else | ||
293 | generic_smp_call_function_interrupt(); | ||
294 | |||
295 | return IRQ_HANDLED; | ||
296 | } | ||
297 | |||
298 | static void bmips5000_send_ipi_mask(const struct cpumask *mask, | ||
299 | unsigned int action) | ||
300 | { | ||
301 | unsigned int i; | ||
302 | |||
303 | for_each_cpu(i, mask) | ||
304 | bmips5000_send_ipi_single(i, action); | ||
305 | } | ||
306 | |||
307 | /* | ||
308 | * BMIPS43xx racy IPIs | ||
309 | * | ||
310 | * We use one inbound SW IRQ for each CPU. | ||
311 | * | ||
312 | * A spinlock must be held in order to keep CPUx from accidentally clearing | ||
313 | * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The | ||
314 | * same spinlock is used to protect the action masks. | ||
315 | */ | ||
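/*
 * Why the lock matters (illustrative sketch, not part of this file):
 * set_c0_cause()/clear_c0_cause() are read-modify-write sequences, and an
 * unsynchronised RMW can lose a concurrent update -- the classic lost
 * update, shown here on an ordinary word (compile with -pthread).
 */
#include <pthread.h>
#include <stdio.h>

static volatile unsigned long word;	/* stands in for CP0 CAUSE */

static void *rmw(void *arg)
{
	for (int i = 0; i < 1000000; i++)
		word = word + 1;	/* load / modify / store, not atomic */
	return arg;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, rmw, NULL);
	rmw(NULL);
	pthread_join(t, NULL);

	/*
	 * Typically prints less than 2000000: some updates were lost. A
	 * set_c0_cause() racing a clear_c0_cause() could drop a pending
	 * IPI bit the same way, hence ipi_lock above.
	 */
	printf("%lu\n", word);
	return 0;
}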
316 | |||
317 | static DEFINE_SPINLOCK(ipi_lock); | ||
318 | static DEFINE_PER_CPU(int, ipi_action_mask); | ||
319 | |||
320 | static void bmips43xx_send_ipi_single(int cpu, unsigned int action) | ||
321 | { | ||
322 | unsigned long flags; | ||
323 | |||
324 | spin_lock_irqsave(&ipi_lock, flags); | ||
325 | set_c0_cause(cpu ? C_SW1 : C_SW0); | ||
326 | per_cpu(ipi_action_mask, cpu) |= action; | ||
327 | irq_enable_hazard(); | ||
328 | spin_unlock_irqrestore(&ipi_lock, flags); | ||
329 | } | ||
330 | |||
331 | static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id) | ||
332 | { | ||
333 | unsigned long flags; | ||
334 | int action, cpu = irq - IPI0_IRQ; | ||
335 | |||
336 | spin_lock_irqsave(&ipi_lock, flags); | ||
337 | action = __this_cpu_read(ipi_action_mask); | ||
338 | per_cpu(ipi_action_mask, cpu) = 0; | ||
339 | clear_c0_cause(cpu ? C_SW1 : C_SW0); | ||
340 | spin_unlock_irqrestore(&ipi_lock, flags); | ||
341 | |||
342 | if (action & SMP_RESCHEDULE_YOURSELF) | ||
343 | scheduler_ipi(); | ||
344 | if (action & SMP_CALL_FUNCTION) | ||
345 | generic_smp_call_function_interrupt(); | ||
346 | |||
347 | return IRQ_HANDLED; | ||
348 | } | ||
349 | |||
350 | static void bmips43xx_send_ipi_mask(const struct cpumask *mask, | ||
351 | unsigned int action) | ||
352 | { | ||
353 | unsigned int i; | ||
354 | |||
355 | for_each_cpu(i, mask) | ||
356 | bmips43xx_send_ipi_single(i, action); | ||
357 | } | ||
358 | |||
359 | #ifdef CONFIG_HOTPLUG_CPU | ||
360 | |||
361 | static int bmips_cpu_disable(void) | ||
362 | { | ||
363 | unsigned int cpu = smp_processor_id(); | ||
364 | |||
365 | if (cpu == 0) | ||
366 | return -EBUSY; | ||
367 | |||
368 | pr_info("SMP: CPU%d is offline\n", cpu); | ||
369 | |||
370 | set_cpu_online(cpu, false); | ||
371 | calculate_cpu_foreign_map(); | ||
372 | irq_cpu_offline(); | ||
373 | clear_c0_status(IE_IRQ5); | ||
374 | |||
375 | local_flush_tlb_all(); | ||
376 | local_flush_icache_range(0, ~0); | ||
377 | |||
378 | return 0; | ||
379 | } | ||
380 | |||
381 | static void bmips_cpu_die(unsigned int cpu) | ||
382 | { | ||
383 | } | ||
384 | |||
385 | void __ref play_dead(void) | ||
386 | { | ||
387 | idle_task_exit(); | ||
388 | |||
389 | /* flush data cache */ | ||
390 | _dma_cache_wback_inv(0, ~0); | ||
391 | |||
392 | /* | ||
393 | * Wakeup is on SW0 or SW1; disable everything else | ||
394 | * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux | ||
395 | * IRQ handlers; this clears ST0_IE and returns immediately. | ||
396 | */ | ||
397 | clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1); | ||
398 | change_c0_status( | ||
399 | IE_IRQ5 | bmips_tp1_irqs | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV, | ||
400 | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV); | ||
401 | irq_disable_hazard(); | ||
402 | |||
403 | /* | ||
404 | * wait for SW interrupt from bmips_boot_secondary(), then jump | ||
405 | * back to start_secondary() | ||
406 | */ | ||
407 | __asm__ __volatile__( | ||
408 | " wait\n" | ||
409 | " j bmips_secondary_reentry\n" | ||
410 | : : : "memory"); | ||
411 | } | ||
412 | |||
413 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
414 | |||
415 | const struct plat_smp_ops bmips43xx_smp_ops = { | ||
416 | .smp_setup = bmips_smp_setup, | ||
417 | .prepare_cpus = bmips_prepare_cpus, | ||
418 | .boot_secondary = bmips_boot_secondary, | ||
419 | .smp_finish = bmips_smp_finish, | ||
420 | .init_secondary = bmips_init_secondary, | ||
421 | .send_ipi_single = bmips43xx_send_ipi_single, | ||
422 | .send_ipi_mask = bmips43xx_send_ipi_mask, | ||
423 | #ifdef CONFIG_HOTPLUG_CPU | ||
424 | .cpu_disable = bmips_cpu_disable, | ||
425 | .cpu_die = bmips_cpu_die, | ||
426 | #endif | ||
427 | #ifdef CONFIG_KEXEC | ||
428 | .kexec_nonboot_cpu = kexec_nonboot_cpu_jump, | ||
429 | #endif | ||
430 | }; | ||
431 | |||
432 | const struct plat_smp_ops bmips5000_smp_ops = { | ||
433 | .smp_setup = bmips_smp_setup, | ||
434 | .prepare_cpus = bmips_prepare_cpus, | ||
435 | .boot_secondary = bmips_boot_secondary, | ||
436 | .smp_finish = bmips_smp_finish, | ||
437 | .init_secondary = bmips_init_secondary, | ||
438 | .send_ipi_single = bmips5000_send_ipi_single, | ||
439 | .send_ipi_mask = bmips5000_send_ipi_mask, | ||
440 | #ifdef CONFIG_HOTPLUG_CPU | ||
441 | .cpu_disable = bmips_cpu_disable, | ||
442 | .cpu_die = bmips_cpu_die, | ||
443 | #endif | ||
444 | #ifdef CONFIG_KEXEC | ||
445 | .kexec_nonboot_cpu = kexec_nonboot_cpu_jump, | ||
446 | #endif | ||
447 | }; | ||
448 | |||
449 | #endif /* CONFIG_SMP */ | ||
450 | |||
451 | /*********************************************************************** | ||
452 | * BMIPS vector relocation | ||
453 | * This is primarily used for SMP boot, but it is applicable to some | ||
454 | * UP BMIPS systems as well. | ||
455 | ***********************************************************************/ | ||
456 | |||
457 | static void bmips_wr_vec(unsigned long dst, char *start, char *end) | ||
458 | { | ||
459 | memcpy((void *)dst, start, end - start); | ||
460 | dma_cache_wback(dst, end - start); | ||
461 | local_flush_icache_range(dst, dst + (end - start)); | ||
462 | instruction_hazard(); | ||
463 | } | ||
464 | |||
465 | static inline void bmips_nmi_handler_setup(void) | ||
466 | { | ||
467 | bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec, | ||
468 | bmips_reset_nmi_vec_end); | ||
469 | bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec, | ||
470 | bmips_smp_int_vec_end); | ||
471 | } | ||
472 | |||
473 | struct reset_vec_info { | ||
474 | int cpu; | ||
475 | u32 val; | ||
476 | }; | ||
477 | |||
478 | static void bmips_set_reset_vec_remote(void *vinfo) | ||
479 | { | ||
480 | struct reset_vec_info *info = vinfo; | ||
481 | int shift = info->cpu & 0x01 ? 16 : 0; | ||
482 | u32 mask = ~(0xffff << shift), val = info->val >> 16; | ||
483 | |||
484 | preempt_disable(); | ||
485 | if (smp_processor_id() > 0) { | ||
486 | smp_call_function_single(0, &bmips_set_reset_vec_remote, | ||
487 | info, 1); | ||
488 | } else { | ||
489 | if (info->cpu & 0x02) { | ||
490 | /* BMIPS5200 "should" use mask/shift, but it's buggy */ | ||
491 | bmips_write_zscm_reg(0xa0, (val << 16) | val); | ||
492 | bmips_read_zscm_reg(0xa0); | ||
493 | } else { | ||
494 | write_c0_brcm_bootvec((read_c0_brcm_bootvec() & mask) | | ||
495 | (val << shift)); | ||
496 | } | ||
497 | } | ||
498 | preempt_enable(); | ||
499 | } | ||
500 | |||
501 | static void bmips_set_reset_vec(int cpu, u32 val) | ||
502 | { | ||
503 | struct reset_vec_info info; | ||
504 | |||
505 | if (current_cpu_type() == CPU_BMIPS5000) { | ||
506 | /* this needs to run from CPU0 (which is always online) */ | ||
507 | info.cpu = cpu; | ||
508 | info.val = val; | ||
509 | bmips_set_reset_vec_remote(&info); | ||
510 | } else { | ||
511 | void __iomem *cbr = BMIPS_GET_CBR(); | ||
512 | |||
513 | if (cpu == 0) | ||
514 | __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0); | ||
515 | else { | ||
516 | if (current_cpu_type() != CPU_BMIPS4380) | ||
517 | return; | ||
518 | __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_1); | ||
519 | } | ||
520 | } | ||
521 | __sync(); | ||
522 | back_to_back_c0_hazard(); | ||
523 | } | ||
524 | |||
525 | void bmips_ebase_setup(void) | ||
526 | { | ||
527 | unsigned long new_ebase = ebase; | ||
528 | |||
529 | BUG_ON(ebase != CKSEG0); | ||
530 | |||
531 | switch (current_cpu_type()) { | ||
532 | case CPU_BMIPS4350: | ||
533 | /* | ||
534 | * BMIPS4350 cannot relocate the normal vectors, but it | ||
535 | * can relocate the BEV=1 vectors. So CPU1 starts up at | ||
536 | * the relocated BEV=1, IV=0 general exception vector @ | ||
537 | * 0xa000_0380. | ||
538 | * | ||
539 | * set_uncached_handler() is used here because: | ||
540 | * - CPU1 will run this from uncached space | ||
541 | * - None of the cacheflush functions are set up yet | ||
542 | */ | ||
543 | set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0, | ||
544 | &bmips_smp_int_vec, 0x80); | ||
545 | __sync(); | ||
546 | return; | ||
547 | case CPU_BMIPS3300: | ||
548 | case CPU_BMIPS4380: | ||
549 | /* | ||
550 | * 0x8000_0000: reset/NMI (initially in kseg1) | ||
551 | * 0x8000_0400: normal vectors | ||
552 | */ | ||
553 | new_ebase = 0x80000400; | ||
554 | bmips_set_reset_vec(0, RESET_FROM_KSEG0); | ||
555 | break; | ||
556 | case CPU_BMIPS5000: | ||
557 | /* | ||
558 | * 0x8000_0000: reset/NMI (initially in kseg1) | ||
559 | * 0x8000_1000: normal vectors | ||
560 | */ | ||
561 | new_ebase = 0x80001000; | ||
562 | bmips_set_reset_vec(0, RESET_FROM_KSEG0); | ||
563 | write_c0_ebase(new_ebase); | ||
564 | break; | ||
565 | default: | ||
566 | return; | ||
567 | } | ||
568 | |||
569 | board_nmi_handler_setup = &bmips_nmi_handler_setup; | ||
570 | ebase = new_ebase; | ||
571 | } | ||
572 | |||
573 | asmlinkage void __weak plat_wired_tlb_setup(void) | ||
574 | { | ||
575 | /* | ||
576 | * Called when starting/restarting a secondary CPU. | ||
577 | * Kernel stacks and other important data might only be accessible | ||
578 | * once the wired entries are present. | ||
579 | */ | ||
580 | } | ||
581 | |||
582 | void bmips_cpu_setup(void) | ||
583 | { | ||
584 | void __iomem __maybe_unused *cbr = BMIPS_GET_CBR(); | ||
585 | u32 __maybe_unused cfg; | ||
586 | |||
587 | switch (current_cpu_type()) { | ||
588 | case CPU_BMIPS3300: | ||
589 | /* Set BIU to async mode */ | ||
590 | set_c0_brcm_bus_pll(BIT(22)); | ||
591 | __sync(); | ||
592 | |||
593 | /* put the BIU back in sync mode */ | ||
594 | clear_c0_brcm_bus_pll(BIT(22)); | ||
595 | |||
596 | /* clear BHTD to enable branch history table */ | ||
597 | clear_c0_brcm_reset(BIT(16)); | ||
598 | |||
599 | /* Flush and enable RAC */ | ||
600 | cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); | ||
601 | __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG); | ||
602 | __raw_readl(cbr + BMIPS_RAC_CONFIG); | ||
603 | |||
604 | cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG); | ||
605 | __raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG); | ||
606 | __raw_readl(cbr + BMIPS_RAC_CONFIG); | ||
607 | |||
608 | cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE); | ||
609 | __raw_writel(cfg | 0x0fff0000, cbr + BMIPS_RAC_ADDRESS_RANGE); | ||
610 | __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE); | ||
611 | break; | ||
612 | |||
613 | case CPU_BMIPS4380: | ||
614 | /* CBG workaround for early BMIPS4380 CPUs */ | ||
615 | switch (read_c0_prid()) { | ||
616 | case 0x2a040: | ||
617 | case 0x2a042: | ||
618 | case 0x2a044: | ||
619 | case 0x2a060: | ||
620 | cfg = __raw_readl(cbr + BMIPS_L2_CONFIG); | ||
621 | __raw_writel(cfg & ~0x07000000, cbr + BMIPS_L2_CONFIG); | ||
622 | __raw_readl(cbr + BMIPS_L2_CONFIG); | ||
623 | } | ||
624 | |||
625 | /* clear BHTD to enable branch history table */ | ||
626 | clear_c0_brcm_config_0(BIT(21)); | ||
627 | |||
628 | /* XI/ROTR enable */ | ||
629 | set_c0_brcm_config_0(BIT(23)); | ||
630 | set_c0_brcm_cmt_ctrl(BIT(15)); | ||
631 | break; | ||
632 | |||
633 | case CPU_BMIPS5000: | ||
634 | /* enable RDHWR, BRDHWR */ | ||
635 | set_c0_brcm_config(BIT(17) | BIT(21)); | ||
636 | |||
637 | /* Disable JTB */ | ||
638 | __asm__ __volatile__( | ||
639 | " .set noreorder\n" | ||
640 | " li $8, 0x5a455048\n" | ||
641 | " .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */ | ||
642 | " .word 0x4008b008\n" /* mfc0 t0, $22, 8 */ | ||
643 | " li $9, 0x00008000\n" | ||
644 | " or $8, $8, $9\n" | ||
645 | " .word 0x4088b008\n" /* mtc0 t0, $22, 8 */ | ||
646 | " sync\n" | ||
647 | " li $8, 0x0\n" | ||
648 | " .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */ | ||
649 | " .set reorder\n" | ||
650 | : : : "$8", "$9"); | ||
651 | |||
652 | /* XI enable */ | ||
653 | set_c0_brcm_config(BIT(27)); | ||
654 | |||
655 | /* enable MIPS32R2 ROR instruction for XI TLB handlers */ | ||
656 | __asm__ __volatile__( | ||
657 | " li $8, 0x5a455048\n" | ||
658 | " .word 0x4088b00f\n" /* mtc0 $8, $22, 15 */ | ||
659 | " nop; nop; nop\n" | ||
660 | " .word 0x4008b008\n" /* mfc0 $8, $22, 8 */ | ||
661 | " lui $9, 0x0100\n" | ||
662 | " or $8, $9\n" | ||
663 | " .word 0x4088b008\n" /* mtc0 $8, $22, 8 */ | ||
664 | : : : "$8", "$9"); | ||
665 | break; | ||
666 | } | ||
667 | } | ||
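/*
 * Decoding the .word constants above (standalone sketch; COP0 encodings
 * per the MIPS32 instruction format): opcode COP0 = 0b010000, then
 * MT = 0b00100 or MF = 0b00000, rt, rd and a 3-bit sel field.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t cop0(unsigned op, unsigned rt, unsigned rd, unsigned sel)
{
	return (0x10u << 26) | (op << 21) | (rt << 16) | (rd << 11) | sel;
}

int main(void)
{
	printf("mtc0 $8, $22, 15 = 0x%08x\n", cop0(4, 8, 22, 15)); /* 0x4088b00f */
	printf("mfc0 $8, $22, 8  = 0x%08x\n", cop0(0, 8, 22, 8));  /* 0x4008b008 */
	return 0;
}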
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c new file mode 100644 index 000000000..76f5824cd --- /dev/null +++ b/arch/mips/kernel/smp-cmp.c | |||
@@ -0,0 +1,148 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * | ||
4 | * Copyright (C) 2007 MIPS Technologies, Inc. | ||
5 | * Chris Dearman (chris@mips.com) | ||
6 | */ | ||
7 | |||
8 | #undef DEBUG | ||
9 | |||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/sched/task_stack.h> | ||
12 | #include <linux/smp.h> | ||
13 | #include <linux/cpumask.h> | ||
14 | #include <linux/interrupt.h> | ||
15 | #include <linux/compiler.h> | ||
16 | |||
17 | #include <linux/atomic.h> | ||
18 | #include <asm/cacheflush.h> | ||
19 | #include <asm/cpu.h> | ||
20 | #include <asm/processor.h> | ||
21 | #include <asm/hardirq.h> | ||
22 | #include <asm/mmu_context.h> | ||
23 | #include <asm/smp.h> | ||
24 | #include <asm/time.h> | ||
25 | #include <asm/mipsregs.h> | ||
26 | #include <asm/mipsmtregs.h> | ||
27 | #include <asm/mips_mt.h> | ||
28 | #include <asm/amon.h> | ||
29 | |||
30 | static void cmp_init_secondary(void) | ||
31 | { | ||
32 | struct cpuinfo_mips *c __maybe_unused = ¤t_cpu_data; | ||
33 | |||
34 | /* Assume GIC is present */ | ||
35 | change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 | | ||
36 | STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7); | ||
37 | |||
38 | /* Enable per-cpu interrupts: platform specific */ | ||
39 | |||
40 | #ifdef CONFIG_MIPS_MT_SMP | ||
41 | if (cpu_has_mipsmt) | ||
42 | cpu_set_vpe_id(c, (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & | ||
43 | TCBIND_CURVPE); | ||
44 | #endif | ||
45 | } | ||
46 | |||
47 | static void cmp_smp_finish(void) | ||
48 | { | ||
49 | pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); | ||
50 | |||
51 | /* CDFIXME: remove this? */ | ||
52 | write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ)); | ||
53 | |||
54 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
55 | /* If we have an FPU, enroll ourselves in the FPU-full mask */ | ||
56 | if (cpu_has_fpu) | ||
57 | cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); | ||
58 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
59 | |||
60 | local_irq_enable(); | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * Set up the PC, SP and GP of a secondary processor and start it running. | ||
65 | * smp_bootstrap is the entry point to resume from, | ||
66 | * __KSTK_TOS(idle) supplies the initial stack pointer | ||
67 | * and task_thread_info(idle) the gp. | ||
68 | */ | ||
69 | static int cmp_boot_secondary(int cpu, struct task_struct *idle) | ||
70 | { | ||
71 | struct thread_info *gp = task_thread_info(idle); | ||
72 | unsigned long sp = __KSTK_TOS(idle); | ||
73 | unsigned long pc = (unsigned long)&smp_bootstrap; | ||
74 | unsigned long a0 = 0; | ||
75 | |||
76 | pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(), | ||
77 | __func__, cpu); | ||
78 | |||
79 | #if 0 | ||
80 | /* Needed? */ | ||
81 | flush_icache_range((unsigned long)gp, | ||
82 | (unsigned long)(gp + sizeof(struct thread_info))); | ||
83 | #endif | ||
84 | |||
85 | amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0); | ||
86 | return 0; | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Common setup before any secondaries are started | ||
91 | */ | ||
92 | void __init cmp_smp_setup(void) | ||
93 | { | ||
94 | int i; | ||
95 | int ncpu = 0; | ||
96 | |||
97 | pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__); | ||
98 | |||
99 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
100 | /* If we have an FPU, enroll ourselves in the FPU-full mask */ | ||
101 | if (cpu_has_fpu) | ||
102 | cpumask_set_cpu(0, &mt_fpu_cpumask); | ||
103 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
104 | |||
105 | for (i = 1; i < NR_CPUS; i++) { | ||
106 | if (amon_cpu_avail(i)) { | ||
107 | set_cpu_possible(i, true); | ||
108 | __cpu_number_map[i] = ++ncpu; | ||
109 | __cpu_logical_map[ncpu] = i; | ||
110 | } | ||
111 | } | ||
112 | |||
113 | if (cpu_has_mipsmt) { | ||
114 | unsigned int nvpe = 1; | ||
115 | #ifdef CONFIG_MIPS_MT_SMP | ||
116 | unsigned int mvpconf0 = read_c0_mvpconf0(); | ||
117 | |||
118 | nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
119 | #endif | ||
120 | smp_num_siblings = nvpe; | ||
121 | } | ||
122 | pr_info("Detected %i available secondary CPU(s)\n", ncpu); | ||
123 | } | ||
124 | |||
125 | void __init cmp_prepare_cpus(unsigned int max_cpus) | ||
126 | { | ||
127 | pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n", | ||
128 | smp_processor_id(), __func__, max_cpus); | ||
129 | |||
130 | #ifdef CONFIG_MIPS_MT | ||
131 | /* | ||
132 | * FIXME: some of these options are per-system, some per-core and | ||
133 | * some per-cpu | ||
134 | */ | ||
135 | mips_mt_set_cpuoptions(); | ||
136 | #endif | ||
137 | |||
138 | } | ||
139 | |||
140 | const struct plat_smp_ops cmp_smp_ops = { | ||
141 | .send_ipi_single = mips_smp_send_ipi_single, | ||
142 | .send_ipi_mask = mips_smp_send_ipi_mask, | ||
143 | .init_secondary = cmp_init_secondary, | ||
144 | .smp_finish = cmp_smp_finish, | ||
145 | .boot_secondary = cmp_boot_secondary, | ||
146 | .smp_setup = cmp_smp_setup, | ||
147 | .prepare_cpus = cmp_prepare_cpus, | ||
148 | }; | ||
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c new file mode 100644 index 000000000..dbb3f1fc7 --- /dev/null +++ b/arch/mips/kernel/smp-cps.c | |||
@@ -0,0 +1,645 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2013 Imagination Technologies | ||
4 | * Author: Paul Burton <paul.burton@mips.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/cpu.h> | ||
8 | #include <linux/delay.h> | ||
9 | #include <linux/io.h> | ||
10 | #include <linux/sched/task_stack.h> | ||
11 | #include <linux/sched/hotplug.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/types.h> | ||
15 | |||
16 | #include <asm/bcache.h> | ||
17 | #include <asm/mips-cps.h> | ||
18 | #include <asm/mips_mt.h> | ||
19 | #include <asm/mipsregs.h> | ||
20 | #include <asm/pm-cps.h> | ||
21 | #include <asm/r4kcache.h> | ||
22 | #include <asm/smp-cps.h> | ||
23 | #include <asm/time.h> | ||
24 | #include <asm/uasm.h> | ||
25 | |||
26 | static bool threads_disabled; | ||
27 | static DECLARE_BITMAP(core_power, NR_CPUS); | ||
28 | |||
29 | struct core_boot_config *mips_cps_core_bootcfg; | ||
30 | |||
31 | static int __init setup_nothreads(char *s) | ||
32 | { | ||
33 | threads_disabled = true; | ||
34 | return 0; | ||
35 | } | ||
36 | early_param("nothreads", setup_nothreads); | ||
37 | |||
38 | static unsigned core_vpe_count(unsigned int cluster, unsigned core) | ||
39 | { | ||
40 | if (threads_disabled) | ||
41 | return 1; | ||
42 | |||
43 | return mips_cps_numvps(cluster, core); | ||
44 | } | ||
45 | |||
46 | static void __init cps_smp_setup(void) | ||
47 | { | ||
48 | unsigned int nclusters, ncores, nvpes, core_vpes; | ||
49 | unsigned long core_entry; | ||
50 | int cl, c, v; | ||
51 | |||
52 | /* Detect & record VPE topology */ | ||
53 | nvpes = 0; | ||
54 | nclusters = mips_cps_numclusters(); | ||
55 | pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE"); | ||
56 | for (cl = 0; cl < nclusters; cl++) { | ||
57 | if (cl > 0) | ||
58 | pr_cont(","); | ||
59 | pr_cont("{"); | ||
60 | |||
61 | ncores = mips_cps_numcores(cl); | ||
62 | for (c = 0; c < ncores; c++) { | ||
63 | core_vpes = core_vpe_count(cl, c); | ||
64 | |||
65 | if (c > 0) | ||
66 | pr_cont(","); | ||
67 | pr_cont("%u", core_vpes); | ||
68 | |||
69 | /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */ | ||
70 | if (!cl && !c) | ||
71 | smp_num_siblings = core_vpes; | ||
72 | |||
73 | for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) { | ||
74 | cpu_set_cluster(&cpu_data[nvpes + v], cl); | ||
75 | cpu_set_core(&cpu_data[nvpes + v], c); | ||
76 | cpu_set_vpe_id(&cpu_data[nvpes + v], v); | ||
77 | } | ||
78 | |||
79 | nvpes += core_vpes; | ||
80 | } | ||
81 | |||
82 | pr_cont("}"); | ||
83 | } | ||
84 | pr_cont(" total %u\n", nvpes); | ||
85 | |||
86 | /* Indicate present CPUs (CPU being synonymous with VPE) */ | ||
87 | for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) { | ||
88 | set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0); | ||
89 | set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0); | ||
90 | __cpu_number_map[v] = v; | ||
91 | __cpu_logical_map[v] = v; | ||
92 | } | ||
93 | |||
94 | /* Set a coherent default CCA (CWB) */ | ||
95 | change_c0_config(CONF_CM_CMASK, 0x5); | ||
96 | |||
97 | /* Core 0 is powered up (we're running on it) */ | ||
98 | bitmap_set(core_power, 0, 1); | ||
99 | |||
100 | /* Initialise core 0 */ | ||
101 | mips_cps_core_init(); | ||
102 | |||
103 | /* Make core 0 coherent with everything */ | ||
104 | write_gcr_cl_coherence(0xff); | ||
105 | |||
106 | if (mips_cm_revision() >= CM_REV_CM3) { | ||
107 | core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); | ||
108 | write_gcr_bev_base(core_entry); | ||
109 | } | ||
110 | |||
111 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
112 | /* If we have an FPU, enroll ourselves in the FPU-full mask */ | ||
113 | if (cpu_has_fpu) | ||
114 | cpumask_set_cpu(0, &mt_fpu_cpumask); | ||
115 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
116 | } | ||
117 | |||
118 | static void __init cps_prepare_cpus(unsigned int max_cpus) | ||
119 | { | ||
120 | unsigned ncores, core_vpes, c, cca; | ||
121 | bool cca_unsuitable, cores_limited; | ||
122 | u32 *entry_code; | ||
123 | |||
124 | mips_mt_set_cpuoptions(); | ||
125 | |||
126 | /* Detect whether the CCA is unsuited to multi-core SMP */ | ||
127 | cca = read_c0_config() & CONF_CM_CMASK; | ||
128 | switch (cca) { | ||
129 | case 0x4: /* CWBE */ | ||
130 | case 0x5: /* CWB */ | ||
131 | /* The CCA is coherent, multi-core is fine */ | ||
132 | cca_unsuitable = false; | ||
133 | break; | ||
134 | |||
135 | default: | ||
136 | /* CCA is not coherent, multi-core is not usable */ | ||
137 | cca_unsuitable = true; | ||
138 | } | ||
139 | |||
140 | /* Warn the user if the CCA prevents multi-core */ | ||
141 | cores_limited = false; | ||
142 | if (cca_unsuitable || cpu_has_dc_aliases) { | ||
143 | for_each_present_cpu(c) { | ||
144 | if (cpus_are_siblings(smp_processor_id(), c)) | ||
145 | continue; | ||
146 | |||
147 | set_cpu_present(c, false); | ||
148 | cores_limited = true; | ||
149 | } | ||
150 | } | ||
151 | if (cores_limited) | ||
152 | pr_warn("Using only one core due to %s%s%s\n", | ||
153 | cca_unsuitable ? "unsuitable CCA" : "", | ||
154 | (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "", | ||
155 | cpu_has_dc_aliases ? "dcache aliasing" : ""); | ||
156 | |||
157 | /* | ||
158 | * Patch the start of mips_cps_core_entry to provide: | ||
159 | * | ||
160 | * s0 = kseg0 CCA | ||
161 | */ | ||
162 | entry_code = (u32 *)&mips_cps_core_entry; | ||
163 | uasm_i_addiu(&entry_code, 16, 0, cca); | ||
164 | blast_dcache_range((unsigned long)&mips_cps_core_entry, | ||
165 | (unsigned long)entry_code); | ||
166 | bc_wback_inv((unsigned long)&mips_cps_core_entry, | ||
167 | (void *)entry_code - (void *)&mips_cps_core_entry); | ||
168 | __sync(); | ||
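/*
 * What the uasm call above emits (standalone sketch; MIPS I-type
 * encoding, register 16 == $s0): "addiu $s0, $zero, cca", i.e. the
 * patched entry code loads the boot CCA into s0 before anything else.
 */
#include <stdint.h>
#include <stdio.h>

static uint32_t encode_addiu(unsigned rs, unsigned rt, uint16_t imm)
{
	/* ADDIU is opcode 0b001001: op | rs | rt | immediate */
	return (9u << 26) | (rs << 21) | (rt << 16) | imm;
}

int main(void)
{
	/* addiu $16, $0, 5 -- s0 = CCA 5 (CWB), as patched above */
	printf("0x%08x\n", encode_addiu(0, 16, 5));	/* 0x24100005 */
	return 0;
}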
169 | |||
170 | /* Allocate core boot configuration structs */ | ||
171 | ncores = mips_cps_numcores(0); | ||
172 | mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg), | ||
173 | GFP_KERNEL); | ||
174 | if (!mips_cps_core_bootcfg) { | ||
175 | pr_err("Failed to allocate boot config for %u cores\n", ncores); | ||
176 | goto err_out; | ||
177 | } | ||
178 | |||
179 | /* Allocate VPE boot configuration structs */ | ||
180 | for (c = 0; c < ncores; c++) { | ||
181 | core_vpes = core_vpe_count(0, c); | ||
182 | mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes, | ||
183 | sizeof(*mips_cps_core_bootcfg[c].vpe_config), | ||
184 | GFP_KERNEL); | ||
185 | if (!mips_cps_core_bootcfg[c].vpe_config) { | ||
186 | pr_err("Failed to allocate %u VPE boot configs\n", | ||
187 | core_vpes); | ||
188 | goto err_out; | ||
189 | } | ||
190 | } | ||
191 | |||
192 | /* Mark this CPU as booted */ | ||
193 | atomic_set(&mips_cps_core_bootcfg[cpu_core(¤t_cpu_data)].vpe_mask, | ||
194 | 1 << cpu_vpe_id(¤t_cpu_data)); | ||
195 | |||
196 | return; | ||
197 | err_out: | ||
198 | /* Clean up allocations */ | ||
199 | if (mips_cps_core_bootcfg) { | ||
200 | for (c = 0; c < ncores; c++) | ||
201 | kfree(mips_cps_core_bootcfg[c].vpe_config); | ||
202 | kfree(mips_cps_core_bootcfg); | ||
203 | mips_cps_core_bootcfg = NULL; | ||
204 | } | ||
205 | |||
206 | /* Effectively disable SMP by declaring CPUs not present */ | ||
207 | for_each_possible_cpu(c) { | ||
208 | if (c == 0) | ||
209 | continue; | ||
210 | set_cpu_present(c, false); | ||
211 | } | ||
212 | } | ||
213 | |||
214 | static void boot_core(unsigned int core, unsigned int vpe_id) | ||
215 | { | ||
216 | u32 stat, seq_state; | ||
217 | unsigned timeout; | ||
218 | |||
219 | /* Select the appropriate core */ | ||
220 | mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); | ||
221 | |||
222 | /* Set its reset vector */ | ||
223 | write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry)); | ||
224 | |||
225 | /* Ensure its coherency is disabled */ | ||
226 | write_gcr_co_coherence(0); | ||
227 | |||
228 | /* Start it with the legacy memory map and exception base */ | ||
229 | write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB); | ||
230 | |||
231 | /* Ensure the core can access the GCRs */ | ||
232 | set_gcr_access(1 << core); | ||
233 | |||
234 | if (mips_cpc_present()) { | ||
235 | /* Reset the core */ | ||
236 | mips_cpc_lock_other(core); | ||
237 | |||
238 | if (mips_cm_revision() >= CM_REV_CM3) { | ||
239 | /* Run only the requested VP following the reset */ | ||
240 | write_cpc_co_vp_stop(0xf); | ||
241 | write_cpc_co_vp_run(1 << vpe_id); | ||
242 | |||
243 | /* | ||
244 | * Ensure that the VP_RUN register is written before the | ||
245 | * core leaves reset. | ||
246 | */ | ||
247 | wmb(); | ||
248 | } | ||
249 | |||
250 | write_cpc_co_cmd(CPC_Cx_CMD_RESET); | ||
251 | |||
252 | timeout = 100; | ||
253 | while (true) { | ||
254 | stat = read_cpc_co_stat_conf(); | ||
255 | seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE; | ||
256 | seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); | ||
257 | |||
258 | /* U6 == coherent execution, ie. the core is up */ | ||
259 | if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6) | ||
260 | break; | ||
261 | |||
262 | /* Delay a little while before we start warning */ | ||
263 | if (timeout) { | ||
264 | timeout--; | ||
265 | mdelay(10); | ||
266 | continue; | ||
267 | } | ||
268 | |||
269 | pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n", | ||
270 | core, stat); | ||
271 | mdelay(1000); | ||
272 | } | ||
273 | |||
274 | mips_cpc_unlock_other(); | ||
275 | } else { | ||
276 | /* Take the core out of reset */ | ||
277 | write_gcr_co_reset_release(0); | ||
278 | } | ||
279 | |||
280 | mips_cm_unlock_other(); | ||
281 | |||
282 | /* The core is now powered up */ | ||
283 | bitmap_set(core_power, core, 1); | ||
284 | } | ||
285 | |||
286 | static void remote_vpe_boot(void *dummy) | ||
287 | { | ||
288 | unsigned core = cpu_core(¤t_cpu_data); | ||
289 | struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; | ||
290 | |||
291 | mips_cps_boot_vpes(core_cfg, cpu_vpe_id(¤t_cpu_data)); | ||
292 | } | ||
293 | |||
294 | static int cps_boot_secondary(int cpu, struct task_struct *idle) | ||
295 | { | ||
296 | unsigned core = cpu_core(&cpu_data[cpu]); | ||
297 | unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); | ||
298 | struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core]; | ||
299 | struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id]; | ||
300 | unsigned long core_entry; | ||
301 | unsigned int remote; | ||
302 | int err; | ||
303 | |||
304 | /* We don't yet support booting CPUs in other clusters */ | ||
305 | if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data)) | ||
306 | return -ENOSYS; | ||
307 | |||
308 | vpe_cfg->pc = (unsigned long)&smp_bootstrap; | ||
309 | vpe_cfg->sp = __KSTK_TOS(idle); | ||
310 | vpe_cfg->gp = (unsigned long)task_thread_info(idle); | ||
311 | |||
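| /* | ||
|  * Record this VPE in the core's vpe_mask so the core-local boot path | ||
|  * can tell which VPs within the core should be brought up. | ||
|  */ | ||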
312 | atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask); | ||
313 | |||
314 | preempt_disable(); | ||
315 | |||
316 | if (!test_bit(core, core_power)) { | ||
317 | /* Boot a VPE on a powered down core */ | ||
318 | boot_core(core, vpe_id); | ||
319 | goto out; | ||
320 | } | ||
321 | |||
322 | if (cpu_has_vp) { | ||
323 | mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); | ||
324 | core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry); | ||
325 | write_gcr_co_reset_base(core_entry); | ||
326 | mips_cm_unlock_other(); | ||
327 | } | ||
328 | |||
329 | if (!cpus_are_siblings(cpu, smp_processor_id())) { | ||
330 | /* Boot a VPE on another powered up core */ | ||
331 | for (remote = 0; remote < NR_CPUS; remote++) { | ||
332 | if (!cpus_are_siblings(cpu, remote)) | ||
333 | continue; | ||
334 | if (cpu_online(remote)) | ||
335 | break; | ||
336 | } | ||
337 | if (remote >= NR_CPUS) { | ||
338 | pr_crit("No online CPU in core %u to start CPU%d\n", | ||
339 | core, cpu); | ||
340 | goto out; | ||
341 | } | ||
342 | |||
343 | err = smp_call_function_single(remote, remote_vpe_boot, | ||
344 | NULL, 1); | ||
345 | if (err) | ||
346 | panic("Failed to call remote CPU\n"); | ||
347 | goto out; | ||
348 | } | ||
349 | |||
350 | BUG_ON(!cpu_has_mipsmt && !cpu_has_vp); | ||
351 | |||
352 | /* Boot a VPE on this core */ | ||
353 | mips_cps_boot_vpes(core_cfg, vpe_id); | ||
354 | out: | ||
355 | preempt_enable(); | ||
356 | return 0; | ||
357 | } | ||
358 | |||
359 | static void cps_init_secondary(void) | ||
360 | { | ||
361 | /* Disable MT - we only want to run 1 TC per VPE */ | ||
362 | if (cpu_has_mipsmt) | ||
363 | dmt(); | ||
364 | |||
365 | if (mips_cm_revision() >= CM_REV_CM3) { | ||
366 | unsigned int ident = read_gic_vl_ident(); | ||
367 | |||
368 | /* | ||
369 | * Ensure that our calculation of the VP ID matches up with | ||
370 | * what the GIC reports, otherwise we'll have configured | ||
371 | * interrupts incorrectly. | ||
372 | */ | ||
373 | BUG_ON(ident != mips_cm_vp_id(smp_processor_id())); | ||
374 | } | ||
375 | |||
376 | if (cpu_has_veic) | ||
377 | clear_c0_status(ST0_IM); | ||
378 | else | ||
379 | change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | | ||
380 | STATUSF_IP4 | STATUSF_IP5 | | ||
381 | STATUSF_IP6 | STATUSF_IP7); | ||
382 | } | ||
383 | |||
384 | static void cps_smp_finish(void) | ||
385 | { | ||
386 | write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ)); | ||
387 | |||
388 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
389 | /* If we have an FPU, enroll ourselves in the FPU-full mask */ | ||
390 | if (cpu_has_fpu) | ||
391 | cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); | ||
392 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
393 | |||
394 | local_irq_enable(); | ||
395 | } | ||
396 | |||
397 | #if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC) | ||
398 | |||
399 | enum cpu_death { | ||
400 | CPU_DEATH_HALT, | ||
401 | CPU_DEATH_POWER, | ||
402 | }; | ||
403 | |||
404 | static void cps_shutdown_this_cpu(enum cpu_death death) | ||
405 | { | ||
406 | unsigned int cpu, core, vpe_id; | ||
407 | |||
408 | cpu = smp_processor_id(); | ||
409 | core = cpu_core(&cpu_data[cpu]); | ||
410 | |||
411 | if (death == CPU_DEATH_HALT) { | ||
412 | vpe_id = cpu_vpe_id(&cpu_data[cpu]); | ||
413 | |||
414 | pr_debug("Halting core %d VP%d\n", core, vpe_id); | ||
415 | if (cpu_has_mipsmt) { | ||
416 | /* Halt this TC */ | ||
417 | write_c0_tchalt(TCHALT_H); | ||
418 | instruction_hazard(); | ||
419 | } else if (cpu_has_vp) { | ||
420 | write_cpc_cl_vp_stop(1 << vpe_id); | ||
421 | |||
422 | /* Ensure that the VP_STOP register is written */ | ||
423 | wmb(); | ||
424 | } | ||
425 | } else { | ||
426 | pr_debug("Gating power to core %d\n", core); | ||
427 | /* Power down the core */ | ||
428 | cps_pm_enter_state(CPS_PM_POWER_GATED); | ||
429 | } | ||
430 | } | ||
431 | |||
432 | #ifdef CONFIG_KEXEC | ||
433 | |||
434 | static void cps_kexec_nonboot_cpu(void) | ||
435 | { | ||
436 | if (cpu_has_mipsmt || cpu_has_vp) | ||
437 | cps_shutdown_this_cpu(CPU_DEATH_HALT); | ||
438 | else | ||
439 | cps_shutdown_this_cpu(CPU_DEATH_POWER); | ||
440 | } | ||
441 | |||
442 | #endif /* CONFIG_KEXEC */ | ||
443 | |||
444 | #endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */ | ||
445 | |||
446 | #ifdef CONFIG_HOTPLUG_CPU | ||
447 | |||
448 | static int cps_cpu_disable(void) | ||
449 | { | ||
450 | unsigned cpu = smp_processor_id(); | ||
451 | struct core_boot_config *core_cfg; | ||
452 | |||
453 | if (!cpu) | ||
454 | return -EBUSY; | ||
455 | |||
456 | if (!cps_pm_support_state(CPS_PM_POWER_GATED)) | ||
457 | return -EINVAL; | ||
458 | |||
459 | core_cfg = &mips_cps_core_bootcfg[cpu_core(¤t_cpu_data)]; | ||
460 | atomic_sub(1 << cpu_vpe_id(¤t_cpu_data), &core_cfg->vpe_mask); | ||
461 | smp_mb__after_atomic(); | ||
462 | set_cpu_online(cpu, false); | ||
463 | calculate_cpu_foreign_map(); | ||
464 | |||
465 | return 0; | ||
466 | } | ||
467 | |||
468 | static unsigned cpu_death_sibling; | ||
469 | static enum cpu_death cpu_death; | ||
470 | |||
471 | void play_dead(void) | ||
472 | { | ||
473 | unsigned int cpu; | ||
474 | |||
475 | local_irq_disable(); | ||
476 | idle_task_exit(); | ||
477 | cpu = smp_processor_id(); | ||
478 | cpu_death = CPU_DEATH_POWER; | ||
479 | |||
480 | pr_debug("CPU%d going offline\n", cpu); | ||
481 | |||
482 | if (cpu_has_mipsmt || cpu_has_vp) { | ||
483 | /* Look for another online VPE within the core */ | ||
484 | for_each_online_cpu(cpu_death_sibling) { | ||
485 | if (!cpus_are_siblings(cpu, cpu_death_sibling)) | ||
486 | continue; | ||
487 | |||
488 | /* | ||
489 | * There is an online VPE within the core. Just halt | ||
490 | * this TC and leave the core alone. | ||
491 | */ | ||
492 | cpu_death = CPU_DEATH_HALT; | ||
493 | break; | ||
494 | } | ||
495 | } | ||
496 | |||
497 | /* This CPU has chosen its way out */ | ||
498 | (void)cpu_report_death(); | ||
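| /* cpu_report_death() pairs with the cpu_wait_death() polling in cps_cpu_die() */ | ||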
499 | |||
500 | cps_shutdown_this_cpu(cpu_death); | ||
501 | |||
502 | /* This should never be reached */ | ||
503 | panic("Failed to offline CPU %u", cpu); | ||
504 | } | ||
505 | |||
506 | static void wait_for_sibling_halt(void *ptr_cpu) | ||
507 | { | ||
508 | unsigned cpu = (unsigned long)ptr_cpu; | ||
509 | unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]); | ||
510 | unsigned halted; | ||
511 | unsigned long flags; | ||
512 | |||
513 | do { | ||
514 | local_irq_save(flags); | ||
515 | settc(vpe_id); | ||
516 | halted = read_tc_c0_tchalt(); | ||
517 | local_irq_restore(flags); | ||
518 | } while (!(halted & TCHALT_H)); | ||
519 | } | ||
520 | |||
521 | static void cps_cpu_die(unsigned int cpu) | ||
522 | { | ||
523 | unsigned core = cpu_core(&cpu_data[cpu]); | ||
524 | unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]); | ||
525 | ktime_t fail_time; | ||
526 | unsigned stat; | ||
527 | int err; | ||
528 | |||
529 | /* Wait for the cpu to choose its way out */ | ||
530 | if (!cpu_wait_death(cpu, 5)) { | ||
531 | pr_err("CPU%u: didn't offline\n", cpu); | ||
532 | return; | ||
533 | } | ||
534 | |||
535 | /* | ||
536 | * Now wait for the CPU to actually offline. Without doing this that | ||
537 | * offlining may race with one or more of: | ||
538 | * | ||
539 | * - Onlining the CPU again. | ||
540 | * - Powering down the core if another VPE within it is offlined. | ||
541 | * - A sibling VPE entering a non-coherent state. | ||
542 | * | ||
543 | * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing | ||
544 | * with which we could race, so do nothing. | ||
545 | */ | ||
546 | if (cpu_death == CPU_DEATH_POWER) { | ||
547 | /* | ||
548 | * Wait for the core to enter a powered down or clock gated | ||
549 | * state, the latter happening when a JTAG probe is connected | ||
550 | * in which case the CPC will refuse to power down the core. | ||
551 | */ | ||
552 | fail_time = ktime_add_ms(ktime_get(), 2000); | ||
553 | do { | ||
554 | mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL); | ||
555 | mips_cpc_lock_other(core); | ||
556 | stat = read_cpc_co_stat_conf(); | ||
557 | stat &= CPC_Cx_STAT_CONF_SEQSTATE; | ||
558 | stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE); | ||
559 | mips_cpc_unlock_other(); | ||
560 | mips_cm_unlock_other(); | ||
561 | |||
562 | if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 || | ||
563 | stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 || | ||
564 | stat == CPC_Cx_STAT_CONF_SEQSTATE_U2) | ||
565 | break; | ||
566 | |||
567 | /* | ||
568 | * The core ought to have powered down, but didn't & | ||
569 | * now we don't really know what state it's in. It's | ||
570 | * likely that its _pwr_up pin has been wired to logic | ||
571 | * 1 & it powered back up as soon as we powered it | ||
572 | * down... | ||
573 | * | ||
574 | * The best we can do is warn the user & continue in | ||
575 | * the hope that the core is doing nothing harmful & | ||
576 | * might behave properly if we online it later. | ||
577 | */ | ||
578 | if (WARN(ktime_after(ktime_get(), fail_time), | ||
579 | "CPU%u hasn't powered down, seq. state %u\n", | ||
580 | cpu, stat)) | ||
581 | break; | ||
582 | } while (1); | ||
583 | |||
584 | /* Indicate the core is powered off */ | ||
585 | bitmap_clear(core_power, core, 1); | ||
586 | } else if (cpu_has_mipsmt) { | ||
587 | /* | ||
588 | * Have a CPU with access to the offlined CPUs registers wait | ||
589 | * for its TC to halt. | ||
590 | */ | ||
591 | err = smp_call_function_single(cpu_death_sibling, | ||
592 | wait_for_sibling_halt, | ||
593 | (void *)(unsigned long)cpu, 1); | ||
594 | if (err) | ||
595 | panic("Failed to call remote sibling CPU\n"); | ||
596 | } else if (cpu_has_vp) { | ||
597 | do { | ||
598 | mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL); | ||
599 | stat = read_cpc_co_vp_running(); | ||
600 | mips_cm_unlock_other(); | ||
601 | } while (stat & (1 << vpe_id)); | ||
602 | } | ||
603 | } | ||
604 | |||
605 | #endif /* CONFIG_HOTPLUG_CPU */ | ||
606 | |||
607 | static const struct plat_smp_ops cps_smp_ops = { | ||
608 | .smp_setup = cps_smp_setup, | ||
609 | .prepare_cpus = cps_prepare_cpus, | ||
610 | .boot_secondary = cps_boot_secondary, | ||
611 | .init_secondary = cps_init_secondary, | ||
612 | .smp_finish = cps_smp_finish, | ||
613 | .send_ipi_single = mips_smp_send_ipi_single, | ||
614 | .send_ipi_mask = mips_smp_send_ipi_mask, | ||
615 | #ifdef CONFIG_HOTPLUG_CPU | ||
616 | .cpu_disable = cps_cpu_disable, | ||
617 | .cpu_die = cps_cpu_die, | ||
618 | #endif | ||
619 | #ifdef CONFIG_KEXEC | ||
620 | .kexec_nonboot_cpu = cps_kexec_nonboot_cpu, | ||
621 | #endif | ||
622 | }; | ||
623 | |||
624 | bool mips_cps_smp_in_use(void) | ||
625 | { | ||
626 | extern const struct plat_smp_ops *mp_ops; | ||
627 | return mp_ops == &cps_smp_ops; | ||
628 | } | ||
629 | |||
630 | int register_cps_smp_ops(void) | ||
631 | { | ||
632 | if (!mips_cm_present()) { | ||
633 | pr_warn("MIPS CPS SMP unable to proceed without a CM\n"); | ||
634 | return -ENODEV; | ||
635 | } | ||
636 | |||
637 | /* check we have a GIC - we need one for IPIs */ | ||
638 | if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) { | ||
639 | pr_warn("MIPS CPS SMP unable to proceed without a GIC\n"); | ||
640 | return -ENODEV; | ||
641 | } | ||
642 | |||
643 | register_smp_ops(&cps_smp_ops); | ||
644 | return 0; | ||
645 | } | ||
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c new file mode 100644 index 000000000..5f04a0141 --- /dev/null +++ b/arch/mips/kernel/smp-mt.c | |||
@@ -0,0 +1,240 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * | ||
4 | * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc. | ||
5 | * Elizabeth Clarke (beth@mips.com) | ||
6 | * Ralf Baechle (ralf@linux-mips.org) | ||
7 | * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org) | ||
8 | */ | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/sched.h> | ||
11 | #include <linux/cpumask.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/compiler.h> | ||
14 | #include <linux/sched/task_stack.h> | ||
15 | #include <linux/smp.h> | ||
16 | |||
17 | #include <linux/atomic.h> | ||
18 | #include <asm/cacheflush.h> | ||
19 | #include <asm/cpu.h> | ||
20 | #include <asm/processor.h> | ||
21 | #include <asm/hardirq.h> | ||
22 | #include <asm/mmu_context.h> | ||
23 | #include <asm/time.h> | ||
24 | #include <asm/mipsregs.h> | ||
25 | #include <asm/mipsmtregs.h> | ||
26 | #include <asm/mips_mt.h> | ||
27 | #include <asm/mips-cps.h> | ||
28 | |||
29 | static void __init smvp_copy_vpe_config(void) | ||
30 | { | ||
31 | write_vpe_c0_status( | ||
32 | (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0); | ||
33 | |||
34 | /* set config to be the same as vpe0, particularly kseg0 coherency alg */ | ||
35 | write_vpe_c0_config(read_c0_config()); | ||
36 | |||
37 | /* make sure there are no software interrupts pending */ | ||
38 | write_vpe_c0_cause(0); | ||
39 | |||
40 | /* Propagate Config7 */ | ||
41 | write_vpe_c0_config7(read_c0_config7()); | ||
42 | |||
43 | write_vpe_c0_count(read_c0_count()); | ||
44 | } | ||
45 | |||
46 | static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0, | ||
47 | unsigned int ncpu) | ||
48 | { | ||
49 | if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)) | ||
50 | return ncpu; | ||
51 | |||
52 | /* Deactivate all but VPE 0 */ | ||
53 | if (tc != 0) { | ||
54 | unsigned long tmp = read_vpe_c0_vpeconf0(); | ||
55 | |||
56 | tmp &= ~VPECONF0_VPA; | ||
57 | |||
58 | /* master VPE */ | ||
59 | tmp |= VPECONF0_MVP; | ||
60 | write_vpe_c0_vpeconf0(tmp); | ||
61 | |||
62 | /* Record this as available CPU */ | ||
63 | set_cpu_possible(tc, true); | ||
64 | set_cpu_present(tc, true); | ||
65 | __cpu_number_map[tc] = ++ncpu; | ||
66 | __cpu_logical_map[ncpu] = tc; | ||
67 | } | ||
68 | |||
69 | /* Disable multi-threading with TCs */ | ||
70 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE); | ||
71 | |||
72 | if (tc != 0) | ||
73 | smvp_copy_vpe_config(); | ||
74 | |||
75 | cpu_set_vpe_id(&cpu_data[ncpu], tc); | ||
76 | |||
77 | return ncpu; | ||
78 | } | ||
79 | |||
80 | static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0) | ||
81 | { | ||
82 | unsigned long tmp; | ||
83 | |||
84 | if (!tc) | ||
85 | return; | ||
86 | |||
87 | /* Bind a TC to each VPE; may as well put all excess TCs | ||
88 | on the last VPE */ | ||
89 | if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1)) | ||
90 | write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)); | ||
91 | else { | ||
92 | write_tc_c0_tcbind(read_tc_c0_tcbind() | tc); | ||
93 | |||
94 | /* and set XTC */ | ||
95 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT)); | ||
96 | } | ||
97 | |||
98 | tmp = read_tc_c0_tcstatus(); | ||
99 | |||
100 | /* mark not allocated and not dynamically allocatable */ | ||
101 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
102 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
103 | write_tc_c0_tcstatus(tmp); | ||
104 | |||
105 | write_tc_c0_tchalt(TCHALT_H); | ||
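| /* The TC is left halted here; vsmp_boot_secondary() later clears TCHalt to start it */ | ||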
106 | } | ||
107 | |||
108 | static void vsmp_init_secondary(void) | ||
109 | { | ||
110 | /* This is Malta specific: IPI, performance and timer interrupts */ | ||
111 | if (mips_gic_present()) | ||
112 | change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | | ||
113 | STATUSF_IP4 | STATUSF_IP5 | | ||
114 | STATUSF_IP6 | STATUSF_IP7); | ||
115 | else | ||
116 | change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 | | ||
117 | STATUSF_IP6 | STATUSF_IP7); | ||
118 | } | ||
119 | |||
120 | static void vsmp_smp_finish(void) | ||
121 | { | ||
122 | /* CDFIXME: remove this? */ | ||
123 | write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ)); | ||
124 | |||
125 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
126 | /* If we have an FPU, enroll ourselves in the FPU-full mask */ | ||
127 | if (cpu_has_fpu) | ||
128 | cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask); | ||
129 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
130 | |||
131 | local_irq_enable(); | ||
132 | } | ||
133 | |||
134 | /* | ||
135 | * Setup the PC, SP, and GP of a secondary processor and start it | ||
136 | * running! | ||
137 | * smp_bootstrap is the place to resume from | ||
138 | * __KSTK_TOS(idle) is the stack pointer | ||
139 | * task_thread_info(idle) is the gp | ||
140 | * assumes a 1:1 mapping of TC => VPE | ||
141 | */ | ||
142 | static int vsmp_boot_secondary(int cpu, struct task_struct *idle) | ||
143 | { | ||
144 | struct thread_info *gp = task_thread_info(idle); | ||
145 | dvpe(); | ||
146 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
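| /* VPC puts the MVPE into configuration state, so the settc() below can target another TC's registers */ | ||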
147 | |||
148 | settc(cpu); | ||
149 | |||
150 | /* restart */ | ||
151 | write_tc_c0_tcrestart((unsigned long)&smp_bootstrap); | ||
152 | |||
153 | /* enable the tc this vpe/cpu will be running */ | ||
154 | write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A); | ||
155 | |||
156 | write_tc_c0_tchalt(0); | ||
157 | |||
158 | /* enable the VPE */ | ||
159 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); | ||
160 | |||
161 | /* stack pointer */ | ||
162 | write_tc_gpr_sp(__KSTK_TOS(idle)); | ||
163 | |||
164 | /* global pointer */ | ||
165 | write_tc_gpr_gp((unsigned long)gp); | ||
166 | |||
167 | flush_icache_range((unsigned long)gp, | ||
168 | (unsigned long)(gp + sizeof(struct thread_info))); | ||
169 | |||
170 | /* finally out of configuration and into chaos */ | ||
171 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
172 | |||
173 | evpe(EVPE_ENABLE); | ||
174 | |||
175 | return 0; | ||
176 | } | ||
177 | |||
178 | /* | ||
179 | * Common setup before any secondaries are started | ||
180 | * Make sure all CPUs are in a sensible state before we boot any of the | ||
181 | * secondaries | ||
182 | */ | ||
183 | static void __init vsmp_smp_setup(void) | ||
184 | { | ||
185 | unsigned int mvpconf0, ntc, tc, ncpu = 0; | ||
186 | unsigned int nvpe; | ||
187 | |||
188 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
189 | /* If we have an FPU, enroll ourselves in the FPU-full mask */ | ||
190 | if (cpu_has_fpu) | ||
191 | cpumask_set_cpu(0, &mt_fpu_cpumask); | ||
192 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
193 | if (!cpu_has_mipsmt) | ||
194 | return; | ||
195 | |||
196 | /* disable MT so we can configure */ | ||
197 | dvpe(); | ||
198 | dmt(); | ||
199 | |||
200 | /* Put MVPE's into 'configuration state' */ | ||
201 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
202 | |||
203 | mvpconf0 = read_c0_mvpconf0(); | ||
204 | ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT; | ||
205 | |||
206 | nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
207 | smp_num_siblings = nvpe; | ||
208 | |||
209 | /* we'll always have more TCs than VPEs, so loop setting everything | ||
210 | to a sensible state */ | ||
211 | for (tc = 0; tc <= ntc; tc++) { | ||
212 | settc(tc); | ||
213 | |||
214 | smvp_tc_init(tc, mvpconf0); | ||
215 | ncpu = smvp_vpe_init(tc, mvpconf0, ncpu); | ||
216 | } | ||
217 | |||
218 | /* Release config state */ | ||
219 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
220 | |||
221 | /* We'll wait until starting the secondaries before enabling multi-VPE execution */ | ||
222 | |||
223 | printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu); | ||
224 | } | ||
225 | |||
226 | static void __init vsmp_prepare_cpus(unsigned int max_cpus) | ||
227 | { | ||
228 | mips_mt_set_cpuoptions(); | ||
229 | } | ||
230 | |||
231 | const struct plat_smp_ops vsmp_smp_ops = { | ||
232 | .send_ipi_single = mips_smp_send_ipi_single, | ||
233 | .send_ipi_mask = mips_smp_send_ipi_mask, | ||
234 | .init_secondary = vsmp_init_secondary, | ||
235 | .smp_finish = vsmp_smp_finish, | ||
236 | .boot_secondary = vsmp_boot_secondary, | ||
237 | .smp_setup = vsmp_smp_setup, | ||
238 | .prepare_cpus = vsmp_prepare_cpus, | ||
239 | }; | ||
240 | |||
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c new file mode 100644 index 000000000..525d3196f --- /dev/null +++ b/arch/mips/kernel/smp-up.c | |||
@@ -0,0 +1,79 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org) | ||
7 | * | ||
8 | * Symmetric Uniprocessor (TM) Support | ||
9 | */ | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/sched.h> | ||
12 | |||
13 | /* | ||
14 | * Send inter-processor interrupt | ||
15 | */ | ||
16 | static void up_send_ipi_single(int cpu, unsigned int action) | ||
17 | { | ||
18 | panic("%s called", __func__); | ||
19 | } | ||
20 | |||
21 | static inline void up_send_ipi_mask(const struct cpumask *mask, | ||
22 | unsigned int action) | ||
23 | { | ||
24 | panic("%s called", __func__); | ||
25 | } | ||
26 | |||
27 | /* | ||
28 | * After we've done initial boot, this function is called to allow the | ||
29 | * board code to clean up state, if needed | ||
30 | */ | ||
31 | static void up_init_secondary(void) | ||
32 | { | ||
33 | } | ||
34 | |||
35 | static void up_smp_finish(void) | ||
36 | { | ||
37 | } | ||
38 | |||
39 | /* | ||
40 | * Firmware CPU startup hook | ||
41 | */ | ||
42 | static int up_boot_secondary(int cpu, struct task_struct *idle) | ||
43 | { | ||
44 | return 0; | ||
45 | } | ||
46 | |||
47 | static void __init up_smp_setup(void) | ||
48 | { | ||
49 | } | ||
50 | |||
51 | static void __init up_prepare_cpus(unsigned int max_cpus) | ||
52 | { | ||
53 | } | ||
54 | |||
55 | #ifdef CONFIG_HOTPLUG_CPU | ||
56 | static int up_cpu_disable(void) | ||
57 | { | ||
58 | return -ENOSYS; | ||
59 | } | ||
60 | |||
61 | static void up_cpu_die(unsigned int cpu) | ||
62 | { | ||
63 | BUG(); | ||
64 | } | ||
65 | #endif | ||
66 | |||
67 | const struct plat_smp_ops up_smp_ops = { | ||
68 | .send_ipi_single = up_send_ipi_single, | ||
69 | .send_ipi_mask = up_send_ipi_mask, | ||
70 | .init_secondary = up_init_secondary, | ||
71 | .smp_finish = up_smp_finish, | ||
72 | .boot_secondary = up_boot_secondary, | ||
73 | .smp_setup = up_smp_setup, | ||
74 | .prepare_cpus = up_prepare_cpus, | ||
75 | #ifdef CONFIG_HOTPLUG_CPU | ||
76 | .cpu_disable = up_cpu_disable, | ||
77 | .cpu_die = up_cpu_die, | ||
78 | #endif | ||
79 | }; | ||
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c new file mode 100644 index 000000000..14db66dbc --- /dev/null +++ b/arch/mips/kernel/smp.c | |||
@@ -0,0 +1,721 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * | ||
4 | * Copyright (C) 2000, 2001 Kanoj Sarcar | ||
5 | * Copyright (C) 2000, 2001 Ralf Baechle | ||
6 | * Copyright (C) 2000, 2001 Silicon Graphics, Inc. | ||
7 | * Copyright (C) 2000, 2001, 2003 Broadcom Corporation | ||
8 | */ | ||
9 | #include <linux/cache.h> | ||
10 | #include <linux/delay.h> | ||
11 | #include <linux/init.h> | ||
12 | #include <linux/interrupt.h> | ||
13 | #include <linux/smp.h> | ||
14 | #include <linux/spinlock.h> | ||
15 | #include <linux/threads.h> | ||
16 | #include <linux/export.h> | ||
17 | #include <linux/time.h> | ||
18 | #include <linux/timex.h> | ||
19 | #include <linux/sched/mm.h> | ||
20 | #include <linux/cpumask.h> | ||
21 | #include <linux/cpu.h> | ||
22 | #include <linux/err.h> | ||
23 | #include <linux/ftrace.h> | ||
24 | #include <linux/irqdomain.h> | ||
25 | #include <linux/of.h> | ||
26 | #include <linux/of_irq.h> | ||
27 | |||
28 | #include <linux/atomic.h> | ||
29 | #include <asm/cpu.h> | ||
30 | #include <asm/ginvt.h> | ||
31 | #include <asm/processor.h> | ||
32 | #include <asm/idle.h> | ||
33 | #include <asm/r4k-timer.h> | ||
34 | #include <asm/mips-cps.h> | ||
35 | #include <asm/mmu_context.h> | ||
36 | #include <asm/time.h> | ||
37 | #include <asm/setup.h> | ||
38 | #include <asm/maar.h> | ||
39 | |||
40 | int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP]; /* Map physical to logical */ | ||
41 | EXPORT_SYMBOL(__cpu_number_map); | ||
42 | |||
43 | int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */ | ||
44 | EXPORT_SYMBOL(__cpu_logical_map); | ||
45 | |||
46 | /* Number of TCs (or siblings in Intel speak) per CPU core */ | ||
47 | int smp_num_siblings = 1; | ||
48 | EXPORT_SYMBOL(smp_num_siblings); | ||
49 | |||
50 | /* representing the TCs (or siblings in Intel speak) of each logical CPU */ | ||
51 | cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; | ||
52 | EXPORT_SYMBOL(cpu_sibling_map); | ||
53 | |||
54 | /* representing the core map of multi-core chips of each logical CPU */ | ||
55 | cpumask_t cpu_core_map[NR_CPUS] __read_mostly; | ||
56 | EXPORT_SYMBOL(cpu_core_map); | ||
57 | |||
58 | static DECLARE_COMPLETION(cpu_starting); | ||
59 | static DECLARE_COMPLETION(cpu_running); | ||
60 | |||
61 | /* | ||
62 | * A logical CPU mask containing only one VPE per core to | ||
63 | * reduce the number of IPIs on large MT systems. | ||
64 | */ | ||
65 | cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly; | ||
66 | EXPORT_SYMBOL(cpu_foreign_map); | ||
67 | |||
68 | /* representing cpus for which sibling maps can be computed */ | ||
69 | static cpumask_t cpu_sibling_setup_map; | ||
70 | |||
71 | /* representing cpus for which core maps can be computed */ | ||
72 | static cpumask_t cpu_core_setup_map; | ||
73 | |||
74 | cpumask_t cpu_coherent_mask; | ||
75 | |||
76 | #ifdef CONFIG_GENERIC_IRQ_IPI | ||
77 | static struct irq_desc *call_desc; | ||
78 | static struct irq_desc *sched_desc; | ||
79 | #endif | ||
80 | |||
81 | static inline void set_cpu_sibling_map(int cpu) | ||
82 | { | ||
83 | int i; | ||
84 | |||
85 | cpumask_set_cpu(cpu, &cpu_sibling_setup_map); | ||
86 | |||
87 | if (smp_num_siblings > 1) { | ||
88 | for_each_cpu(i, &cpu_sibling_setup_map) { | ||
89 | if (cpus_are_siblings(cpu, i)) { | ||
90 | cpumask_set_cpu(i, &cpu_sibling_map[cpu]); | ||
91 | cpumask_set_cpu(cpu, &cpu_sibling_map[i]); | ||
92 | } | ||
93 | } | ||
94 | } else | ||
95 | cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]); | ||
96 | } | ||
97 | |||
98 | static inline void set_cpu_core_map(int cpu) | ||
99 | { | ||
100 | int i; | ||
101 | |||
102 | cpumask_set_cpu(cpu, &cpu_core_setup_map); | ||
103 | |||
104 | for_each_cpu(i, &cpu_core_setup_map) { | ||
105 | if (cpu_data[cpu].package == cpu_data[i].package) { | ||
106 | cpumask_set_cpu(i, &cpu_core_map[cpu]); | ||
107 | cpumask_set_cpu(cpu, &cpu_core_map[i]); | ||
108 | } | ||
109 | } | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * Calculate a new cpu_foreign_map mask whenever a | ||
114 | * new cpu appears or disappears. | ||
115 | */ | ||
116 | void calculate_cpu_foreign_map(void) | ||
117 | { | ||
118 | int i, k, core_present; | ||
119 | cpumask_t temp_foreign_map; | ||
120 | |||
121 | /* Re-calculate the mask */ | ||
122 | cpumask_clear(&temp_foreign_map); | ||
123 | for_each_online_cpu(i) { | ||
124 | core_present = 0; | ||
125 | for_each_cpu(k, &temp_foreign_map) | ||
126 | if (cpus_are_siblings(i, k)) | ||
127 | core_present = 1; | ||
128 | if (!core_present) | ||
129 | cpumask_set_cpu(i, &temp_foreign_map); | ||
130 | } | ||
131 | |||
132 | for_each_online_cpu(i) | ||
133 | cpumask_andnot(&cpu_foreign_map[i], | ||
134 | &temp_foreign_map, &cpu_sibling_map[i]); | ||
135 | } | ||
136 | |||
137 | const struct plat_smp_ops *mp_ops; | ||
138 | EXPORT_SYMBOL(mp_ops); | ||
139 | |||
140 | void register_smp_ops(const struct plat_smp_ops *ops) | ||
141 | { | ||
142 | if (mp_ops) | ||
143 | printk(KERN_WARNING "Overriding previously set SMP ops\n"); | ||
144 | |||
145 | mp_ops = ops; | ||
146 | } | ||
147 | |||
148 | #ifdef CONFIG_GENERIC_IRQ_IPI | ||
149 | void mips_smp_send_ipi_single(int cpu, unsigned int action) | ||
150 | { | ||
151 | mips_smp_send_ipi_mask(cpumask_of(cpu), action); | ||
152 | } | ||
153 | |||
154 | void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action) | ||
155 | { | ||
156 | unsigned long flags; | ||
157 | unsigned int core; | ||
158 | int cpu; | ||
159 | |||
160 | local_irq_save(flags); | ||
161 | |||
162 | switch (action) { | ||
163 | case SMP_CALL_FUNCTION: | ||
164 | __ipi_send_mask(call_desc, mask); | ||
165 | break; | ||
166 | |||
167 | case SMP_RESCHEDULE_YOURSELF: | ||
168 | __ipi_send_mask(sched_desc, mask); | ||
169 | break; | ||
170 | |||
171 | default: | ||
172 | BUG(); | ||
173 | } | ||
174 | |||
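| /* | ||
|  * If a target CPU is not currently coherent (e.g. its core is powered | ||
|  * down), keep issuing a CPC power-up command for its core until it | ||
|  * rejoins the coherent domain and can receive the IPI. | ||
|  */ | ||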
175 | if (mips_cpc_present()) { | ||
176 | for_each_cpu(cpu, mask) { | ||
177 | if (cpus_are_siblings(cpu, smp_processor_id())) | ||
178 | continue; | ||
179 | |||
180 | core = cpu_core(&cpu_data[cpu]); | ||
181 | |||
182 | while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) { | ||
183 | mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL); | ||
184 | mips_cpc_lock_other(core); | ||
185 | write_cpc_co_cmd(CPC_Cx_CMD_PWRUP); | ||
186 | mips_cpc_unlock_other(); | ||
187 | mips_cm_unlock_other(); | ||
188 | } | ||
189 | } | ||
190 | } | ||
191 | |||
192 | local_irq_restore(flags); | ||
193 | } | ||
194 | |||
195 | |||
196 | static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id) | ||
197 | { | ||
198 | scheduler_ipi(); | ||
199 | |||
200 | return IRQ_HANDLED; | ||
201 | } | ||
202 | |||
203 | static irqreturn_t ipi_call_interrupt(int irq, void *dev_id) | ||
204 | { | ||
205 | generic_smp_call_function_interrupt(); | ||
206 | |||
207 | return IRQ_HANDLED; | ||
208 | } | ||
209 | |||
210 | static void smp_ipi_init_one(unsigned int virq, const char *name, | ||
211 | irq_handler_t handler) | ||
212 | { | ||
213 | int ret; | ||
214 | |||
215 | irq_set_handler(virq, handle_percpu_irq); | ||
216 | ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL); | ||
217 | BUG_ON(ret); | ||
218 | } | ||
219 | |||
220 | static unsigned int call_virq, sched_virq; | ||
221 | |||
222 | int mips_smp_ipi_allocate(const struct cpumask *mask) | ||
223 | { | ||
224 | int virq; | ||
225 | struct irq_domain *ipidomain; | ||
226 | struct device_node *node; | ||
227 | |||
228 | node = of_irq_find_parent(of_root); | ||
229 | ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI); | ||
230 | |||
231 | /* | ||
232 | * Some platforms have only a partial DT setup. So if we found an | ||
233 | * irq node but didn't find an ipidomain, try to search for one | ||
234 | * that is not in the DT. | ||
235 | */ | ||
236 | if (node && !ipidomain) | ||
237 | ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI); | ||
238 | |||
239 | /* | ||
240 | * There are systems which use IPI IRQ domains, but only have one | ||
241 | * registered when some runtime condition is met. For example a Malta | ||
242 | * kernel may include support for GIC & CPU interrupt controller IPI | ||
243 | * IRQ domains, but if run on a system with no GIC & no MT ASE then | ||
244 | * neither will be supported or registered. | ||
245 | * | ||
246 | * We only have a problem if we're actually using multiple CPUs so fail | ||
247 | * loudly if that is the case. Otherwise simply return, skipping IPI | ||
248 | * setup, if we're running with only a single CPU. | ||
249 | */ | ||
250 | if (!ipidomain) { | ||
251 | BUG_ON(num_present_cpus() > 1); | ||
252 | return 0; | ||
253 | } | ||
254 | |||
255 | virq = irq_reserve_ipi(ipidomain, mask); | ||
256 | BUG_ON(!virq); | ||
257 | if (!call_virq) | ||
258 | call_virq = virq; | ||
259 | |||
260 | virq = irq_reserve_ipi(ipidomain, mask); | ||
261 | BUG_ON(!virq); | ||
262 | if (!sched_virq) | ||
263 | sched_virq = virq; | ||
264 | |||
265 | if (irq_domain_is_ipi_per_cpu(ipidomain)) { | ||
266 | int cpu; | ||
267 | |||
268 | for_each_cpu(cpu, mask) { | ||
269 | smp_ipi_init_one(call_virq + cpu, "IPI call", | ||
270 | ipi_call_interrupt); | ||
271 | smp_ipi_init_one(sched_virq + cpu, "IPI resched", | ||
272 | ipi_resched_interrupt); | ||
273 | } | ||
274 | } else { | ||
275 | smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt); | ||
276 | smp_ipi_init_one(sched_virq, "IPI resched", | ||
277 | ipi_resched_interrupt); | ||
278 | } | ||
279 | |||
280 | return 0; | ||
281 | } | ||
282 | |||
283 | int mips_smp_ipi_free(const struct cpumask *mask) | ||
284 | { | ||
285 | struct irq_domain *ipidomain; | ||
286 | struct device_node *node; | ||
287 | |||
288 | node = of_irq_find_parent(of_root); | ||
289 | ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI); | ||
290 | |||
291 | /* | ||
292 | * Some platforms have half DT setup. So if we found irq node but | ||
293 | * didn't find an ipidomain, try to search for one that is not in the | ||
294 | * DT. | ||
295 | */ | ||
296 | if (node && !ipidomain) | ||
297 | ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI); | ||
298 | |||
299 | BUG_ON(!ipidomain); | ||
300 | |||
301 | if (irq_domain_is_ipi_per_cpu(ipidomain)) { | ||
302 | int cpu; | ||
303 | |||
304 | for_each_cpu(cpu, mask) { | ||
305 | free_irq(call_virq + cpu, NULL); | ||
306 | free_irq(sched_virq + cpu, NULL); | ||
307 | } | ||
308 | } | ||
309 | irq_destroy_ipi(call_virq, mask); | ||
310 | irq_destroy_ipi(sched_virq, mask); | ||
311 | return 0; | ||
312 | } | ||
313 | |||
314 | |||
315 | static int __init mips_smp_ipi_init(void) | ||
316 | { | ||
317 | if (num_possible_cpus() == 1) | ||
318 | return 0; | ||
319 | |||
320 | mips_smp_ipi_allocate(cpu_possible_mask); | ||
321 | |||
322 | call_desc = irq_to_desc(call_virq); | ||
323 | sched_desc = irq_to_desc(sched_virq); | ||
324 | |||
325 | return 0; | ||
326 | } | ||
327 | early_initcall(mips_smp_ipi_init); | ||
328 | #endif | ||
329 | |||
330 | /* | ||
331 | * First C code run on the secondary CPUs after being started up by | ||
332 | * the master. | ||
333 | */ | ||
334 | asmlinkage void start_secondary(void) | ||
335 | { | ||
336 | unsigned int cpu; | ||
337 | |||
338 | cpu_probe(); | ||
339 | per_cpu_trap_init(false); | ||
340 | mips_clockevent_init(); | ||
341 | mp_ops->init_secondary(); | ||
342 | cpu_report(); | ||
343 | maar_init(); | ||
344 | |||
345 | /* | ||
346 | * XXX parity protection should be folded in here when it's converted | ||
347 | * to an option instead of something based on .cputype | ||
348 | */ | ||
349 | |||
350 | calibrate_delay(); | ||
351 | cpu = smp_processor_id(); | ||
352 | cpu_data[cpu].udelay_val = loops_per_jiffy; | ||
353 | |||
354 | set_cpu_sibling_map(cpu); | ||
355 | set_cpu_core_map(cpu); | ||
356 | |||
357 | cpumask_set_cpu(cpu, &cpu_coherent_mask); | ||
358 | notify_cpu_starting(cpu); | ||
359 | |||
360 | /* Notify boot CPU that we're starting & ready to sync counters */ | ||
361 | complete(&cpu_starting); | ||
362 | |||
363 | synchronise_count_slave(cpu); | ||
364 | |||
365 | /* The CPU is running and counters synchronised, now mark it online */ | ||
366 | set_cpu_online(cpu, true); | ||
367 | |||
368 | calculate_cpu_foreign_map(); | ||
369 | |||
370 | /* | ||
371 | * Notify boot CPU that we're up & online and it can safely return | ||
372 | * from __cpu_up | ||
373 | */ | ||
374 | complete(&cpu_running); | ||
375 | |||
376 | /* | ||
377 | * irq will be enabled in ->smp_finish(), enabling it too early | ||
378 | * is dangerous. | ||
379 | */ | ||
380 | WARN_ON_ONCE(!irqs_disabled()); | ||
381 | mp_ops->smp_finish(); | ||
382 | |||
383 | cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); | ||
384 | } | ||
385 | |||
386 | static void stop_this_cpu(void *dummy) | ||
387 | { | ||
388 | /* | ||
389 | * Remove this CPU: | ||
390 | */ | ||
391 | |||
392 | set_cpu_online(smp_processor_id(), false); | ||
393 | calculate_cpu_foreign_map(); | ||
394 | local_irq_disable(); | ||
395 | while (1); | ||
396 | } | ||
397 | |||
398 | void smp_send_stop(void) | ||
399 | { | ||
400 | smp_call_function(stop_this_cpu, NULL, 0); | ||
401 | } | ||
402 | |||
403 | void __init smp_cpus_done(unsigned int max_cpus) | ||
404 | { | ||
405 | } | ||
406 | |||
407 | /* called from main before smp_init() */ | ||
408 | void __init smp_prepare_cpus(unsigned int max_cpus) | ||
409 | { | ||
410 | init_new_context(current, &init_mm); | ||
411 | current_thread_info()->cpu = 0; | ||
412 | mp_ops->prepare_cpus(max_cpus); | ||
413 | set_cpu_sibling_map(0); | ||
414 | set_cpu_core_map(0); | ||
415 | calculate_cpu_foreign_map(); | ||
416 | #ifndef CONFIG_HOTPLUG_CPU | ||
417 | init_cpu_present(cpu_possible_mask); | ||
418 | #endif | ||
419 | cpumask_copy(&cpu_coherent_mask, cpu_possible_mask); | ||
420 | } | ||
421 | |||
422 | /* preload SMP state for boot cpu */ | ||
423 | void smp_prepare_boot_cpu(void) | ||
424 | { | ||
425 | if (mp_ops->prepare_boot_cpu) | ||
426 | mp_ops->prepare_boot_cpu(); | ||
427 | set_cpu_possible(0, true); | ||
428 | set_cpu_online(0, true); | ||
429 | } | ||
430 | |||
431 | int __cpu_up(unsigned int cpu, struct task_struct *tidle) | ||
432 | { | ||
433 | int err; | ||
434 | |||
435 | err = mp_ops->boot_secondary(cpu, tidle); | ||
436 | if (err) | ||
437 | return err; | ||
438 | |||
439 | /* Wait for CPU to start and be ready to sync counters */ | ||
440 | if (!wait_for_completion_timeout(&cpu_starting, | ||
441 | msecs_to_jiffies(1000))) { | ||
442 | pr_crit("CPU%u: failed to start\n", cpu); | ||
443 | return -EIO; | ||
444 | } | ||
445 | |||
446 | synchronise_count_master(cpu); | ||
447 | |||
448 | /* Wait for CPU to finish startup & mark itself online before return */ | ||
449 | wait_for_completion(&cpu_running); | ||
450 | return 0; | ||
451 | } | ||
452 | |||
453 | /* Not really SMP stuff ... */ | ||
454 | int setup_profiling_timer(unsigned int multiplier) | ||
455 | { | ||
456 | return 0; | ||
457 | } | ||
458 | |||
459 | static void flush_tlb_all_ipi(void *info) | ||
460 | { | ||
461 | local_flush_tlb_all(); | ||
462 | } | ||
463 | |||
464 | void flush_tlb_all(void) | ||
465 | { | ||
466 | if (cpu_has_mmid) { | ||
467 | htw_stop(); | ||
468 | ginvt_full(); | ||
469 | sync_ginv(); | ||
470 | instruction_hazard(); | ||
471 | htw_start(); | ||
472 | return; | ||
473 | } | ||
474 | |||
475 | on_each_cpu(flush_tlb_all_ipi, NULL, 1); | ||
476 | } | ||
477 | |||
478 | static void flush_tlb_mm_ipi(void *mm) | ||
479 | { | ||
480 | drop_mmu_context((struct mm_struct *)mm); | ||
481 | } | ||
482 | |||
483 | /* | ||
484 | * Special Variant of smp_call_function for use by TLB functions: | ||
485 | * | ||
486 | * o No return value | ||
487 | * o collapses to normal function call on UP kernels | ||
488 | * o collapses to normal function call on systems with a single shared | ||
489 | * primary cache. | ||
490 | */ | ||
491 | static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) | ||
492 | { | ||
493 | smp_call_function(func, info, 1); | ||
494 | } | ||
495 | |||
496 | static inline void smp_on_each_tlb(void (*func) (void *info), void *info) | ||
497 | { | ||
498 | preempt_disable(); | ||
499 | |||
500 | smp_on_other_tlbs(func, info); | ||
501 | func(info); | ||
502 | |||
503 | preempt_enable(); | ||
504 | } | ||
505 | |||
506 | /* | ||
507 | * The following tlb flush calls are invoked when old translations are | ||
508 | * being torn down, or pte attributes are changing. For single threaded | ||
509 | * address spaces, a new context is obtained on the current cpu, and tlb | ||
510 | * context on other cpus are invalidated to force a new context allocation | ||
511 | * at switch_mm time, should the mm ever be used on other cpus. For | ||
512 | * multithreaded address spaces, intercpu interrupts have to be sent. | ||
513 | * Another case where intercpu interrupts are required is when the target | ||
514 | * mm might be active on another cpu (e.g. debuggers doing the flushes on | ||
515 | * behalf of debuggees, kswapd stealing pages from another process etc). | ||
516 | * Kanoj 07/00. | ||
517 | */ | ||
518 | |||
519 | void flush_tlb_mm(struct mm_struct *mm) | ||
520 | { | ||
521 | preempt_disable(); | ||
522 | |||
523 | if (cpu_has_mmid) { | ||
524 | /* | ||
525 | * No need to worry about other CPUs - the ginvt in | ||
526 | * drop_mmu_context() will be globalized. | ||
527 | */ | ||
528 | } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { | ||
529 | smp_on_other_tlbs(flush_tlb_mm_ipi, mm); | ||
530 | } else { | ||
531 | unsigned int cpu; | ||
532 | |||
533 | for_each_online_cpu(cpu) { | ||
534 | if (cpu != smp_processor_id() && cpu_context(cpu, mm)) | ||
535 | set_cpu_context(cpu, mm, 0); | ||
536 | } | ||
537 | } | ||
538 | drop_mmu_context(mm); | ||
539 | |||
540 | preempt_enable(); | ||
541 | } | ||
542 | |||
543 | struct flush_tlb_data { | ||
544 | struct vm_area_struct *vma; | ||
545 | unsigned long addr1; | ||
546 | unsigned long addr2; | ||
547 | }; | ||
548 | |||
549 | static void flush_tlb_range_ipi(void *info) | ||
550 | { | ||
551 | struct flush_tlb_data *fd = info; | ||
552 | |||
553 | local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2); | ||
554 | } | ||
555 | |||
556 | void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) | ||
557 | { | ||
558 | struct mm_struct *mm = vma->vm_mm; | ||
559 | unsigned long addr; | ||
560 | u32 old_mmid; | ||
561 | |||
562 | preempt_disable(); | ||
563 | if (cpu_has_mmid) { | ||
564 | htw_stop(); | ||
565 | old_mmid = read_c0_memorymapid(); | ||
566 | write_c0_memorymapid(cpu_asid(0, mm)); | ||
567 | mtc0_tlbw_hazard(); | ||
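| /* | ||
|  * MIPS TLB entries map even/odd virtual page pairs (EntryLo0/EntryLo1), | ||
|  * so align to and step by a double page when issuing the invalidates. | ||
|  */ | ||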
568 | addr = round_down(start, PAGE_SIZE * 2); | ||
569 | end = round_up(end, PAGE_SIZE * 2); | ||
570 | do { | ||
571 | ginvt_va_mmid(addr); | ||
572 | sync_ginv(); | ||
573 | addr += PAGE_SIZE * 2; | ||
574 | } while (addr < end); | ||
575 | write_c0_memorymapid(old_mmid); | ||
576 | instruction_hazard(); | ||
577 | htw_start(); | ||
578 | } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) { | ||
579 | struct flush_tlb_data fd = { | ||
580 | .vma = vma, | ||
581 | .addr1 = start, | ||
582 | .addr2 = end, | ||
583 | }; | ||
584 | |||
585 | smp_on_other_tlbs(flush_tlb_range_ipi, &fd); | ||
586 | local_flush_tlb_range(vma, start, end); | ||
587 | } else { | ||
588 | unsigned int cpu; | ||
589 | int exec = vma->vm_flags & VM_EXEC; | ||
590 | |||
591 | for_each_online_cpu(cpu) { | ||
592 | /* | ||
593 | * flush_cache_range() will only fully flush the icache if | ||
594 | * the VMA is executable; otherwise we must invalidate the | ||
595 | * ASID so that, to has_valid_asid(), it appears as if the | ||
596 | * mm has been completely unused by that CPU. | ||
597 | */ | ||
598 | if (cpu != smp_processor_id() && cpu_context(cpu, mm)) | ||
599 | set_cpu_context(cpu, mm, !exec); | ||
600 | } | ||
601 | local_flush_tlb_range(vma, start, end); | ||
602 | } | ||
603 | preempt_enable(); | ||
604 | } | ||
605 | |||
606 | static void flush_tlb_kernel_range_ipi(void *info) | ||
607 | { | ||
608 | struct flush_tlb_data *fd = info; | ||
609 | |||
610 | local_flush_tlb_kernel_range(fd->addr1, fd->addr2); | ||
611 | } | ||
612 | |||
613 | void flush_tlb_kernel_range(unsigned long start, unsigned long end) | ||
614 | { | ||
615 | struct flush_tlb_data fd = { | ||
616 | .addr1 = start, | ||
617 | .addr2 = end, | ||
618 | }; | ||
619 | |||
620 | on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1); | ||
621 | } | ||
622 | |||
623 | static void flush_tlb_page_ipi(void *info) | ||
624 | { | ||
625 | struct flush_tlb_data *fd = info; | ||
626 | |||
627 | local_flush_tlb_page(fd->vma, fd->addr1); | ||
628 | } | ||
629 | |||
630 | void flush_tlb_page(struct vm_area_struct *vma, unsigned long page) | ||
631 | { | ||
632 | u32 old_mmid; | ||
633 | |||
634 | preempt_disable(); | ||
635 | if (cpu_has_mmid) { | ||
636 | htw_stop(); | ||
637 | old_mmid = read_c0_memorymapid(); | ||
638 | write_c0_memorymapid(cpu_asid(0, vma->vm_mm)); | ||
639 | mtc0_tlbw_hazard(); | ||
640 | ginvt_va_mmid(page); | ||
641 | sync_ginv(); | ||
642 | write_c0_memorymapid(old_mmid); | ||
643 | instruction_hazard(); | ||
644 | htw_start(); | ||
645 | } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) || | ||
646 | (current->mm != vma->vm_mm)) { | ||
647 | struct flush_tlb_data fd = { | ||
648 | .vma = vma, | ||
649 | .addr1 = page, | ||
650 | }; | ||
651 | |||
652 | smp_on_other_tlbs(flush_tlb_page_ipi, &fd); | ||
653 | local_flush_tlb_page(vma, page); | ||
654 | } else { | ||
655 | unsigned int cpu; | ||
656 | |||
657 | for_each_online_cpu(cpu) { | ||
658 | /* | ||
659 | * flush_cache_page() only does partial flushes, so | ||
660 | * invalidate the ASID so that, to has_valid_asid(), | ||
661 | * it appears as if the mm has been completely unused | ||
662 | * by that CPU. | ||
663 | */ | ||
664 | if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm)) | ||
665 | set_cpu_context(cpu, vma->vm_mm, 1); | ||
666 | } | ||
667 | local_flush_tlb_page(vma, page); | ||
668 | } | ||
669 | preempt_enable(); | ||
670 | } | ||
671 | |||
672 | static void flush_tlb_one_ipi(void *info) | ||
673 | { | ||
674 | unsigned long vaddr = (unsigned long) info; | ||
675 | |||
676 | local_flush_tlb_one(vaddr); | ||
677 | } | ||
678 | |||
679 | void flush_tlb_one(unsigned long vaddr) | ||
680 | { | ||
681 | smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr); | ||
682 | } | ||
683 | |||
684 | EXPORT_SYMBOL(flush_tlb_page); | ||
685 | EXPORT_SYMBOL(flush_tlb_one); | ||
686 | |||
687 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST | ||
688 | |||
689 | static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd); | ||
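| /* | ||
|  * One pre-initialised call_single_data_t per CPU (set up in | ||
|  * tick_broadcast_init() below) lets tick_broadcast() fire the callback | ||
|  * on remote CPUs without any allocation in the timer path. | ||
|  */ | ||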
690 | |||
691 | void tick_broadcast(const struct cpumask *mask) | ||
692 | { | ||
693 | call_single_data_t *csd; | ||
694 | int cpu; | ||
695 | |||
696 | for_each_cpu(cpu, mask) { | ||
697 | csd = &per_cpu(tick_broadcast_csd, cpu); | ||
698 | smp_call_function_single_async(cpu, csd); | ||
699 | } | ||
700 | } | ||
701 | |||
702 | static void tick_broadcast_callee(void *info) | ||
703 | { | ||
704 | tick_receive_broadcast(); | ||
705 | } | ||
706 | |||
707 | static int __init tick_broadcast_init(void) | ||
708 | { | ||
709 | call_single_data_t *csd; | ||
710 | int cpu; | ||
711 | |||
712 | for (cpu = 0; cpu < NR_CPUS; cpu++) { | ||
713 | csd = &per_cpu(tick_broadcast_csd, cpu); | ||
714 | csd->func = tick_broadcast_callee; | ||
715 | } | ||
716 | |||
717 | return 0; | ||
718 | } | ||
719 | early_initcall(tick_broadcast_init); | ||
720 | |||
721 | #endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */ | ||
diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c new file mode 100644 index 000000000..ab4e3e1b1 --- /dev/null +++ b/arch/mips/kernel/spinlock_test.c | |||
@@ -0,0 +1,127 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include <linux/init.h> | ||
3 | #include <linux/kthread.h> | ||
4 | #include <linux/hrtimer.h> | ||
5 | #include <linux/fs.h> | ||
6 | #include <linux/debugfs.h> | ||
7 | #include <linux/export.h> | ||
8 | #include <linux/spinlock.h> | ||
9 | #include <asm/debug.h> | ||
10 | |||
11 | static int ss_get(void *data, u64 *val) | ||
12 | { | ||
13 | ktime_t start, finish; | ||
14 | int loops; | ||
15 | int cont; | ||
16 | DEFINE_RAW_SPINLOCK(ss_spin); | ||
17 | |||
18 | loops = 1000000; | ||
19 | cont = 1; | ||
20 | |||
21 | start = ktime_get(); | ||
22 | |||
23 | while (cont) { | ||
24 | raw_spin_lock(&ss_spin); | ||
25 | loops--; | ||
26 | if (loops == 0) | ||
27 | cont = 0; | ||
28 | raw_spin_unlock(&ss_spin); | ||
29 | } | ||
30 | |||
31 | finish = ktime_get(); | ||
32 | |||
33 | *val = ktime_us_delta(finish, start); | ||
34 | |||
35 | return 0; | ||
36 | } | ||
37 | |||
38 | DEFINE_SIMPLE_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n"); | ||
39 | |||
40 | |||
41 | |||
42 | struct spin_multi_state { | ||
43 | raw_spinlock_t lock; | ||
44 | atomic_t start_wait; | ||
45 | atomic_t enter_wait; | ||
46 | atomic_t exit_wait; | ||
47 | int loops; | ||
48 | }; | ||
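| /* | ||
|  * The three atomics act as simple two-party spin barriers: each thread | ||
|  * decrements and then spins until the counter hits zero, so the timed | ||
|  * locking loops start and finish together. | ||
|  */ | ||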
49 | |||
50 | struct spin_multi_per_thread { | ||
51 | struct spin_multi_state *state; | ||
52 | ktime_t start; | ||
53 | }; | ||
54 | |||
55 | static int multi_other(void *data) | ||
56 | { | ||
57 | int loops; | ||
58 | int cont; | ||
59 | struct spin_multi_per_thread *pt = data; | ||
60 | struct spin_multi_state *s = pt->state; | ||
61 | |||
62 | loops = s->loops; | ||
63 | cont = 1; | ||
64 | |||
65 | atomic_dec(&s->enter_wait); | ||
66 | |||
67 | while (atomic_read(&s->enter_wait)) | ||
68 | ; /* spin */ | ||
69 | |||
70 | pt->start = ktime_get(); | ||
71 | |||
72 | atomic_dec(&s->start_wait); | ||
73 | |||
74 | while (atomic_read(&s->start_wait)) | ||
75 | ; /* spin */ | ||
76 | |||
77 | while (cont) { | ||
78 | raw_spin_lock(&s->lock); | ||
79 | loops--; | ||
80 | if (loops == 0) | ||
81 | cont = 0; | ||
82 | raw_spin_unlock(&s->lock); | ||
83 | } | ||
84 | |||
85 | atomic_dec(&s->exit_wait); | ||
86 | while (atomic_read(&s->exit_wait)) | ||
87 | ; /* spin */ | ||
88 | return 0; | ||
89 | } | ||
90 | |||
91 | static int multi_get(void *data, u64 *val) | ||
92 | { | ||
93 | ktime_t finish; | ||
94 | struct spin_multi_state ms; | ||
95 | struct spin_multi_per_thread t1, t2; | ||
96 | |||
97 | ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get"); | ||
98 | ms.loops = 1000000; | ||
99 | |||
100 | atomic_set(&ms.start_wait, 2); | ||
101 | atomic_set(&ms.enter_wait, 2); | ||
102 | atomic_set(&ms.exit_wait, 2); | ||
103 | t1.state = &ms; | ||
104 | t2.state = &ms; | ||
105 | |||
106 | kthread_run(multi_other, &t2, "multi_get"); | ||
107 | |||
108 | multi_other(&t1); | ||
109 | |||
110 | finish = ktime_get(); | ||
111 | |||
112 | *val = ktime_us_delta(finish, t1.start); | ||
113 | |||
114 | return 0; | ||
115 | } | ||
116 | |||
117 | DEFINE_SIMPLE_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n"); | ||
118 | |||
119 | static int __init spinlock_test(void) | ||
120 | { | ||
121 | debugfs_create_file("spin_single", S_IRUGO, mips_debugfs_dir, NULL, | ||
122 | &fops_ss); | ||
123 | debugfs_create_file("spin_multi", S_IRUGO, mips_debugfs_dir, NULL, | ||
124 | &fops_multi); | ||
125 | return 0; | ||
126 | } | ||
127 | device_initcall(spinlock_test); | ||
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c new file mode 100644 index 000000000..d5d96214c --- /dev/null +++ b/arch/mips/kernel/spram.c | |||
@@ -0,0 +1,220 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * MIPS SPRAM support | ||
4 | * | ||
5 | * Copyright (C) 2007, 2008 MIPS Technologies, Inc. | ||
6 | */ | ||
7 | #include <linux/kernel.h> | ||
8 | #include <linux/ptrace.h> | ||
9 | #include <linux/stddef.h> | ||
10 | |||
11 | #include <asm/fpu.h> | ||
12 | #include <asm/mipsregs.h> | ||
13 | #include <asm/r4kcache.h> | ||
14 | #include <asm/hazards.h> | ||
15 | |||
16 | /* | ||
17 | * These definitions are correct for the 24K/34K/74K SPRAM sample | ||
18 | * implementation. The 4KS interpreted the tags differently... | ||
19 | */ | ||
20 | #define SPRAM_TAG0_ENABLE 0x00000080 | ||
21 | #define SPRAM_TAG0_PA_MASK 0xfffff000 | ||
22 | #define SPRAM_TAG1_SIZE_MASK 0xfffff000 | ||
23 | |||
24 | #define SPRAM_TAG_STRIDE 8 | ||
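| /* | ||
|  * Each SPRAM region is described by a pair of tags SPRAM_TAG_STRIDE | ||
|  * apart: tag0 carries the physical base and enable bit, tag1 the size. | ||
|  */ | ||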
25 | |||
26 | #define ERRCTL_SPRAM (1 << 28) | ||
27 | |||
28 | /* errctl access */ | ||
29 | #define read_c0_errctl(x) read_c0_ecc(x) | ||
30 | #define write_c0_errctl(x) write_c0_ecc(x) | ||
31 | |||
32 | /* | ||
33 | * Different semantics to the set_c0_* function built by __BUILD_SET_C0 | ||
34 | */ | ||
35 | static unsigned int bis_c0_errctl(unsigned int set) | ||
36 | { | ||
37 | unsigned int res; | ||
38 | res = read_c0_errctl(); | ||
39 | write_c0_errctl(res | set); | ||
40 | return res; | ||
41 | } | ||
42 | |||
43 | static void ispram_store_tag(unsigned int offset, unsigned int data) | ||
44 | { | ||
45 | unsigned int errctl; | ||
46 | |||
47 | /* enable SPRAM tag access */ | ||
48 | errctl = bis_c0_errctl(ERRCTL_SPRAM); | ||
49 | ehb(); | ||
50 | |||
51 | write_c0_taglo(data); | ||
52 | ehb(); | ||
53 | |||
54 | cache_op(Index_Store_Tag_I, CKSEG0|offset); | ||
55 | ehb(); | ||
56 | |||
57 | write_c0_errctl(errctl); | ||
58 | ehb(); | ||
59 | } | ||
60 | |||
61 | |||
62 | static unsigned int ispram_load_tag(unsigned int offset) | ||
63 | { | ||
64 | unsigned int data; | ||
65 | unsigned int errctl; | ||
66 | |||
67 | /* enable SPRAM tag access */ | ||
68 | errctl = bis_c0_errctl(ERRCTL_SPRAM); | ||
69 | ehb(); | ||
70 | cache_op(Index_Load_Tag_I, CKSEG0 | offset); | ||
71 | ehb(); | ||
72 | data = read_c0_taglo(); | ||
73 | ehb(); | ||
74 | write_c0_errctl(errctl); | ||
75 | ehb(); | ||
76 | |||
77 | return data; | ||
78 | } | ||
79 | |||
80 | static void dspram_store_tag(unsigned int offset, unsigned int data) | ||
81 | { | ||
82 | unsigned int errctl; | ||
83 | |||
84 | /* enable SPRAM tag access */ | ||
85 | errctl = bis_c0_errctl(ERRCTL_SPRAM); | ||
86 | ehb(); | ||
87 | write_c0_dtaglo(data); | ||
88 | ehb(); | ||
89 | cache_op(Index_Store_Tag_D, CKSEG0 | offset); | ||
90 | ehb(); | ||
91 | write_c0_errctl(errctl); | ||
92 | ehb(); | ||
93 | } | ||
94 | |||
95 | |||
96 | static unsigned int dspram_load_tag(unsigned int offset) | ||
97 | { | ||
98 | unsigned int data; | ||
99 | unsigned int errctl; | ||
100 | |||
101 | errctl = bis_c0_errctl(ERRCTL_SPRAM); | ||
102 | ehb(); | ||
103 | cache_op(Index_Load_Tag_D, CKSEG0 | offset); | ||
104 | ehb(); | ||
105 | data = read_c0_dtaglo(); | ||
106 | ehb(); | ||
107 | write_c0_errctl(errctl); | ||
108 | ehb(); | ||
109 | |||
110 | return data; | ||
111 | } | ||
112 | |||
113 | static void probe_spram(char *type, | ||
114 | unsigned int base, | ||
115 | unsigned int (*read)(unsigned int), | ||
116 | void (*write)(unsigned int, unsigned int)) | ||
117 | { | ||
118 | unsigned int firstsize = 0, lastsize = 0; | ||
119 | unsigned int firstpa = 0, lastpa = 0, pa = 0; | ||
120 | unsigned int offset = 0; | ||
121 | unsigned int size, tag0, tag1; | ||
122 | unsigned int enabled; | ||
123 | int i; | ||
124 | |||
125 | /* | ||
126 | * The limit is arbitrary but avoids the loop running away if | ||
127 | * the SPRAM tags are implemented differently | ||
128 | */ | ||
129 | |||
130 | for (i = 0; i < 8; i++) { | ||
131 | tag0 = read(offset); | ||
132 | tag1 = read(offset+SPRAM_TAG_STRIDE); | ||
133 | pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n", | ||
134 | type, i, tag0, tag1); | ||
135 | |||
136 | size = tag1 & SPRAM_TAG1_SIZE_MASK; | ||
137 | |||
138 | if (size == 0) | ||
139 | break; | ||
140 | |||
141 | if (i != 0) { | ||
142 | /* tags may repeat... */ | ||
143 | if ((pa == firstpa && size == firstsize) || | ||
144 | (pa == lastpa && size == lastsize)) | ||
145 | break; | ||
146 | } | ||
147 | |||
148 | /* Align base with size */ | ||
149 | base = (base + size - 1) & ~(size-1); | ||
150 | |||
151 | /* reprogram the base address and enable */ | ||
152 | tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE; | ||
153 | write(offset, tag0); | ||
154 | |||
155 | base += size; | ||
156 | |||
157 | /* reread the tag */ | ||
158 | tag0 = read(offset); | ||
159 | pa = tag0 & SPRAM_TAG0_PA_MASK; | ||
160 | enabled = tag0 & SPRAM_TAG0_ENABLE; | ||
161 | |||
162 | if (i == 0) { | ||
163 | firstpa = pa; | ||
164 | firstsize = size; | ||
165 | } | ||
166 | |||
167 | lastpa = pa; | ||
168 | lastsize = size; | ||
169 | |||
170 | if (strcmp(type, "DSPRAM") == 0) { | ||
171 | unsigned int *vp = (unsigned int *)(CKSEG1 | pa); | ||
172 | unsigned int v; | ||
173 | #define TDAT 0x5a5aa5a5 | ||
174 | vp[0] = TDAT; | ||
175 | vp[1] = ~TDAT; | ||
176 | |||
177 | mb(); | ||
178 | |||
179 | v = vp[0]; | ||
180 | if (v != TDAT) | ||
181 | printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n", | ||
182 | vp, TDAT, v); | ||
183 | v = vp[1]; | ||
184 | if (v != ~TDAT) | ||
185 | printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n", | ||
186 | vp+1, ~TDAT, v); | ||
187 | } | ||
188 | |||
189 | pr_info("%s%d: PA=%08x,Size=%08x%s\n", | ||
190 | type, i, pa, size, enabled ? ",enabled" : ""); | ||
191 | offset += 2 * SPRAM_TAG_STRIDE; | ||
192 | } | ||
193 | } | ||
194 | void spram_config(void) | ||
195 | { | ||
196 | unsigned int config0; | ||
197 | |||
198 | switch (current_cpu_type()) { | ||
199 | case CPU_24K: | ||
200 | case CPU_34K: | ||
201 | case CPU_74K: | ||
202 | case CPU_1004K: | ||
203 | case CPU_1074K: | ||
204 | case CPU_INTERAPTIV: | ||
205 | case CPU_PROAPTIV: | ||
206 | case CPU_P5600: | ||
207 | case CPU_QEMU_GENERIC: | ||
208 | case CPU_I6400: | ||
209 | case CPU_P6600: | ||
210 | config0 = read_c0_config(); | ||
211 | /* FIXME: addresses are Malta specific */ | ||
212 | if (config0 & MIPS_CONF_ISP) { | ||
213 | probe_spram("ISPRAM", 0x1c000000, | ||
214 | &ispram_load_tag, &ispram_store_tag); | ||
215 | } | ||
216 | if (config0 & MIPS_CONF_DSP) | ||
217 | probe_spram("DSPRAM", 0x1c100000, | ||
218 | &dspram_load_tag, &dspram_store_tag); | ||
219 | } | ||
220 | } | ||
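A minimal user-space sketch of how a tag pair read back by probe_spram() decodes under the 24K/34K/74K layout defined above. The tag values are hypothetical, chosen to resemble the Malta ISPRAM base address; this is illustration only, not kernel code.

	#include <stdio.h>

	#define SPRAM_TAG0_ENABLE	0x00000080
	#define SPRAM_TAG0_PA_MASK	0xfffff000
	#define SPRAM_TAG1_SIZE_MASK	0xfffff000

	int main(void)
	{
		unsigned int tag0 = 0x1c000080;	/* hypothetical: PA 0x1c000000, enabled */
		unsigned int tag1 = 0x00008000;	/* hypothetical: 32 KiB */

		printf("PA=%08x Size=%08x%s\n",
		       tag0 & SPRAM_TAG0_PA_MASK,
		       tag1 & SPRAM_TAG1_SIZE_MASK,
		       (tag0 & SPRAM_TAG0_ENABLE) ? ",enabled" : "");
		return 0;
	}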
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c new file mode 100644 index 000000000..f2e720940 --- /dev/null +++ b/arch/mips/kernel/stacktrace.c | |||
@@ -0,0 +1,93 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-only | ||
2 | /* | ||
3 | * Stack trace management functions | ||
4 | * | ||
5 | * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp> | ||
6 | */ | ||
7 | #include <linux/sched.h> | ||
8 | #include <linux/sched/debug.h> | ||
9 | #include <linux/sched/task_stack.h> | ||
10 | #include <linux/stacktrace.h> | ||
11 | #include <linux/export.h> | ||
12 | #include <asm/stacktrace.h> | ||
13 | |||
14 | /* | ||
15 | * Save stack-backtrace addresses into a stack_trace buffer: | ||
16 | */ | ||
17 | static void save_raw_context_stack(struct stack_trace *trace, | ||
18 | unsigned long reg29, int savesched) | ||
19 | { | ||
20 | unsigned long *sp = (unsigned long *)reg29; | ||
21 | unsigned long addr; | ||
22 | |||
23 | while (!kstack_end(sp)) { | ||
24 | addr = *sp++; | ||
25 | if (__kernel_text_address(addr) && | ||
26 | (savesched || !in_sched_functions(addr))) { | ||
27 | if (trace->skip > 0) | ||
28 | trace->skip--; | ||
29 | else | ||
30 | trace->entries[trace->nr_entries++] = addr; | ||
31 | if (trace->nr_entries >= trace->max_entries) | ||
32 | break; | ||
33 | } | ||
34 | } | ||
35 | } | ||
36 | |||
37 | static void save_context_stack(struct stack_trace *trace, | ||
38 | struct task_struct *tsk, struct pt_regs *regs, int savesched) | ||
39 | { | ||
40 | unsigned long sp = regs->regs[29]; | ||
41 | #ifdef CONFIG_KALLSYMS | ||
42 | unsigned long ra = regs->regs[31]; | ||
43 | unsigned long pc = regs->cp0_epc; | ||
44 | |||
45 | if (raw_show_trace || !__kernel_text_address(pc)) { | ||
46 | unsigned long stack_page = | ||
47 | (unsigned long)task_stack_page(tsk); | ||
48 | if (stack_page && sp >= stack_page && | ||
49 | sp <= stack_page + THREAD_SIZE - 32) | ||
50 | save_raw_context_stack(trace, sp, savesched); | ||
51 | return; | ||
52 | } | ||
53 | do { | ||
54 | if (savesched || !in_sched_functions(pc)) { | ||
55 | if (trace->skip > 0) | ||
56 | trace->skip--; | ||
57 | else | ||
58 | trace->entries[trace->nr_entries++] = pc; | ||
59 | if (trace->nr_entries >= trace->max_entries) | ||
60 | break; | ||
61 | } | ||
62 | pc = unwind_stack(tsk, &sp, pc, &ra); | ||
63 | } while (pc); | ||
64 | #else | ||
65 | save_raw_context_stack(trace, sp, savesched); | ||
66 | #endif | ||
67 | } | ||
68 | |||
69 | /* | ||
70 | * Save stack-backtrace addresses into a stack_trace buffer. | ||
71 | */ | ||
72 | void save_stack_trace(struct stack_trace *trace) | ||
73 | { | ||
74 | save_stack_trace_tsk(current, trace); | ||
75 | } | ||
76 | EXPORT_SYMBOL_GPL(save_stack_trace); | ||
77 | |||
78 | void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) | ||
79 | { | ||
80 | struct pt_regs dummyregs; | ||
81 | struct pt_regs *regs = &dummyregs; | ||
82 | |||
83 | WARN_ON(trace->nr_entries || !trace->max_entries); | ||
84 | |||
85 | if (tsk != current) { | ||
86 | regs->regs[29] = tsk->thread.reg29; | ||
87 | regs->regs[31] = 0; | ||
88 | regs->cp0_epc = tsk->thread.reg31; | ||
89 | } else | ||
90 | prepare_frametrace(regs); | ||
91 | save_context_stack(trace, tsk, regs, tsk == current); | ||
92 | } | ||
93 | EXPORT_SYMBOL_GPL(save_stack_trace_tsk); | ||
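A standalone sketch of the raw-scan heuristic applied by save_raw_context_stack(): walk the stack word by word and keep every value that could be a kernel text address. Here is_text() and its bounds stand in for __kernel_text_address(), and the end pointer stands in for kstack_end(); all of those are assumptions for illustration.

	#include <stddef.h>

	static int is_text(unsigned long addr)
	{
		/* assumed text-segment bounds, purely illustrative */
		return addr >= 0x80100000UL && addr < 0x80800000UL;
	}

	static size_t raw_scan(const unsigned long *sp, const unsigned long *end,
			       unsigned long *out, size_t max)
	{
		size_t n = 0;

		while (sp < end && n < max) {	/* kstack_end() analogue */
			unsigned long addr = *sp++;
			if (is_text(addr))	/* __kernel_text_address() analogue */
				out[n++] = addr;
		}
		return n;
	}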
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c new file mode 100644 index 000000000..abdd7aaa3 --- /dev/null +++ b/arch/mips/kernel/sync-r4k.c | |||
@@ -0,0 +1,122 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * Count register synchronisation. | ||
4 | * | ||
5 | * All CPUs will have their count registers synchronised to the CPU0 next time | ||
6 | value. This can cause a small timewarp for CPU0. All other CPUs should | ||
7 | * not have done anything significant (but they may have had interrupts | ||
8 | * enabled briefly - prom_smp_finish() should not be responsible for enabling | ||
9 | * interrupts...) | ||
10 | */ | ||
11 | |||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/irqflags.h> | ||
14 | #include <linux/cpumask.h> | ||
15 | |||
16 | #include <asm/r4k-timer.h> | ||
17 | #include <linux/atomic.h> | ||
18 | #include <asm/barrier.h> | ||
19 | #include <asm/mipsregs.h> | ||
20 | |||
21 | static unsigned int initcount = 0; | ||
22 | static atomic_t count_count_start = ATOMIC_INIT(0); | ||
23 | static atomic_t count_count_stop = ATOMIC_INIT(0); | ||
24 | |||
25 | #define COUNTON 100 | ||
26 | #define NR_LOOPS 3 | ||
27 | |||
28 | void synchronise_count_master(int cpu) | ||
29 | { | ||
30 | int i; | ||
31 | unsigned long flags; | ||
32 | |||
33 | pr_info("Synchronize counters for CPU %u: ", cpu); | ||
34 | |||
35 | local_irq_save(flags); | ||
36 | |||
37 | /* | ||
38 | * We loop a few times to get a primed instruction cache, | ||
39 | * then the last pass is more or less synchronised and | ||
40 | * the master and slaves each set their cycle counters to a known | ||
41 | * value all at once. This reduces the chance of having random offsets | ||
42 | * between the processors, and guarantees that the maximum | ||
43 | * delay between the cycle counters is never bigger than | ||
44 | * the latency of information-passing (cachelines) between | ||
45 | * two CPUs. | ||
46 | */ | ||
47 | |||
48 | for (i = 0; i < NR_LOOPS; i++) { | ||
49 | /* slaves loop on '!= 2' */ | ||
50 | while (atomic_read(&count_count_start) != 1) | ||
51 | mb(); | ||
52 | atomic_set(&count_count_stop, 0); | ||
53 | smp_wmb(); | ||
54 | |||
55 | /* Let the slave write its count register */ | ||
56 | atomic_inc(&count_count_start); | ||
57 | |||
58 | /* Count will be initialised to current timer */ | ||
59 | if (i == 1) | ||
60 | initcount = read_c0_count(); | ||
61 | |||
62 | /* | ||
63 | * Everyone initialises count in the last loop: | ||
64 | */ | ||
65 | if (i == NR_LOOPS-1) | ||
66 | write_c0_count(initcount); | ||
67 | |||
68 | /* | ||
69 | * Wait for slave to leave the synchronization point: | ||
70 | */ | ||
71 | while (atomic_read(&count_count_stop) != 1) | ||
72 | mb(); | ||
73 | atomic_set(&count_count_start, 0); | ||
74 | smp_wmb(); | ||
75 | atomic_inc(&count_count_stop); | ||
76 | } | ||
77 | /* Arrange for an interrupt in a short while */ | ||
78 | write_c0_compare(read_c0_count() + COUNTON); | ||
79 | |||
80 | local_irq_restore(flags); | ||
81 | |||
82 | /* | ||
83 | * i386 code reported the skew here, but the | ||
84 | * count registers were almost certainly out of sync | ||
85 | * so no point in alarming people | ||
86 | */ | ||
87 | pr_cont("done.\n"); | ||
88 | } | ||
89 | |||
90 | void synchronise_count_slave(int cpu) | ||
91 | { | ||
92 | int i; | ||
93 | unsigned long flags; | ||
94 | |||
95 | local_irq_save(flags); | ||
96 | |||
97 | /* | ||
98 | * Not every CPU is online at the time this gets called, | ||
99 | * so we first wait for the master to say everyone is ready. | ||
100 | */ | ||
101 | |||
102 | for (i = 0; i < NR_LOOPS; i++) { | ||
103 | atomic_inc(&count_count_start); | ||
104 | while (atomic_read(&count_count_start) != 2) | ||
105 | mb(); | ||
106 | |||
107 | /* | ||
108 | * Everyone initialises count in the last loop: | ||
109 | */ | ||
110 | if (i == NR_LOOPS-1) | ||
111 | write_c0_count(initcount); | ||
112 | |||
113 | atomic_inc(&count_count_stop); | ||
114 | while (atomic_read(&count_count_stop) != 2) | ||
115 | mb(); | ||
116 | } | ||
117 | /* Arrange for an interrupt in a short while */ | ||
118 | write_c0_compare(read_c0_count() + COUNTON); | ||
119 | |||
120 | local_irq_restore(flags); | ||
121 | } | ||
122 | #undef NR_LOOPS | ||
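A user-space analogue of the count_count_start/count_count_stop rendezvous above, written with C11 atomics for one master and one slave thread. It demonstrates only the handshake, not the CP0 Count accesses; run each function once per loop pass from its own thread.

	#include <stdatomic.h>

	static atomic_int start_cnt = ATOMIC_VAR_INIT(0);
	static atomic_int stop_cnt = ATOMIC_VAR_INIT(0);

	static void master_pass(void)
	{
		while (atomic_load(&start_cnt) != 1)	/* wait for the slave */
			;
		atomic_store(&stop_cnt, 0);
		atomic_fetch_add(&start_cnt, 1);	/* release the slave */

		/* ... both sides would write their counters here ... */

		while (atomic_load(&stop_cnt) != 1)	/* wait for it to finish */
			;
		atomic_store(&start_cnt, 0);
		atomic_fetch_add(&stop_cnt, 1);		/* let it leave the pass */
	}

	static void slave_pass(void)
	{
		atomic_fetch_add(&start_cnt, 1);
		while (atomic_load(&start_cnt) != 2)
			;
		/* ... write counter ... */
		atomic_fetch_add(&stop_cnt, 1);
		while (atomic_load(&stop_cnt) != 2)
			;
	}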
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c new file mode 100644 index 000000000..5512cd586 --- /dev/null +++ b/arch/mips/kernel/syscall.c | |||
@@ -0,0 +1,242 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle | ||
7 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
8 | * Copyright (C) 2001 MIPS Technologies, Inc. | ||
9 | */ | ||
10 | #include <linux/capability.h> | ||
11 | #include <linux/errno.h> | ||
12 | #include <linux/linkage.h> | ||
13 | #include <linux/fs.h> | ||
14 | #include <linux/smp.h> | ||
15 | #include <linux/ptrace.h> | ||
16 | #include <linux/string.h> | ||
17 | #include <linux/syscalls.h> | ||
18 | #include <linux/file.h> | ||
19 | #include <linux/utsname.h> | ||
20 | #include <linux/unistd.h> | ||
21 | #include <linux/sem.h> | ||
22 | #include <linux/msg.h> | ||
23 | #include <linux/shm.h> | ||
24 | #include <linux/compiler.h> | ||
25 | #include <linux/ipc.h> | ||
26 | #include <linux/uaccess.h> | ||
27 | #include <linux/slab.h> | ||
28 | #include <linux/elf.h> | ||
29 | #include <linux/sched/task_stack.h> | ||
30 | |||
31 | #include <asm/asm.h> | ||
32 | #include <asm/asm-eva.h> | ||
33 | #include <asm/branch.h> | ||
34 | #include <asm/cachectl.h> | ||
35 | #include <asm/cacheflush.h> | ||
36 | #include <asm/asm-offsets.h> | ||
37 | #include <asm/signal.h> | ||
38 | #include <asm/sim.h> | ||
39 | #include <asm/shmparam.h> | ||
40 | #include <asm/sync.h> | ||
41 | #include <asm/sysmips.h> | ||
42 | #include <asm/switch_to.h> | ||
43 | |||
44 | /* | ||
45 | * For historic reasons the pipe(2) syscall on MIPS has an unusual calling | ||
46 | * convention. It returns results in registers $v0 / $v1 which means there | ||
47 | is no need for it to verify the validity of a userspace pointer | ||
48 | * argument. Historically that used to be expensive in Linux. These days | ||
49 | * the performance advantage is negligible. | ||
50 | */ | ||
51 | asmlinkage int sysm_pipe(void) | ||
52 | { | ||
53 | int fd[2]; | ||
54 | int error = do_pipe_flags(fd, 0); | ||
55 | if (error) | ||
56 | return error; | ||
57 | current_pt_regs()->regs[3] = fd[1]; | ||
58 | return fd[0]; | ||
59 | } | ||
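Because sysm_pipe() returns the read end in $v0 and the write end in $v1, a libc wrapper has to collect both registers itself. A sketch of such a wrapper for the o32 ABI, assuming the usual MIPS Linux syscall convention ($a3 nonzero signals an error, with the errno value in $v0); the clobber list mirrors what MIPS libcs conventionally use.

	static inline long mips_pipe(int fd[2])
	{
		register long v0 __asm__("$2") = 4042;	/* __NR_pipe, o32 (4000 + 42) */
		register long v1 __asm__("$3");
		register long a3 __asm__("$7");

		__asm__ __volatile__(
			"syscall"
			: "+r" (v0), "=r" (v1), "=r" (a3)
			:
			: "$1", "$8", "$9", "$10", "$11", "$12", "$13",
			  "$14", "$15", "$24", "$25", "hi", "lo", "memory");

		if (a3)			/* error: $v0 holds the errno value */
			return -v0;
		fd[0] = v0;		/* $v0: read end */
		fd[1] = v1;		/* $v1: write end */
		return 0;
	}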
60 | |||
61 | SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len, | ||
62 | unsigned long, prot, unsigned long, flags, unsigned long, | ||
63 | fd, off_t, offset) | ||
64 | { | ||
65 | if (offset & ~PAGE_MASK) | ||
66 | return -EINVAL; | ||
67 | return ksys_mmap_pgoff(addr, len, prot, flags, fd, | ||
68 | offset >> PAGE_SHIFT); | ||
69 | } | ||
70 | |||
71 | SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len, | ||
72 | unsigned long, prot, unsigned long, flags, unsigned long, fd, | ||
73 | unsigned long, pgoff) | ||
74 | { | ||
75 | if (pgoff & (~PAGE_MASK >> 12)) | ||
76 | return -EINVAL; | ||
77 | |||
78 | return ksys_mmap_pgoff(addr, len, prot, flags, fd, | ||
79 | pgoff >> (PAGE_SHIFT - 12)); | ||
80 | } | ||
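sys_mips_mmap2 takes its offset in fixed 4 KiB units regardless of the kernel's actual page size, so the handler first rejects offsets that are not aligned to a real page and then rescales. A worked illustration, assuming a configuration with 16 KiB pages (PAGE_SHIFT = 14):

	#define PAGE_SHIFT	14
	#define PAGE_SIZE	(1UL << PAGE_SHIFT)
	#define PAGE_MASK	(~(PAGE_SIZE - 1))

	/* pgoff = 16 means 16 * 4 KiB = 64 KiB into the file.        */
	/* (16 & (~PAGE_MASK >> 12)) == (16 & 3) == 0: accepted.      */
	/* 16 >> (PAGE_SHIFT - 12) == 16 >> 2 == 4 sixteen-KiB pages, */
	/* which is again 64 KiB, now in real-page units.             */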
81 | |||
82 | save_static_function(sys_fork); | ||
83 | save_static_function(sys_clone); | ||
84 | save_static_function(sys_clone3); | ||
85 | |||
86 | SYSCALL_DEFINE1(set_thread_area, unsigned long, addr) | ||
87 | { | ||
88 | struct thread_info *ti = task_thread_info(current); | ||
89 | |||
90 | ti->tp_value = addr; | ||
91 | if (cpu_has_userlocal) | ||
92 | write_c0_userlocal(addr); | ||
93 | |||
94 | return 0; | ||
95 | } | ||
96 | |||
97 | static inline int mips_atomic_set(unsigned long addr, unsigned long new) | ||
98 | { | ||
99 | unsigned long old, tmp; | ||
100 | struct pt_regs *regs; | ||
101 | unsigned int err; | ||
102 | |||
103 | if (unlikely(addr & 3)) | ||
104 | return -EINVAL; | ||
105 | |||
106 | if (unlikely(!access_ok((const void __user *)addr, 4))) | ||
107 | return -EINVAL; | ||
108 | |||
109 | if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) { | ||
110 | __asm__ __volatile__ ( | ||
111 | " .set push \n" | ||
112 | " .set arch=r4000 \n" | ||
113 | " li %[err], 0 \n" | ||
114 | "1: ll %[old], (%[addr]) \n" | ||
115 | " move %[tmp], %[new] \n" | ||
116 | "2: sc %[tmp], (%[addr]) \n" | ||
117 | " beqzl %[tmp], 1b \n" | ||
118 | "3: \n" | ||
119 | " .insn \n" | ||
120 | " .section .fixup,\"ax\" \n" | ||
121 | "4: li %[err], %[efault] \n" | ||
122 | " j 3b \n" | ||
123 | " .previous \n" | ||
124 | " .section __ex_table,\"a\" \n" | ||
125 | " "STR(PTR)" 1b, 4b \n" | ||
126 | " "STR(PTR)" 2b, 4b \n" | ||
127 | " .previous \n" | ||
128 | " .set pop \n" | ||
129 | : [old] "=&r" (old), | ||
130 | [err] "=&r" (err), | ||
131 | [tmp] "=&r" (tmp) | ||
132 | : [addr] "r" (addr), | ||
133 | [new] "r" (new), | ||
134 | [efault] "i" (-EFAULT) | ||
135 | : "memory"); | ||
136 | } else if (cpu_has_llsc) { | ||
137 | __asm__ __volatile__ ( | ||
138 | " .set push \n" | ||
139 | " .set "MIPS_ISA_ARCH_LEVEL" \n" | ||
140 | " li %[err], 0 \n" | ||
141 | "1: \n" | ||
142 | " " __SYNC(full, loongson3_war) " \n" | ||
143 | user_ll("%[old]", "(%[addr])") | ||
144 | " move %[tmp], %[new] \n" | ||
145 | "2: \n" | ||
146 | user_sc("%[tmp]", "(%[addr])") | ||
147 | " beqz %[tmp], 1b \n" | ||
148 | "3: \n" | ||
149 | " .insn \n" | ||
150 | " .section .fixup,\"ax\" \n" | ||
151 | "5: li %[err], %[efault] \n" | ||
152 | " j 3b \n" | ||
153 | " .previous \n" | ||
154 | " .section __ex_table,\"a\" \n" | ||
155 | " "STR(PTR)" 1b, 5b \n" | ||
156 | " "STR(PTR)" 2b, 5b \n" | ||
157 | " .previous \n" | ||
158 | " .set pop \n" | ||
159 | : [old] "=&r" (old), | ||
160 | [err] "=&r" (err), | ||
161 | [tmp] "=&r" (tmp) | ||
162 | : [addr] "r" (addr), | ||
163 | [new] "r" (new), | ||
164 | [efault] "i" (-EFAULT) | ||
165 | : "memory"); | ||
166 | } else { | ||
167 | do { | ||
168 | preempt_disable(); | ||
169 | ll_bit = 1; | ||
170 | ll_task = current; | ||
171 | preempt_enable(); | ||
172 | |||
173 | err = __get_user(old, (unsigned int *) addr); | ||
174 | err |= __put_user(new, (unsigned int *) addr); | ||
175 | if (err) | ||
176 | break; | ||
177 | rmb(); | ||
178 | } while (!ll_bit); | ||
179 | } | ||
180 | |||
181 | if (unlikely(err)) | ||
182 | return err; | ||
183 | |||
184 | regs = current_pt_regs(); | ||
185 | regs->regs[2] = old; | ||
186 | regs->regs[7] = 0; /* No error */ | ||
187 | |||
188 | /* | ||
189 | * Don't let your children do this ... | ||
190 | */ | ||
191 | __asm__ __volatile__( | ||
192 | " move $29, %0 \n" | ||
193 | " j syscall_exit \n" | ||
194 | : /* no outputs */ | ||
195 | : "r" (regs)); | ||
196 | |||
197 | /* unreached. Honestly. */ | ||
198 | unreachable(); | ||
199 | } | ||
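Stripped of the user-access fixups and the direct jump to syscall_exit, the ll/sc loops above perform an unconditional atomic exchange: store the new value and hand back the old one. A portable analogue using a GCC builtin, for illustration only:

	unsigned int atomic_set_like(unsigned int *p, unsigned int new_val)
	{
		/* atomically stores new_val and returns the previous *p */
		return __atomic_exchange_n(p, new_val, __ATOMIC_SEQ_CST);
	}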
200 | |||
201 | /* | ||
202 | * mips_atomic_set() normally returns directly via syscall_exit potentially | ||
203 | * clobbering static registers, so be sure to preserve them. | ||
204 | */ | ||
205 | save_static_function(sys_sysmips); | ||
206 | |||
207 | SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2) | ||
208 | { | ||
209 | switch (cmd) { | ||
210 | case MIPS_ATOMIC_SET: | ||
211 | return mips_atomic_set(arg1, arg2); | ||
212 | |||
213 | case MIPS_FIXADE: | ||
214 | if (arg1 & ~3) | ||
215 | return -EINVAL; | ||
216 | |||
217 | if (arg1 & 1) | ||
218 | set_thread_flag(TIF_FIXADE); | ||
219 | else | ||
220 | clear_thread_flag(TIF_FIXADE); | ||
221 | if (arg1 & 2) | ||
222 | set_thread_flag(TIF_LOGADE); | ||
223 | else | ||
224 | clear_thread_flag(TIF_LOGADE); | ||
225 | |||
226 | return 0; | ||
227 | |||
228 | case FLUSH_CACHE: | ||
229 | __flush_cache_all(); | ||
230 | return 0; | ||
231 | } | ||
232 | |||
233 | return -EINVAL; | ||
234 | } | ||
235 | |||
236 | /* | ||
237 | * Not implemented yet ... | ||
238 | */ | ||
239 | SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op) | ||
240 | { | ||
241 | return -ENOSYS; | ||
242 | } | ||
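A hypothetical user-space caller of the MIPS_ATOMIC_SET service implemented above, going through syscall(2) on a MIPS build where SYS_sysmips is defined; the command constant matches <asm/sysmips.h>.

	#include <sys/syscall.h>
	#include <unistd.h>

	#define MIPS_ATOMIC_SET	2001	/* from <asm/sysmips.h> */

	long atomic_set_word(unsigned int *p, unsigned int v)
	{
		/* returns the old value of *p, or -1 with errno set */
		return syscall(SYS_sysmips, MIPS_ATOMIC_SET, (long)p, (long)v);
	}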
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile new file mode 100644 index 000000000..6efb2f688 --- /dev/null +++ b/arch/mips/kernel/syscalls/Makefile | |||
@@ -0,0 +1,96 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 | ||
2 | kapi := arch/$(SRCARCH)/include/generated/asm | ||
3 | uapi := arch/$(SRCARCH)/include/generated/uapi/asm | ||
4 | |||
5 | _dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \ | ||
6 | $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)') | ||
7 | |||
8 | syscalln32 := $(srctree)/$(src)/syscall_n32.tbl | ||
9 | syscalln64 := $(srctree)/$(src)/syscall_n64.tbl | ||
10 | syscallo32 := $(srctree)/$(src)/syscall_o32.tbl | ||
11 | syshdr := $(srctree)/$(src)/syscallhdr.sh | ||
12 | sysnr := $(srctree)/$(src)/syscallnr.sh | ||
13 | systbl := $(srctree)/$(src)/syscalltbl.sh | ||
14 | |||
15 | quiet_cmd_syshdr = SYSHDR $@ | ||
16 | cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \ | ||
17 | '$(syshdr_abis_$(basetarget))' \ | ||
18 | '$(syshdr_pfx_$(basetarget))' \ | ||
19 | '$(syshdr_offset_$(basetarget))' | ||
20 | |||
21 | quiet_cmd_sysnr = SYSNR $@ | ||
22 | cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \ | ||
23 | '$(sysnr_abis_$(basetarget))' \ | ||
24 | '$(sysnr_pfx_$(basetarget))' \ | ||
25 | '$(sysnr_offset_$(basetarget))' | ||
26 | |||
27 | quiet_cmd_systbl = SYSTBL $@ | ||
28 | cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \ | ||
29 | '$(systbl_abis_$(basetarget))' \ | ||
30 | '$(systbl_abi_$(basetarget))' \ | ||
31 | '$(systbl_offset_$(basetarget))' | ||
32 | |||
33 | syshdr_offset_unistd_n32 := __NR_Linux | ||
34 | $(uapi)/unistd_n32.h: $(syscalln32) $(syshdr) | ||
35 | $(call if_changed,syshdr) | ||
36 | |||
37 | syshdr_offset_unistd_n64 := __NR_Linux | ||
38 | $(uapi)/unistd_n64.h: $(syscalln64) $(syshdr) | ||
39 | $(call if_changed,syshdr) | ||
40 | |||
41 | syshdr_offset_unistd_o32 := __NR_Linux | ||
42 | $(uapi)/unistd_o32.h: $(syscallo32) $(syshdr) | ||
43 | $(call if_changed,syshdr) | ||
44 | |||
45 | sysnr_pfx_unistd_nr_n32 := N32 | ||
46 | sysnr_offset_unistd_nr_n32 := 6000 | ||
47 | $(uapi)/unistd_nr_n32.h: $(syscalln32) $(sysnr) | ||
48 | $(call if_changed,sysnr) | ||
49 | |||
50 | sysnr_pfx_unistd_nr_n64 := 64 | ||
51 | sysnr_offset_unistd_nr_n64 := 5000 | ||
52 | $(uapi)/unistd_nr_n64.h: $(syscalln64) $(sysnr) | ||
53 | $(call if_changed,sysnr) | ||
54 | |||
55 | sysnr_pfx_unistd_nr_o32 := O32 | ||
56 | sysnr_offset_unistd_nr_o32 := 4000 | ||
57 | $(uapi)/unistd_nr_o32.h: $(syscallo32) $(sysnr) | ||
58 | $(call if_changed,sysnr) | ||
59 | |||
60 | systbl_abi_syscall_table_32_o32 := 32_o32 | ||
61 | systbl_offset_syscall_table_32_o32 := 4000 | ||
62 | $(kapi)/syscall_table_32_o32.h: $(syscallo32) $(systbl) | ||
63 | $(call if_changed,systbl) | ||
64 | |||
65 | systbl_abi_syscall_table_64_n32 := 64_n32 | ||
66 | systbl_offset_syscall_table_64_n32 := 6000 | ||
67 | $(kapi)/syscall_table_64_n32.h: $(syscalln32) $(systbl) | ||
68 | $(call if_changed,systbl) | ||
69 | |||
70 | systbl_abi_syscall_table_64_n64 := 64_n64 | ||
71 | systbl_offset_syscall_table_64_n64 := 5000 | ||
72 | $(kapi)/syscall_table_64_n64.h: $(syscalln64) $(systbl) | ||
73 | $(call if_changed,systbl) | ||
74 | |||
75 | systbl_abi_syscall_table_64_o32 := 64_o32 | ||
76 | systbl_offset_syscall_table_64_o32 := 4000 | ||
77 | $(kapi)/syscall_table_64_o32.h: $(syscallo32) $(systbl) | ||
78 | $(call if_changed,systbl) | ||
79 | |||
80 | uapisyshdr-y += unistd_n32.h \ | ||
81 | unistd_n64.h \ | ||
82 | unistd_o32.h \ | ||
83 | unistd_nr_n32.h \ | ||
84 | unistd_nr_n64.h \ | ||
85 | unistd_nr_o32.h | ||
86 | kapisyshdr-y += syscall_table_32_o32.h \ | ||
87 | syscall_table_64_n32.h \ | ||
88 | syscall_table_64_n64.h \ | ||
89 | syscall_table_64_o32.h | ||
90 | |||
91 | targets += $(uapisyshdr-y) $(kapisyshdr-y) | ||
92 | |||
93 | PHONY += all | ||
94 | all: $(addprefix $(uapi)/,$(uapisyshdr-y)) | ||
95 | all: $(addprefix $(kapi)/,$(kapisyshdr-y)) | ||
96 | @: | ||
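For orientation, the uapi headers that syscallhdr.sh generates from the tables are roughly of this shape (illustrative excerpt for unistd_n64.h, where the offset macro configured above is __NR_Linux; exact formatting may differ):

	#define __NR_read	(__NR_Linux + 0)
	#define __NR_write	(__NR_Linux + 1)
	#define __NR_open	(__NR_Linux + 2)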
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl new file mode 100644 index 000000000..32817c954 --- /dev/null +++ b/arch/mips/kernel/syscalls/syscall_n32.tbl | |||
@@ -0,0 +1,381 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note | ||
2 | # | ||
3 | # system call numbers and entry vectors for mips | ||
4 | # | ||
5 | # The format is: | ||
6 | # <number> <abi> <name> <entry point> <compat entry point> | ||
7 | # | ||
8 | # The <abi> is always "n32" for this file. | ||
9 | # | ||
10 | 0 n32 read sys_read | ||
11 | 1 n32 write sys_write | ||
12 | 2 n32 open sys_open | ||
13 | 3 n32 close sys_close | ||
14 | 4 n32 stat sys_newstat | ||
15 | 5 n32 fstat sys_newfstat | ||
16 | 6 n32 lstat sys_newlstat | ||
17 | 7 n32 poll sys_poll | ||
18 | 8 n32 lseek sys_lseek | ||
19 | 9 n32 mmap sys_mips_mmap | ||
20 | 10 n32 mprotect sys_mprotect | ||
21 | 11 n32 munmap sys_munmap | ||
22 | 12 n32 brk sys_brk | ||
23 | 13 n32 rt_sigaction compat_sys_rt_sigaction | ||
24 | 14 n32 rt_sigprocmask compat_sys_rt_sigprocmask | ||
25 | 15 n32 ioctl compat_sys_ioctl | ||
26 | 16 n32 pread64 sys_pread64 | ||
27 | 17 n32 pwrite64 sys_pwrite64 | ||
28 | 18 n32 readv sys_readv | ||
29 | 19 n32 writev sys_writev | ||
30 | 20 n32 access sys_access | ||
31 | 21 n32 pipe sysm_pipe | ||
32 | 22 n32 _newselect compat_sys_select | ||
33 | 23 n32 sched_yield sys_sched_yield | ||
34 | 24 n32 mremap sys_mremap | ||
35 | 25 n32 msync sys_msync | ||
36 | 26 n32 mincore sys_mincore | ||
37 | 27 n32 madvise sys_madvise | ||
38 | 28 n32 shmget sys_shmget | ||
39 | 29 n32 shmat sys_shmat | ||
40 | 30 n32 shmctl compat_sys_old_shmctl | ||
41 | 31 n32 dup sys_dup | ||
42 | 32 n32 dup2 sys_dup2 | ||
43 | 33 n32 pause sys_pause | ||
44 | 34 n32 nanosleep sys_nanosleep_time32 | ||
45 | 35 n32 getitimer compat_sys_getitimer | ||
46 | 36 n32 setitimer compat_sys_setitimer | ||
47 | 37 n32 alarm sys_alarm | ||
48 | 38 n32 getpid sys_getpid | ||
49 | 39 n32 sendfile compat_sys_sendfile | ||
50 | 40 n32 socket sys_socket | ||
51 | 41 n32 connect sys_connect | ||
52 | 42 n32 accept sys_accept | ||
53 | 43 n32 sendto sys_sendto | ||
54 | 44 n32 recvfrom compat_sys_recvfrom | ||
55 | 45 n32 sendmsg compat_sys_sendmsg | ||
56 | 46 n32 recvmsg compat_sys_recvmsg | ||
57 | 47 n32 shutdown sys_shutdown | ||
58 | 48 n32 bind sys_bind | ||
59 | 49 n32 listen sys_listen | ||
60 | 50 n32 getsockname sys_getsockname | ||
61 | 51 n32 getpeername sys_getpeername | ||
62 | 52 n32 socketpair sys_socketpair | ||
63 | 53 n32 setsockopt sys_setsockopt | ||
64 | 54 n32 getsockopt sys_getsockopt | ||
65 | 55 n32 clone __sys_clone | ||
66 | 56 n32 fork __sys_fork | ||
67 | 57 n32 execve compat_sys_execve | ||
68 | 58 n32 exit sys_exit | ||
69 | 59 n32 wait4 compat_sys_wait4 | ||
70 | 60 n32 kill sys_kill | ||
71 | 61 n32 uname sys_newuname | ||
72 | 62 n32 semget sys_semget | ||
73 | 63 n32 semop sys_semop | ||
74 | 64 n32 semctl compat_sys_old_semctl | ||
75 | 65 n32 shmdt sys_shmdt | ||
76 | 66 n32 msgget sys_msgget | ||
77 | 67 n32 msgsnd compat_sys_msgsnd | ||
78 | 68 n32 msgrcv compat_sys_msgrcv | ||
79 | 69 n32 msgctl compat_sys_old_msgctl | ||
80 | 70 n32 fcntl compat_sys_fcntl | ||
81 | 71 n32 flock sys_flock | ||
82 | 72 n32 fsync sys_fsync | ||
83 | 73 n32 fdatasync sys_fdatasync | ||
84 | 74 n32 truncate sys_truncate | ||
85 | 75 n32 ftruncate sys_ftruncate | ||
86 | 76 n32 getdents compat_sys_getdents | ||
87 | 77 n32 getcwd sys_getcwd | ||
88 | 78 n32 chdir sys_chdir | ||
89 | 79 n32 fchdir sys_fchdir | ||
90 | 80 n32 rename sys_rename | ||
91 | 81 n32 mkdir sys_mkdir | ||
92 | 82 n32 rmdir sys_rmdir | ||
93 | 83 n32 creat sys_creat | ||
94 | 84 n32 link sys_link | ||
95 | 85 n32 unlink sys_unlink | ||
96 | 86 n32 symlink sys_symlink | ||
97 | 87 n32 readlink sys_readlink | ||
98 | 88 n32 chmod sys_chmod | ||
99 | 89 n32 fchmod sys_fchmod | ||
100 | 90 n32 chown sys_chown | ||
101 | 91 n32 fchown sys_fchown | ||
102 | 92 n32 lchown sys_lchown | ||
103 | 93 n32 umask sys_umask | ||
104 | 94 n32 gettimeofday compat_sys_gettimeofday | ||
105 | 95 n32 getrlimit compat_sys_getrlimit | ||
106 | 96 n32 getrusage compat_sys_getrusage | ||
107 | 97 n32 sysinfo compat_sys_sysinfo | ||
108 | 98 n32 times compat_sys_times | ||
109 | 99 n32 ptrace compat_sys_ptrace | ||
110 | 100 n32 getuid sys_getuid | ||
111 | 101 n32 syslog sys_syslog | ||
112 | 102 n32 getgid sys_getgid | ||
113 | 103 n32 setuid sys_setuid | ||
114 | 104 n32 setgid sys_setgid | ||
115 | 105 n32 geteuid sys_geteuid | ||
116 | 106 n32 getegid sys_getegid | ||
117 | 107 n32 setpgid sys_setpgid | ||
118 | 108 n32 getppid sys_getppid | ||
119 | 109 n32 getpgrp sys_getpgrp | ||
120 | 110 n32 setsid sys_setsid | ||
121 | 111 n32 setreuid sys_setreuid | ||
122 | 112 n32 setregid sys_setregid | ||
123 | 113 n32 getgroups sys_getgroups | ||
124 | 114 n32 setgroups sys_setgroups | ||
125 | 115 n32 setresuid sys_setresuid | ||
126 | 116 n32 getresuid sys_getresuid | ||
127 | 117 n32 setresgid sys_setresgid | ||
128 | 118 n32 getresgid sys_getresgid | ||
129 | 119 n32 getpgid sys_getpgid | ||
130 | 120 n32 setfsuid sys_setfsuid | ||
131 | 121 n32 setfsgid sys_setfsgid | ||
132 | 122 n32 getsid sys_getsid | ||
133 | 123 n32 capget sys_capget | ||
134 | 124 n32 capset sys_capset | ||
135 | 125 n32 rt_sigpending compat_sys_rt_sigpending | ||
136 | 126 n32 rt_sigtimedwait compat_sys_rt_sigtimedwait_time32 | ||
137 | 127 n32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo | ||
138 | 128 n32 rt_sigsuspend compat_sys_rt_sigsuspend | ||
139 | 129 n32 sigaltstack compat_sys_sigaltstack | ||
140 | 130 n32 utime sys_utime32 | ||
141 | 131 n32 mknod sys_mknod | ||
142 | 132 n32 personality sys_32_personality | ||
143 | 133 n32 ustat compat_sys_ustat | ||
144 | 134 n32 statfs compat_sys_statfs | ||
145 | 135 n32 fstatfs compat_sys_fstatfs | ||
146 | 136 n32 sysfs sys_sysfs | ||
147 | 137 n32 getpriority sys_getpriority | ||
148 | 138 n32 setpriority sys_setpriority | ||
149 | 139 n32 sched_setparam sys_sched_setparam | ||
150 | 140 n32 sched_getparam sys_sched_getparam | ||
151 | 141 n32 sched_setscheduler sys_sched_setscheduler | ||
152 | 142 n32 sched_getscheduler sys_sched_getscheduler | ||
153 | 143 n32 sched_get_priority_max sys_sched_get_priority_max | ||
154 | 144 n32 sched_get_priority_min sys_sched_get_priority_min | ||
155 | 145 n32 sched_rr_get_interval sys_sched_rr_get_interval_time32 | ||
156 | 146 n32 mlock sys_mlock | ||
157 | 147 n32 munlock sys_munlock | ||
158 | 148 n32 mlockall sys_mlockall | ||
159 | 149 n32 munlockall sys_munlockall | ||
160 | 150 n32 vhangup sys_vhangup | ||
161 | 151 n32 pivot_root sys_pivot_root | ||
162 | 152 n32 _sysctl sys_ni_syscall | ||
163 | 153 n32 prctl sys_prctl | ||
164 | 154 n32 adjtimex sys_adjtimex_time32 | ||
165 | 155 n32 setrlimit compat_sys_setrlimit | ||
166 | 156 n32 chroot sys_chroot | ||
167 | 157 n32 sync sys_sync | ||
168 | 158 n32 acct sys_acct | ||
169 | 159 n32 settimeofday compat_sys_settimeofday | ||
170 | 160 n32 mount sys_mount | ||
171 | 161 n32 umount2 sys_umount | ||
172 | 162 n32 swapon sys_swapon | ||
173 | 163 n32 swapoff sys_swapoff | ||
174 | 164 n32 reboot sys_reboot | ||
175 | 165 n32 sethostname sys_sethostname | ||
176 | 166 n32 setdomainname sys_setdomainname | ||
177 | 167 n32 create_module sys_ni_syscall | ||
178 | 168 n32 init_module sys_init_module | ||
179 | 169 n32 delete_module sys_delete_module | ||
180 | 170 n32 get_kernel_syms sys_ni_syscall | ||
181 | 171 n32 query_module sys_ni_syscall | ||
182 | 172 n32 quotactl sys_quotactl | ||
183 | 173 n32 nfsservctl sys_ni_syscall | ||
184 | 174 n32 getpmsg sys_ni_syscall | ||
185 | 175 n32 putpmsg sys_ni_syscall | ||
186 | 176 n32 afs_syscall sys_ni_syscall | ||
187 | # 177 reserved for security | ||
188 | 177 n32 reserved177 sys_ni_syscall | ||
189 | 178 n32 gettid sys_gettid | ||
190 | 179 n32 readahead sys_readahead | ||
191 | 180 n32 setxattr sys_setxattr | ||
192 | 181 n32 lsetxattr sys_lsetxattr | ||
193 | 182 n32 fsetxattr sys_fsetxattr | ||
194 | 183 n32 getxattr sys_getxattr | ||
195 | 184 n32 lgetxattr sys_lgetxattr | ||
196 | 185 n32 fgetxattr sys_fgetxattr | ||
197 | 186 n32 listxattr sys_listxattr | ||
198 | 187 n32 llistxattr sys_llistxattr | ||
199 | 188 n32 flistxattr sys_flistxattr | ||
200 | 189 n32 removexattr sys_removexattr | ||
201 | 190 n32 lremovexattr sys_lremovexattr | ||
202 | 191 n32 fremovexattr sys_fremovexattr | ||
203 | 192 n32 tkill sys_tkill | ||
204 | 193 n32 reserved193 sys_ni_syscall | ||
205 | 194 n32 futex sys_futex_time32 | ||
206 | 195 n32 sched_setaffinity compat_sys_sched_setaffinity | ||
207 | 196 n32 sched_getaffinity compat_sys_sched_getaffinity | ||
208 | 197 n32 cacheflush sys_cacheflush | ||
209 | 198 n32 cachectl sys_cachectl | ||
210 | 199 n32 sysmips __sys_sysmips | ||
211 | 200 n32 io_setup compat_sys_io_setup | ||
212 | 201 n32 io_destroy sys_io_destroy | ||
213 | 202 n32 io_getevents sys_io_getevents_time32 | ||
214 | 203 n32 io_submit compat_sys_io_submit | ||
215 | 204 n32 io_cancel sys_io_cancel | ||
216 | 205 n32 exit_group sys_exit_group | ||
217 | 206 n32 lookup_dcookie sys_lookup_dcookie | ||
218 | 207 n32 epoll_create sys_epoll_create | ||
219 | 208 n32 epoll_ctl sys_epoll_ctl | ||
220 | 209 n32 epoll_wait sys_epoll_wait | ||
221 | 210 n32 remap_file_pages sys_remap_file_pages | ||
222 | 211 n32 rt_sigreturn sysn32_rt_sigreturn | ||
223 | 212 n32 fcntl64 compat_sys_fcntl64 | ||
224 | 213 n32 set_tid_address sys_set_tid_address | ||
225 | 214 n32 restart_syscall sys_restart_syscall | ||
226 | 215 n32 semtimedop sys_semtimedop_time32 | ||
227 | 216 n32 fadvise64 sys_fadvise64_64 | ||
228 | 217 n32 statfs64 compat_sys_statfs64 | ||
229 | 218 n32 fstatfs64 compat_sys_fstatfs64 | ||
230 | 219 n32 sendfile64 sys_sendfile64 | ||
231 | 220 n32 timer_create compat_sys_timer_create | ||
232 | 221 n32 timer_settime sys_timer_settime32 | ||
233 | 222 n32 timer_gettime sys_timer_gettime32 | ||
234 | 223 n32 timer_getoverrun sys_timer_getoverrun | ||
235 | 224 n32 timer_delete sys_timer_delete | ||
236 | 225 n32 clock_settime sys_clock_settime32 | ||
237 | 226 n32 clock_gettime sys_clock_gettime32 | ||
238 | 227 n32 clock_getres sys_clock_getres_time32 | ||
239 | 228 n32 clock_nanosleep sys_clock_nanosleep_time32 | ||
240 | 229 n32 tgkill sys_tgkill | ||
241 | 230 n32 utimes sys_utimes_time32 | ||
242 | 231 n32 mbind compat_sys_mbind | ||
243 | 232 n32 get_mempolicy compat_sys_get_mempolicy | ||
244 | 233 n32 set_mempolicy compat_sys_set_mempolicy | ||
245 | 234 n32 mq_open compat_sys_mq_open | ||
246 | 235 n32 mq_unlink sys_mq_unlink | ||
247 | 236 n32 mq_timedsend sys_mq_timedsend_time32 | ||
248 | 237 n32 mq_timedreceive sys_mq_timedreceive_time32 | ||
249 | 238 n32 mq_notify compat_sys_mq_notify | ||
250 | 239 n32 mq_getsetattr compat_sys_mq_getsetattr | ||
251 | 240 n32 vserver sys_ni_syscall | ||
252 | 241 n32 waitid compat_sys_waitid | ||
253 | # 242 was sys_setaltroot | ||
254 | 243 n32 add_key sys_add_key | ||
255 | 244 n32 request_key sys_request_key | ||
256 | 245 n32 keyctl compat_sys_keyctl | ||
257 | 246 n32 set_thread_area sys_set_thread_area | ||
258 | 247 n32 inotify_init sys_inotify_init | ||
259 | 248 n32 inotify_add_watch sys_inotify_add_watch | ||
260 | 249 n32 inotify_rm_watch sys_inotify_rm_watch | ||
261 | 250 n32 migrate_pages compat_sys_migrate_pages | ||
262 | 251 n32 openat sys_openat | ||
263 | 252 n32 mkdirat sys_mkdirat | ||
264 | 253 n32 mknodat sys_mknodat | ||
265 | 254 n32 fchownat sys_fchownat | ||
266 | 255 n32 futimesat sys_futimesat_time32 | ||
267 | 256 n32 newfstatat sys_newfstatat | ||
268 | 257 n32 unlinkat sys_unlinkat | ||
269 | 258 n32 renameat sys_renameat | ||
270 | 259 n32 linkat sys_linkat | ||
271 | 260 n32 symlinkat sys_symlinkat | ||
272 | 261 n32 readlinkat sys_readlinkat | ||
273 | 262 n32 fchmodat sys_fchmodat | ||
274 | 263 n32 faccessat sys_faccessat | ||
275 | 264 n32 pselect6 compat_sys_pselect6_time32 | ||
276 | 265 n32 ppoll compat_sys_ppoll_time32 | ||
277 | 266 n32 unshare sys_unshare | ||
278 | 267 n32 splice sys_splice | ||
279 | 268 n32 sync_file_range sys_sync_file_range | ||
280 | 269 n32 tee sys_tee | ||
281 | 270 n32 vmsplice sys_vmsplice | ||
282 | 271 n32 move_pages compat_sys_move_pages | ||
283 | 272 n32 set_robust_list compat_sys_set_robust_list | ||
284 | 273 n32 get_robust_list compat_sys_get_robust_list | ||
285 | 274 n32 kexec_load compat_sys_kexec_load | ||
286 | 275 n32 getcpu sys_getcpu | ||
287 | 276 n32 epoll_pwait compat_sys_epoll_pwait | ||
288 | 277 n32 ioprio_set sys_ioprio_set | ||
289 | 278 n32 ioprio_get sys_ioprio_get | ||
290 | 279 n32 utimensat sys_utimensat_time32 | ||
291 | 280 n32 signalfd compat_sys_signalfd | ||
292 | 281 n32 timerfd sys_ni_syscall | ||
293 | 282 n32 eventfd sys_eventfd | ||
294 | 283 n32 fallocate sys_fallocate | ||
295 | 284 n32 timerfd_create sys_timerfd_create | ||
296 | 285 n32 timerfd_gettime sys_timerfd_gettime32 | ||
297 | 286 n32 timerfd_settime sys_timerfd_settime32 | ||
298 | 287 n32 signalfd4 compat_sys_signalfd4 | ||
299 | 288 n32 eventfd2 sys_eventfd2 | ||
300 | 289 n32 epoll_create1 sys_epoll_create1 | ||
301 | 290 n32 dup3 sys_dup3 | ||
302 | 291 n32 pipe2 sys_pipe2 | ||
303 | 292 n32 inotify_init1 sys_inotify_init1 | ||
304 | 293 n32 preadv compat_sys_preadv | ||
305 | 294 n32 pwritev compat_sys_pwritev | ||
306 | 295 n32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo | ||
307 | 296 n32 perf_event_open sys_perf_event_open | ||
308 | 297 n32 accept4 sys_accept4 | ||
309 | 298 n32 recvmmsg compat_sys_recvmmsg_time32 | ||
310 | 299 n32 getdents64 sys_getdents64 | ||
311 | 300 n32 fanotify_init sys_fanotify_init | ||
312 | 301 n32 fanotify_mark sys_fanotify_mark | ||
313 | 302 n32 prlimit64 sys_prlimit64 | ||
314 | 303 n32 name_to_handle_at sys_name_to_handle_at | ||
315 | 304 n32 open_by_handle_at sys_open_by_handle_at | ||
316 | 305 n32 clock_adjtime sys_clock_adjtime32 | ||
317 | 306 n32 syncfs sys_syncfs | ||
318 | 307 n32 sendmmsg compat_sys_sendmmsg | ||
319 | 308 n32 setns sys_setns | ||
320 | 309 n32 process_vm_readv sys_process_vm_readv | ||
321 | 310 n32 process_vm_writev sys_process_vm_writev | ||
322 | 311 n32 kcmp sys_kcmp | ||
323 | 312 n32 finit_module sys_finit_module | ||
324 | 313 n32 sched_setattr sys_sched_setattr | ||
325 | 314 n32 sched_getattr sys_sched_getattr | ||
326 | 315 n32 renameat2 sys_renameat2 | ||
327 | 316 n32 seccomp sys_seccomp | ||
328 | 317 n32 getrandom sys_getrandom | ||
329 | 318 n32 memfd_create sys_memfd_create | ||
330 | 319 n32 bpf sys_bpf | ||
331 | 320 n32 execveat compat_sys_execveat | ||
332 | 321 n32 userfaultfd sys_userfaultfd | ||
333 | 322 n32 membarrier sys_membarrier | ||
334 | 323 n32 mlock2 sys_mlock2 | ||
335 | 324 n32 copy_file_range sys_copy_file_range | ||
336 | 325 n32 preadv2 compat_sys_preadv2 | ||
337 | 326 n32 pwritev2 compat_sys_pwritev2 | ||
338 | 327 n32 pkey_mprotect sys_pkey_mprotect | ||
339 | 328 n32 pkey_alloc sys_pkey_alloc | ||
340 | 329 n32 pkey_free sys_pkey_free | ||
341 | 330 n32 statx sys_statx | ||
342 | 331 n32 rseq sys_rseq | ||
343 | 332 n32 io_pgetevents compat_sys_io_pgetevents | ||
344 | # 333 through 402 are unassigned to sync up with generic numbers | ||
345 | 403 n32 clock_gettime64 sys_clock_gettime | ||
346 | 404 n32 clock_settime64 sys_clock_settime | ||
347 | 405 n32 clock_adjtime64 sys_clock_adjtime | ||
348 | 406 n32 clock_getres_time64 sys_clock_getres | ||
349 | 407 n32 clock_nanosleep_time64 sys_clock_nanosleep | ||
350 | 408 n32 timer_gettime64 sys_timer_gettime | ||
351 | 409 n32 timer_settime64 sys_timer_settime | ||
352 | 410 n32 timerfd_gettime64 sys_timerfd_gettime | ||
353 | 411 n32 timerfd_settime64 sys_timerfd_settime | ||
354 | 412 n32 utimensat_time64 sys_utimensat | ||
355 | 413 n32 pselect6_time64 compat_sys_pselect6_time64 | ||
356 | 414 n32 ppoll_time64 compat_sys_ppoll_time64 | ||
357 | 416 n32 io_pgetevents_time64 sys_io_pgetevents | ||
358 | 417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64 | ||
359 | 418 n32 mq_timedsend_time64 sys_mq_timedsend | ||
360 | 419 n32 mq_timedreceive_time64 sys_mq_timedreceive | ||
361 | 420 n32 semtimedop_time64 sys_semtimedop | ||
362 | 421 n32 rt_sigtimedwait_time64 compat_sys_rt_sigtimedwait_time64 | ||
363 | 422 n32 futex_time64 sys_futex | ||
364 | 423 n32 sched_rr_get_interval_time64 sys_sched_rr_get_interval | ||
365 | 424 n32 pidfd_send_signal sys_pidfd_send_signal | ||
366 | 425 n32 io_uring_setup sys_io_uring_setup | ||
367 | 426 n32 io_uring_enter sys_io_uring_enter | ||
368 | 427 n32 io_uring_register sys_io_uring_register | ||
369 | 428 n32 open_tree sys_open_tree | ||
370 | 429 n32 move_mount sys_move_mount | ||
371 | 430 n32 fsopen sys_fsopen | ||
372 | 431 n32 fsconfig sys_fsconfig | ||
373 | 432 n32 fsmount sys_fsmount | ||
374 | 433 n32 fspick sys_fspick | ||
375 | 434 n32 pidfd_open sys_pidfd_open | ||
376 | 435 n32 clone3 __sys_clone3 | ||
377 | 436 n32 close_range sys_close_range | ||
378 | 437 n32 openat2 sys_openat2 | ||
379 | 438 n32 pidfd_getfd sys_pidfd_getfd | ||
380 | 439 n32 faccessat2 sys_faccessat2 | ||
381 | 440 n32 process_madvise sys_process_madvise | ||
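Table rows are whitespace-separated fields consumed by the scripts in syscalls/Makefile; a new syscall is added by appending a row with the next free number. A purely hypothetical example (name and entry point invented for illustration):

	441	n32	hypothetical_call		sys_hypothetical_call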
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl new file mode 100644 index 000000000..9e4ea3c31 --- /dev/null +++ b/arch/mips/kernel/syscalls/syscall_n64.tbl | |||
@@ -0,0 +1,357 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note | ||
2 | # | ||
3 | # system call numbers and entry vectors for mips | ||
4 | # | ||
5 | # The format is: | ||
6 | # <number> <abi> <name> <entry point> | ||
7 | # | ||
8 | # The <abi> is always "n64" for this file. | ||
9 | # | ||
10 | 0 n64 read sys_read | ||
11 | 1 n64 write sys_write | ||
12 | 2 n64 open sys_open | ||
13 | 3 n64 close sys_close | ||
14 | 4 n64 stat sys_newstat | ||
15 | 5 n64 fstat sys_newfstat | ||
16 | 6 n64 lstat sys_newlstat | ||
17 | 7 n64 poll sys_poll | ||
18 | 8 n64 lseek sys_lseek | ||
19 | 9 n64 mmap sys_mips_mmap | ||
20 | 10 n64 mprotect sys_mprotect | ||
21 | 11 n64 munmap sys_munmap | ||
22 | 12 n64 brk sys_brk | ||
23 | 13 n64 rt_sigaction sys_rt_sigaction | ||
24 | 14 n64 rt_sigprocmask sys_rt_sigprocmask | ||
25 | 15 n64 ioctl sys_ioctl | ||
26 | 16 n64 pread64 sys_pread64 | ||
27 | 17 n64 pwrite64 sys_pwrite64 | ||
28 | 18 n64 readv sys_readv | ||
29 | 19 n64 writev sys_writev | ||
30 | 20 n64 access sys_access | ||
31 | 21 n64 pipe sysm_pipe | ||
32 | 22 n64 _newselect sys_select | ||
33 | 23 n64 sched_yield sys_sched_yield | ||
34 | 24 n64 mremap sys_mremap | ||
35 | 25 n64 msync sys_msync | ||
36 | 26 n64 mincore sys_mincore | ||
37 | 27 n64 madvise sys_madvise | ||
38 | 28 n64 shmget sys_shmget | ||
39 | 29 n64 shmat sys_shmat | ||
40 | 30 n64 shmctl sys_old_shmctl | ||
41 | 31 n64 dup sys_dup | ||
42 | 32 n64 dup2 sys_dup2 | ||
43 | 33 n64 pause sys_pause | ||
44 | 34 n64 nanosleep sys_nanosleep | ||
45 | 35 n64 getitimer sys_getitimer | ||
46 | 36 n64 setitimer sys_setitimer | ||
47 | 37 n64 alarm sys_alarm | ||
48 | 38 n64 getpid sys_getpid | ||
49 | 39 n64 sendfile sys_sendfile64 | ||
50 | 40 n64 socket sys_socket | ||
51 | 41 n64 connect sys_connect | ||
52 | 42 n64 accept sys_accept | ||
53 | 43 n64 sendto sys_sendto | ||
54 | 44 n64 recvfrom sys_recvfrom | ||
55 | 45 n64 sendmsg sys_sendmsg | ||
56 | 46 n64 recvmsg sys_recvmsg | ||
57 | 47 n64 shutdown sys_shutdown | ||
58 | 48 n64 bind sys_bind | ||
59 | 49 n64 listen sys_listen | ||
60 | 50 n64 getsockname sys_getsockname | ||
61 | 51 n64 getpeername sys_getpeername | ||
62 | 52 n64 socketpair sys_socketpair | ||
63 | 53 n64 setsockopt sys_setsockopt | ||
64 | 54 n64 getsockopt sys_getsockopt | ||
65 | 55 n64 clone __sys_clone | ||
66 | 56 n64 fork __sys_fork | ||
67 | 57 n64 execve sys_execve | ||
68 | 58 n64 exit sys_exit | ||
69 | 59 n64 wait4 sys_wait4 | ||
70 | 60 n64 kill sys_kill | ||
71 | 61 n64 uname sys_newuname | ||
72 | 62 n64 semget sys_semget | ||
73 | 63 n64 semop sys_semop | ||
74 | 64 n64 semctl sys_old_semctl | ||
75 | 65 n64 shmdt sys_shmdt | ||
76 | 66 n64 msgget sys_msgget | ||
77 | 67 n64 msgsnd sys_msgsnd | ||
78 | 68 n64 msgrcv sys_msgrcv | ||
79 | 69 n64 msgctl sys_old_msgctl | ||
80 | 70 n64 fcntl sys_fcntl | ||
81 | 71 n64 flock sys_flock | ||
82 | 72 n64 fsync sys_fsync | ||
83 | 73 n64 fdatasync sys_fdatasync | ||
84 | 74 n64 truncate sys_truncate | ||
85 | 75 n64 ftruncate sys_ftruncate | ||
86 | 76 n64 getdents sys_getdents | ||
87 | 77 n64 getcwd sys_getcwd | ||
88 | 78 n64 chdir sys_chdir | ||
89 | 79 n64 fchdir sys_fchdir | ||
90 | 80 n64 rename sys_rename | ||
91 | 81 n64 mkdir sys_mkdir | ||
92 | 82 n64 rmdir sys_rmdir | ||
93 | 83 n64 creat sys_creat | ||
94 | 84 n64 link sys_link | ||
95 | 85 n64 unlink sys_unlink | ||
96 | 86 n64 symlink sys_symlink | ||
97 | 87 n64 readlink sys_readlink | ||
98 | 88 n64 chmod sys_chmod | ||
99 | 89 n64 fchmod sys_fchmod | ||
100 | 90 n64 chown sys_chown | ||
101 | 91 n64 fchown sys_fchown | ||
102 | 92 n64 lchown sys_lchown | ||
103 | 93 n64 umask sys_umask | ||
104 | 94 n64 gettimeofday sys_gettimeofday | ||
105 | 95 n64 getrlimit sys_getrlimit | ||
106 | 96 n64 getrusage sys_getrusage | ||
107 | 97 n64 sysinfo sys_sysinfo | ||
108 | 98 n64 times sys_times | ||
109 | 99 n64 ptrace sys_ptrace | ||
110 | 100 n64 getuid sys_getuid | ||
111 | 101 n64 syslog sys_syslog | ||
112 | 102 n64 getgid sys_getgid | ||
113 | 103 n64 setuid sys_setuid | ||
114 | 104 n64 setgid sys_setgid | ||
115 | 105 n64 geteuid sys_geteuid | ||
116 | 106 n64 getegid sys_getegid | ||
117 | 107 n64 setpgid sys_setpgid | ||
118 | 108 n64 getppid sys_getppid | ||
119 | 109 n64 getpgrp sys_getpgrp | ||
120 | 110 n64 setsid sys_setsid | ||
121 | 111 n64 setreuid sys_setreuid | ||
122 | 112 n64 setregid sys_setregid | ||
123 | 113 n64 getgroups sys_getgroups | ||
124 | 114 n64 setgroups sys_setgroups | ||
125 | 115 n64 setresuid sys_setresuid | ||
126 | 116 n64 getresuid sys_getresuid | ||
127 | 117 n64 setresgid sys_setresgid | ||
128 | 118 n64 getresgid sys_getresgid | ||
129 | 119 n64 getpgid sys_getpgid | ||
130 | 120 n64 setfsuid sys_setfsuid | ||
131 | 121 n64 setfsgid sys_setfsgid | ||
132 | 122 n64 getsid sys_getsid | ||
133 | 123 n64 capget sys_capget | ||
134 | 124 n64 capset sys_capset | ||
135 | 125 n64 rt_sigpending sys_rt_sigpending | ||
136 | 126 n64 rt_sigtimedwait sys_rt_sigtimedwait | ||
137 | 127 n64 rt_sigqueueinfo sys_rt_sigqueueinfo | ||
138 | 128 n64 rt_sigsuspend sys_rt_sigsuspend | ||
139 | 129 n64 sigaltstack sys_sigaltstack | ||
140 | 130 n64 utime sys_utime | ||
141 | 131 n64 mknod sys_mknod | ||
142 | 132 n64 personality sys_personality | ||
143 | 133 n64 ustat sys_ustat | ||
144 | 134 n64 statfs sys_statfs | ||
145 | 135 n64 fstatfs sys_fstatfs | ||
146 | 136 n64 sysfs sys_sysfs | ||
147 | 137 n64 getpriority sys_getpriority | ||
148 | 138 n64 setpriority sys_setpriority | ||
149 | 139 n64 sched_setparam sys_sched_setparam | ||
150 | 140 n64 sched_getparam sys_sched_getparam | ||
151 | 141 n64 sched_setscheduler sys_sched_setscheduler | ||
152 | 142 n64 sched_getscheduler sys_sched_getscheduler | ||
153 | 143 n64 sched_get_priority_max sys_sched_get_priority_max | ||
154 | 144 n64 sched_get_priority_min sys_sched_get_priority_min | ||
155 | 145 n64 sched_rr_get_interval sys_sched_rr_get_interval | ||
156 | 146 n64 mlock sys_mlock | ||
157 | 147 n64 munlock sys_munlock | ||
158 | 148 n64 mlockall sys_mlockall | ||
159 | 149 n64 munlockall sys_munlockall | ||
160 | 150 n64 vhangup sys_vhangup | ||
161 | 151 n64 pivot_root sys_pivot_root | ||
162 | 152 n64 _sysctl sys_ni_syscall | ||
163 | 153 n64 prctl sys_prctl | ||
164 | 154 n64 adjtimex sys_adjtimex | ||
165 | 155 n64 setrlimit sys_setrlimit | ||
166 | 156 n64 chroot sys_chroot | ||
167 | 157 n64 sync sys_sync | ||
168 | 158 n64 acct sys_acct | ||
169 | 159 n64 settimeofday sys_settimeofday | ||
170 | 160 n64 mount sys_mount | ||
171 | 161 n64 umount2 sys_umount | ||
172 | 162 n64 swapon sys_swapon | ||
173 | 163 n64 swapoff sys_swapoff | ||
174 | 164 n64 reboot sys_reboot | ||
175 | 165 n64 sethostname sys_sethostname | ||
176 | 166 n64 setdomainname sys_setdomainname | ||
177 | 167 n64 create_module sys_ni_syscall | ||
178 | 168 n64 init_module sys_init_module | ||
179 | 169 n64 delete_module sys_delete_module | ||
180 | 170 n64 get_kernel_syms sys_ni_syscall | ||
181 | 171 n64 query_module sys_ni_syscall | ||
182 | 172 n64 quotactl sys_quotactl | ||
183 | 173 n64 nfsservctl sys_ni_syscall | ||
184 | 174 n64 getpmsg sys_ni_syscall | ||
185 | 175 n64 putpmsg sys_ni_syscall | ||
186 | 176 n64 afs_syscall sys_ni_syscall | ||
187 | # 177 reserved for security | ||
188 | 177 n64 reserved177 sys_ni_syscall | ||
189 | 178 n64 gettid sys_gettid | ||
190 | 179 n64 readahead sys_readahead | ||
191 | 180 n64 setxattr sys_setxattr | ||
192 | 181 n64 lsetxattr sys_lsetxattr | ||
193 | 182 n64 fsetxattr sys_fsetxattr | ||
194 | 183 n64 getxattr sys_getxattr | ||
195 | 184 n64 lgetxattr sys_lgetxattr | ||
196 | 185 n64 fgetxattr sys_fgetxattr | ||
197 | 186 n64 listxattr sys_listxattr | ||
198 | 187 n64 llistxattr sys_llistxattr | ||
199 | 188 n64 flistxattr sys_flistxattr | ||
200 | 189 n64 removexattr sys_removexattr | ||
201 | 190 n64 lremovexattr sys_lremovexattr | ||
202 | 191 n64 fremovexattr sys_fremovexattr | ||
203 | 192 n64 tkill sys_tkill | ||
204 | 193 n64 reserved193 sys_ni_syscall | ||
205 | 194 n64 futex sys_futex | ||
206 | 195 n64 sched_setaffinity sys_sched_setaffinity | ||
207 | 196 n64 sched_getaffinity sys_sched_getaffinity | ||
208 | 197 n64 cacheflush sys_cacheflush | ||
209 | 198 n64 cachectl sys_cachectl | ||
210 | 199 n64 sysmips __sys_sysmips | ||
211 | 200 n64 io_setup sys_io_setup | ||
212 | 201 n64 io_destroy sys_io_destroy | ||
213 | 202 n64 io_getevents sys_io_getevents | ||
214 | 203 n64 io_submit sys_io_submit | ||
215 | 204 n64 io_cancel sys_io_cancel | ||
216 | 205 n64 exit_group sys_exit_group | ||
217 | 206 n64 lookup_dcookie sys_lookup_dcookie | ||
218 | 207 n64 epoll_create sys_epoll_create | ||
219 | 208 n64 epoll_ctl sys_epoll_ctl | ||
220 | 209 n64 epoll_wait sys_epoll_wait | ||
221 | 210 n64 remap_file_pages sys_remap_file_pages | ||
222 | 211 n64 rt_sigreturn sys_rt_sigreturn | ||
223 | 212 n64 set_tid_address sys_set_tid_address | ||
224 | 213 n64 restart_syscall sys_restart_syscall | ||
225 | 214 n64 semtimedop sys_semtimedop | ||
226 | 215 n64 fadvise64 sys_fadvise64_64 | ||
227 | 216 n64 timer_create sys_timer_create | ||
228 | 217 n64 timer_settime sys_timer_settime | ||
229 | 218 n64 timer_gettime sys_timer_gettime | ||
230 | 219 n64 timer_getoverrun sys_timer_getoverrun | ||
231 | 220 n64 timer_delete sys_timer_delete | ||
232 | 221 n64 clock_settime sys_clock_settime | ||
233 | 222 n64 clock_gettime sys_clock_gettime | ||
234 | 223 n64 clock_getres sys_clock_getres | ||
235 | 224 n64 clock_nanosleep sys_clock_nanosleep | ||
236 | 225 n64 tgkill sys_tgkill | ||
237 | 226 n64 utimes sys_utimes | ||
238 | 227 n64 mbind sys_mbind | ||
239 | 228 n64 get_mempolicy sys_get_mempolicy | ||
240 | 229 n64 set_mempolicy sys_set_mempolicy | ||
241 | 230 n64 mq_open sys_mq_open | ||
242 | 231 n64 mq_unlink sys_mq_unlink | ||
243 | 232 n64 mq_timedsend sys_mq_timedsend | ||
244 | 233 n64 mq_timedreceive sys_mq_timedreceive | ||
245 | 234 n64 mq_notify sys_mq_notify | ||
246 | 235 n64 mq_getsetattr sys_mq_getsetattr | ||
247 | 236 n64 vserver sys_ni_syscall | ||
248 | 237 n64 waitid sys_waitid | ||
249 | # 238 was sys_setaltroot | ||
250 | 239 n64 add_key sys_add_key | ||
251 | 240 n64 request_key sys_request_key | ||
252 | 241 n64 keyctl sys_keyctl | ||
253 | 242 n64 set_thread_area sys_set_thread_area | ||
254 | 243 n64 inotify_init sys_inotify_init | ||
255 | 244 n64 inotify_add_watch sys_inotify_add_watch | ||
256 | 245 n64 inotify_rm_watch sys_inotify_rm_watch | ||
257 | 246 n64 migrate_pages sys_migrate_pages | ||
258 | 247 n64 openat sys_openat | ||
259 | 248 n64 mkdirat sys_mkdirat | ||
260 | 249 n64 mknodat sys_mknodat | ||
261 | 250 n64 fchownat sys_fchownat | ||
262 | 251 n64 futimesat sys_futimesat | ||
263 | 252 n64 newfstatat sys_newfstatat | ||
264 | 253 n64 unlinkat sys_unlinkat | ||
265 | 254 n64 renameat sys_renameat | ||
266 | 255 n64 linkat sys_linkat | ||
267 | 256 n64 symlinkat sys_symlinkat | ||
268 | 257 n64 readlinkat sys_readlinkat | ||
269 | 258 n64 fchmodat sys_fchmodat | ||
270 | 259 n64 faccessat sys_faccessat | ||
271 | 260 n64 pselect6 sys_pselect6 | ||
272 | 261 n64 ppoll sys_ppoll | ||
273 | 262 n64 unshare sys_unshare | ||
274 | 263 n64 splice sys_splice | ||
275 | 264 n64 sync_file_range sys_sync_file_range | ||
276 | 265 n64 tee sys_tee | ||
277 | 266 n64 vmsplice sys_vmsplice | ||
278 | 267 n64 move_pages sys_move_pages | ||
279 | 268 n64 set_robust_list sys_set_robust_list | ||
280 | 269 n64 get_robust_list sys_get_robust_list | ||
281 | 270 n64 kexec_load sys_kexec_load | ||
282 | 271 n64 getcpu sys_getcpu | ||
283 | 272 n64 epoll_pwait sys_epoll_pwait | ||
284 | 273 n64 ioprio_set sys_ioprio_set | ||
285 | 274 n64 ioprio_get sys_ioprio_get | ||
286 | 275 n64 utimensat sys_utimensat | ||
287 | 276 n64 signalfd sys_signalfd | ||
288 | 277 n64 timerfd sys_ni_syscall | ||
289 | 278 n64 eventfd sys_eventfd | ||
290 | 279 n64 fallocate sys_fallocate | ||
291 | 280 n64 timerfd_create sys_timerfd_create | ||
292 | 281 n64 timerfd_gettime sys_timerfd_gettime | ||
293 | 282 n64 timerfd_settime sys_timerfd_settime | ||
294 | 283 n64 signalfd4 sys_signalfd4 | ||
295 | 284 n64 eventfd2 sys_eventfd2 | ||
296 | 285 n64 epoll_create1 sys_epoll_create1 | ||
297 | 286 n64 dup3 sys_dup3 | ||
298 | 287 n64 pipe2 sys_pipe2 | ||
299 | 288 n64 inotify_init1 sys_inotify_init1 | ||
300 | 289 n64 preadv sys_preadv | ||
301 | 290 n64 pwritev sys_pwritev | ||
302 | 291 n64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo | ||
303 | 292 n64 perf_event_open sys_perf_event_open | ||
304 | 293 n64 accept4 sys_accept4 | ||
305 | 294 n64 recvmmsg sys_recvmmsg | ||
306 | 295 n64 fanotify_init sys_fanotify_init | ||
307 | 296 n64 fanotify_mark sys_fanotify_mark | ||
308 | 297 n64 prlimit64 sys_prlimit64 | ||
309 | 298 n64 name_to_handle_at sys_name_to_handle_at | ||
310 | 299 n64 open_by_handle_at sys_open_by_handle_at | ||
311 | 300 n64 clock_adjtime sys_clock_adjtime | ||
312 | 301 n64 syncfs sys_syncfs | ||
313 | 302 n64 sendmmsg sys_sendmmsg | ||
314 | 303 n64 setns sys_setns | ||
315 | 304 n64 process_vm_readv sys_process_vm_readv | ||
316 | 305 n64 process_vm_writev sys_process_vm_writev | ||
317 | 306 n64 kcmp sys_kcmp | ||
318 | 307 n64 finit_module sys_finit_module | ||
319 | 308 n64 getdents64 sys_getdents64 | ||
320 | 309 n64 sched_setattr sys_sched_setattr | ||
321 | 310 n64 sched_getattr sys_sched_getattr | ||
322 | 311 n64 renameat2 sys_renameat2 | ||
323 | 312 n64 seccomp sys_seccomp | ||
324 | 313 n64 getrandom sys_getrandom | ||
325 | 314 n64 memfd_create sys_memfd_create | ||
326 | 315 n64 bpf sys_bpf | ||
327 | 316 n64 execveat sys_execveat | ||
328 | 317 n64 userfaultfd sys_userfaultfd | ||
329 | 318 n64 membarrier sys_membarrier | ||
330 | 319 n64 mlock2 sys_mlock2 | ||
331 | 320 n64 copy_file_range sys_copy_file_range | ||
332 | 321 n64 preadv2 sys_preadv2 | ||
333 | 322 n64 pwritev2 sys_pwritev2 | ||
334 | 323 n64 pkey_mprotect sys_pkey_mprotect | ||
335 | 324 n64 pkey_alloc sys_pkey_alloc | ||
336 | 325 n64 pkey_free sys_pkey_free | ||
337 | 326 n64 statx sys_statx | ||
338 | 327 n64 rseq sys_rseq | ||
339 | 328 n64 io_pgetevents sys_io_pgetevents | ||
340 | # 329 through 423 are reserved to sync up with other architectures | ||
341 | 424 n64 pidfd_send_signal sys_pidfd_send_signal | ||
342 | 425 n64 io_uring_setup sys_io_uring_setup | ||
343 | 426 n64 io_uring_enter sys_io_uring_enter | ||
344 | 427 n64 io_uring_register sys_io_uring_register | ||
345 | 428 n64 open_tree sys_open_tree | ||
346 | 429 n64 move_mount sys_move_mount | ||
347 | 430 n64 fsopen sys_fsopen | ||
348 | 431 n64 fsconfig sys_fsconfig | ||
349 | 432 n64 fsmount sys_fsmount | ||
350 | 433 n64 fspick sys_fspick | ||
351 | 434 n64 pidfd_open sys_pidfd_open | ||
352 | 435 n64 clone3 __sys_clone3 | ||
353 | 436 n64 close_range sys_close_range | ||
354 | 437 n64 openat2 sys_openat2 | ||
355 | 438 n64 pidfd_getfd sys_pidfd_getfd | ||
356 | 439 n64 faccessat2 sys_faccessat2 | ||
357 | 440 n64 process_madvise sys_process_madvise | ||
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl new file mode 100644 index 000000000..29f5f28cf --- /dev/null +++ b/arch/mips/kernel/syscalls/syscall_o32.tbl | |||
@@ -0,0 +1,430 @@ | |||
1 | # SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note | ||
2 | # | ||
3 | # system call numbers and entry vectors for mips | ||
4 | # | ||
5 | # The format is: | ||
6 | # <number> <abi> <name> <entry point> <compat entry point> | ||
7 | # | ||
8 | # The <abi> is always "o32" for this file. | ||
9 | # | ||
10 | 0 o32 syscall sys_syscall sys32_syscall | ||
11 | 1 o32 exit sys_exit | ||
12 | 2 o32 fork __sys_fork | ||
13 | 3 o32 read sys_read | ||
14 | 4 o32 write sys_write | ||
15 | 5 o32 open sys_open compat_sys_open | ||
16 | 6 o32 close sys_close | ||
17 | 7 o32 waitpid sys_waitpid | ||
18 | 8 o32 creat sys_creat | ||
19 | 9 o32 link sys_link | ||
20 | 10 o32 unlink sys_unlink | ||
21 | 11 o32 execve sys_execve compat_sys_execve | ||
22 | 12 o32 chdir sys_chdir | ||
23 | 13 o32 time sys_time32 | ||
24 | 14 o32 mknod sys_mknod | ||
25 | 15 o32 chmod sys_chmod | ||
26 | 16 o32 lchown sys_lchown | ||
27 | 17 o32 break sys_ni_syscall | ||
28 | # 18 was sys_stat | ||
29 | 18 o32 unused18 sys_ni_syscall | ||
30 | 19 o32 lseek sys_lseek | ||
31 | 20 o32 getpid sys_getpid | ||
32 | 21 o32 mount sys_mount | ||
33 | 22 o32 umount sys_oldumount | ||
34 | 23 o32 setuid sys_setuid | ||
35 | 24 o32 getuid sys_getuid | ||
36 | 25 o32 stime sys_stime32 | ||
37 | 26 o32 ptrace sys_ptrace compat_sys_ptrace | ||
38 | 27 o32 alarm sys_alarm | ||
39 | # 28 was sys_fstat | ||
40 | 28 o32 unused28 sys_ni_syscall | ||
41 | 29 o32 pause sys_pause | ||
42 | 30 o32 utime sys_utime32 | ||
43 | 31 o32 stty sys_ni_syscall | ||
44 | 32 o32 gtty sys_ni_syscall | ||
45 | 33 o32 access sys_access | ||
46 | 34 o32 nice sys_nice | ||
47 | 35 o32 ftime sys_ni_syscall | ||
48 | 36 o32 sync sys_sync | ||
49 | 37 o32 kill sys_kill | ||
50 | 38 o32 rename sys_rename | ||
51 | 39 o32 mkdir sys_mkdir | ||
52 | 40 o32 rmdir sys_rmdir | ||
53 | 41 o32 dup sys_dup | ||
54 | 42 o32 pipe sysm_pipe | ||
55 | 43 o32 times sys_times compat_sys_times | ||
56 | 44 o32 prof sys_ni_syscall | ||
57 | 45 o32 brk sys_brk | ||
58 | 46 o32 setgid sys_setgid | ||
59 | 47 o32 getgid sys_getgid | ||
60 | 48 o32 signal sys_ni_syscall | ||
61 | 49 o32 geteuid sys_geteuid | ||
62 | 50 o32 getegid sys_getegid | ||
63 | 51 o32 acct sys_acct | ||
64 | 52 o32 umount2 sys_umount | ||
65 | 53 o32 lock sys_ni_syscall | ||
66 | 54 o32 ioctl sys_ioctl compat_sys_ioctl | ||
67 | 55 o32 fcntl sys_fcntl compat_sys_fcntl | ||
68 | 56 o32 mpx sys_ni_syscall | ||
69 | 57 o32 setpgid sys_setpgid | ||
70 | 58 o32 ulimit sys_ni_syscall | ||
71 | 59 o32 unused59 sys_olduname | ||
72 | 60 o32 umask sys_umask | ||
73 | 61 o32 chroot sys_chroot | ||
74 | 62 o32 ustat sys_ustat compat_sys_ustat | ||
75 | 63 o32 dup2 sys_dup2 | ||
76 | 64 o32 getppid sys_getppid | ||
77 | 65 o32 getpgrp sys_getpgrp | ||
78 | 66 o32 setsid sys_setsid | ||
79 | 67 o32 sigaction sys_sigaction sys_32_sigaction | ||
80 | 68 o32 sgetmask sys_sgetmask | ||
81 | 69 o32 ssetmask sys_ssetmask | ||
82 | 70 o32 setreuid sys_setreuid | ||
83 | 71 o32 setregid sys_setregid | ||
84 | 72 o32 sigsuspend sys_sigsuspend sys32_sigsuspend | ||
85 | 73 o32 sigpending sys_sigpending compat_sys_sigpending | ||
86 | 74 o32 sethostname sys_sethostname | ||
87 | 75 o32 setrlimit sys_setrlimit compat_sys_setrlimit | ||
88 | 76 o32 getrlimit sys_getrlimit compat_sys_getrlimit | ||
89 | 77 o32 getrusage sys_getrusage compat_sys_getrusage | ||
90 | 78 o32 gettimeofday sys_gettimeofday compat_sys_gettimeofday | ||
91 | 79 o32 settimeofday sys_settimeofday compat_sys_settimeofday | ||
92 | 80 o32 getgroups sys_getgroups | ||
93 | 81 o32 setgroups sys_setgroups | ||
94 | # 82 was old_select | ||
95 | 82 o32 reserved82 sys_ni_syscall | ||
96 | 83 o32 symlink sys_symlink | ||
97 | # 84 was sys_lstat | ||
98 | 84 o32 unused84 sys_ni_syscall | ||
99 | 85 o32 readlink sys_readlink | ||
100 | 86 o32 uselib sys_uselib | ||
101 | 87 o32 swapon sys_swapon | ||
102 | 88 o32 reboot sys_reboot | ||
103 | 89 o32 readdir sys_old_readdir compat_sys_old_readdir | ||
104 | 90 o32 mmap sys_mips_mmap | ||
105 | 91 o32 munmap sys_munmap | ||
106 | 92 o32 truncate sys_truncate compat_sys_truncate | ||
107 | 93 o32 ftruncate sys_ftruncate compat_sys_ftruncate | ||
108 | 94 o32 fchmod sys_fchmod | ||
109 | 95 o32 fchown sys_fchown | ||
110 | 96 o32 getpriority sys_getpriority | ||
111 | 97 o32 setpriority sys_setpriority | ||
112 | 98 o32 profil sys_ni_syscall | ||
113 | 99 o32 statfs sys_statfs compat_sys_statfs | ||
114 | 100 o32 fstatfs sys_fstatfs compat_sys_fstatfs | ||
115 | 101 o32 ioperm sys_ni_syscall | ||
116 | 102 o32 socketcall sys_socketcall compat_sys_socketcall | ||
117 | 103 o32 syslog sys_syslog | ||
118 | 104 o32 setitimer sys_setitimer compat_sys_setitimer | ||
119 | 105 o32 getitimer sys_getitimer compat_sys_getitimer | ||
120 | 106 o32 stat sys_newstat compat_sys_newstat | ||
121 | 107 o32 lstat sys_newlstat compat_sys_newlstat | ||
122 | 108 o32 fstat sys_newfstat compat_sys_newfstat | ||
123 | 109 o32 unused109 sys_uname | ||
124 | 110 o32 iopl sys_ni_syscall | ||
125 | 111 o32 vhangup sys_vhangup | ||
126 | 112 o32 idle sys_ni_syscall | ||
127 | 113 o32 vm86 sys_ni_syscall | ||
128 | 114 o32 wait4 sys_wait4 compat_sys_wait4 | ||
129 | 115 o32 swapoff sys_swapoff | ||
130 | 116 o32 sysinfo sys_sysinfo compat_sys_sysinfo | ||
131 | 117 o32 ipc sys_ipc compat_sys_ipc | ||
132 | 118 o32 fsync sys_fsync | ||
133 | 119 o32 sigreturn sys_sigreturn sys32_sigreturn | ||
134 | 120 o32 clone __sys_clone | ||
135 | 121 o32 setdomainname sys_setdomainname | ||
136 | 122 o32 uname sys_newuname | ||
137 | 123 o32 modify_ldt sys_ni_syscall | ||
138 | 124 o32 adjtimex sys_adjtimex_time32 | ||
139 | 125 o32 mprotect sys_mprotect | ||
140 | 126 o32 sigprocmask sys_sigprocmask compat_sys_sigprocmask | ||
141 | 127 o32 create_module sys_ni_syscall | ||
142 | 128 o32 init_module sys_init_module | ||
143 | 129 o32 delete_module sys_delete_module | ||
144 | 130 o32 get_kernel_syms sys_ni_syscall | ||
145 | 131 o32 quotactl sys_quotactl | ||
146 | 132 o32 getpgid sys_getpgid | ||
147 | 133 o32 fchdir sys_fchdir | ||
148 | 134 o32 bdflush sys_bdflush | ||
149 | 135 o32 sysfs sys_sysfs | ||
150 | 136 o32 personality sys_personality sys_32_personality | ||
151 | 137 o32 afs_syscall sys_ni_syscall | ||
152 | 138 o32 setfsuid sys_setfsuid | ||
153 | 139 o32 setfsgid sys_setfsgid | ||
154 | 140 o32 _llseek sys_llseek sys_32_llseek | ||
155 | 141 o32 getdents sys_getdents compat_sys_getdents | ||
156 | 142 o32 _newselect sys_select compat_sys_select | ||
157 | 143 o32 flock sys_flock | ||
158 | 144 o32 msync sys_msync | ||
159 | 145 o32 readv sys_readv | ||
160 | 146 o32 writev sys_writev | ||
161 | 147 o32 cacheflush sys_cacheflush | ||
162 | 148 o32 cachectl sys_cachectl | ||
163 | 149 o32 sysmips __sys_sysmips | ||
164 | 150 o32 unused150 sys_ni_syscall | ||
165 | 151 o32 getsid sys_getsid | ||
166 | 152 o32 fdatasync sys_fdatasync | ||
167 | 153 o32 _sysctl sys_ni_syscall | ||
168 | 154 o32 mlock sys_mlock | ||
169 | 155 o32 munlock sys_munlock | ||
170 | 156 o32 mlockall sys_mlockall | ||
171 | 157 o32 munlockall sys_munlockall | ||
172 | 158 o32 sched_setparam sys_sched_setparam | ||
173 | 159 o32 sched_getparam sys_sched_getparam | ||
174 | 160 o32 sched_setscheduler sys_sched_setscheduler | ||
175 | 161 o32 sched_getscheduler sys_sched_getscheduler | ||
176 | 162 o32 sched_yield sys_sched_yield | ||
177 | 163 o32 sched_get_priority_max sys_sched_get_priority_max | ||
178 | 164 o32 sched_get_priority_min sys_sched_get_priority_min | ||
179 | 165 o32 sched_rr_get_interval sys_sched_rr_get_interval_time32 | ||
180 | 166 o32 nanosleep sys_nanosleep_time32 | ||
181 | 167 o32 mremap sys_mremap | ||
182 | 168 o32 accept sys_accept | ||
183 | 169 o32 bind sys_bind | ||
184 | 170 o32 connect sys_connect | ||
185 | 171 o32 getpeername sys_getpeername | ||
186 | 172 o32 getsockname sys_getsockname | ||
187 | 173 o32 getsockopt sys_getsockopt sys_getsockopt | ||
188 | 174 o32 listen sys_listen | ||
189 | 175 o32 recv sys_recv compat_sys_recv | ||
190 | 176 o32 recvfrom sys_recvfrom compat_sys_recvfrom | ||
191 | 177 o32 recvmsg sys_recvmsg compat_sys_recvmsg | ||
192 | 178 o32 send sys_send | ||
193 | 179 o32 sendmsg sys_sendmsg compat_sys_sendmsg | ||
194 | 180 o32 sendto sys_sendto | ||
195 | 181 o32 setsockopt sys_setsockopt sys_setsockopt | ||
196 | 182 o32 shutdown sys_shutdown | ||
197 | 183 o32 socket sys_socket | ||
198 | 184 o32 socketpair sys_socketpair | ||
199 | 185 o32 setresuid sys_setresuid | ||
200 | 186 o32 getresuid sys_getresuid | ||
201 | 187 o32 query_module sys_ni_syscall | ||
202 | 188 o32 poll sys_poll | ||
203 | 189 o32 nfsservctl sys_ni_syscall | ||
204 | 190 o32 setresgid sys_setresgid | ||
205 | 191 o32 getresgid sys_getresgid | ||
206 | 192 o32 prctl sys_prctl | ||
207 | 193 o32 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn | ||
208 | 194 o32 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction | ||
209 | 195 o32 rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask | ||
210 | 196 o32 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending | ||
211 | 197 o32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32 | ||
212 | 198 o32 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo | ||
213 | 199 o32 rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend | ||
214 | 200 o32 pread64 sys_pread64 sys_32_pread | ||
215 | 201 o32 pwrite64 sys_pwrite64 sys_32_pwrite | ||
216 | 202 o32 chown sys_chown | ||
217 | 203 o32 getcwd sys_getcwd | ||
218 | 204 o32 capget sys_capget | ||
219 | 205 o32 capset sys_capset | ||
220 | 206 o32 sigaltstack sys_sigaltstack compat_sys_sigaltstack | ||
221 | 207 o32 sendfile sys_sendfile compat_sys_sendfile | ||
222 | 208 o32 getpmsg sys_ni_syscall | ||
223 | 209 o32 putpmsg sys_ni_syscall | ||
224 | 210 o32 mmap2 sys_mips_mmap2 | ||
225 | 211 o32 truncate64 sys_truncate64 sys_32_truncate64 | ||
226 | 212 o32 ftruncate64 sys_ftruncate64 sys_32_ftruncate64 | ||
227 | 213 o32 stat64 sys_stat64 sys_newstat | ||
228 | 214 o32 lstat64 sys_lstat64 sys_newlstat | ||
229 | 215 o32 fstat64 sys_fstat64 sys_newfstat | ||
230 | 216 o32 pivot_root sys_pivot_root | ||
231 | 217 o32 mincore sys_mincore | ||
232 | 218 o32 madvise sys_madvise | ||
233 | 219 o32 getdents64 sys_getdents64 | ||
234 | 220 o32 fcntl64 sys_fcntl64 compat_sys_fcntl64 | ||
235 | 221 o32 reserved221 sys_ni_syscall | ||
236 | 222 o32 gettid sys_gettid | ||
237 | 223 o32 readahead sys_readahead sys32_readahead | ||
238 | 224 o32 setxattr sys_setxattr | ||
239 | 225 o32 lsetxattr sys_lsetxattr | ||
240 | 226 o32 fsetxattr sys_fsetxattr | ||
241 | 227 o32 getxattr sys_getxattr | ||
242 | 228 o32 lgetxattr sys_lgetxattr | ||
243 | 229 o32 fgetxattr sys_fgetxattr | ||
244 | 230 o32 listxattr sys_listxattr | ||
245 | 231 o32 llistxattr sys_llistxattr | ||
246 | 232 o32 flistxattr sys_flistxattr | ||
247 | 233 o32 removexattr sys_removexattr | ||
248 | 234 o32 lremovexattr sys_lremovexattr | ||
249 | 235 o32 fremovexattr sys_fremovexattr | ||
250 | 236 o32 tkill sys_tkill | ||
251 | 237 o32 sendfile64 sys_sendfile64 | ||
252 | 238 o32 futex sys_futex_time32 | ||
253 | 239 o32 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity | ||
254 | 240 o32 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity | ||
255 | 241 o32 io_setup sys_io_setup compat_sys_io_setup | ||
256 | 242 o32 io_destroy sys_io_destroy | ||
257 | 243 o32 io_getevents sys_io_getevents_time32 | ||
258 | 244 o32 io_submit sys_io_submit compat_sys_io_submit | ||
259 | 245 o32 io_cancel sys_io_cancel | ||
260 | 246 o32 exit_group sys_exit_group | ||
261 | 247 o32 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie | ||
262 | 248 o32 epoll_create sys_epoll_create | ||
263 | 249 o32 epoll_ctl sys_epoll_ctl | ||
264 | 250 o32 epoll_wait sys_epoll_wait | ||
265 | 251 o32 remap_file_pages sys_remap_file_pages | ||
266 | 252 o32 set_tid_address sys_set_tid_address | ||
267 | 253 o32 restart_syscall sys_restart_syscall | ||
268 | 254 o32 fadvise64 sys_fadvise64_64 sys32_fadvise64_64 | ||
269 | 255 o32 statfs64 sys_statfs64 compat_sys_statfs64 | ||
270 | 256 o32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64 | ||
271 | 257 o32 timer_create sys_timer_create compat_sys_timer_create | ||
272 | 258 o32 timer_settime sys_timer_settime32 | ||
273 | 259 o32 timer_gettime sys_timer_gettime32 | ||
274 | 260 o32 timer_getoverrun sys_timer_getoverrun | ||
275 | 261 o32 timer_delete sys_timer_delete | ||
276 | 262 o32 clock_settime sys_clock_settime32 | ||
277 | 263 o32 clock_gettime sys_clock_gettime32 | ||
278 | 264 o32 clock_getres sys_clock_getres_time32 | ||
279 | 265 o32 clock_nanosleep sys_clock_nanosleep_time32 | ||
280 | 266 o32 tgkill sys_tgkill | ||
281 | 267 o32 utimes sys_utimes_time32 | ||
282 | 268 o32 mbind sys_mbind compat_sys_mbind | ||
283 | 269 o32 get_mempolicy sys_get_mempolicy compat_sys_get_mempolicy | ||
284 | 270 o32 set_mempolicy sys_set_mempolicy compat_sys_set_mempolicy | ||
285 | 271 o32 mq_open sys_mq_open compat_sys_mq_open | ||
286 | 272 o32 mq_unlink sys_mq_unlink | ||
287 | 273 o32 mq_timedsend sys_mq_timedsend_time32 | ||
288 | 274 o32 mq_timedreceive sys_mq_timedreceive_time32 | ||
289 | 275 o32 mq_notify sys_mq_notify compat_sys_mq_notify | ||
290 | 276 o32 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr | ||
291 | 277 o32 vserver sys_ni_syscall | ||
292 | 278 o32 waitid sys_waitid compat_sys_waitid | ||
293 | # 279 was sys_setaltroot | ||
294 | 280 o32 add_key sys_add_key | ||
295 | 281 o32 request_key sys_request_key | ||
296 | 282 o32 keyctl sys_keyctl compat_sys_keyctl | ||
297 | 283 o32 set_thread_area sys_set_thread_area | ||
298 | 284 o32 inotify_init sys_inotify_init | ||
299 | 285 o32 inotify_add_watch sys_inotify_add_watch | ||
300 | 286 o32 inotify_rm_watch sys_inotify_rm_watch | ||
301 | 287 o32 migrate_pages sys_migrate_pages compat_sys_migrate_pages | ||
302 | 288 o32 openat sys_openat compat_sys_openat | ||
303 | 289 o32 mkdirat sys_mkdirat | ||
304 | 290 o32 mknodat sys_mknodat | ||
305 | 291 o32 fchownat sys_fchownat | ||
306 | 292 o32 futimesat sys_futimesat_time32 | ||
307 | 293 o32 fstatat64 sys_fstatat64 sys_newfstatat | ||
308 | 294 o32 unlinkat sys_unlinkat | ||
309 | 295 o32 renameat sys_renameat | ||
310 | 296 o32 linkat sys_linkat | ||
311 | 297 o32 symlinkat sys_symlinkat | ||
312 | 298 o32 readlinkat sys_readlinkat | ||
313 | 299 o32 fchmodat sys_fchmodat | ||
314 | 300 o32 faccessat sys_faccessat | ||
315 | 301 o32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32 | ||
316 | 302 o32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32 | ||
317 | 303 o32 unshare sys_unshare | ||
318 | 304 o32 splice sys_splice | ||
319 | 305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range | ||
320 | 306 o32 tee sys_tee | ||
321 | 307 o32 vmsplice sys_vmsplice | ||
322 | 308 o32 move_pages sys_move_pages compat_sys_move_pages | ||
323 | 309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list | ||
324 | 310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list | ||
325 | 311 o32 kexec_load sys_kexec_load compat_sys_kexec_load | ||
326 | 312 o32 getcpu sys_getcpu | ||
327 | 313 o32 epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait | ||
328 | 314 o32 ioprio_set sys_ioprio_set | ||
329 | 315 o32 ioprio_get sys_ioprio_get | ||
330 | 316 o32 utimensat sys_utimensat_time32 | ||
331 | 317 o32 signalfd sys_signalfd compat_sys_signalfd | ||
332 | 318 o32 timerfd sys_ni_syscall | ||
333 | 319 o32 eventfd sys_eventfd | ||
334 | 320 o32 fallocate sys_fallocate sys32_fallocate | ||
335 | 321 o32 timerfd_create sys_timerfd_create | ||
336 | 322 o32 timerfd_gettime sys_timerfd_gettime32 | ||
337 | 323 o32 timerfd_settime sys_timerfd_settime32 | ||
338 | 324 o32 signalfd4 sys_signalfd4 compat_sys_signalfd4 | ||
339 | 325 o32 eventfd2 sys_eventfd2 | ||
340 | 326 o32 epoll_create1 sys_epoll_create1 | ||
341 | 327 o32 dup3 sys_dup3 | ||
342 | 328 o32 pipe2 sys_pipe2 | ||
343 | 329 o32 inotify_init1 sys_inotify_init1 | ||
344 | 330 o32 preadv sys_preadv compat_sys_preadv | ||
345 | 331 o32 pwritev sys_pwritev compat_sys_pwritev | ||
346 | 332 o32 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo | ||
347 | 333 o32 perf_event_open sys_perf_event_open | ||
348 | 334 o32 accept4 sys_accept4 | ||
349 | 335 o32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32 | ||
350 | 336 o32 fanotify_init sys_fanotify_init | ||
351 | 337 o32 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark | ||
352 | 338 o32 prlimit64 sys_prlimit64 | ||
353 | 339 o32 name_to_handle_at sys_name_to_handle_at | ||
354 | 340 o32 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at | ||
355 | 341 o32 clock_adjtime sys_clock_adjtime32 | ||
356 | 342 o32 syncfs sys_syncfs | ||
357 | 343 o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg | ||
358 | 344 o32 setns sys_setns | ||
359 | 345 o32 process_vm_readv sys_process_vm_readv | ||
360 | 346 o32 process_vm_writev sys_process_vm_writev | ||
361 | 347 o32 kcmp sys_kcmp | ||
362 | 348 o32 finit_module sys_finit_module | ||
363 | 349 o32 sched_setattr sys_sched_setattr | ||
364 | 350 o32 sched_getattr sys_sched_getattr | ||
365 | 351 o32 renameat2 sys_renameat2 | ||
366 | 352 o32 seccomp sys_seccomp | ||
367 | 353 o32 getrandom sys_getrandom | ||
368 | 354 o32 memfd_create sys_memfd_create | ||
369 | 355 o32 bpf sys_bpf | ||
370 | 356 o32 execveat sys_execveat compat_sys_execveat | ||
371 | 357 o32 userfaultfd sys_userfaultfd | ||
372 | 358 o32 membarrier sys_membarrier | ||
373 | 359 o32 mlock2 sys_mlock2 | ||
374 | 360 o32 copy_file_range sys_copy_file_range | ||
375 | 361 o32 preadv2 sys_preadv2 compat_sys_preadv2 | ||
376 | 362 o32 pwritev2 sys_pwritev2 compat_sys_pwritev2 | ||
377 | 363 o32 pkey_mprotect sys_pkey_mprotect | ||
378 | 364 o32 pkey_alloc sys_pkey_alloc | ||
379 | 365 o32 pkey_free sys_pkey_free | ||
380 | 366 o32 statx sys_statx | ||
381 | 367 o32 rseq sys_rseq | ||
382 | 368 o32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents | ||
383 | # room for arch specific calls | ||
384 | 393 o32 semget sys_semget | ||
385 | 394 o32 semctl sys_semctl compat_sys_semctl | ||
386 | 395 o32 shmget sys_shmget | ||
387 | 396 o32 shmctl sys_shmctl compat_sys_shmctl | ||
388 | 397 o32 shmat sys_shmat compat_sys_shmat | ||
389 | 398 o32 shmdt sys_shmdt | ||
390 | 399 o32 msgget sys_msgget | ||
391 | 400 o32 msgsnd sys_msgsnd compat_sys_msgsnd | ||
392 | 401 o32 msgrcv sys_msgrcv compat_sys_msgrcv | ||
393 | 402 o32 msgctl sys_msgctl compat_sys_msgctl | ||
394 | 403 o32 clock_gettime64 sys_clock_gettime sys_clock_gettime | ||
395 | 404 o32 clock_settime64 sys_clock_settime sys_clock_settime | ||
396 | 405 o32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime | ||
397 | 406 o32 clock_getres_time64 sys_clock_getres sys_clock_getres | ||
398 | 407 o32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep | ||
399 | 408 o32 timer_gettime64 sys_timer_gettime sys_timer_gettime | ||
400 | 409 o32 timer_settime64 sys_timer_settime sys_timer_settime | ||
401 | 410 o32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime | ||
402 | 411 o32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime | ||
403 | 412 o32 utimensat_time64 sys_utimensat sys_utimensat | ||
404 | 413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64 | ||
405 | 414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64 | ||
406 | 416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents | ||
407 | 417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64 | ||
408 | 418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend | ||
409 | 419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive | ||
410 | 420 o32 semtimedop_time64 sys_semtimedop sys_semtimedop | ||
411 | 421 o32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64 | ||
412 | 422 o32 futex_time64 sys_futex sys_futex | ||
413 | 423 o32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval | ||
414 | 424 o32 pidfd_send_signal sys_pidfd_send_signal | ||
415 | 425 o32 io_uring_setup sys_io_uring_setup | ||
416 | 426 o32 io_uring_enter sys_io_uring_enter | ||
417 | 427 o32 io_uring_register sys_io_uring_register | ||
418 | 428 o32 open_tree sys_open_tree | ||
419 | 429 o32 move_mount sys_move_mount | ||
420 | 430 o32 fsopen sys_fsopen | ||
421 | 431 o32 fsconfig sys_fsconfig | ||
422 | 432 o32 fsmount sys_fsmount | ||
423 | 433 o32 fspick sys_fspick | ||
424 | 434 o32 pidfd_open sys_pidfd_open | ||
425 | 435 o32 clone3 __sys_clone3 | ||
426 | 436 o32 close_range sys_close_range | ||
427 | 437 o32 openat2 sys_openat2 | ||
428 | 438 o32 pidfd_getfd sys_pidfd_getfd | ||
429 | 439 o32 faccessat2 sys_faccessat2 | ||
430 | 440 o32 process_madvise sys_process_madvise | ||
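Note on the fifth column: it names the compat entry point used when a 32-bit o32 binary runs on a 64-bit kernel. The table generator (syscalltbl.sh, below) is then invoked with an abi of "64_o32" and prefers that column over the native entry wherever one is present. A minimal sketch of the two generated rows for open, assuming the o32 number base of 4000 that MIPS applies as the offset:

    /* 32-bit kernel, abi "o32":    */ __SYSCALL(4005,sys_open)
    /* 64-bit kernel, abi "64_o32": */ __SYSCALL(4005,compat_sys_open)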
diff --git a/arch/mips/kernel/syscalls/syscallhdr.sh b/arch/mips/kernel/syscalls/syscallhdr.sh new file mode 100644 index 000000000..2e241e713 --- /dev/null +++ b/arch/mips/kernel/syscalls/syscallhdr.sh | |||
@@ -0,0 +1,36 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | |||
4 | in="$1" | ||
5 | out="$2" | ||
6 | my_abis=`echo "($3)" | tr ',' '|'` | ||
7 | prefix="$4" | ||
8 | offset="$5" | ||
9 | |||
10 | fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \ | ||
11 | -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ | ||
12 | -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` | ||
13 | grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( | ||
14 | printf "#ifndef %s\n" "${fileguard}" | ||
15 | printf "#define %s\n" "${fileguard}" | ||
16 | printf "\n" | ||
17 | |||
18 | nxt=0 | ||
19 | while read nr abi name entry compat ; do | ||
20 | if [ -z "$offset" ]; then | ||
21 | printf "#define __NR_%s%s\t%s\n" \ | ||
22 | "${prefix}" "${name}" "${nr}" | ||
23 | else | ||
24 | printf "#define __NR_%s%s\t(%s + %s)\n" \ | ||
25 | "${prefix}" "${name}" "${offset}" "${nr}" | ||
26 | fi | ||
27 | nxt=$((nr+1)) | ||
28 | done | ||
29 | |||
30 | printf "\n" | ||
31 | printf "#ifdef __KERNEL__\n" | ||
32 | printf "#define __NR_syscalls\t%s\n" "${nxt}" | ||
33 | printf "#endif\n" | ||
34 | printf "\n" | ||
35 | printf "#endif /* %s */\n" "${fileguard}" | ||
36 | ) > "$out" | ||
diff --git a/arch/mips/kernel/syscalls/syscallnr.sh b/arch/mips/kernel/syscalls/syscallnr.sh new file mode 100644 index 000000000..60bbdb3fe --- /dev/null +++ b/arch/mips/kernel/syscalls/syscallnr.sh | |||
@@ -0,0 +1,28 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | |||
4 | in="$1" | ||
5 | out="$2" | ||
6 | my_abis=`echo "($3)" | tr ',' '|'` | ||
7 | prefix="$4" | ||
8 | offset="$5" | ||
9 | |||
10 | fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \ | ||
11 | -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \ | ||
12 | -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'` | ||
13 | grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( | ||
14 | printf "#ifndef %s\n" "${fileguard}" | ||
15 | printf "#define %s\n" "${fileguard}" | ||
16 | printf "\n" | ||
17 | |||
18 | nxt=0 | ||
19 | while read nr abi name entry compat ; do | ||
20 | nxt=$((nr+1)) | ||
21 | done | ||
22 | |||
23 | printf "#define __NR_%s_Linux\t%s\n" "${prefix}" "${offset}" | ||
24 | printf "#define __NR_%s_Linux_syscalls\t%s\n" "${prefix}" "${nxt}" | ||
25 | printf "\n" | ||
26 | printf "#endif /* %s */" "${fileguard}" | ||
27 | printf "\n" | ||
28 | ) > "$out" | ||
diff --git a/arch/mips/kernel/syscalls/syscalltbl.sh b/arch/mips/kernel/syscalls/syscalltbl.sh new file mode 100644 index 000000000..1e2570740 --- /dev/null +++ b/arch/mips/kernel/syscalls/syscalltbl.sh | |||
@@ -0,0 +1,36 @@ | |||
1 | #!/bin/sh | ||
2 | # SPDX-License-Identifier: GPL-2.0 | ||
3 | |||
4 | in="$1" | ||
5 | out="$2" | ||
6 | my_abis=`echo "($3)" | tr ',' '|'` | ||
7 | my_abi="$4" | ||
8 | offset="$5" | ||
9 | |||
10 | emit() { | ||
11 | t_nxt="$1" | ||
12 | t_nr="$2" | ||
13 | t_entry="$3" | ||
14 | |||
15 | while [ $t_nxt -lt $t_nr ]; do | ||
16 | printf "__SYSCALL(%s,sys_ni_syscall)\n" "${t_nxt}" | ||
17 | t_nxt=$((t_nxt+1)) | ||
18 | done | ||
19 | printf "__SYSCALL(%s,%s)\n" "${t_nxt}" "${t_entry}" | ||
20 | } | ||
21 | |||
22 | grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | ( | ||
23 | nxt=0 | ||
24 | if [ -z "$offset" ]; then | ||
25 | offset=0 | ||
26 | fi | ||
27 | |||
28 | while read nr abi name entry compat ; do | ||
29 | if [ "$my_abi" = "64_o32" ] && [ ! -z "$compat" ]; then | ||
30 | emit $((nxt+offset)) $((nr+offset)) $compat | ||
31 | else | ||
32 | emit $((nxt+offset)) $((nr+offset)) $entry | ||
33 | fi | ||
34 | nxt=$((nr+1)) | ||
35 | done | ||
36 | ) > "$out" | ||
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c new file mode 100644 index 000000000..9c1a20191 --- /dev/null +++ b/arch/mips/kernel/sysrq.c | |||
@@ -0,0 +1,66 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | /* | ||
3 | * MIPS specific sysrq operations. | ||
4 | * | ||
5 | * Copyright (C) 2015 Imagination Technologies Ltd. | ||
6 | */ | ||
7 | #include <linux/init.h> | ||
8 | #include <linux/smp.h> | ||
9 | #include <linux/spinlock.h> | ||
10 | #include <linux/sysrq.h> | ||
11 | #include <linux/workqueue.h> | ||
12 | |||
13 | #include <asm/cpu-features.h> | ||
14 | #include <asm/mipsregs.h> | ||
15 | #include <asm/tlbdebug.h> | ||
16 | |||
17 | /* | ||
18 | * Dump TLB entries on all CPUs. | ||
19 | */ | ||
20 | |||
21 | static DEFINE_SPINLOCK(show_lock); | ||
22 | |||
23 | static void sysrq_tlbdump_single(void *dummy) | ||
24 | { | ||
25 | unsigned long flags; | ||
26 | |||
27 | spin_lock_irqsave(&show_lock, flags); | ||
28 | |||
29 | pr_info("CPU%d:\n", smp_processor_id()); | ||
30 | dump_tlb_regs(); | ||
31 | pr_info("\n"); | ||
32 | dump_tlb_all(); | ||
33 | pr_info("\n"); | ||
34 | |||
35 | spin_unlock_irqrestore(&show_lock, flags); | ||
36 | } | ||
37 | |||
38 | #ifdef CONFIG_SMP | ||
39 | static void sysrq_tlbdump_othercpus(struct work_struct *dummy) | ||
40 | { | ||
41 | smp_call_function(sysrq_tlbdump_single, NULL, 0); | ||
42 | } | ||
43 | |||
44 | static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus); | ||
45 | #endif | ||
46 | |||
47 | static void sysrq_handle_tlbdump(int key) | ||
48 | { | ||
49 | sysrq_tlbdump_single(NULL); | ||
50 | #ifdef CONFIG_SMP | ||
51 | schedule_work(&sysrq_tlbdump); | ||
52 | #endif | ||
53 | } | ||
54 | |||
55 | static const struct sysrq_key_op sysrq_tlbdump_op = { | ||
56 | .handler = sysrq_handle_tlbdump, | ||
57 | .help_msg = "show-tlbs(x)", | ||
58 | .action_msg = "Show TLB entries", | ||
59 | .enable_mask = SYSRQ_ENABLE_DUMP, | ||
60 | }; | ||
61 | |||
62 | static int __init mips_sysrq_init(void) | ||
63 | { | ||
64 | return register_sysrq_key('x', &sysrq_tlbdump_op); | ||
65 | } | ||
66 | arch_initcall(mips_sysrq_init); | ||
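Once mips_sysrq_init() has run, the dump can be exercised without a keyboard by writing the registered key letter to procfs, e.g. `echo x > /proc/sysrq-trigger`, provided the kernel was built with CONFIG_MAGIC_SYSRQ. The current CPU dumps synchronously; CONFIG_SMP kernels additionally schedule a work item so that the cross-CPU smp_call_function() runs from process context rather than from the (possibly interrupt) context the sysrq handler was invoked in.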
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c new file mode 100644 index 000000000..ed339d797 --- /dev/null +++ b/arch/mips/kernel/time.c | |||
@@ -0,0 +1,167 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright 2001 MontaVista Software Inc. | ||
4 | * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net | ||
5 | * Copyright (c) 2003, 2004 Maciej W. Rozycki | ||
6 | * | ||
7 | * Common time service routines for MIPS machines. | ||
8 | */ | ||
9 | #include <linux/bug.h> | ||
10 | #include <linux/clockchips.h> | ||
11 | #include <linux/types.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/init.h> | ||
14 | #include <linux/sched.h> | ||
15 | #include <linux/param.h> | ||
16 | #include <linux/time.h> | ||
17 | #include <linux/timex.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/spinlock.h> | ||
20 | #include <linux/export.h> | ||
21 | #include <linux/cpufreq.h> | ||
22 | #include <linux/delay.h> | ||
23 | |||
24 | #include <asm/cpu-features.h> | ||
25 | #include <asm/cpu-type.h> | ||
26 | #include <asm/div64.h> | ||
27 | #include <asm/time.h> | ||
28 | |||
29 | #ifdef CONFIG_CPU_FREQ | ||
30 | |||
31 | static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref); | ||
32 | static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq); | ||
33 | static unsigned long glb_lpj_ref; | ||
34 | static unsigned long glb_lpj_ref_freq; | ||
35 | |||
36 | static int cpufreq_callback(struct notifier_block *nb, | ||
37 | unsigned long val, void *data) | ||
38 | { | ||
39 | struct cpufreq_freqs *freq = data; | ||
40 | struct cpumask *cpus = freq->policy->cpus; | ||
41 | unsigned long lpj; | ||
42 | int cpu; | ||
43 | |||
44 | /* | ||
45 | * Skip the lpj adjustment if the CPU-freq transition leaves the | ||
46 | * delay loop rate unaffected (CPUFREQ_CONST_LOOPS). | ||
47 | */ | ||
48 | if (freq->flags & CPUFREQ_CONST_LOOPS) | ||
49 | return NOTIFY_OK; | ||
50 | |||
51 | /* Save the initial lpj values for future scaling. */ | ||
52 | if (!glb_lpj_ref) { | ||
53 | glb_lpj_ref = boot_cpu_data.udelay_val; | ||
54 | glb_lpj_ref_freq = freq->old; | ||
55 | |||
56 | for_each_online_cpu(cpu) { | ||
57 | per_cpu(pcp_lpj_ref, cpu) = | ||
58 | cpu_data[cpu].udelay_val; | ||
59 | per_cpu(pcp_lpj_ref_freq, cpu) = freq->old; | ||
60 | } | ||
61 | } | ||
62 | |||
63 | /* | ||
64 | * Adjust global lpj variable and per-CPU udelay_val number in | ||
65 | * accordance with the new CPU frequency. | ||
66 | */ | ||
67 | if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) || | ||
68 | (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) { | ||
69 | loops_per_jiffy = cpufreq_scale(glb_lpj_ref, | ||
70 | glb_lpj_ref_freq, | ||
71 | freq->new); | ||
72 | |||
73 | for_each_cpu(cpu, cpus) { | ||
74 | lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu), | ||
75 | per_cpu(pcp_lpj_ref_freq, cpu), | ||
76 | freq->new); | ||
77 | cpu_data[cpu].udelay_val = (unsigned int)lpj; | ||
78 | } | ||
79 | } | ||
80 | |||
81 | return NOTIFY_OK; | ||
82 | } | ||
83 | |||
84 | static struct notifier_block cpufreq_notifier = { | ||
85 | .notifier_call = cpufreq_callback, | ||
86 | }; | ||
87 | |||
88 | static int __init register_cpufreq_notifier(void) | ||
89 | { | ||
90 | return cpufreq_register_notifier(&cpufreq_notifier, | ||
91 | CPUFREQ_TRANSITION_NOTIFIER); | ||
92 | } | ||
93 | core_initcall(register_cpufreq_notifier); | ||
94 | |||
95 | #endif /* CONFIG_CPU_FREQ */ | ||
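cpufreq_scale() applies a linear rescaling of the calibrated loops-per-jiffy references captured on the first transition. A minimal user-space sketch of that arithmetic, with made-up calibration numbers (the in-kernel helper does the same computation with overflow-safe fixed-point steps):

    #include <stdio.h>

    /* new_lpj = ref_lpj * new_freq / ref_freq, as done by cpufreq_scale() */
    static unsigned long scale_lpj(unsigned long ref, unsigned long ref_khz,
                                   unsigned long new_khz)
    {
            return (unsigned long)((unsigned long long)ref * new_khz / ref_khz);
    }

    int main(void)
    {
            /* calibrated at 800 MHz, core switches down to 400 MHz */
            printf("%lu\n", scale_lpj(2000000UL, 800000UL, 400000UL)); /* 1000000 */
            return 0;
    }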
96 | |||
97 | /* | ||
98 | * rtc_lock serializes accesses to the RTC | ||
99 | */ | ||
100 | DEFINE_SPINLOCK(rtc_lock); | ||
101 | EXPORT_SYMBOL(rtc_lock); | ||
102 | |||
103 | static int null_perf_irq(void) | ||
104 | { | ||
105 | return 0; | ||
106 | } | ||
107 | |||
108 | int (*perf_irq)(void) = null_perf_irq; | ||
109 | |||
110 | EXPORT_SYMBOL(perf_irq); | ||
111 | |||
112 | /* | ||
113 | * time_init() does the following: | ||
114 | * | ||
115 | * 1) plat_time_init() - | ||
116 | * a) (optional) set up RTC routines, | ||
117 | * b) (optional) calibrate and set the mips_hpt_frequency | ||
118 | * (only needed if you intend to use the CPU counter as the | ||
119 | * timer interrupt source) | ||
120 | * 2) calculate a couple of cached variables for later usage | ||
121 | */ | ||
122 | |||
123 | unsigned int mips_hpt_frequency; | ||
124 | EXPORT_SYMBOL_GPL(mips_hpt_frequency); | ||
125 | |||
126 | static __init int cpu_has_mfc0_count_bug(void) | ||
127 | { | ||
128 | switch (current_cpu_type()) { | ||
129 | case CPU_R4000PC: | ||
130 | case CPU_R4000SC: | ||
131 | case CPU_R4000MC: | ||
132 | /* | ||
133 | * V3.0 is documented as suffering from the mfc0 from count bug. | ||
134 | * As far as we know, this is the last version of the R4000. | ||
135 | * Later versions were marketed as R4400. | ||
136 | */ | ||
137 | return 1; | ||
138 | |||
139 | case CPU_R4400PC: | ||
140 | case CPU_R4400SC: | ||
141 | case CPU_R4400MC: | ||
142 | /* | ||
143 | * The published errata for the R4400 up to revision 3.0 say the | ||
144 | * CPU has the mfc0 from count bug; 3.0 seems to be the last | ||
145 | * revision produced. | ||
146 | */ | ||
147 | return 1; | ||
148 | } | ||
149 | |||
150 | return 0; | ||
151 | } | ||
152 | |||
153 | void __init time_init(void) | ||
154 | { | ||
155 | plat_time_init(); | ||
156 | |||
157 | /* | ||
158 | * The use of the R4k timer as a clock event takes precedence; | ||
159 | * if reading the Count register might interfere with the timer | ||
160 | * interrupt, then we don't use the timer as a clock source. | ||
161 | * We may still use the timer as a clock source though if the | ||
162 | * timer interrupt isn't reliable; the interference doesn't | ||
163 | * matter then, because we don't use the interrupt. | ||
164 | */ | ||
165 | if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug()) | ||
166 | init_mips_clocksource(); | ||
167 | } | ||
diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c new file mode 100644 index 000000000..08ad6371f --- /dev/null +++ b/arch/mips/kernel/topology.c | |||
@@ -0,0 +1,33 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include <linux/cpu.h> | ||
3 | #include <linux/cpumask.h> | ||
4 | #include <linux/init.h> | ||
5 | #include <linux/node.h> | ||
6 | #include <linux/nodemask.h> | ||
7 | #include <linux/percpu.h> | ||
8 | |||
9 | static DEFINE_PER_CPU(struct cpu, cpu_devices); | ||
10 | |||
11 | static int __init topology_init(void) | ||
12 | { | ||
13 | int i, ret; | ||
14 | |||
15 | #ifdef CONFIG_NUMA | ||
16 | for_each_online_node(i) | ||
17 | register_one_node(i); | ||
18 | #endif /* CONFIG_NUMA */ | ||
19 | |||
20 | for_each_present_cpu(i) { | ||
21 | struct cpu *c = &per_cpu(cpu_devices, i); | ||
22 | |||
23 | c->hotpluggable = !!i; | ||
24 | ret = register_cpu(c, i); | ||
25 | if (ret) | ||
26 | pr_warn("topology_init: register_cpu %d failed (%d)\n", | ||
27 | i, ret); | ||
28 | } | ||
29 | |||
30 | return 0; | ||
31 | } | ||
32 | |||
33 | subsys_initcall(topology_init); | ||
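Note the `c->hotpluggable = !!i` assignment: every CPU except CPU0 is marked hotpluggable, and register_cpu() only creates the sysfs `online` attribute for hotpluggable CPUs. So, for example, `echo 0 > /sys/devices/system/cpu/cpu1/online` can offline cpu1, while cpu0 has no `online` file and cannot be offlined on these systems.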
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c new file mode 100644 index 000000000..b1fe4518b --- /dev/null +++ b/arch/mips/kernel/traps.c | |||
@@ -0,0 +1,2571 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle | ||
7 | * Copyright (C) 1995, 1996 Paul M. Antoine | ||
8 | * Copyright (C) 1998 Ulf Carlsson | ||
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | ||
10 | * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com | ||
11 | * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki | ||
12 | * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved. | ||
13 | * Copyright (C) 2014, Imagination Technologies Ltd. | ||
14 | */ | ||
15 | #include <linux/bitops.h> | ||
16 | #include <linux/bug.h> | ||
17 | #include <linux/compiler.h> | ||
18 | #include <linux/context_tracking.h> | ||
19 | #include <linux/cpu_pm.h> | ||
20 | #include <linux/kexec.h> | ||
21 | #include <linux/init.h> | ||
22 | #include <linux/kernel.h> | ||
23 | #include <linux/module.h> | ||
24 | #include <linux/extable.h> | ||
25 | #include <linux/mm.h> | ||
26 | #include <linux/sched/mm.h> | ||
27 | #include <linux/sched/debug.h> | ||
28 | #include <linux/smp.h> | ||
29 | #include <linux/spinlock.h> | ||
30 | #include <linux/kallsyms.h> | ||
31 | #include <linux/memblock.h> | ||
32 | #include <linux/interrupt.h> | ||
33 | #include <linux/ptrace.h> | ||
34 | #include <linux/kgdb.h> | ||
35 | #include <linux/kdebug.h> | ||
36 | #include <linux/kprobes.h> | ||
37 | #include <linux/notifier.h> | ||
38 | #include <linux/kdb.h> | ||
39 | #include <linux/irq.h> | ||
40 | #include <linux/perf_event.h> | ||
41 | |||
42 | #include <asm/addrspace.h> | ||
43 | #include <asm/bootinfo.h> | ||
44 | #include <asm/branch.h> | ||
45 | #include <asm/break.h> | ||
46 | #include <asm/cop2.h> | ||
47 | #include <asm/cpu.h> | ||
48 | #include <asm/cpu-type.h> | ||
49 | #include <asm/dsp.h> | ||
50 | #include <asm/fpu.h> | ||
51 | #include <asm/fpu_emulator.h> | ||
52 | #include <asm/idle.h> | ||
53 | #include <asm/isa-rev.h> | ||
54 | #include <asm/mips-cps.h> | ||
55 | #include <asm/mips-r2-to-r6-emul.h> | ||
56 | #include <asm/mipsregs.h> | ||
57 | #include <asm/mipsmtregs.h> | ||
58 | #include <asm/module.h> | ||
59 | #include <asm/msa.h> | ||
60 | #include <asm/ptrace.h> | ||
61 | #include <asm/sections.h> | ||
62 | #include <asm/siginfo.h> | ||
63 | #include <asm/tlbdebug.h> | ||
64 | #include <asm/traps.h> | ||
65 | #include <linux/uaccess.h> | ||
66 | #include <asm/watch.h> | ||
67 | #include <asm/mmu_context.h> | ||
68 | #include <asm/types.h> | ||
69 | #include <asm/stacktrace.h> | ||
70 | #include <asm/tlbex.h> | ||
71 | #include <asm/uasm.h> | ||
72 | |||
73 | #include <asm/mach-loongson64/cpucfg-emul.h> | ||
74 | |||
75 | extern void check_wait(void); | ||
76 | extern asmlinkage void rollback_handle_int(void); | ||
77 | extern asmlinkage void handle_int(void); | ||
78 | extern asmlinkage void handle_adel(void); | ||
79 | extern asmlinkage void handle_ades(void); | ||
80 | extern asmlinkage void handle_ibe(void); | ||
81 | extern asmlinkage void handle_dbe(void); | ||
82 | extern asmlinkage void handle_sys(void); | ||
83 | extern asmlinkage void handle_bp(void); | ||
84 | extern asmlinkage void handle_ri(void); | ||
85 | extern asmlinkage void handle_ri_rdhwr_tlbp(void); | ||
86 | extern asmlinkage void handle_ri_rdhwr(void); | ||
87 | extern asmlinkage void handle_cpu(void); | ||
88 | extern asmlinkage void handle_ov(void); | ||
89 | extern asmlinkage void handle_tr(void); | ||
90 | extern asmlinkage void handle_msa_fpe(void); | ||
91 | extern asmlinkage void handle_fpe(void); | ||
92 | extern asmlinkage void handle_ftlb(void); | ||
93 | extern asmlinkage void handle_gsexc(void); | ||
94 | extern asmlinkage void handle_msa(void); | ||
95 | extern asmlinkage void handle_mdmx(void); | ||
96 | extern asmlinkage void handle_watch(void); | ||
97 | extern asmlinkage void handle_mt(void); | ||
98 | extern asmlinkage void handle_dsp(void); | ||
99 | extern asmlinkage void handle_mcheck(void); | ||
100 | extern asmlinkage void handle_reserved(void); | ||
101 | extern void tlb_do_page_fault_0(void); | ||
102 | |||
103 | void (*board_be_init)(void); | ||
104 | int (*board_be_handler)(struct pt_regs *regs, int is_fixup); | ||
105 | void (*board_nmi_handler_setup)(void); | ||
106 | void (*board_ejtag_handler_setup)(void); | ||
107 | void (*board_bind_eic_interrupt)(int irq, int regset); | ||
108 | void (*board_ebase_setup)(void); | ||
109 | void(*board_cache_error_setup)(void); | ||
110 | |||
111 | static void show_raw_backtrace(unsigned long reg29, const char *loglvl) | ||
112 | { | ||
113 | unsigned long *sp = (unsigned long *)(reg29 & ~3); | ||
114 | unsigned long addr; | ||
115 | |||
116 | printk("%sCall Trace:", loglvl); | ||
117 | #ifdef CONFIG_KALLSYMS | ||
118 | printk("%s\n", loglvl); | ||
119 | #endif | ||
120 | while (!kstack_end(sp)) { | ||
121 | unsigned long __user *p = | ||
122 | (unsigned long __user *)(unsigned long)sp++; | ||
123 | if (__get_user(addr, p)) { | ||
124 | printk("%s (Bad stack address)", loglvl); | ||
125 | break; | ||
126 | } | ||
127 | if (__kernel_text_address(addr)) | ||
128 | print_ip_sym(loglvl, addr); | ||
129 | } | ||
130 | printk("%s\n", loglvl); | ||
131 | } | ||
132 | |||
133 | #ifdef CONFIG_KALLSYMS | ||
134 | int raw_show_trace; | ||
135 | static int __init set_raw_show_trace(char *str) | ||
136 | { | ||
137 | raw_show_trace = 1; | ||
138 | return 1; | ||
139 | } | ||
140 | __setup("raw_show_trace", set_raw_show_trace); | ||
141 | #endif | ||
142 | |||
143 | static void show_backtrace(struct task_struct *task, const struct pt_regs *regs, | ||
144 | const char *loglvl) | ||
145 | { | ||
146 | unsigned long sp = regs->regs[29]; | ||
147 | unsigned long ra = regs->regs[31]; | ||
148 | unsigned long pc = regs->cp0_epc; | ||
149 | |||
150 | if (!task) | ||
151 | task = current; | ||
152 | |||
153 | if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) { | ||
154 | show_raw_backtrace(sp, loglvl); | ||
155 | return; | ||
156 | } | ||
157 | printk("%sCall Trace:\n", loglvl); | ||
158 | do { | ||
159 | print_ip_sym(loglvl, pc); | ||
160 | pc = unwind_stack(task, &sp, pc, &ra); | ||
161 | } while (pc); | ||
162 | pr_cont("\n"); | ||
163 | } | ||
164 | |||
165 | /* | ||
166 | * This routine abuses get_user()/put_user() to reference pointers | ||
167 | * with at least a bit of error checking ... | ||
168 | */ | ||
169 | static void show_stacktrace(struct task_struct *task, | ||
170 | const struct pt_regs *regs, const char *loglvl) | ||
171 | { | ||
172 | const int field = 2 * sizeof(unsigned long); | ||
173 | long stackdata; | ||
174 | int i; | ||
175 | unsigned long __user *sp = (unsigned long __user *)regs->regs[29]; | ||
176 | |||
177 | printk("%sStack :", loglvl); | ||
178 | i = 0; | ||
179 | while ((unsigned long) sp & (PAGE_SIZE - 1)) { | ||
180 | if (i && ((i % (64 / field)) == 0)) { | ||
181 | pr_cont("\n"); | ||
182 | printk("%s ", loglvl); | ||
183 | } | ||
184 | if (i > 39) { | ||
185 | pr_cont(" ..."); | ||
186 | break; | ||
187 | } | ||
188 | |||
189 | if (__get_user(stackdata, sp++)) { | ||
190 | pr_cont(" (Bad stack address)"); | ||
191 | break; | ||
192 | } | ||
193 | |||
194 | pr_cont(" %0*lx", field, stackdata); | ||
195 | i++; | ||
196 | } | ||
197 | pr_cont("\n"); | ||
198 | show_backtrace(task, regs, loglvl); | ||
199 | } | ||
200 | |||
201 | void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl) | ||
202 | { | ||
203 | struct pt_regs regs; | ||
204 | mm_segment_t old_fs = get_fs(); | ||
205 | |||
206 | regs.cp0_status = KSU_KERNEL; | ||
207 | if (sp) { | ||
208 | regs.regs[29] = (unsigned long)sp; | ||
209 | regs.regs[31] = 0; | ||
210 | regs.cp0_epc = 0; | ||
211 | } else { | ||
212 | if (task && task != current) { | ||
213 | regs.regs[29] = task->thread.reg29; | ||
214 | regs.regs[31] = 0; | ||
215 | regs.cp0_epc = task->thread.reg31; | ||
216 | } else { | ||
217 | prepare_frametrace(®s); | ||
218 | } | ||
219 | } | ||
220 | /* | ||
221 | * show_stack() deals exclusively with kernel mode, so be sure to access | ||
222 | * the stack in the kernel (not user) address space. | ||
223 | */ | ||
224 | set_fs(KERNEL_DS); | ||
225 | show_stacktrace(task, ®s, loglvl); | ||
226 | set_fs(old_fs); | ||
227 | } | ||
228 | |||
229 | static void show_code(unsigned int __user *pc) | ||
230 | { | ||
231 | long i; | ||
232 | unsigned short __user *pc16 = NULL; | ||
233 | |||
234 | printk("Code:"); | ||
235 | |||
236 | if ((unsigned long)pc & 1) | ||
237 | pc16 = (unsigned short __user *)((unsigned long)pc & ~1); | ||
238 | for (i = -3; i < 6; i++) { | ||
239 | unsigned int insn; | ||
240 | if (pc16 ? __get_user(insn, pc16 + i) : __get_user(insn, pc + i)) { | ||
241 | pr_cont(" (Bad address in epc)\n"); | ||
242 | break; | ||
243 | } | ||
244 | pr_cont("%c%0*x%c", (i?' ':'<'), pc16 ? 4 : 8, insn, (i?' ':'>')); | ||
245 | } | ||
246 | pr_cont("\n"); | ||
247 | } | ||
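show_code() prints three instruction words before the exception PC and six starting at it, wrapping the word at the PC in angle brackets (half-words are used for microMIPS, when the PC has its ISA bit set). A typical oops line therefore looks like the following, with entirely made-up opcodes:

    Code: 8fa20010 00431021 afa20010 <8c430000> 24630001 ac430000 8fbf0018 27bd0020 03e00008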
248 | |||
249 | static void __show_regs(const struct pt_regs *regs) | ||
250 | { | ||
251 | const int field = 2 * sizeof(unsigned long); | ||
252 | unsigned int cause = regs->cp0_cause; | ||
253 | unsigned int exccode; | ||
254 | int i; | ||
255 | |||
256 | show_regs_print_info(KERN_DEFAULT); | ||
257 | |||
258 | /* | ||
259 | * Saved main processor registers | ||
260 | */ | ||
261 | for (i = 0; i < 32; ) { | ||
262 | if ((i % 4) == 0) | ||
263 | printk("$%2d :", i); | ||
264 | if (i == 0) | ||
265 | pr_cont(" %0*lx", field, 0UL); | ||
266 | else if (i == 26 || i == 27) | ||
267 | pr_cont(" %*s", field, ""); | ||
268 | else | ||
269 | pr_cont(" %0*lx", field, regs->regs[i]); | ||
270 | |||
271 | i++; | ||
272 | if ((i % 4) == 0) | ||
273 | pr_cont("\n"); | ||
274 | } | ||
275 | |||
276 | #ifdef CONFIG_CPU_HAS_SMARTMIPS | ||
277 | printk("Acx : %0*lx\n", field, regs->acx); | ||
278 | #endif | ||
279 | if (MIPS_ISA_REV < 6) { | ||
280 | printk("Hi : %0*lx\n", field, regs->hi); | ||
281 | printk("Lo : %0*lx\n", field, regs->lo); | ||
282 | } | ||
283 | |||
284 | /* | ||
285 | * Saved cp0 registers | ||
286 | */ | ||
287 | printk("epc : %0*lx %pS\n", field, regs->cp0_epc, | ||
288 | (void *) regs->cp0_epc); | ||
289 | printk("ra : %0*lx %pS\n", field, regs->regs[31], | ||
290 | (void *) regs->regs[31]); | ||
291 | |||
292 | printk("Status: %08x ", (uint32_t) regs->cp0_status); | ||
293 | |||
294 | if (cpu_has_3kex) { | ||
295 | if (regs->cp0_status & ST0_KUO) | ||
296 | pr_cont("KUo "); | ||
297 | if (regs->cp0_status & ST0_IEO) | ||
298 | pr_cont("IEo "); | ||
299 | if (regs->cp0_status & ST0_KUP) | ||
300 | pr_cont("KUp "); | ||
301 | if (regs->cp0_status & ST0_IEP) | ||
302 | pr_cont("IEp "); | ||
303 | if (regs->cp0_status & ST0_KUC) | ||
304 | pr_cont("KUc "); | ||
305 | if (regs->cp0_status & ST0_IEC) | ||
306 | pr_cont("IEc "); | ||
307 | } else if (cpu_has_4kex) { | ||
308 | if (regs->cp0_status & ST0_KX) | ||
309 | pr_cont("KX "); | ||
310 | if (regs->cp0_status & ST0_SX) | ||
311 | pr_cont("SX "); | ||
312 | if (regs->cp0_status & ST0_UX) | ||
313 | pr_cont("UX "); | ||
314 | switch (regs->cp0_status & ST0_KSU) { | ||
315 | case KSU_USER: | ||
316 | pr_cont("USER "); | ||
317 | break; | ||
318 | case KSU_SUPERVISOR: | ||
319 | pr_cont("SUPERVISOR "); | ||
320 | break; | ||
321 | case KSU_KERNEL: | ||
322 | pr_cont("KERNEL "); | ||
323 | break; | ||
324 | default: | ||
325 | pr_cont("BAD_MODE "); | ||
326 | break; | ||
327 | } | ||
328 | if (regs->cp0_status & ST0_ERL) | ||
329 | pr_cont("ERL "); | ||
330 | if (regs->cp0_status & ST0_EXL) | ||
331 | pr_cont("EXL "); | ||
332 | if (regs->cp0_status & ST0_IE) | ||
333 | pr_cont("IE "); | ||
334 | } | ||
335 | pr_cont("\n"); | ||
336 | |||
337 | exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE; | ||
338 | printk("Cause : %08x (ExcCode %02x)\n", cause, exccode); | ||
339 | |||
340 | if (1 <= exccode && exccode <= 5) | ||
341 | printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr); | ||
342 | |||
343 | printk("PrId : %08x (%s)\n", read_c0_prid(), | ||
344 | cpu_name_string()); | ||
345 | } | ||
346 | |||
347 | /* | ||
348 | * FIXME: really the generic show_regs should take a const pointer argument. | ||
349 | */ | ||
350 | void show_regs(struct pt_regs *regs) | ||
351 | { | ||
352 | __show_regs(regs); | ||
353 | dump_stack(); | ||
354 | } | ||
355 | |||
356 | void show_registers(struct pt_regs *regs) | ||
357 | { | ||
358 | const int field = 2 * sizeof(unsigned long); | ||
359 | mm_segment_t old_fs = get_fs(); | ||
360 | |||
361 | __show_regs(regs); | ||
362 | print_modules(); | ||
363 | printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n", | ||
364 | current->comm, current->pid, current_thread_info(), current, | ||
365 | field, current_thread_info()->tp_value); | ||
366 | if (cpu_has_userlocal) { | ||
367 | unsigned long tls; | ||
368 | |||
369 | tls = read_c0_userlocal(); | ||
370 | if (tls != current_thread_info()->tp_value) | ||
371 | printk("*HwTLS: %0*lx\n", field, tls); | ||
372 | } | ||
373 | |||
374 | if (!user_mode(regs)) | ||
375 | /* Necessary for getting the correct stack content */ | ||
376 | set_fs(KERNEL_DS); | ||
377 | show_stacktrace(current, regs, KERN_DEFAULT); | ||
378 | show_code((unsigned int __user *) regs->cp0_epc); | ||
379 | printk("\n"); | ||
380 | set_fs(old_fs); | ||
381 | } | ||
382 | |||
383 | static DEFINE_RAW_SPINLOCK(die_lock); | ||
384 | |||
385 | void __noreturn die(const char *str, struct pt_regs *regs) | ||
386 | { | ||
387 | static int die_counter; | ||
388 | int sig = SIGSEGV; | ||
389 | |||
390 | oops_enter(); | ||
391 | |||
392 | if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr, | ||
393 | SIGSEGV) == NOTIFY_STOP) | ||
394 | sig = 0; | ||
395 | |||
396 | console_verbose(); | ||
397 | raw_spin_lock_irq(&die_lock); | ||
398 | bust_spinlocks(1); | ||
399 | |||
400 | printk("%s[#%d]:\n", str, ++die_counter); | ||
401 | show_registers(regs); | ||
402 | add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); | ||
403 | raw_spin_unlock_irq(&die_lock); | ||
404 | |||
405 | oops_exit(); | ||
406 | |||
407 | if (in_interrupt()) | ||
408 | panic("Fatal exception in interrupt"); | ||
409 | |||
410 | if (panic_on_oops) | ||
411 | panic("Fatal exception"); | ||
412 | |||
413 | if (regs && kexec_should_crash(current)) | ||
414 | crash_kexec(regs); | ||
415 | |||
416 | do_exit(sig); | ||
417 | } | ||
418 | |||
419 | extern struct exception_table_entry __start___dbe_table[]; | ||
420 | extern struct exception_table_entry __stop___dbe_table[]; | ||
421 | |||
422 | __asm__( | ||
423 | " .section __dbe_table, \"a\"\n" | ||
424 | " .previous \n"); | ||
425 | |||
426 | /* Given an address, look for it in the exception tables. */ | ||
427 | static const struct exception_table_entry *search_dbe_tables(unsigned long addr) | ||
428 | { | ||
429 | const struct exception_table_entry *e; | ||
430 | |||
431 | e = search_extable(__start___dbe_table, | ||
432 | __stop___dbe_table - __start___dbe_table, addr); | ||
433 | if (!e) | ||
434 | e = search_module_dbetables(addr); | ||
435 | return e; | ||
436 | } | ||
437 | |||
438 | asmlinkage void do_be(struct pt_regs *regs) | ||
439 | { | ||
440 | const int field = 2 * sizeof(unsigned long); | ||
441 | const struct exception_table_entry *fixup = NULL; | ||
442 | int data = regs->cp0_cause & 4; | ||
443 | int action = MIPS_BE_FATAL; | ||
444 | enum ctx_state prev_state; | ||
445 | |||
446 | prev_state = exception_enter(); | ||
447 | /* XXX For now. Fixme, this searches the wrong table ... */ | ||
448 | if (data && !user_mode(regs)) | ||
449 | fixup = search_dbe_tables(exception_epc(regs)); | ||
450 | |||
451 | if (fixup) | ||
452 | action = MIPS_BE_FIXUP; | ||
453 | |||
454 | if (board_be_handler) | ||
455 | action = board_be_handler(regs, fixup != NULL); | ||
456 | else | ||
457 | mips_cm_error_report(); | ||
458 | |||
459 | switch (action) { | ||
460 | case MIPS_BE_DISCARD: | ||
461 | goto out; | ||
462 | case MIPS_BE_FIXUP: | ||
463 | if (fixup) { | ||
464 | regs->cp0_epc = fixup->nextinsn; | ||
465 | goto out; | ||
466 | } | ||
467 | break; | ||
468 | default: | ||
469 | break; | ||
470 | } | ||
471 | |||
472 | /* | ||
473 | * Assume it would be too dangerous to continue ... | ||
474 | */ | ||
475 | printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n", | ||
476 | data ? "Data" : "Instruction", | ||
477 | field, regs->cp0_epc, field, regs->regs[31]); | ||
478 | if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr, | ||
479 | SIGBUS) == NOTIFY_STOP) | ||
480 | goto out; | ||
481 | |||
482 | die_if_kernel("Oops", regs); | ||
483 | force_sig(SIGBUS); | ||
484 | |||
485 | out: | ||
486 | exception_exit(prev_state); | ||
487 | } | ||
488 | |||
489 | /* | ||
490 | * ll/sc, rdhwr, sync emulation | ||
491 | */ | ||
492 | |||
493 | #define OPCODE 0xfc000000 | ||
494 | #define BASE 0x03e00000 | ||
495 | #define RT 0x001f0000 | ||
496 | #define OFFSET 0x0000ffff | ||
497 | #define LL 0xc0000000 | ||
498 | #define SC 0xe0000000 | ||
499 | #define SPEC0 0x00000000 | ||
500 | #define SPEC3 0x7c000000 | ||
501 | #define RD 0x0000f800 | ||
502 | #define FUNC 0x0000003f | ||
503 | #define SYNC 0x0000000f | ||
504 | #define RDHWR 0x0000003b | ||
505 | |||
506 | /* microMIPS definitions */ | ||
507 | #define MM_POOL32A_FUNC 0xfc00ffff | ||
508 | #define MM_RDHWR 0x00006b3c | ||
509 | #define MM_RS 0x001f0000 | ||
510 | #define MM_RT 0x03e00000 | ||
511 | |||
512 | /* | ||
513 | * The ll_bit is cleared by r*_switch.S | ||
514 | */ | ||
515 | |||
516 | unsigned int ll_bit; | ||
517 | struct task_struct *ll_task; | ||
518 | |||
519 | static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode) | ||
520 | { | ||
521 | unsigned long value, __user *vaddr; | ||
522 | long offset; | ||
523 | |||
524 | /* | ||
525 | * Analyse the ll instruction that just caused an RI exception | ||
526 | * and compute the referenced virtual address (vaddr). | ||
527 | */ | ||
528 | |||
529 | /* sign extend offset */ | ||
530 | offset = opcode & OFFSET; | ||
531 | offset <<= 16; | ||
532 | offset >>= 16; | ||
533 | |||
534 | vaddr = (unsigned long __user *) | ||
535 | ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); | ||
536 | |||
537 | if ((unsigned long)vaddr & 3) | ||
538 | return SIGBUS; | ||
539 | if (get_user(value, vaddr)) | ||
540 | return SIGSEGV; | ||
541 | |||
542 | preempt_disable(); | ||
543 | |||
544 | if (ll_task == NULL || ll_task == current) { | ||
545 | ll_bit = 1; | ||
546 | } else { | ||
547 | ll_bit = 0; | ||
548 | } | ||
549 | ll_task = current; | ||
550 | |||
551 | preempt_enable(); | ||
552 | |||
553 | regs->regs[(opcode & RT) >> 16] = value; | ||
554 | |||
555 | return 0; | ||
556 | } | ||
557 | |||
558 | static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode) | ||
559 | { | ||
560 | unsigned long __user *vaddr; | ||
561 | unsigned long reg; | ||
562 | long offset; | ||
563 | |||
564 | /* | ||
565 | * Analyse the sc instruction that just caused an RI exception | ||
566 | * and compute the referenced virtual address (vaddr). | ||
567 | */ | ||
568 | |||
569 | /* sign extend offset */ | ||
570 | offset = opcode & OFFSET; | ||
571 | offset <<= 16; | ||
572 | offset >>= 16; | ||
573 | |||
574 | vaddr = (unsigned long __user *) | ||
575 | ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset); | ||
576 | reg = (opcode & RT) >> 16; | ||
577 | |||
578 | if ((unsigned long)vaddr & 3) | ||
579 | return SIGBUS; | ||
580 | |||
581 | preempt_disable(); | ||
582 | |||
583 | if (ll_bit == 0 || ll_task != current) { | ||
584 | regs->regs[reg] = 0; | ||
585 | preempt_enable(); | ||
586 | return 0; | ||
587 | } | ||
588 | |||
589 | preempt_enable(); | ||
590 | |||
591 | if (put_user(regs->regs[reg], vaddr)) | ||
592 | return SIGSEGV; | ||
593 | |||
594 | regs->regs[reg] = 1; | ||
595 | |||
596 | return 0; | ||
597 | } | ||
598 | |||
599 | /* | ||
600 | * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both | ||
601 | * opcodes are supposed to result in coprocessor unusable exceptions if | ||
602 | * executed on ll/sc-less processors. That's the theory. In practice a | ||
603 | * few processors such as NEC's VR4100 throw reserved instruction exceptions | ||
604 | * instead, so we're doing the emulation thing in both exception handlers. | ||
605 | */ | ||
606 | static int simulate_llsc(struct pt_regs *regs, unsigned int opcode) | ||
607 | { | ||
608 | if ((opcode & OPCODE) == LL) { | ||
609 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | ||
610 | 1, regs, 0); | ||
611 | return simulate_ll(regs, opcode); | ||
612 | } | ||
613 | if ((opcode & OPCODE) == SC) { | ||
614 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | ||
615 | 1, regs, 0); | ||
616 | return simulate_sc(regs, opcode); | ||
617 | } | ||
618 | |||
619 | return -1; /* Must be something else ... */ | ||
620 | } | ||
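simulate_ll()/simulate_sc() give ll/sc-less CPUs working load-linked/store-conditional semantics on uniprocessors: ll records a reservation in ll_bit/ll_task, sc succeeds only if the same task still holds it, and r*_switch.S clears ll_bit on every context switch, so an interrupted pair fails and userland retries. A self-contained toy model of just that reservation protocol (not the kernel code paths, which additionally validate alignment and user addresses):

    #include <stdio.h>

    static int ll_bit;              /* is a reservation outstanding? */
    static const void *ll_task;     /* which "task" holds it */

    static long emu_ll(const void *task, const long *addr)
    {
            ll_bit = 1;             /* take the reservation */
            ll_task = task;
            return *addr;
    }

    static int emu_sc(const void *task, long *addr, long val)
    {
            if (!ll_bit || ll_task != task)
                    return 0;       /* reservation lost: sc fails, caller retries */
            *addr = val;
            return 1;
    }

    int main(void)
    {
            long word = 41;
            const char me[] = "task A";
            long v = emu_ll(me, &word);

            /* a context switch here would clear ll_bit and make sc fail */
            if (emu_sc(me, &word, v + 1))
                    printf("sc succeeded, word = %ld\n", word); /* word = 42 */
            return 0;
    }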
621 | |||
622 | /* | ||
623 | * Simulate trapping 'rdhwr' instructions to provide user accessible | ||
624 | * registers not implemented in hardware. | ||
625 | */ | ||
626 | static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt) | ||
627 | { | ||
628 | struct thread_info *ti = task_thread_info(current); | ||
629 | |||
630 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | ||
631 | 1, regs, 0); | ||
632 | switch (rd) { | ||
633 | case MIPS_HWR_CPUNUM: /* CPU number */ | ||
634 | regs->regs[rt] = smp_processor_id(); | ||
635 | return 0; | ||
636 | case MIPS_HWR_SYNCISTEP: /* SYNCI length */ | ||
637 | regs->regs[rt] = min(current_cpu_data.dcache.linesz, | ||
638 | current_cpu_data.icache.linesz); | ||
639 | return 0; | ||
640 | case MIPS_HWR_CC: /* Read count register */ | ||
641 | regs->regs[rt] = read_c0_count(); | ||
642 | return 0; | ||
643 | case MIPS_HWR_CCRES: /* Count register resolution */ | ||
644 | switch (current_cpu_type()) { | ||
645 | case CPU_20KC: | ||
646 | case CPU_25KF: | ||
647 | regs->regs[rt] = 1; | ||
648 | break; | ||
649 | default: | ||
650 | regs->regs[rt] = 2; | ||
651 | } | ||
652 | return 0; | ||
653 | case MIPS_HWR_ULR: /* Read UserLocal register */ | ||
654 | regs->regs[rt] = ti->tp_value; | ||
655 | return 0; | ||
656 | default: | ||
657 | return -1; | ||
658 | } | ||
659 | } | ||
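The common way userland reaches simulate_rdhwr() is the TLS sequence: reading hardware register $29 (UserLocal) for the thread pointer. On cores without a hardware UserLocal register the instruction raises a Reserved Instruction exception and the MIPS_HWR_ULR case above supplies ti->tp_value instead. A sketch of the user-side read (MIPS-only inline asm; older assemblers may need a `.set push; .set mips32r2` around it):

    static inline void *mips_read_tp(void)
    {
            void *tp;

            /* rdhwr rt, $29: read the UserLocal hardware register */
            __asm__("rdhwr %0, $29" : "=r"(tp));
            return tp;
    }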
660 | |||
661 | static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode) | ||
662 | { | ||
663 | if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) { | ||
664 | int rd = (opcode & RD) >> 11; | ||
665 | int rt = (opcode & RT) >> 16; | ||
666 | |||
667 | simulate_rdhwr(regs, rd, rt); | ||
668 | return 0; | ||
669 | } | ||
670 | |||
671 | /* Not ours. */ | ||
672 | return -1; | ||
673 | } | ||
674 | |||
675 | static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode) | ||
676 | { | ||
677 | if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) { | ||
678 | int rd = (opcode & MM_RS) >> 16; | ||
679 | int rt = (opcode & MM_RT) >> 21; | ||
680 | simulate_rdhwr(regs, rd, rt); | ||
681 | return 0; | ||
682 | } | ||
683 | |||
684 | /* Not ours. */ | ||
685 | return -1; | ||
686 | } | ||
687 | |||
688 | static int simulate_sync(struct pt_regs *regs, unsigned int opcode) | ||
689 | { | ||
690 | if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) { | ||
691 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, | ||
692 | 1, regs, 0); | ||
693 | return 0; | ||
694 | } | ||
695 | |||
696 | return -1; /* Must be something else ... */ | ||
697 | } | ||
698 | |||
699 | /* | ||
700 | * Loongson-3 CSR instructions emulation | ||
701 | */ | ||
702 | |||
703 | #ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION | ||
704 | |||
705 | #define LWC2 0xc8000000 | ||
706 | #define RS BASE | ||
707 | #define CSR_OPCODE2 0x00000118 | ||
708 | #define CSR_OPCODE2_MASK 0x000007ff | ||
709 | #define CSR_FUNC_MASK RT | ||
710 | #define CSR_FUNC_CPUCFG 0x8 | ||
711 | |||
712 | static int simulate_loongson3_cpucfg(struct pt_regs *regs, | ||
713 | unsigned int opcode) | ||
714 | { | ||
715 | int op = opcode & OPCODE; | ||
716 | int op2 = opcode & CSR_OPCODE2_MASK; | ||
717 | int csr_func = (opcode & CSR_FUNC_MASK) >> 16; | ||
718 | |||
719 | if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) { | ||
720 | int rd = (opcode & RD) >> 11; | ||
721 | int rs = (opcode & RS) >> 21; | ||
722 | __u64 sel = regs->regs[rs]; | ||
723 | |||
724 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | ||
725 | |||
726 | /* Do not emulate on unsupported core models. */ | ||
727 | preempt_disable(); | ||
728 | if (!loongson3_cpucfg_emulation_enabled(¤t_cpu_data)) { | ||
729 | preempt_enable(); | ||
730 | return -1; | ||
731 | } | ||
732 | regs->regs[rd] = loongson3_cpucfg_read_synthesized( | ||
733 | 			&current_cpu_data, sel); | ||
734 | preempt_enable(); | ||
735 | return 0; | ||
736 | } | ||
737 | |||
738 | /* Not ours. */ | ||
739 | return -1; | ||
740 | } | ||
741 | #endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */ | ||
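The encoding matched above can be written out with the field masks expanded. The following sketch mirrors the checks in simulate_loongson3_cpucfg() with RD/RT/BASE spelled out as raw constants; treat it as an illustration of the decoding, not a second source of truth:

#include <stdbool.h>
#include <stdint.h>

/* Field layout assumed from the masks above (bits, MSB first):
 *   [31:26] major opcode -> LWC2 (0x32)
 *   [25:21] rs           -> GPR holding the CPUCFG selector
 *   [20:16] func         -> 0x8 for CPUCFG
 *   [15:11] rd           -> destination GPR
 *   [10:0]  minor opcode -> 0x118
 */
static bool is_loongson_cpucfg(uint32_t insn, unsigned int *rd,
			       unsigned int *rs)
{
	if ((insn & 0xfc000000u) != 0xc8000000u)	/* LWC2 */
		return false;
	if ((insn & 0x000007ffu) != 0x118u)		/* CSR_OPCODE2 */
		return false;
	if (((insn >> 16) & 0x1fu) != 0x8u)		/* CSR_FUNC_CPUCFG */
		return false;
	*rd = (insn >> 11) & 0x1f;
	*rs = (insn >> 21) & 0x1f;
	return true;
}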
742 | |||
743 | asmlinkage void do_ov(struct pt_regs *regs) | ||
744 | { | ||
745 | enum ctx_state prev_state; | ||
746 | |||
747 | prev_state = exception_enter(); | ||
748 | die_if_kernel("Integer overflow", regs); | ||
749 | |||
750 | force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc); | ||
751 | exception_exit(prev_state); | ||
752 | } | ||
753 | |||
754 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
755 | |||
756 | /* | ||
757 | * Send SIGFPE according to FCSR Cause bits, which must have already | ||
758 | * been masked against Enable bits. This is important as Inexact can | ||
759 | * happen together with Overflow or Underflow, and `ptrace' can set | ||
760 | * any bits. | ||
761 | */ | ||
762 | void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr, | ||
763 | struct task_struct *tsk) | ||
764 | { | ||
765 | int si_code = FPE_FLTUNK; | ||
766 | |||
767 | if (fcr31 & FPU_CSR_INV_X) | ||
768 | si_code = FPE_FLTINV; | ||
769 | else if (fcr31 & FPU_CSR_DIV_X) | ||
770 | si_code = FPE_FLTDIV; | ||
771 | else if (fcr31 & FPU_CSR_OVF_X) | ||
772 | si_code = FPE_FLTOVF; | ||
773 | else if (fcr31 & FPU_CSR_UDF_X) | ||
774 | si_code = FPE_FLTUND; | ||
775 | else if (fcr31 & FPU_CSR_INE_X) | ||
776 | si_code = FPE_FLTRES; | ||
777 | |||
778 | force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk); | ||
779 | } | ||
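The "must have already been masked against Enable bits" precondition can be made concrete: in the architectural FCSR layout the five IEEE Cause bits sit exactly five bits above their Enable counterparts, so the masking presumably performed by mask_fcr31_x() amounts to the sketch below (bit positions per the MIPS32/64 manuals; the helper name is ours):

/* FCSR: Flags [6:2], Enables [11:7], Causes [16:12], plus Cause.E (the
 * Unimplemented Operation bit) at 17. Keep only Cause bits whose
 * matching Enable bit is set; Cause.E has no Enable bit and is
 * handled separately (see do_fpe() below). */
static inline unsigned long fcsr_enabled_causes(unsigned long fcr31)
{
	return fcr31 & ((fcr31 & 0x00000f80UL) << 5);
}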
780 | |||
781 | int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31) | ||
782 | { | ||
783 | int si_code; | ||
784 | struct vm_area_struct *vma; | ||
785 | |||
786 | switch (sig) { | ||
787 | case 0: | ||
788 | return 0; | ||
789 | |||
790 | case SIGFPE: | ||
791 | force_fcr31_sig(fcr31, fault_addr, current); | ||
792 | return 1; | ||
793 | |||
794 | case SIGBUS: | ||
795 | force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr); | ||
796 | return 1; | ||
797 | |||
798 | case SIGSEGV: | ||
799 | mmap_read_lock(current->mm); | ||
800 | vma = find_vma(current->mm, (unsigned long)fault_addr); | ||
801 | if (vma && (vma->vm_start <= (unsigned long)fault_addr)) | ||
802 | si_code = SEGV_ACCERR; | ||
803 | else | ||
804 | si_code = SEGV_MAPERR; | ||
805 | mmap_read_unlock(current->mm); | ||
806 | force_sig_fault(SIGSEGV, si_code, fault_addr); | ||
807 | return 1; | ||
808 | |||
809 | default: | ||
810 | force_sig(sig); | ||
811 | return 1; | ||
812 | } | ||
813 | } | ||
814 | |||
815 | static int simulate_fp(struct pt_regs *regs, unsigned int opcode, | ||
816 | unsigned long old_epc, unsigned long old_ra) | ||
817 | { | ||
818 | union mips_instruction inst = { .word = opcode }; | ||
819 | void __user *fault_addr; | ||
820 | unsigned long fcr31; | ||
821 | int sig; | ||
822 | |||
823 | /* If it's obviously not an FP instruction, skip it */ | ||
824 | switch (inst.i_format.opcode) { | ||
825 | case cop1_op: | ||
826 | case cop1x_op: | ||
827 | case lwc1_op: | ||
828 | case ldc1_op: | ||
829 | case swc1_op: | ||
830 | case sdc1_op: | ||
831 | break; | ||
832 | |||
833 | default: | ||
834 | return -1; | ||
835 | } | ||
836 | |||
837 | /* | ||
838 | * do_ri skipped over the instruction via compute_return_epc, undo | ||
839 | * that for the FPU emulator. | ||
840 | */ | ||
841 | regs->cp0_epc = old_epc; | ||
842 | regs->regs[31] = old_ra; | ||
843 | |||
844 | /* Run the emulator */ | ||
845 | 	sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, | ||
846 | &fault_addr); | ||
847 | |||
848 | /* | ||
849 | * We can't allow the emulated instruction to leave any | ||
850 | * enabled Cause bits set in $fcr31. | ||
851 | */ | ||
852 | fcr31 = mask_fcr31_x(current->thread.fpu.fcr31); | ||
853 | current->thread.fpu.fcr31 &= ~fcr31; | ||
854 | |||
855 | /* Restore the hardware register state */ | ||
856 | own_fpu(1); | ||
857 | |||
858 | /* Send a signal if required. */ | ||
859 | process_fpemu_return(sig, fault_addr, fcr31); | ||
860 | |||
861 | return 0; | ||
862 | } | ||
863 | |||
864 | /* | ||
865 | * XXX Delayed fp exceptions when doing a lazy ctx switch XXX | ||
866 | */ | ||
867 | asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31) | ||
868 | { | ||
869 | enum ctx_state prev_state; | ||
870 | void __user *fault_addr; | ||
871 | int sig; | ||
872 | |||
873 | prev_state = exception_enter(); | ||
874 | if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr, | ||
875 | SIGFPE) == NOTIFY_STOP) | ||
876 | goto out; | ||
877 | |||
878 | /* Clear FCSR.Cause before enabling interrupts */ | ||
879 | write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31)); | ||
880 | local_irq_enable(); | ||
881 | |||
882 | die_if_kernel("FP exception in kernel code", regs); | ||
883 | |||
884 | if (fcr31 & FPU_CSR_UNI_X) { | ||
885 | /* | ||
886 | * Unimplemented operation exception. If we've got the full | ||
887 | * software emulator on-board, let's use it... | ||
888 | * | ||
889 | * Force FPU to dump state into task/thread context. We're | ||
890 | * moving a lot of data here for what is probably a single | ||
891 | * instruction, but the alternative is to pre-decode the FP | ||
892 | * register operands before invoking the emulator, which seems | ||
893 | * a bit extreme for what should be an infrequent event. | ||
894 | */ | ||
895 | |||
896 | /* Run the emulator */ | ||
897 | 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1, | ||
898 | &fault_addr); | ||
899 | |||
900 | /* | ||
901 | * We can't allow the emulated instruction to leave any | ||
902 | * enabled Cause bits set in $fcr31. | ||
903 | */ | ||
904 | fcr31 = mask_fcr31_x(current->thread.fpu.fcr31); | ||
905 | current->thread.fpu.fcr31 &= ~fcr31; | ||
906 | |||
907 | /* Restore the hardware register state */ | ||
908 | own_fpu(1); /* Using the FPU again. */ | ||
909 | } else { | ||
910 | sig = SIGFPE; | ||
911 | fault_addr = (void __user *) regs->cp0_epc; | ||
912 | } | ||
913 | |||
914 | /* Send a signal if required. */ | ||
915 | process_fpemu_return(sig, fault_addr, fcr31); | ||
916 | |||
917 | out: | ||
918 | exception_exit(prev_state); | ||
919 | } | ||
920 | |||
921 | /* | ||
922 | * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've | ||
923 | * emulated more than some threshold number of instructions, force migration to | ||
924 | * a "CPU" that has FP support. | ||
925 | */ | ||
926 | static void mt_ase_fp_affinity(void) | ||
927 | { | ||
928 | #ifdef CONFIG_MIPS_MT_FPAFF | ||
929 | if (mt_fpemul_threshold > 0 && | ||
930 | ((current->thread.emulated_fp++ > mt_fpemul_threshold))) { | ||
931 | /* | ||
932 | * If there's no FPU present, or if the application has already | ||
933 | * restricted the allowed set to exclude any CPUs with FPUs, | ||
934 | * we'll skip the procedure. | ||
935 | */ | ||
936 | 		if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) { | ||
937 | cpumask_t tmask; | ||
938 | |||
939 | current->thread.user_cpus_allowed | ||
940 | = current->cpus_mask; | ||
941 | 			cpumask_and(&tmask, &current->cpus_mask, | ||
942 | &mt_fpu_cpumask); | ||
943 | set_cpus_allowed_ptr(current, &tmask); | ||
944 | set_thread_flag(TIF_FPUBOUND); | ||
945 | } | ||
946 | } | ||
947 | #endif /* CONFIG_MIPS_MT_FPAFF */ | ||
948 | } | ||
949 | |||
950 | #else /* !CONFIG_MIPS_FP_SUPPORT */ | ||
951 | |||
952 | static int simulate_fp(struct pt_regs *regs, unsigned int opcode, | ||
953 | unsigned long old_epc, unsigned long old_ra) | ||
954 | { | ||
955 | return -1; | ||
956 | } | ||
957 | |||
958 | #endif /* !CONFIG_MIPS_FP_SUPPORT */ | ||
959 | |||
960 | void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code, | ||
961 | const char *str) | ||
962 | { | ||
963 | char b[40]; | ||
964 | |||
965 | #ifdef CONFIG_KGDB_LOW_LEVEL_TRAP | ||
966 | if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr, | ||
967 | SIGTRAP) == NOTIFY_STOP) | ||
968 | return; | ||
969 | #endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */ | ||
970 | |||
971 | if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr, | ||
972 | SIGTRAP) == NOTIFY_STOP) | ||
973 | return; | ||
974 | |||
975 | /* | ||
976 | * A short test says that IRIX 5.3 sends SIGTRAP for all trap | ||
977 | * insns, even for trap and break codes that indicate arithmetic | ||
978 | * failures. Weird ... | ||
979 | * But should we continue the brokenness??? --macro | ||
980 | */ | ||
981 | switch (code) { | ||
982 | case BRK_OVERFLOW: | ||
983 | case BRK_DIVZERO: | ||
984 | scnprintf(b, sizeof(b), "%s instruction in kernel code", str); | ||
985 | die_if_kernel(b, regs); | ||
986 | force_sig_fault(SIGFPE, | ||
987 | code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF, | ||
988 | (void __user *) regs->cp0_epc); | ||
989 | break; | ||
990 | case BRK_BUG: | ||
991 | die_if_kernel("Kernel bug detected", regs); | ||
992 | force_sig(SIGTRAP); | ||
993 | break; | ||
994 | case BRK_MEMU: | ||
995 | /* | ||
996 | * This breakpoint code is used by the FPU emulator to retake | ||
997 | * control of the CPU after executing the instruction from the | ||
998 | * delay slot of an emulated branch. | ||
999 | * | ||
1000 | * Terminate if the exception was recognized as a delay-slot | ||
1001 | * return; otherwise handle it as normal. | ||
1002 | */ | ||
1003 | if (do_dsemulret(regs)) | ||
1004 | return; | ||
1005 | |||
1006 | die_if_kernel("Math emu break/trap", regs); | ||
1007 | force_sig(SIGTRAP); | ||
1008 | break; | ||
1009 | default: | ||
1010 | scnprintf(b, sizeof(b), "%s instruction in kernel code", str); | ||
1011 | die_if_kernel(b, regs); | ||
1012 | if (si_code) { | ||
1013 | force_sig_fault(SIGTRAP, si_code, NULL); | ||
1014 | } else { | ||
1015 | force_sig(SIGTRAP); | ||
1016 | } | ||
1017 | } | ||
1018 | } | ||
1019 | |||
1020 | asmlinkage void do_bp(struct pt_regs *regs) | ||
1021 | { | ||
1022 | unsigned long epc = msk_isa16_mode(exception_epc(regs)); | ||
1023 | unsigned int opcode, bcode; | ||
1024 | enum ctx_state prev_state; | ||
1025 | mm_segment_t seg; | ||
1026 | |||
1027 | seg = get_fs(); | ||
1028 | if (!user_mode(regs)) | ||
1029 | set_fs(KERNEL_DS); | ||
1030 | |||
1031 | prev_state = exception_enter(); | ||
1032 | current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; | ||
1033 | if (get_isa16_mode(regs->cp0_epc)) { | ||
1034 | u16 instr[2]; | ||
1035 | |||
1036 | if (__get_user(instr[0], (u16 __user *)epc)) | ||
1037 | goto out_sigsegv; | ||
1038 | |||
1039 | if (!cpu_has_mmips) { | ||
1040 | /* MIPS16e mode */ | ||
1041 | bcode = (instr[0] >> 5) & 0x3f; | ||
1042 | } else if (mm_insn_16bit(instr[0])) { | ||
1043 | /* 16-bit microMIPS BREAK */ | ||
1044 | bcode = instr[0] & 0xf; | ||
1045 | } else { | ||
1046 | /* 32-bit microMIPS BREAK */ | ||
1047 | if (__get_user(instr[1], (u16 __user *)(epc + 2))) | ||
1048 | goto out_sigsegv; | ||
1049 | opcode = (instr[0] << 16) | instr[1]; | ||
1050 | bcode = (opcode >> 6) & ((1 << 20) - 1); | ||
1051 | } | ||
1052 | } else { | ||
1053 | if (__get_user(opcode, (unsigned int __user *)epc)) | ||
1054 | goto out_sigsegv; | ||
1055 | bcode = (opcode >> 6) & ((1 << 20) - 1); | ||
1056 | } | ||
1057 | |||
1058 | /* | ||
1059 | * There is an ancient bug in MIPS assemblers that the break | ||
1060 | * code starts at bit 16 instead of bit 6 in the opcode. | ||
1061 | * Gas is bug-compatible, but not always, grrr... | ||
1062 | * We handle both cases with a simple heuristic. --macro | ||
1063 | */ | ||
1064 | if (bcode >= (1 << 10)) | ||
1065 | bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10); | ||
1066 | |||
1067 | /* | ||
1068 | * notify the kprobe handlers, if instruction is likely to | ||
1069 | * pertain to them. | ||
1070 | */ | ||
1071 | switch (bcode) { | ||
1072 | case BRK_UPROBE: | ||
1073 | if (notify_die(DIE_UPROBE, "uprobe", regs, bcode, | ||
1074 | current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) | ||
1075 | goto out; | ||
1076 | else | ||
1077 | break; | ||
1078 | case BRK_UPROBE_XOL: | ||
1079 | if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode, | ||
1080 | current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) | ||
1081 | goto out; | ||
1082 | else | ||
1083 | break; | ||
1084 | case BRK_KPROBE_BP: | ||
1085 | if (notify_die(DIE_BREAK, "debug", regs, bcode, | ||
1086 | current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) | ||
1087 | goto out; | ||
1088 | else | ||
1089 | break; | ||
1090 | case BRK_KPROBE_SSTEPBP: | ||
1091 | if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode, | ||
1092 | current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP) | ||
1093 | goto out; | ||
1094 | else | ||
1095 | break; | ||
1096 | default: | ||
1097 | break; | ||
1098 | } | ||
1099 | |||
1100 | do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break"); | ||
1101 | |||
1102 | out: | ||
1103 | set_fs(seg); | ||
1104 | exception_exit(prev_state); | ||
1105 | return; | ||
1106 | |||
1107 | out_sigsegv: | ||
1108 | force_sig(SIGSEGV); | ||
1109 | goto out; | ||
1110 | } | ||
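To see the break-code heuristic at work, here it is applied to "break 7" as emitted by a correct assembler (code field at bit 6) and by a bug-compatible one (code field at bit 16). This is a standalone illustration, and normalize_bcode() is our name for the swap in do_bp():

#include <assert.h>
#include <stdint.h>

/* Mirror of the heuristic above: codes below 1024 are taken as-is,
 * larger ones are assumed to come from assemblers that placed the
 * code 10 bits too high, so the two halves are swapped back. */
static uint32_t normalize_bcode(uint32_t bcode)
{
	if (bcode >= (1u << 10))
		bcode = ((bcode & ((1u << 10) - 1)) << 10) | (bcode >> 10);
	return bcode;
}

int main(void)
{
	/* BREAK encoding: opcode 000000 | code[25:6] | function 001101 */
	uint32_t good  = (7u << 6)  | 0x0d;	/* code at bit 6 */
	uint32_t buggy = (7u << 16) | 0x0d;	/* code at bit 16 */

	assert(normalize_bcode((good  >> 6) & ((1u << 20) - 1)) == 7);
	assert(normalize_bcode((buggy >> 6) & ((1u << 20) - 1)) == 7);
	return 0;
}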
1111 | |||
1112 | asmlinkage void do_tr(struct pt_regs *regs) | ||
1113 | { | ||
1114 | u32 opcode, tcode = 0; | ||
1115 | enum ctx_state prev_state; | ||
1116 | u16 instr[2]; | ||
1117 | mm_segment_t seg; | ||
1118 | unsigned long epc = msk_isa16_mode(exception_epc(regs)); | ||
1119 | |||
1120 | seg = get_fs(); | ||
1121 | if (!user_mode(regs)) | ||
1122 | set_fs(KERNEL_DS); | ||
1123 | |||
1124 | prev_state = exception_enter(); | ||
1125 | current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; | ||
1126 | if (get_isa16_mode(regs->cp0_epc)) { | ||
1127 | if (__get_user(instr[0], (u16 __user *)(epc + 0)) || | ||
1128 | __get_user(instr[1], (u16 __user *)(epc + 2))) | ||
1129 | goto out_sigsegv; | ||
1130 | opcode = (instr[0] << 16) | instr[1]; | ||
1131 | /* Immediate versions don't provide a code. */ | ||
1132 | if (!(opcode & OPCODE)) | ||
1133 | tcode = (opcode >> 12) & ((1 << 4) - 1); | ||
1134 | } else { | ||
1135 | if (__get_user(opcode, (u32 __user *)epc)) | ||
1136 | goto out_sigsegv; | ||
1137 | /* Immediate versions don't provide a code. */ | ||
1138 | if (!(opcode & OPCODE)) | ||
1139 | tcode = (opcode >> 6) & ((1 << 10) - 1); | ||
1140 | } | ||
1141 | |||
1142 | do_trap_or_bp(regs, tcode, 0, "Trap"); | ||
1143 | |||
1144 | out: | ||
1145 | set_fs(seg); | ||
1146 | exception_exit(prev_state); | ||
1147 | return; | ||
1148 | |||
1149 | out_sigsegv: | ||
1150 | force_sig(SIGSEGV); | ||
1151 | goto out; | ||
1152 | } | ||
1153 | |||
1154 | asmlinkage void do_ri(struct pt_regs *regs) | ||
1155 | { | ||
1156 | unsigned int __user *epc = (unsigned int __user *)exception_epc(regs); | ||
1157 | unsigned long old_epc = regs->cp0_epc; | ||
1158 | unsigned long old31 = regs->regs[31]; | ||
1159 | enum ctx_state prev_state; | ||
1160 | unsigned int opcode = 0; | ||
1161 | int status = -1; | ||
1162 | |||
1163 | /* | ||
1164 | * Avoid any kernel code. Just emulate the R2 instruction | ||
1165 | * as quickly as possible. | ||
1166 | */ | ||
1167 | if (mipsr2_emulation && cpu_has_mips_r6 && | ||
1168 | likely(user_mode(regs)) && | ||
1169 | likely(get_user(opcode, epc) >= 0)) { | ||
1170 | unsigned long fcr31 = 0; | ||
1171 | |||
1172 | status = mipsr2_decoder(regs, opcode, &fcr31); | ||
1173 | switch (status) { | ||
1174 | case 0: | ||
1175 | case SIGEMT: | ||
1176 | return; | ||
1177 | case SIGILL: | ||
1178 | goto no_r2_instr; | ||
1179 | default: | ||
1180 | process_fpemu_return(status, | ||
1181 | 					     &current->thread.cp0_baduaddr, | ||
1182 | fcr31); | ||
1183 | return; | ||
1184 | } | ||
1185 | } | ||
1186 | |||
1187 | no_r2_instr: | ||
1188 | |||
1189 | prev_state = exception_enter(); | ||
1190 | current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; | ||
1191 | |||
1192 | if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr, | ||
1193 | SIGILL) == NOTIFY_STOP) | ||
1194 | goto out; | ||
1195 | |||
1196 | die_if_kernel("Reserved instruction in kernel code", regs); | ||
1197 | |||
1198 | if (unlikely(compute_return_epc(regs) < 0)) | ||
1199 | goto out; | ||
1200 | |||
1201 | if (!get_isa16_mode(regs->cp0_epc)) { | ||
1202 | if (unlikely(get_user(opcode, epc) < 0)) | ||
1203 | status = SIGSEGV; | ||
1204 | |||
1205 | if (!cpu_has_llsc && status < 0) | ||
1206 | status = simulate_llsc(regs, opcode); | ||
1207 | |||
1208 | if (status < 0) | ||
1209 | status = simulate_rdhwr_normal(regs, opcode); | ||
1210 | |||
1211 | if (status < 0) | ||
1212 | status = simulate_sync(regs, opcode); | ||
1213 | |||
1214 | if (status < 0) | ||
1215 | status = simulate_fp(regs, opcode, old_epc, old31); | ||
1216 | |||
1217 | #ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION | ||
1218 | if (status < 0) | ||
1219 | status = simulate_loongson3_cpucfg(regs, opcode); | ||
1220 | #endif | ||
1221 | } else if (cpu_has_mmips) { | ||
1222 | unsigned short mmop[2] = { 0 }; | ||
1223 | |||
1224 | if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0)) | ||
1225 | status = SIGSEGV; | ||
1226 | if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0)) | ||
1227 | status = SIGSEGV; | ||
1228 | opcode = mmop[0]; | ||
1229 | opcode = (opcode << 16) | mmop[1]; | ||
1230 | |||
1231 | if (status < 0) | ||
1232 | status = simulate_rdhwr_mm(regs, opcode); | ||
1233 | } | ||
1234 | |||
1235 | if (status < 0) | ||
1236 | status = SIGILL; | ||
1237 | |||
1238 | if (unlikely(status > 0)) { | ||
1239 | regs->cp0_epc = old_epc; /* Undo skip-over. */ | ||
1240 | regs->regs[31] = old31; | ||
1241 | force_sig(status); | ||
1242 | } | ||
1243 | |||
1244 | out: | ||
1245 | exception_exit(prev_state); | ||
1246 | } | ||
1247 | |||
1248 | /* | ||
1249 | * No lock; only written during early bootup by CPU 0. | ||
1250 | */ | ||
1251 | static RAW_NOTIFIER_HEAD(cu2_chain); | ||
1252 | |||
1253 | int __ref register_cu2_notifier(struct notifier_block *nb) | ||
1254 | { | ||
1255 | return raw_notifier_chain_register(&cu2_chain, nb); | ||
1256 | } | ||
1257 | |||
1258 | int cu2_notifier_call_chain(unsigned long val, void *v) | ||
1259 | { | ||
1260 | return raw_notifier_call_chain(&cu2_chain, val, v); | ||
1261 | } | ||
1262 | |||
1263 | static int default_cu2_call(struct notifier_block *nfb, unsigned long action, | ||
1264 | void *data) | ||
1265 | { | ||
1266 | struct pt_regs *regs = data; | ||
1267 | |||
1268 | die_if_kernel("COP2: Unhandled kernel unaligned access or invalid " | ||
1269 | "instruction", regs); | ||
1270 | force_sig(SIGILL); | ||
1271 | |||
1272 | return NOTIFY_OK; | ||
1273 | } | ||
1274 | |||
1275 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
1276 | |||
1277 | static int enable_restore_fp_context(int msa) | ||
1278 | { | ||
1279 | int err, was_fpu_owner, prior_msa; | ||
1280 | bool first_fp; | ||
1281 | |||
1282 | /* Initialize context if it hasn't been used already */ | ||
1283 | first_fp = init_fp_ctx(current); | ||
1284 | |||
1285 | if (first_fp) { | ||
1286 | preempt_disable(); | ||
1287 | err = own_fpu_inatomic(1); | ||
1288 | if (msa && !err) { | ||
1289 | enable_msa(); | ||
1290 | /* | ||
1291 | * With MSA enabled, userspace can see MSACSR | ||
1292 | * and the MSA registers, but their values may | ||
1293 | * belong to a task that ran before this one; | ||
1294 | * restore them from the saved FP/MSA context. | ||
1295 | */ | ||
1296 | write_msa_csr(current->thread.fpu.msacsr); | ||
1297 | /* | ||
1298 | * own_fpu_inatomic(1) only restores the low 64 bits; | ||
1299 | * fix up the high 64 bits here. | ||
1300 | */ | ||
1301 | init_msa_upper(); | ||
1302 | set_thread_flag(TIF_USEDMSA); | ||
1303 | set_thread_flag(TIF_MSA_CTX_LIVE); | ||
1304 | } | ||
1305 | preempt_enable(); | ||
1306 | return err; | ||
1307 | } | ||
1308 | |||
1309 | /* | ||
1310 | * This task has formerly used the FP context. | ||
1311 | * | ||
1312 | * If this thread has no live MSA vector context then we can simply | ||
1313 | * restore the scalar FP context. If it has live MSA vector context | ||
1314 | * (that is, it has or may have used MSA since last performing a | ||
1315 | * function call) then we'll need to restore the vector context. This | ||
1316 | * applies even if we're currently only executing a scalar FP | ||
1317 | * instruction. This is because if we were to later execute an MSA | ||
1318 | * instruction then we'd either have to: | ||
1319 | * | ||
1320 | * - Restore the vector context & clobber any registers modified by | ||
1321 | * scalar FP instructions between now & then. | ||
1322 | * | ||
1323 | * or | ||
1324 | * | ||
1325 | * - Not restore the vector context & lose the most significant bits | ||
1326 | * of all vector registers. | ||
1327 | * | ||
1328 | * Neither of those options is acceptable. We cannot restore the least | ||
1329 | * significant bits of the registers now & only restore the most | ||
1330 | * significant bits later because the most significant bits of any | ||
1331 | * vector registers whose aliased FP register is modified now will have | ||
1332 | * been zeroed. We'd have no way to know that when restoring the vector | ||
1333 | * context & thus may load an outdated value for the most significant | ||
1334 | * bits of a vector register. | ||
1335 | */ | ||
1336 | if (!msa && !thread_msa_context_live()) | ||
1337 | return own_fpu(1); | ||
1338 | |||
1339 | /* | ||
1340 | * This task is using or has previously used MSA. Thus we require | ||
1341 | * that Status.FR == 1. | ||
1342 | */ | ||
1343 | preempt_disable(); | ||
1344 | was_fpu_owner = is_fpu_owner(); | ||
1345 | err = own_fpu_inatomic(0); | ||
1346 | if (err) | ||
1347 | goto out; | ||
1348 | |||
1349 | enable_msa(); | ||
1350 | write_msa_csr(current->thread.fpu.msacsr); | ||
1351 | set_thread_flag(TIF_USEDMSA); | ||
1352 | |||
1353 | /* | ||
1354 | * If this is the first time that the task is using MSA and it has | ||
1355 | * previously used scalar FP in this time slice then we already have | ||
1356 | * FP context which we shouldn't clobber. We do however need to clear | ||
1357 | * the upper 64b of each vector register so that this task has no | ||
1358 | * opportunity to see data left behind by another. | ||
1359 | */ | ||
1360 | prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE); | ||
1361 | if (!prior_msa && was_fpu_owner) { | ||
1362 | init_msa_upper(); | ||
1363 | |||
1364 | goto out; | ||
1365 | } | ||
1366 | |||
1367 | if (!prior_msa) { | ||
1368 | /* | ||
1369 | * Restore the least significant 64b of each vector register | ||
1370 | * from the existing scalar FP context. | ||
1371 | */ | ||
1372 | _restore_fp(current); | ||
1373 | |||
1374 | /* | ||
1375 | * The task has not formerly used MSA, so clear the upper 64b | ||
1376 | * of each vector register such that it cannot see data left | ||
1377 | * behind by another task. | ||
1378 | */ | ||
1379 | init_msa_upper(); | ||
1380 | } else { | ||
1381 | /* We need to restore the vector context. */ | ||
1382 | restore_msa(current); | ||
1383 | |||
1384 | /* Restore the scalar FP control & status register */ | ||
1385 | if (!was_fpu_owner) | ||
1386 | write_32bit_cp1_register(CP1_STATUS, | ||
1387 | current->thread.fpu.fcr31); | ||
1388 | } | ||
1389 | |||
1390 | out: | ||
1391 | preempt_enable(); | ||
1392 | |||
1393 | return 0; | ||
1394 | } | ||
1395 | |||
1396 | #else /* !CONFIG_MIPS_FP_SUPPORT */ | ||
1397 | |||
1398 | static int enable_restore_fp_context(int msa) | ||
1399 | { | ||
1400 | return SIGILL; | ||
1401 | } | ||
1402 | |||
1403 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
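The constraints the long comment in enable_restore_fp_context() works through all follow from register aliasing: scalar FP register $f<n> is architecturally the low 64 bits of MSA vector register $w<n>, and a scalar FP write zeroes the upper half. A conceptual sketch of that aliasing (not a kernel data structure):

#include <stdint.h>

/* One FP/MSA register pair. Writing the scalar view (fp64, i.e. $f<n>)
 * architecturally clobbers the upper 64 bits of the vector view
 * ($w<n>), which is why live MSA context has to be restored in full
 * rather than merged with scalar FP context after the fact. */
union fp_msa_reg {
	uint64_t fp64;		/* $f<n>: scalar view, low 64 bits */
	uint64_t msa[2];	/* $w<n>: full 128-bit vector view */
};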
1404 | |||
1405 | asmlinkage void do_cpu(struct pt_regs *regs) | ||
1406 | { | ||
1407 | enum ctx_state prev_state; | ||
1408 | unsigned int __user *epc; | ||
1409 | unsigned long old_epc, old31; | ||
1410 | unsigned int opcode; | ||
1411 | unsigned int cpid; | ||
1412 | int status; | ||
1413 | |||
1414 | prev_state = exception_enter(); | ||
1415 | cpid = (regs->cp0_cause >> CAUSEB_CE) & 3; | ||
1416 | |||
1417 | if (cpid != 2) | ||
1418 | die_if_kernel("do_cpu invoked from kernel context!", regs); | ||
1419 | |||
1420 | switch (cpid) { | ||
1421 | case 0: | ||
1422 | epc = (unsigned int __user *)exception_epc(regs); | ||
1423 | old_epc = regs->cp0_epc; | ||
1424 | old31 = regs->regs[31]; | ||
1425 | opcode = 0; | ||
1426 | status = -1; | ||
1427 | |||
1428 | if (unlikely(compute_return_epc(regs) < 0)) | ||
1429 | break; | ||
1430 | |||
1431 | if (!get_isa16_mode(regs->cp0_epc)) { | ||
1432 | if (unlikely(get_user(opcode, epc) < 0)) | ||
1433 | status = SIGSEGV; | ||
1434 | |||
1435 | if (!cpu_has_llsc && status < 0) | ||
1436 | status = simulate_llsc(regs, opcode); | ||
1437 | } | ||
1438 | |||
1439 | if (status < 0) | ||
1440 | status = SIGILL; | ||
1441 | |||
1442 | if (unlikely(status > 0)) { | ||
1443 | regs->cp0_epc = old_epc; /* Undo skip-over. */ | ||
1444 | regs->regs[31] = old31; | ||
1445 | force_sig(status); | ||
1446 | } | ||
1447 | |||
1448 | break; | ||
1449 | |||
1450 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
1451 | case 3: | ||
1452 | /* | ||
1453 | * The COP3 opcode space and consequently the CP0.Status.CU3 | ||
1454 | * bit and the CP0.Cause.CE=3 encoding have been removed as | ||
1455 | * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs | ||
1456 | * onwards the space has been reused for COP1X instructions, which | ||
1457 | * are enabled by the CP0.Status.CU1 bit and consequently | ||
1458 | * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable | ||
1459 | * exceptions. Some FPU-less processors that implement one | ||
1460 | * of these ISAs however use this code erroneously for COP1X | ||
1461 | * instructions. Therefore we redirect this trap to the FP | ||
1462 | * emulator too. | ||
1463 | */ | ||
1464 | if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) { | ||
1465 | force_sig(SIGILL); | ||
1466 | break; | ||
1467 | } | ||
1468 | fallthrough; | ||
1469 | case 1: { | ||
1470 | void __user *fault_addr; | ||
1471 | unsigned long fcr31; | ||
1472 | int err, sig; | ||
1473 | |||
1474 | err = enable_restore_fp_context(0); | ||
1475 | |||
1476 | if (raw_cpu_has_fpu && !err) | ||
1477 | break; | ||
1478 | |||
1479 | 		sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0, | ||
1480 | &fault_addr); | ||
1481 | |||
1482 | /* | ||
1483 | * We can't allow the emulated instruction to leave | ||
1484 | * any enabled Cause bits set in $fcr31. | ||
1485 | */ | ||
1486 | fcr31 = mask_fcr31_x(current->thread.fpu.fcr31); | ||
1487 | current->thread.fpu.fcr31 &= ~fcr31; | ||
1488 | |||
1489 | /* Send a signal if required. */ | ||
1490 | if (!process_fpemu_return(sig, fault_addr, fcr31) && !err) | ||
1491 | mt_ase_fp_affinity(); | ||
1492 | |||
1493 | break; | ||
1494 | } | ||
1495 | #else /* CONFIG_MIPS_FP_SUPPORT */ | ||
1496 | case 1: | ||
1497 | case 3: | ||
1498 | force_sig(SIGILL); | ||
1499 | break; | ||
1500 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
1501 | |||
1502 | case 2: | ||
1503 | raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs); | ||
1504 | break; | ||
1505 | } | ||
1506 | |||
1507 | exception_exit(prev_state); | ||
1508 | } | ||
1509 | |||
1510 | asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr) | ||
1511 | { | ||
1512 | enum ctx_state prev_state; | ||
1513 | |||
1514 | prev_state = exception_enter(); | ||
1515 | current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f; | ||
1516 | if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0, | ||
1517 | current->thread.trap_nr, SIGFPE) == NOTIFY_STOP) | ||
1518 | goto out; | ||
1519 | |||
1520 | /* Clear MSACSR.Cause before enabling interrupts */ | ||
1521 | write_msa_csr(msacsr & ~MSA_CSR_CAUSEF); | ||
1522 | local_irq_enable(); | ||
1523 | |||
1524 | die_if_kernel("do_msa_fpe invoked from kernel context!", regs); | ||
1525 | force_sig(SIGFPE); | ||
1526 | out: | ||
1527 | exception_exit(prev_state); | ||
1528 | } | ||
1529 | |||
1530 | asmlinkage void do_msa(struct pt_regs *regs) | ||
1531 | { | ||
1532 | enum ctx_state prev_state; | ||
1533 | int err; | ||
1534 | |||
1535 | prev_state = exception_enter(); | ||
1536 | |||
1537 | if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) { | ||
1538 | force_sig(SIGILL); | ||
1539 | goto out; | ||
1540 | } | ||
1541 | |||
1542 | die_if_kernel("do_msa invoked from kernel context!", regs); | ||
1543 | |||
1544 | err = enable_restore_fp_context(1); | ||
1545 | if (err) | ||
1546 | force_sig(SIGILL); | ||
1547 | out: | ||
1548 | exception_exit(prev_state); | ||
1549 | } | ||
1550 | |||
1551 | asmlinkage void do_mdmx(struct pt_regs *regs) | ||
1552 | { | ||
1553 | enum ctx_state prev_state; | ||
1554 | |||
1555 | prev_state = exception_enter(); | ||
1556 | force_sig(SIGILL); | ||
1557 | exception_exit(prev_state); | ||
1558 | } | ||
1559 | |||
1560 | /* | ||
1561 | * Called with interrupts disabled. | ||
1562 | */ | ||
1563 | asmlinkage void do_watch(struct pt_regs *regs) | ||
1564 | { | ||
1565 | enum ctx_state prev_state; | ||
1566 | |||
1567 | prev_state = exception_enter(); | ||
1568 | /* | ||
1569 | * Clear the WP bit (bit 22) of the cause register so we don't loop | ||
1570 | * forever. | ||
1571 | */ | ||
1572 | clear_c0_cause(CAUSEF_WP); | ||
1573 | |||
1574 | /* | ||
1575 | * If the current thread has the watch registers loaded, save | ||
1576 | * their values and send SIGTRAP. Otherwise another thread | ||
1577 | * left the registers set, clear them and continue. | ||
1578 | */ | ||
1579 | if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) { | ||
1580 | mips_read_watch_registers(); | ||
1581 | local_irq_enable(); | ||
1582 | force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL); | ||
1583 | } else { | ||
1584 | mips_clear_watch_registers(); | ||
1585 | local_irq_enable(); | ||
1586 | } | ||
1587 | exception_exit(prev_state); | ||
1588 | } | ||
1589 | |||
1590 | asmlinkage void do_mcheck(struct pt_regs *regs) | ||
1591 | { | ||
1592 | int multi_match = regs->cp0_status & ST0_TS; | ||
1593 | enum ctx_state prev_state; | ||
1594 | mm_segment_t old_fs = get_fs(); | ||
1595 | |||
1596 | prev_state = exception_enter(); | ||
1597 | show_regs(regs); | ||
1598 | |||
1599 | if (multi_match) { | ||
1600 | dump_tlb_regs(); | ||
1601 | pr_info("\n"); | ||
1602 | dump_tlb_all(); | ||
1603 | } | ||
1604 | |||
1605 | if (!user_mode(regs)) | ||
1606 | set_fs(KERNEL_DS); | ||
1607 | |||
1608 | show_code((unsigned int __user *) regs->cp0_epc); | ||
1609 | |||
1610 | set_fs(old_fs); | ||
1611 | |||
1612 | /* | ||
1613 | * Some chips may have other causes of machine check (e.g. SB1 | ||
1614 | * graduation timer) | ||
1615 | */ | ||
1616 | panic("Caught Machine Check exception - %scaused by multiple " | ||
1617 | "matching entries in the TLB.", | ||
1618 | (multi_match) ? "" : "not "); | ||
1619 | } | ||
1620 | |||
1621 | asmlinkage void do_mt(struct pt_regs *regs) | ||
1622 | { | ||
1623 | int subcode; | ||
1624 | |||
1625 | subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT) | ||
1626 | >> VPECONTROL_EXCPT_SHIFT; | ||
1627 | switch (subcode) { | ||
1628 | case 0: | ||
1629 | printk(KERN_DEBUG "Thread Underflow\n"); | ||
1630 | break; | ||
1631 | case 1: | ||
1632 | printk(KERN_DEBUG "Thread Overflow\n"); | ||
1633 | break; | ||
1634 | case 2: | ||
1635 | printk(KERN_DEBUG "Invalid YIELD Qualifier\n"); | ||
1636 | break; | ||
1637 | case 3: | ||
1638 | printk(KERN_DEBUG "Gating Storage Exception\n"); | ||
1639 | break; | ||
1640 | case 4: | ||
1641 | printk(KERN_DEBUG "YIELD Scheduler Exception\n"); | ||
1642 | break; | ||
1643 | case 5: | ||
1644 | printk(KERN_DEBUG "Gating Storage Scheduler Exception\n"); | ||
1645 | break; | ||
1646 | default: | ||
1647 | printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n", | ||
1648 | subcode); | ||
1649 | break; | ||
1650 | } | ||
1651 | die_if_kernel("MIPS MT Thread exception in kernel", regs); | ||
1652 | |||
1653 | force_sig(SIGILL); | ||
1654 | } | ||
1655 | |||
1656 | |||
1657 | asmlinkage void do_dsp(struct pt_regs *regs) | ||
1658 | { | ||
1659 | if (cpu_has_dsp) | ||
1660 | panic("Unexpected DSP exception"); | ||
1661 | |||
1662 | force_sig(SIGILL); | ||
1663 | } | ||
1664 | |||
1665 | asmlinkage void do_reserved(struct pt_regs *regs) | ||
1666 | { | ||
1667 | /* | ||
1668 | * Game over - no way to handle this if it ever occurs. Most probably | ||
1669 | * caused by a new unknown cpu type or after another deadly | ||
1670 | * hard/software error. | ||
1671 | */ | ||
1672 | show_regs(regs); | ||
1673 | panic("Caught reserved exception %ld - should not happen.", | ||
1674 | (regs->cp0_cause & 0x7f) >> 2); | ||
1675 | } | ||
1676 | |||
1677 | static int __initdata l1parity = 1; | ||
1678 | static int __init nol1parity(char *s) | ||
1679 | { | ||
1680 | l1parity = 0; | ||
1681 | return 1; | ||
1682 | } | ||
1683 | __setup("nol1par", nol1parity); | ||
1684 | static int __initdata l2parity = 1; | ||
1685 | static int __init nol2parity(char *s) | ||
1686 | { | ||
1687 | l2parity = 0; | ||
1688 | return 1; | ||
1689 | } | ||
1690 | __setup("nol2par", nol2parity); | ||
1691 | |||
1692 | /* | ||
1693 | * Some MIPS CPUs can enable/disable cache parity detection, but | ||
1694 | * they do it in different ways. | ||
1695 | */ | ||
1696 | static inline __init void parity_protection_init(void) | ||
1697 | { | ||
1698 | #define ERRCTL_PE 0x80000000 | ||
1699 | #define ERRCTL_L2P 0x00800000 | ||
1700 | |||
1701 | if (mips_cm_revision() >= CM_REV_CM3) { | ||
1702 | ulong gcr_ectl, cp0_ectl; | ||
1703 | |||
1704 | /* | ||
1705 | * With CM3 systems we need to ensure that the L1 & L2 | ||
1706 | * parity enables are set to the same value, since this | ||
1707 | * is presumed by the hardware engineers. | ||
1708 | * | ||
1709 | * If the user disabled either of L1 or L2 ECC checking, | ||
1710 | * disable both. | ||
1711 | */ | ||
1712 | l1parity &= l2parity; | ||
1713 | l2parity &= l1parity; | ||
1714 | |||
1715 | /* Probe L1 ECC support */ | ||
1716 | cp0_ectl = read_c0_ecc(); | ||
1717 | write_c0_ecc(cp0_ectl | ERRCTL_PE); | ||
1718 | back_to_back_c0_hazard(); | ||
1719 | cp0_ectl = read_c0_ecc(); | ||
1720 | |||
1721 | /* Probe L2 ECC support */ | ||
1722 | gcr_ectl = read_gcr_err_control(); | ||
1723 | |||
1724 | if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) || | ||
1725 | !(cp0_ectl & ERRCTL_PE)) { | ||
1726 | /* | ||
1727 | * One of L1 or L2 ECC checking isn't supported, | ||
1728 | * so we cannot enable either. | ||
1729 | */ | ||
1730 | l1parity = l2parity = 0; | ||
1731 | } | ||
1732 | |||
1733 | /* Configure L1 ECC checking */ | ||
1734 | if (l1parity) | ||
1735 | cp0_ectl |= ERRCTL_PE; | ||
1736 | else | ||
1737 | cp0_ectl &= ~ERRCTL_PE; | ||
1738 | write_c0_ecc(cp0_ectl); | ||
1739 | back_to_back_c0_hazard(); | ||
1740 | WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity); | ||
1741 | |||
1742 | /* Configure L2 ECC checking */ | ||
1743 | if (l2parity) | ||
1744 | gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN; | ||
1745 | else | ||
1746 | gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN; | ||
1747 | write_gcr_err_control(gcr_ectl); | ||
1748 | gcr_ectl = read_gcr_err_control(); | ||
1749 | gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN; | ||
1750 | WARN_ON(!!gcr_ectl != l2parity); | ||
1751 | |||
1752 | pr_info("Cache parity protection %sabled\n", | ||
1753 | l1parity ? "en" : "dis"); | ||
1754 | return; | ||
1755 | } | ||
1756 | |||
1757 | switch (current_cpu_type()) { | ||
1758 | case CPU_24K: | ||
1759 | case CPU_34K: | ||
1760 | case CPU_74K: | ||
1761 | case CPU_1004K: | ||
1762 | case CPU_1074K: | ||
1763 | case CPU_INTERAPTIV: | ||
1764 | case CPU_PROAPTIV: | ||
1765 | case CPU_P5600: | ||
1766 | case CPU_QEMU_GENERIC: | ||
1767 | case CPU_P6600: | ||
1768 | { | ||
1769 | unsigned long errctl; | ||
1770 | unsigned int l1parity_present, l2parity_present; | ||
1771 | |||
1772 | errctl = read_c0_ecc(); | ||
1773 | errctl &= ~(ERRCTL_PE|ERRCTL_L2P); | ||
1774 | |||
1775 | /* probe L1 parity support */ | ||
1776 | write_c0_ecc(errctl | ERRCTL_PE); | ||
1777 | back_to_back_c0_hazard(); | ||
1778 | l1parity_present = (read_c0_ecc() & ERRCTL_PE); | ||
1779 | |||
1780 | /* probe L2 parity support */ | ||
1781 | write_c0_ecc(errctl|ERRCTL_L2P); | ||
1782 | back_to_back_c0_hazard(); | ||
1783 | l2parity_present = (read_c0_ecc() & ERRCTL_L2P); | ||
1784 | |||
1785 | if (l1parity_present && l2parity_present) { | ||
1786 | if (l1parity) | ||
1787 | errctl |= ERRCTL_PE; | ||
1788 | if (l1parity ^ l2parity) | ||
1789 | errctl |= ERRCTL_L2P; | ||
1790 | } else if (l1parity_present) { | ||
1791 | if (l1parity) | ||
1792 | errctl |= ERRCTL_PE; | ||
1793 | } else if (l2parity_present) { | ||
1794 | if (l2parity) | ||
1795 | errctl |= ERRCTL_L2P; | ||
1796 | } else { | ||
1797 | /* No parity available */ | ||
1798 | } | ||
1799 | |||
1800 | printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl); | ||
1801 | |||
1802 | write_c0_ecc(errctl); | ||
1803 | back_to_back_c0_hazard(); | ||
1804 | errctl = read_c0_ecc(); | ||
1805 | printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl); | ||
1806 | |||
1807 | if (l1parity_present) | ||
1808 | printk(KERN_INFO "Cache parity protection %sabled\n", | ||
1809 | (errctl & ERRCTL_PE) ? "en" : "dis"); | ||
1810 | |||
1811 | if (l2parity_present) { | ||
1812 | if (l1parity_present && l1parity) | ||
1813 | errctl ^= ERRCTL_L2P; | ||
1814 | printk(KERN_INFO "L2 cache parity protection %sabled\n", | ||
1815 | (errctl & ERRCTL_L2P) ? "en" : "dis"); | ||
1816 | } | ||
1817 | } | ||
1818 | break; | ||
1819 | |||
1820 | case CPU_5KC: | ||
1821 | case CPU_5KE: | ||
1822 | case CPU_LOONGSON32: | ||
1823 | write_c0_ecc(0x80000000); | ||
1824 | back_to_back_c0_hazard(); | ||
1825 | /* Set the PE bit (bit 31) in the c0_errctl register. */ | ||
1826 | printk(KERN_INFO "Cache parity protection %sabled\n", | ||
1827 | (read_c0_ecc() & 0x80000000) ? "en" : "dis"); | ||
1828 | break; | ||
1829 | case CPU_20KC: | ||
1830 | case CPU_25KF: | ||
1831 | /* Clear the DE bit (bit 16) in the c0_status register. */ | ||
1832 | printk(KERN_INFO "Enable cache parity protection for " | ||
1833 | "MIPS 20KC/25KF CPUs.\n"); | ||
1834 | clear_c0_status(ST0_DE); | ||
1835 | break; | ||
1836 | default: | ||
1837 | break; | ||
1838 | } | ||
1839 | } | ||
1840 | |||
1841 | asmlinkage void cache_parity_error(void) | ||
1842 | { | ||
1843 | const int field = 2 * sizeof(unsigned long); | ||
1844 | unsigned int reg_val; | ||
1845 | |||
1846 | /* For the moment, report the problem and hang. */ | ||
1847 | printk("Cache error exception:\n"); | ||
1848 | printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); | ||
1849 | reg_val = read_c0_cacheerr(); | ||
1850 | printk("c0_cacheerr == %08x\n", reg_val); | ||
1851 | |||
1852 | printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n", | ||
1853 | reg_val & (1<<30) ? "secondary" : "primary", | ||
1854 | reg_val & (1<<31) ? "data" : "insn"); | ||
1855 | if ((cpu_has_mips_r2_r6) && | ||
1856 | ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) { | ||
1857 | pr_err("Error bits: %s%s%s%s%s%s%s%s\n", | ||
1858 | reg_val & (1<<29) ? "ED " : "", | ||
1859 | reg_val & (1<<28) ? "ET " : "", | ||
1860 | reg_val & (1<<27) ? "ES " : "", | ||
1861 | reg_val & (1<<26) ? "EE " : "", | ||
1862 | reg_val & (1<<25) ? "EB " : "", | ||
1863 | reg_val & (1<<24) ? "EI " : "", | ||
1864 | reg_val & (1<<23) ? "E1 " : "", | ||
1865 | reg_val & (1<<22) ? "E0 " : ""); | ||
1866 | } else { | ||
1867 | pr_err("Error bits: %s%s%s%s%s%s%s\n", | ||
1868 | reg_val & (1<<29) ? "ED " : "", | ||
1869 | reg_val & (1<<28) ? "ET " : "", | ||
1870 | reg_val & (1<<26) ? "EE " : "", | ||
1871 | reg_val & (1<<25) ? "EB " : "", | ||
1872 | reg_val & (1<<24) ? "EI " : "", | ||
1873 | reg_val & (1<<23) ? "E1 " : "", | ||
1874 | reg_val & (1<<22) ? "E0 " : ""); | ||
1875 | } | ||
1876 | printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1)); | ||
1877 | |||
1878 | #if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64) | ||
1879 | if (reg_val & (1<<22)) | ||
1880 | printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0()); | ||
1881 | |||
1882 | if (reg_val & (1<<23)) | ||
1883 | printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1()); | ||
1884 | #endif | ||
1885 | |||
1886 | panic("Can't handle the cache error!"); | ||
1887 | } | ||
1888 | |||
1889 | asmlinkage void do_ftlb(void) | ||
1890 | { | ||
1891 | const int field = 2 * sizeof(unsigned long); | ||
1892 | unsigned int reg_val; | ||
1893 | |||
1894 | /* For the moment, report the problem and hang. */ | ||
1895 | if ((cpu_has_mips_r2_r6) && | ||
1896 | (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) || | ||
1897 | ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) { | ||
1898 | pr_err("FTLB error exception, cp0_ecc=0x%08x:\n", | ||
1899 | read_c0_ecc()); | ||
1900 | pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc()); | ||
1901 | reg_val = read_c0_cacheerr(); | ||
1902 | pr_err("c0_cacheerr == %08x\n", reg_val); | ||
1903 | |||
1904 | if ((reg_val & 0xc0000000) == 0xc0000000) { | ||
1905 | pr_err("Decoded c0_cacheerr: FTLB parity error\n"); | ||
1906 | } else { | ||
1907 | pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n", | ||
1908 | reg_val & (1<<30) ? "secondary" : "primary", | ||
1909 | reg_val & (1<<31) ? "data" : "insn"); | ||
1910 | } | ||
1911 | } else { | ||
1912 | pr_err("FTLB error exception\n"); | ||
1913 | } | ||
1914 | /* Just print the cacheerr bits for now */ | ||
1915 | cache_parity_error(); | ||
1916 | } | ||
1917 | |||
1918 | asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1) | ||
1919 | { | ||
1920 | u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >> | ||
1921 | LOONGSON_DIAG1_EXCCODE_SHIFT; | ||
1922 | enum ctx_state prev_state; | ||
1923 | |||
1924 | prev_state = exception_enter(); | ||
1925 | |||
1926 | switch (exccode) { | ||
1927 | case 0x08: | ||
1928 | /* Undocumented exception, will trigger on certain | ||
1929 | * also-undocumented instructions accessible from userspace. | ||
1930 | * Processor state is not otherwise corrupted, but currently | ||
1931 | * we don't know how to proceed. Maybe there is some | ||
1932 | * undocumented control flag to enable the instructions? | ||
1933 | */ | ||
1934 | force_sig(SIGILL); | ||
1935 | break; | ||
1936 | |||
1937 | default: | ||
1938 | /* None of the other exceptions, documented or not, have | ||
1939 | * further details given; none are encountered in the wild | ||
1940 | * either. Panic in case some of them turn out to be fatal. | ||
1941 | */ | ||
1942 | show_regs(regs); | ||
1943 | panic("Unhandled Loongson exception - GSCause = %08x", diag1); | ||
1944 | } | ||
1945 | |||
1946 | exception_exit(prev_state); | ||
1947 | } | ||
1948 | |||
1949 | /* | ||
1950 | * SDBBP EJTAG debug exception handler. | ||
1951 | * We skip the instruction and continue at the next one. | ||
1952 | */ | ||
1953 | void ejtag_exception_handler(struct pt_regs *regs) | ||
1954 | { | ||
1955 | const int field = 2 * sizeof(unsigned long); | ||
1956 | unsigned long depc, old_epc, old_ra; | ||
1957 | unsigned int debug; | ||
1958 | |||
1959 | printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n"); | ||
1960 | depc = read_c0_depc(); | ||
1961 | debug = read_c0_debug(); | ||
1962 | printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug); | ||
1963 | if (debug & 0x80000000) { | ||
1964 | /* | ||
1965 | * In branch delay slot. | ||
1966 | * We cheat a little bit here and use EPC to calculate the | ||
1967 | * debug return address (DEPC). EPC is restored after the | ||
1968 | * calculation. | ||
1969 | */ | ||
1970 | old_epc = regs->cp0_epc; | ||
1971 | old_ra = regs->regs[31]; | ||
1972 | regs->cp0_epc = depc; | ||
1973 | compute_return_epc(regs); | ||
1974 | depc = regs->cp0_epc; | ||
1975 | regs->cp0_epc = old_epc; | ||
1976 | regs->regs[31] = old_ra; | ||
1977 | } else | ||
1978 | depc += 4; | ||
1979 | write_c0_depc(depc); | ||
1980 | |||
1981 | #if 0 | ||
1982 | printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n"); | ||
1983 | write_c0_debug(debug | 0x100); | ||
1984 | #endif | ||
1985 | } | ||
1986 | |||
1987 | /* | ||
1988 | * NMI exception handler. | ||
1989 | * No lock; only written during early bootup by CPU 0. | ||
1990 | */ | ||
1991 | static RAW_NOTIFIER_HEAD(nmi_chain); | ||
1992 | |||
1993 | int register_nmi_notifier(struct notifier_block *nb) | ||
1994 | { | ||
1995 | return raw_notifier_chain_register(&nmi_chain, nb); | ||
1996 | } | ||
1997 | |||
1998 | void __noreturn nmi_exception_handler(struct pt_regs *regs) | ||
1999 | { | ||
2000 | char str[100]; | ||
2001 | |||
2002 | nmi_enter(); | ||
2003 | raw_notifier_call_chain(&nmi_chain, 0, regs); | ||
2004 | bust_spinlocks(1); | ||
2005 | snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n", | ||
2006 | smp_processor_id(), regs->cp0_epc); | ||
2007 | regs->cp0_epc = read_c0_errorepc(); | ||
2008 | die(str, regs); | ||
2009 | nmi_exit(); | ||
2010 | } | ||
2011 | |||
2012 | #define VECTORSPACING 0x100 /* for EI/VI mode */ | ||
2013 | |||
2014 | unsigned long ebase; | ||
2015 | EXPORT_SYMBOL_GPL(ebase); | ||
2016 | unsigned long exception_handlers[32]; | ||
2017 | unsigned long vi_handlers[64]; | ||
2018 | |||
2019 | void __init *set_except_vector(int n, void *addr) | ||
2020 | { | ||
2021 | unsigned long handler = (unsigned long) addr; | ||
2022 | unsigned long old_handler; | ||
2023 | |||
2024 | #ifdef CONFIG_CPU_MICROMIPS | ||
2025 | /* | ||
2026 | * Only the TLB handlers are cache aligned with an even | ||
2027 | * address. All other handlers are on an odd address and | ||
2028 | * require no modification. Otherwise, MIPS32 mode will | ||
2029 | * be entered when handling any TLB exceptions. That | ||
2030 | * would be bad...since we must stay in microMIPS mode. | ||
2031 | */ | ||
2032 | if (!(handler & 0x1)) | ||
2033 | handler |= 1; | ||
2034 | #endif | ||
2035 | old_handler = xchg(&exception_handlers[n], handler); | ||
2036 | |||
2037 | if (n == 0 && cpu_has_divec) { | ||
2038 | #ifdef CONFIG_CPU_MICROMIPS | ||
2039 | unsigned long jump_mask = ~((1 << 27) - 1); | ||
2040 | #else | ||
2041 | unsigned long jump_mask = ~((1 << 28) - 1); | ||
2042 | #endif | ||
2043 | u32 *buf = (u32 *)(ebase + 0x200); | ||
2044 | unsigned int k0 = 26; | ||
2045 | if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) { | ||
2046 | uasm_i_j(&buf, handler & ~jump_mask); | ||
2047 | uasm_i_nop(&buf); | ||
2048 | } else { | ||
2049 | UASM_i_LA(&buf, k0, handler); | ||
2050 | uasm_i_jr(&buf, k0); | ||
2051 | uasm_i_nop(&buf); | ||
2052 | } | ||
2053 | local_flush_icache_range(ebase + 0x200, (unsigned long)buf); | ||
2054 | } | ||
2055 | return (void *)old_handler; | ||
2056 | } | ||
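The jump_mask test above encodes a MIPS32 reachability rule: a "j" instruction supplies only the low 28 bits of the target, so it can reach addresses within the same 256MB region, and anything farther needs the load-address-plus-"jr" fallback (microMIPS jumps cover 2^27 bytes, hence the alternate mask). A hedged sketch of the test, with a helper name of our own:

#include <stdbool.h>
#include <stdint.h>

/* True if a classic MIPS32 "j" at pc can reach target, i.e. both
 * addresses share the top 4 bits (one 2^28-byte region). */
static bool j_can_reach(uint32_t pc, uint32_t target)
{
	const uint32_t region_mask = ~((1u << 28) - 1);

	return (pc & region_mask) == (target & region_mask);
}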
2057 | |||
2058 | static void do_default_vi(void) | ||
2059 | { | ||
2060 | show_regs(get_irq_regs()); | ||
2061 | panic("Caught unexpected vectored interrupt."); | ||
2062 | } | ||
2063 | |||
2064 | static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) | ||
2065 | { | ||
2066 | unsigned long handler; | ||
2067 | unsigned long old_handler = vi_handlers[n]; | ||
2068 | int srssets = current_cpu_data.srsets; | ||
2069 | u16 *h; | ||
2070 | unsigned char *b; | ||
2071 | |||
2072 | BUG_ON(!cpu_has_veic && !cpu_has_vint); | ||
2073 | |||
2074 | if (addr == NULL) { | ||
2075 | handler = (unsigned long) do_default_vi; | ||
2076 | srs = 0; | ||
2077 | } else | ||
2078 | handler = (unsigned long) addr; | ||
2079 | vi_handlers[n] = handler; | ||
2080 | |||
2081 | b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING); | ||
2082 | |||
2083 | if (srs >= srssets) | ||
2084 | panic("Shadow register set %d not supported", srs); | ||
2085 | |||
2086 | if (cpu_has_veic) { | ||
2087 | if (board_bind_eic_interrupt) | ||
2088 | board_bind_eic_interrupt(n, srs); | ||
2089 | } else if (cpu_has_vint) { | ||
2090 | /* SRSMap is only defined if shadow sets are implemented */ | ||
2091 | if (srssets > 1) | ||
2092 | change_c0_srsmap(0xf << n*4, srs << n*4); | ||
2093 | } | ||
2094 | |||
2095 | if (srs == 0) { | ||
2096 | /* | ||
2097 | * If no shadow set is selected then use the default handler | ||
2098 | * that does normal register saving and standard interrupt exit | ||
2099 | */ | ||
2100 | extern const u8 except_vec_vi[], except_vec_vi_lui[]; | ||
2101 | extern const u8 except_vec_vi_ori[], except_vec_vi_end[]; | ||
2102 | extern const u8 rollback_except_vec_vi[]; | ||
2103 | const u8 *vec_start = using_rollback_handler() ? | ||
2104 | rollback_except_vec_vi : except_vec_vi; | ||
2105 | #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) | ||
2106 | const int lui_offset = except_vec_vi_lui - vec_start + 2; | ||
2107 | const int ori_offset = except_vec_vi_ori - vec_start + 2; | ||
2108 | #else | ||
2109 | const int lui_offset = except_vec_vi_lui - vec_start; | ||
2110 | const int ori_offset = except_vec_vi_ori - vec_start; | ||
2111 | #endif | ||
2112 | const int handler_len = except_vec_vi_end - vec_start; | ||
2113 | |||
2114 | if (handler_len > VECTORSPACING) { | ||
2115 | /* | ||
2116 | * Sigh... panicking won't help as the console | ||
2117 | * is probably not configured :( | ||
2118 | */ | ||
2119 | panic("VECTORSPACING too small"); | ||
2120 | } | ||
2121 | |||
2122 | set_handler(((unsigned long)b - ebase), vec_start, | ||
2123 | #ifdef CONFIG_CPU_MICROMIPS | ||
2124 | (handler_len - 1)); | ||
2125 | #else | ||
2126 | handler_len); | ||
2127 | #endif | ||
2128 | h = (u16 *)(b + lui_offset); | ||
2129 | *h = (handler >> 16) & 0xffff; | ||
2130 | h = (u16 *)(b + ori_offset); | ||
2131 | *h = (handler & 0xffff); | ||
2132 | local_flush_icache_range((unsigned long)b, | ||
2133 | (unsigned long)(b+handler_len)); | ||
2134 | } | ||
2135 | else { | ||
2136 | /* | ||
2137 | * In other cases jump directly to the interrupt handler. It | ||
2138 | * is the handler's responsibility to save registers if required | ||
2139 | * (e.g. hi/lo) and return from the exception using "eret". | ||
2140 | */ | ||
2141 | u32 insn; | ||
2142 | |||
2143 | h = (u16 *)b; | ||
2144 | /* j handler */ | ||
2145 | #ifdef CONFIG_CPU_MICROMIPS | ||
2146 | insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1); | ||
2147 | #else | ||
2148 | insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2); | ||
2149 | #endif | ||
2150 | h[0] = (insn >> 16) & 0xffff; | ||
2151 | h[1] = insn & 0xffff; | ||
2152 | h[2] = 0; | ||
2153 | h[3] = 0; | ||
2154 | local_flush_icache_range((unsigned long)b, | ||
2155 | (unsigned long)(b+8)); | ||
2156 | } | ||
2157 | |||
2158 | return (void *)old_handler; | ||
2159 | } | ||
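Two patching styles appear in set_vi_srs_handler(): rewriting the 16-bit immediates of a copied "lui k0, %hi / ori k0, %lo" stub, and writing a direct jump. A sketch of both encodings (helper names are ours):

#include <stdint.h>

/* Patch a "lui/ori" pair in a copied vector stub. Because ori
 * zero-extends its immediate, the high half is a plain ">> 16" with
 * no carry fix-up (an addiu-based low half would need one). */
static void patch_hi_lo(uint16_t *lui_imm, uint16_t *ori_imm,
			uint32_t handler)
{
	*lui_imm = (handler >> 16) & 0xffff;
	*ori_imm = handler & 0xffff;
}

/* Encode "j handler" for classic MIPS32: major opcode 000010 plus
 * the target's low 28 bits shifted right by 2, matching the
 * 0x08000000 case above. */
static uint32_t mips32_j(uint32_t handler)
{
	return 0x08000000u | ((handler & 0x0fffffffu) >> 2);
}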
2160 | |||
2161 | void *set_vi_handler(int n, vi_handler_t addr) | ||
2162 | { | ||
2163 | return set_vi_srs_handler(n, addr, 0); | ||
2164 | } | ||
2165 | |||
2166 | extern void tlb_init(void); | ||
2167 | |||
2168 | /* | ||
2169 | * Timer interrupt | ||
2170 | */ | ||
2171 | int cp0_compare_irq; | ||
2172 | EXPORT_SYMBOL_GPL(cp0_compare_irq); | ||
2173 | int cp0_compare_irq_shift; | ||
2174 | |||
2175 | /* | ||
2176 | * Performance counter IRQ or -1 if shared with timer | ||
2177 | */ | ||
2178 | int cp0_perfcount_irq; | ||
2179 | EXPORT_SYMBOL_GPL(cp0_perfcount_irq); | ||
2180 | |||
2181 | /* | ||
2182 | * Fast debug channel IRQ or -1 if not present | ||
2183 | */ | ||
2184 | int cp0_fdc_irq; | ||
2185 | EXPORT_SYMBOL_GPL(cp0_fdc_irq); | ||
2186 | |||
2187 | static int noulri; | ||
2188 | |||
2189 | static int __init ulri_disable(char *s) | ||
2190 | { | ||
2191 | pr_info("Disabling ulri\n"); | ||
2192 | noulri = 1; | ||
2193 | |||
2194 | return 1; | ||
2195 | } | ||
2196 | __setup("noulri", ulri_disable); | ||
2197 | |||
2198 | /* configure STATUS register */ | ||
2199 | static void configure_status(void) | ||
2200 | { | ||
2201 | /* | ||
2202 | * Disable coprocessors and select 32-bit or 64-bit addressing | ||
2203 | * and the 16/32 or 32/32 FPR register model. Reset the BEV | ||
2204 | * flag that some firmware may have left set and the TS bit (for | ||
2205 | * IP27). Set XX for ISA IV code to work. | ||
2206 | */ | ||
2207 | unsigned int status_set = ST0_KERNEL_CUMASK; | ||
2208 | #ifdef CONFIG_64BIT | ||
2209 | status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX; | ||
2210 | #endif | ||
2211 | if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV) | ||
2212 | status_set |= ST0_XX; | ||
2213 | if (cpu_has_dsp) | ||
2214 | status_set |= ST0_MX; | ||
2215 | |||
2216 | change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX, | ||
2217 | status_set); | ||
2218 | back_to_back_c0_hazard(); | ||
2219 | } | ||
2220 | |||
2221 | unsigned int hwrena; | ||
2222 | EXPORT_SYMBOL_GPL(hwrena); | ||
2223 | |||
2224 | /* configure HWRENA register */ | ||
2225 | static void configure_hwrena(void) | ||
2226 | { | ||
2227 | hwrena = cpu_hwrena_impl_bits; | ||
2228 | |||
2229 | if (cpu_has_mips_r2_r6) | ||
2230 | hwrena |= MIPS_HWRENA_CPUNUM | | ||
2231 | MIPS_HWRENA_SYNCISTEP | | ||
2232 | MIPS_HWRENA_CC | | ||
2233 | MIPS_HWRENA_CCRES; | ||
2234 | |||
2235 | if (!noulri && cpu_has_userlocal) | ||
2236 | hwrena |= MIPS_HWRENA_ULR; | ||
2237 | |||
2238 | if (hwrena) | ||
2239 | write_c0_hwrena(hwrena); | ||
2240 | } | ||
2241 | |||
2242 | static void configure_exception_vector(void) | ||
2243 | { | ||
2244 | if (cpu_has_mips_r2_r6) { | ||
2245 | unsigned long sr = set_c0_status(ST0_BEV); | ||
2246 | /* If available, use WG to set top bits of EBASE */ | ||
2247 | if (cpu_has_ebase_wg) { | ||
2248 | #ifdef CONFIG_64BIT | ||
2249 | write_c0_ebase_64(ebase | MIPS_EBASE_WG); | ||
2250 | #else | ||
2251 | write_c0_ebase(ebase | MIPS_EBASE_WG); | ||
2252 | #endif | ||
2253 | } | ||
2254 | write_c0_ebase(ebase); | ||
2255 | write_c0_status(sr); | ||
2256 | } | ||
2257 | if (cpu_has_veic || cpu_has_vint) { | ||
2258 | /* Setting vector spacing enables EI/VI mode */ | ||
2259 | change_c0_intctl(0x3e0, VECTORSPACING); | ||
2260 | } | ||
2261 | if (cpu_has_divec) { | ||
2262 | if (cpu_has_mipsmt) { | ||
2263 | unsigned int vpflags = dvpe(); | ||
2264 | set_c0_cause(CAUSEF_IV); | ||
2265 | evpe(vpflags); | ||
2266 | } else | ||
2267 | set_c0_cause(CAUSEF_IV); | ||
2268 | } | ||
2269 | } | ||
2270 | |||
2271 | void per_cpu_trap_init(bool is_boot_cpu) | ||
2272 | { | ||
2273 | unsigned int cpu = smp_processor_id(); | ||
2274 | |||
2275 | configure_status(); | ||
2276 | configure_hwrena(); | ||
2277 | |||
2278 | configure_exception_vector(); | ||
2279 | |||
2280 | /* | ||
2281 | * Before R2 both interrupt numbers were fixed to 7, so on R2 only: | ||
2282 | * | ||
2283 | * o read IntCtl.IPTI to determine the timer interrupt | ||
2284 | * o read IntCtl.IPPCI to determine the performance counter interrupt | ||
2285 | * o read IntCtl.IPFDC to determine the fast debug channel interrupt | ||
2286 | */ | ||
2287 | if (cpu_has_mips_r2_r6) { | ||
2288 | cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP; | ||
2289 | cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7; | ||
2290 | cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7; | ||
2291 | cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7; | ||
2292 | if (!cp0_fdc_irq) | ||
2293 | cp0_fdc_irq = -1; | ||
2294 | |||
2295 | } else { | ||
2296 | cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ; | ||
2297 | cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ; | ||
2298 | cp0_perfcount_irq = -1; | ||
2299 | cp0_fdc_irq = -1; | ||
2300 | } | ||
2301 | |||
2302 | if (cpu_has_mmid) | ||
2303 | cpu_data[cpu].asid_cache = 0; | ||
2304 | else if (!cpu_data[cpu].asid_cache) | ||
2305 | cpu_data[cpu].asid_cache = asid_first_version(cpu); | ||
2306 | |||
2307 | mmgrab(&init_mm); | ||
2308 | current->active_mm = &init_mm; | ||
2309 | BUG_ON(current->mm); | ||
2310 | enter_lazy_tlb(&init_mm, current); | ||
2311 | |||
2312 | /* Boot CPU's cache setup in setup_arch(). */ | ||
2313 | if (!is_boot_cpu) | ||
2314 | cpu_cache_init(); | ||
2315 | tlb_init(); | ||
2316 | TLBMISS_HANDLER_SETUP(); | ||
2317 | } | ||
2318 | |||
2319 | /* Install CPU exception handler */ | ||
2320 | void set_handler(unsigned long offset, const void *addr, unsigned long size) | ||
2321 | { | ||
2322 | #ifdef CONFIG_CPU_MICROMIPS | ||
2323 | memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size); | ||
2324 | #else | ||
2325 | memcpy((void *)(ebase + offset), addr, size); | ||
2326 | #endif | ||
2327 | local_flush_icache_range(ebase + offset, ebase + offset + size); | ||
2328 | } | ||
2329 | |||
2330 | static const char panic_null_cerr[] = | ||
2331 | "Trying to set NULL cache error exception handler\n"; | ||
2332 | |||
2333 | /* | ||
2334 | * Install uncached CPU exception handler. | ||
2335 | * This is suitable only for the cache error exception which is the only | ||
2336 | * exception handler that is being run uncached. | ||
2337 | */ | ||
2338 | void set_uncached_handler(unsigned long offset, void *addr, | ||
2339 | unsigned long size) | ||
2340 | { | ||
2341 | unsigned long uncached_ebase = CKSEG1ADDR(ebase); | ||
2342 | |||
2343 | if (!addr) | ||
2344 | panic(panic_null_cerr); | ||
2345 | |||
2346 | memcpy((void *)(uncached_ebase + offset), addr, size); | ||
2347 | } | ||
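CKSEG1ADDR() works here because KSEG0 and KSEG1 are fixed, unmapped 512MB windows onto the same low physical memory, cached and uncached respectively, so the uncached alias of the handler area is pure address arithmetic. A 32-bit sketch with illustrative constants rather than the kernel's own macros:

#include <stdint.h>

/* 32-bit MIPS fixed segments, both covering physical 0..0x1fffffff:
 *   KSEG0 0x80000000..0x9fffffff (unmapped, cached)
 *   KSEG1 0xa0000000..0xbfffffff (unmapped, uncached)
 */
static inline uint32_t kseg0_to_phys(uint32_t va)
{
	return va & 0x1fffffffu;
}

static inline uint32_t phys_to_kseg1(uint32_t pa)
{
	return pa | 0xa0000000u;
}

/* Uncached alias of a KSEG0 address, i.e. what CKSEG1ADDR() yields
 * for a 32-bit ebase. */
static inline uint32_t uncached_alias(uint32_t va)
{
	return phys_to_kseg1(kseg0_to_phys(va));
}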
2348 | |||
2349 | static int __initdata rdhwr_noopt; | ||
2350 | static int __init set_rdhwr_noopt(char *str) | ||
2351 | { | ||
2352 | rdhwr_noopt = 1; | ||
2353 | return 1; | ||
2354 | } | ||
2355 | |||
2356 | __setup("rdhwr_noopt", set_rdhwr_noopt); | ||
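The rdhwr_noopt parameter forces the plain RI handler instead of the optimized handle_ri_rdhwr installed further down in trap_init(). The reason this path matters: userland reads its thread pointer with RDHWR $29 (the UserLocal register), and cores lacking the instruction take a reserved-instruction exception that the kernel emulates. A hedged userspace sketch of the idiom, assuming a MIPS toolchain:

    static unsigned long read_tls_pointer(void)
    {
            unsigned long tp;

            /* cores without RDHWR trap to the kernel's RI handler */
            __asm__ volatile(".set push; .set mips32r2; rdhwr %0, $29; .set pop"
                             : "=r" (tp));
            return tp;
    }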
2357 | |||
2358 | void __init trap_init(void) | ||
2359 | { | ||
2360 | extern char except_vec3_generic; | ||
2361 | extern char except_vec4; | ||
2362 | extern char except_vec3_r4000; | ||
2363 | unsigned long i, vec_size; | ||
2364 | phys_addr_t ebase_pa; | ||
2365 | |||
2366 | check_wait(); | ||
2367 | |||
2368 | if (!cpu_has_mips_r2_r6) { | ||
2369 | ebase = CAC_BASE; | ||
2370 | ebase_pa = virt_to_phys((void *)ebase); | ||
2371 | vec_size = 0x400; | ||
2372 | |||
2373 | memblock_reserve(ebase_pa, vec_size); | ||
2374 | } else { | ||
2375 | if (cpu_has_veic || cpu_has_vint) | ||
2376 | vec_size = 0x200 + VECTORSPACING*64; | ||
2377 | else | ||
2378 | vec_size = PAGE_SIZE; | ||
2379 | |||
2380 | ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size)); | ||
2381 | if (!ebase_pa) | ||
2382 | panic("%s: Failed to allocate %lu bytes align=0x%x\n", | ||
2383 | __func__, vec_size, 1 << fls(vec_size)); | ||
2384 | |||
2385 | /* | ||
2386 | * Try to ensure ebase resides in KSeg0 if possible. | ||
2387 | * | ||
2388 | * It shouldn't generally be in XKPhys on MIPS64 to avoid | ||
2389 | * hitting a poorly defined exception base for Cache Errors. | ||
2390 | * The allocation is likely to be in the low 512MB of physical | ||
2391 | * memory, in which case we should be able to convert to KSeg0. | ||
2392 | * | ||
2393 | * EVA is special though as it allows segments to be rearranged | ||
2394 | * and to become uncached during cache error handling. | ||
2395 | */ | ||
2396 | if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000)) | ||
2397 | ebase = CKSEG0ADDR(ebase_pa); | ||
2398 | else | ||
2399 | ebase = (unsigned long)phys_to_virt(ebase_pa); | ||
2400 | } | ||
2401 | |||
2402 | if (cpu_has_mmips) { | ||
2403 | unsigned int config3 = read_c0_config3(); | ||
2404 | |||
2405 | if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) | ||
2406 | write_c0_config3(config3 | MIPS_CONF3_ISA_OE); | ||
2407 | else | ||
2408 | write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE); | ||
2409 | } | ||
2410 | |||
2411 | if (board_ebase_setup) | ||
2412 | board_ebase_setup(); | ||
2413 | per_cpu_trap_init(true); | ||
2414 | memblock_set_bottom_up(false); | ||
2415 | |||
2416 | /* | ||
2417 | * Copy the generic exception handlers to their final destination. | ||
2418 | * This will be overridden later as suitable for a particular | ||
2419 | * configuration. | ||
2420 | */ | ||
2421 | set_handler(0x180, &except_vec3_generic, 0x80); | ||
2422 | |||
2423 | /* | ||
2424 | * Set up default vectors | ||
2425 | */ | ||
2426 | for (i = 0; i <= 31; i++) | ||
2427 | set_except_vector(i, handle_reserved); | ||
2428 | |||
2429 | /* | ||
2430 | * Copy the EJTAG debug exception vector handler code to its final | ||
2431 | * destination. | ||
2432 | */ | ||
2433 | if (cpu_has_ejtag && board_ejtag_handler_setup) | ||
2434 | board_ejtag_handler_setup(); | ||
2435 | |||
2436 | /* | ||
2437 | * Only some CPUs have the watch exceptions. | ||
2438 | */ | ||
2439 | if (cpu_has_watch) | ||
2440 | set_except_vector(EXCCODE_WATCH, handle_watch); | ||
2441 | |||
2442 | /* | ||
2443 | * Initialise interrupt handlers | ||
2444 | */ | ||
2445 | if (cpu_has_veic || cpu_has_vint) { | ||
2446 | int nvec = cpu_has_veic ? 64 : 8; | ||
2447 | for (i = 0; i < nvec; i++) | ||
2448 | set_vi_handler(i, NULL); | ||
2449 | } | ||
2450 | else if (cpu_has_divec) | ||
2451 | set_handler(0x200, &except_vec4, 0x8); | ||
2452 | |||
2453 | /* | ||
2454 | * Some CPUs can enable/disable cache parity detection, but they | ||
2455 | * do it in different ways. | ||
2456 | */ | ||
2457 | parity_protection_init(); | ||
2458 | |||
2459 | /* | ||
2460 | * Data bus errors / instruction bus errors are signaled | ||
2461 | * by external hardware. Therefore these two exceptions | ||
2462 | * may have board-specific handlers. | ||
2463 | */ | ||
2464 | if (board_be_init) | ||
2465 | board_be_init(); | ||
2466 | |||
2467 | set_except_vector(EXCCODE_INT, using_rollback_handler() ? | ||
2468 | rollback_handle_int : handle_int); | ||
2469 | set_except_vector(EXCCODE_MOD, handle_tlbm); | ||
2470 | set_except_vector(EXCCODE_TLBL, handle_tlbl); | ||
2471 | set_except_vector(EXCCODE_TLBS, handle_tlbs); | ||
2472 | |||
2473 | set_except_vector(EXCCODE_ADEL, handle_adel); | ||
2474 | set_except_vector(EXCCODE_ADES, handle_ades); | ||
2475 | |||
2476 | set_except_vector(EXCCODE_IBE, handle_ibe); | ||
2477 | set_except_vector(EXCCODE_DBE, handle_dbe); | ||
2478 | |||
2479 | set_except_vector(EXCCODE_SYS, handle_sys); | ||
2480 | set_except_vector(EXCCODE_BP, handle_bp); | ||
2481 | |||
2482 | if (rdhwr_noopt) | ||
2483 | set_except_vector(EXCCODE_RI, handle_ri); | ||
2484 | else { | ||
2485 | if (cpu_has_vtag_icache) | ||
2486 | set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); | ||
2487 | else if (current_cpu_type() == CPU_LOONGSON64) | ||
2488 | set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp); | ||
2489 | else | ||
2490 | set_except_vector(EXCCODE_RI, handle_ri_rdhwr); | ||
2491 | } | ||
2492 | |||
2493 | set_except_vector(EXCCODE_CPU, handle_cpu); | ||
2494 | set_except_vector(EXCCODE_OV, handle_ov); | ||
2495 | set_except_vector(EXCCODE_TR, handle_tr); | ||
2496 | set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe); | ||
2497 | |||
2498 | if (board_nmi_handler_setup) | ||
2499 | board_nmi_handler_setup(); | ||
2500 | |||
2501 | if (cpu_has_fpu && !cpu_has_nofpuex) | ||
2502 | set_except_vector(EXCCODE_FPE, handle_fpe); | ||
2503 | |||
2504 | if (cpu_has_ftlbparex) | ||
2505 | set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb); | ||
2506 | |||
2507 | if (cpu_has_gsexcex) | ||
2508 | set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc); | ||
2509 | |||
2510 | if (cpu_has_rixiex) { | ||
2511 | set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0); | ||
2512 | set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0); | ||
2513 | } | ||
2514 | |||
2515 | set_except_vector(EXCCODE_MSADIS, handle_msa); | ||
2516 | set_except_vector(EXCCODE_MDMX, handle_mdmx); | ||
2517 | |||
2518 | if (cpu_has_mcheck) | ||
2519 | set_except_vector(EXCCODE_MCHECK, handle_mcheck); | ||
2520 | |||
2521 | if (cpu_has_mipsmt) | ||
2522 | set_except_vector(EXCCODE_THREAD, handle_mt); | ||
2523 | |||
2524 | set_except_vector(EXCCODE_DSPDIS, handle_dsp); | ||
2525 | |||
2526 | if (board_cache_error_setup) | ||
2527 | board_cache_error_setup(); | ||
2528 | |||
2529 | if (cpu_has_vce) | ||
2530 | /* Special exception: R4[04]00 uses also the divec space. */ | ||
2531 | set_handler(0x180, &except_vec3_r4000, 0x100); | ||
2532 | else if (cpu_has_4kex) | ||
2533 | set_handler(0x180, &except_vec3_generic, 0x80); | ||
2534 | else | ||
2535 | set_handler(0x080, &except_vec3_generic, 0x80); | ||
2536 | |||
2537 | local_flush_icache_range(ebase, ebase + vec_size); | ||
2538 | |||
2539 | sort_extable(__start___dbe_table, __stop___dbe_table); | ||
2540 | |||
2541 | cu2_notifier(default_cu2_call, 0x80000000); /* Run last */ | ||
2542 | } | ||
2543 | |||
2544 | static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd, | ||
2545 | void *v) | ||
2546 | { | ||
2547 | switch (cmd) { | ||
2548 | case CPU_PM_ENTER_FAILED: | ||
2549 | case CPU_PM_EXIT: | ||
2550 | configure_status(); | ||
2551 | configure_hwrena(); | ||
2552 | configure_exception_vector(); | ||
2553 | |||
2554 | /* Restore register with CPU number for TLB handlers */ | ||
2555 | TLBMISS_HANDLER_RESTORE(); | ||
2556 | |||
2557 | break; | ||
2558 | } | ||
2559 | |||
2560 | return NOTIFY_OK; | ||
2561 | } | ||
2562 | |||
2563 | static struct notifier_block trap_pm_notifier_block = { | ||
2564 | .notifier_call = trap_pm_notifier, | ||
2565 | }; | ||
2566 | |||
2567 | static int __init trap_pm_init(void) | ||
2568 | { | ||
2569 | return cpu_pm_register_notifier(&trap_pm_notifier_block); | ||
2570 | } | ||
2571 | arch_initcall(trap_pm_init); | ||
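trap_init() registers default_cu2_call at the lowest priority ("run last"), so platform code that actually owns COP2 can hook the chain ahead of it; the unaligned-access handler in the next file relies on exactly this for LWC2/SWC2/LDC2/SDC2. A hedged sketch of such a hook, assuming the cu2_notifier() helper and the CU2_* ops from <asm/cop2.h>; the handler body is illustrative:

    #include <linux/init.h>
    #include <linux/notifier.h>
    #include <asm/cop2.h>

    static int my_cu2_call(struct notifier_block *nb, unsigned long action,
                           void *data)
    {
            switch (action) {
            case CU2_LWC2_OP:
                    /* a real handler would emulate the unaligned LWC2 here */
                    return NOTIFY_STOP;     /* handled; default_cu2_call is skipped */
            default:
                    return NOTIFY_OK;       /* pass to the next notifier */
            }
    }

    static int __init my_cu2_setup(void)
    {
            /* priority 0 sorts ahead of the 0x80000000 (INT_MIN) default */
            return cu2_notifier(my_cu2_call, 0);
    }
    arch_initcall(my_cu2_setup);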
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c new file mode 100644 index 000000000..126a5f3f4 --- /dev/null +++ b/arch/mips/kernel/unaligned.c | |||
@@ -0,0 +1,1610 @@ | |||
1 | /* | ||
2 | * Handle unaligned accesses by emulation. | ||
3 | * | ||
4 | * This file is subject to the terms and conditions of the GNU General Public | ||
5 | * License. See the file "COPYING" in the main directory of this archive | ||
6 | * for more details. | ||
7 | * | ||
8 | * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle | ||
9 | * Copyright (C) 1999 Silicon Graphics, Inc. | ||
10 | * Copyright (C) 2014 Imagination Technologies Ltd. | ||
11 | * | ||
12 | * This file contains the exception handler for the address error exception, | ||
13 | * with the special capability to execute faulting instructions in software. The | ||
14 | * handler does not try to handle the case when the program counter points | ||
15 | * to an address not aligned to a word boundary. | ||
16 | * | ||
17 | * Putting data at unaligned addresses is bad practice even on Intel, where | ||
18 | * only the performance is affected. Much worse is that such code is | ||
19 | * non-portable. Because several programs die on MIPS due to alignment | ||
20 | * problems, I decided to implement this handler anyway, though I originally | ||
21 | * didn't intend to do this at all for user code. | ||
22 | * | ||
23 | * For now I enable fixing of address errors by default to make life easier. | ||
24 | * However, I intend to disable this at some point in the future when the alignment | ||
25 | * problems with user programs have been fixed. For programmers this is the | ||
26 | * right way to go. | ||
27 | * | ||
28 | * Fixing address errors is a per-process option. The option is inherited | ||
29 | * across fork(2) and execve(2) calls. If you really want to use the | ||
30 | * option in your user programs - I strongly discourage the use of the | ||
31 | * software emulation - use the following code in your userland stuff: | ||
32 | * | ||
33 | * #include <sys/sysmips.h> | ||
34 | * | ||
35 | * ... | ||
36 | * sysmips(MIPS_FIXADE, x); | ||
37 | * ... | ||
38 | * | ||
39 | * The argument x is 0 to disable software emulation; any other value enables it. | ||
40 | * | ||
41 | * Below is a little program to play around with this feature. | ||
42 | * | ||
43 | * #include <stdio.h> | ||
44 | * #include <sys/sysmips.h> | ||
45 | * | ||
46 | * struct foo { | ||
47 | * unsigned char bar[8]; | ||
48 | * }; | ||
49 | * | ||
50 | * int main(int argc, char *argv[]) | ||
51 | * { | ||
52 | * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7}; | ||
53 | * unsigned int *p = (unsigned int *) (x.bar + 3); | ||
54 | * int i; | ||
55 | * | ||
56 | * if (argc > 1) | ||
57 | * sysmips(MIPS_FIXADE, atoi(argv[1])); | ||
58 | * | ||
59 | * printf("*p = %08x\n", *p); | ||
60 | * | ||
61 | * *p = 0xdeadface; | ||
62 | * | ||
63 | * for(i = 0; i <= 7; i++) | ||
64 | * printf("%02x ", x.bar[i]); | ||
65 | * printf("\n"); | ||
66 | * } | ||
67 | * | ||
68 | * Coprocessor loads are not supported; I think this case is unimportant | ||
69 | * in practice. | ||
70 | * | ||
71 | * TODO: Handle ndc (attempted store to doubleword in uncached memory) | ||
72 | * exception for the R6000. | ||
73 | * A store crossing a page boundary might be executed only partially. | ||
74 | * Undo the partial store in this case. | ||
75 | */ | ||
76 | #include <linux/context_tracking.h> | ||
77 | #include <linux/mm.h> | ||
78 | #include <linux/signal.h> | ||
79 | #include <linux/smp.h> | ||
80 | #include <linux/sched.h> | ||
81 | #include <linux/debugfs.h> | ||
82 | #include <linux/perf_event.h> | ||
83 | |||
84 | #include <asm/asm.h> | ||
85 | #include <asm/branch.h> | ||
86 | #include <asm/byteorder.h> | ||
87 | #include <asm/cop2.h> | ||
88 | #include <asm/debug.h> | ||
89 | #include <asm/fpu.h> | ||
90 | #include <asm/fpu_emulator.h> | ||
91 | #include <asm/inst.h> | ||
92 | #include <asm/unaligned-emul.h> | ||
93 | #include <asm/mmu_context.h> | ||
94 | #include <linux/uaccess.h> | ||
95 | |||
96 | enum { | ||
97 | UNALIGNED_ACTION_QUIET, | ||
98 | UNALIGNED_ACTION_SIGNAL, | ||
99 | UNALIGNED_ACTION_SHOW, | ||
100 | }; | ||
101 | #ifdef CONFIG_DEBUG_FS | ||
102 | static u32 unaligned_instructions; | ||
103 | static u32 unaligned_action; | ||
104 | #else | ||
105 | #define unaligned_action UNALIGNED_ACTION_QUIET | ||
106 | #endif | ||
107 | extern void show_registers(struct pt_regs *regs); | ||
108 | |||
109 | static void emulate_load_store_insn(struct pt_regs *regs, | ||
110 | void __user *addr, unsigned int __user *pc) | ||
111 | { | ||
112 | unsigned long origpc, orig31, value; | ||
113 | union mips_instruction insn; | ||
114 | unsigned int res; | ||
115 | #ifdef CONFIG_EVA | ||
116 | mm_segment_t seg; | ||
117 | #endif | ||
118 | origpc = (unsigned long)pc; | ||
119 | orig31 = regs->regs[31]; | ||
120 | |||
121 | perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0); | ||
122 | |||
123 | /* | ||
124 | * This load never faults. | ||
125 | */ | ||
126 | __get_user(insn.word, pc); | ||
127 | |||
128 | switch (insn.i_format.opcode) { | ||
129 | /* | ||
130 | * These are instructions that a compiler doesn't generate. We | ||
131 | * can assume therefore that the code is MIPS-aware and | ||
132 | * really buggy. Emulating these instructions would break the | ||
133 | * semantics anyway. | ||
134 | */ | ||
135 | case ll_op: | ||
136 | case lld_op: | ||
137 | case sc_op: | ||
138 | case scd_op: | ||
139 | |||
140 | /* | ||
141 | * For these instructions the only way to create an address | ||
142 | * error is an attempted access to kernel/supervisor address | ||
143 | * space. | ||
144 | */ | ||
145 | case ldl_op: | ||
146 | case ldr_op: | ||
147 | case lwl_op: | ||
148 | case lwr_op: | ||
149 | case sdl_op: | ||
150 | case sdr_op: | ||
151 | case swl_op: | ||
152 | case swr_op: | ||
153 | case lb_op: | ||
154 | case lbu_op: | ||
155 | case sb_op: | ||
156 | goto sigbus; | ||
157 | |||
158 | /* | ||
159 | * The remaining opcodes are the ones that are really of | ||
160 | * interest. | ||
161 | */ | ||
162 | case spec3_op: | ||
163 | if (insn.dsp_format.func == lx_op) { | ||
164 | switch (insn.dsp_format.op) { | ||
165 | case lwx_op: | ||
166 | if (!access_ok(addr, 4)) | ||
167 | goto sigbus; | ||
168 | LoadW(addr, value, res); | ||
169 | if (res) | ||
170 | goto fault; | ||
171 | compute_return_epc(regs); | ||
172 | regs->regs[insn.dsp_format.rd] = value; | ||
173 | break; | ||
174 | case lhx_op: | ||
175 | if (!access_ok(addr, 2)) | ||
176 | goto sigbus; | ||
177 | LoadHW(addr, value, res); | ||
178 | if (res) | ||
179 | goto fault; | ||
180 | compute_return_epc(regs); | ||
181 | regs->regs[insn.dsp_format.rd] = value; | ||
182 | break; | ||
183 | default: | ||
184 | goto sigill; | ||
185 | } | ||
186 | } | ||
187 | #ifdef CONFIG_EVA | ||
188 | else { | ||
189 | /* | ||
190 | * We can land here only from the kernel accessing user | ||
191 | * memory, so we need to "switch" the address limit to | ||
192 | * user space so that the address check works properly. | ||
193 | */ | ||
194 | seg = force_uaccess_begin(); | ||
195 | switch (insn.spec3_format.func) { | ||
196 | case lhe_op: | ||
197 | if (!access_ok(addr, 2)) { | ||
198 | force_uaccess_end(seg); | ||
199 | goto sigbus; | ||
200 | } | ||
201 | LoadHWE(addr, value, res); | ||
202 | if (res) { | ||
203 | force_uaccess_end(seg); | ||
204 | goto fault; | ||
205 | } | ||
206 | compute_return_epc(regs); | ||
207 | regs->regs[insn.spec3_format.rt] = value; | ||
208 | break; | ||
209 | case lwe_op: | ||
210 | if (!access_ok(addr, 4)) { | ||
211 | force_uaccess_end(seg); | ||
212 | goto sigbus; | ||
213 | } | ||
214 | LoadWE(addr, value, res); | ||
215 | if (res) { | ||
216 | force_uaccess_end(seg); | ||
217 | goto fault; | ||
218 | } | ||
219 | compute_return_epc(regs); | ||
220 | regs->regs[insn.spec3_format.rt] = value; | ||
221 | break; | ||
222 | case lhue_op: | ||
223 | if (!access_ok(addr, 2)) { | ||
224 | force_uaccess_end(seg); | ||
225 | goto sigbus; | ||
226 | } | ||
227 | LoadHWUE(addr, value, res); | ||
228 | if (res) { | ||
229 | force_uaccess_end(seg); | ||
230 | goto fault; | ||
231 | } | ||
232 | compute_return_epc(regs); | ||
233 | regs->regs[insn.spec3_format.rt] = value; | ||
234 | break; | ||
235 | case she_op: | ||
236 | if (!access_ok(addr, 2)) { | ||
237 | force_uaccess_end(seg); | ||
238 | goto sigbus; | ||
239 | } | ||
240 | compute_return_epc(regs); | ||
241 | value = regs->regs[insn.spec3_format.rt]; | ||
242 | StoreHWE(addr, value, res); | ||
243 | if (res) { | ||
244 | force_uaccess_end(seg); | ||
245 | goto fault; | ||
246 | } | ||
247 | break; | ||
248 | case swe_op: | ||
249 | if (!access_ok(addr, 4)) { | ||
250 | force_uaccess_end(seg); | ||
251 | goto sigbus; | ||
252 | } | ||
253 | compute_return_epc(regs); | ||
254 | value = regs->regs[insn.spec3_format.rt]; | ||
255 | StoreWE(addr, value, res); | ||
256 | if (res) { | ||
257 | force_uaccess_end(seg); | ||
258 | goto fault; | ||
259 | } | ||
260 | break; | ||
261 | default: | ||
262 | force_uaccess_end(seg); | ||
263 | goto sigill; | ||
264 | } | ||
265 | force_uaccess_end(seg); | ||
266 | } | ||
267 | #endif | ||
268 | break; | ||
269 | case lh_op: | ||
270 | if (!access_ok(addr, 2)) | ||
271 | goto sigbus; | ||
272 | |||
273 | if (IS_ENABLED(CONFIG_EVA)) { | ||
274 | if (uaccess_kernel()) | ||
275 | LoadHW(addr, value, res); | ||
276 | else | ||
277 | LoadHWE(addr, value, res); | ||
278 | } else { | ||
279 | LoadHW(addr, value, res); | ||
280 | } | ||
281 | |||
282 | if (res) | ||
283 | goto fault; | ||
284 | compute_return_epc(regs); | ||
285 | regs->regs[insn.i_format.rt] = value; | ||
286 | break; | ||
287 | |||
288 | case lw_op: | ||
289 | if (!access_ok(addr, 4)) | ||
290 | goto sigbus; | ||
291 | |||
292 | if (IS_ENABLED(CONFIG_EVA)) { | ||
293 | if (uaccess_kernel()) | ||
294 | LoadW(addr, value, res); | ||
295 | else | ||
296 | LoadWE(addr, value, res); | ||
297 | } else { | ||
298 | LoadW(addr, value, res); | ||
299 | } | ||
300 | |||
301 | if (res) | ||
302 | goto fault; | ||
303 | compute_return_epc(regs); | ||
304 | regs->regs[insn.i_format.rt] = value; | ||
305 | break; | ||
306 | |||
307 | case lhu_op: | ||
308 | if (!access_ok(addr, 2)) | ||
309 | goto sigbus; | ||
310 | |||
311 | if (IS_ENABLED(CONFIG_EVA)) { | ||
312 | if (uaccess_kernel()) | ||
313 | LoadHWU(addr, value, res); | ||
314 | else | ||
315 | LoadHWUE(addr, value, res); | ||
316 | } else { | ||
317 | LoadHWU(addr, value, res); | ||
318 | } | ||
319 | |||
320 | if (res) | ||
321 | goto fault; | ||
322 | compute_return_epc(regs); | ||
323 | regs->regs[insn.i_format.rt] = value; | ||
324 | break; | ||
325 | |||
326 | case lwu_op: | ||
327 | #ifdef CONFIG_64BIT | ||
328 | /* | ||
329 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
330 | * if we're on a 32-bit processor and an i-cache incoherency | ||
331 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
332 | * would blow up, so for now we don't handle unaligned 64-bit | ||
333 | * instructions on 32-bit kernels. | ||
334 | */ | ||
335 | if (!access_ok(addr, 4)) | ||
336 | goto sigbus; | ||
337 | |||
338 | LoadWU(addr, value, res); | ||
339 | if (res) | ||
340 | goto fault; | ||
341 | compute_return_epc(regs); | ||
342 | regs->regs[insn.i_format.rt] = value; | ||
343 | break; | ||
344 | #endif /* CONFIG_64BIT */ | ||
345 | |||
346 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
347 | goto sigill; | ||
348 | |||
349 | case ld_op: | ||
350 | #ifdef CONFIG_64BIT | ||
351 | /* | ||
352 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
353 | * if we're on a 32-bit processor and an i-cache incoherency | ||
354 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
355 | * would blow up, so for now we don't handle unaligned 64-bit | ||
356 | * instructions on 32-bit kernels. | ||
357 | */ | ||
358 | if (!access_ok(addr, 8)) | ||
359 | goto sigbus; | ||
360 | |||
361 | LoadDW(addr, value, res); | ||
362 | if (res) | ||
363 | goto fault; | ||
364 | compute_return_epc(regs); | ||
365 | regs->regs[insn.i_format.rt] = value; | ||
366 | break; | ||
367 | #endif /* CONFIG_64BIT */ | ||
368 | |||
369 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
370 | goto sigill; | ||
371 | |||
372 | case sh_op: | ||
373 | if (!access_ok(addr, 2)) | ||
374 | goto sigbus; | ||
375 | |||
376 | compute_return_epc(regs); | ||
377 | value = regs->regs[insn.i_format.rt]; | ||
378 | |||
379 | if (IS_ENABLED(CONFIG_EVA)) { | ||
380 | if (uaccess_kernel()) | ||
381 | StoreHW(addr, value, res); | ||
382 | else | ||
383 | StoreHWE(addr, value, res); | ||
384 | } else { | ||
385 | StoreHW(addr, value, res); | ||
386 | } | ||
387 | |||
388 | if (res) | ||
389 | goto fault; | ||
390 | break; | ||
391 | |||
392 | case sw_op: | ||
393 | if (!access_ok(addr, 4)) | ||
394 | goto sigbus; | ||
395 | |||
396 | compute_return_epc(regs); | ||
397 | value = regs->regs[insn.i_format.rt]; | ||
398 | |||
399 | if (IS_ENABLED(CONFIG_EVA)) { | ||
400 | if (uaccess_kernel()) | ||
401 | StoreW(addr, value, res); | ||
402 | else | ||
403 | StoreWE(addr, value, res); | ||
404 | } else { | ||
405 | StoreW(addr, value, res); | ||
406 | } | ||
407 | |||
408 | if (res) | ||
409 | goto fault; | ||
410 | break; | ||
411 | |||
412 | case sd_op: | ||
413 | #ifdef CONFIG_64BIT | ||
414 | /* | ||
415 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
416 | * if we're on a 32-bit processor and an i-cache incoherency | ||
417 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
418 | * would blow up, so for now we don't handle unaligned 64-bit | ||
419 | * instructions on 32-bit kernels. | ||
420 | */ | ||
421 | if (!access_ok(addr, 8)) | ||
422 | goto sigbus; | ||
423 | |||
424 | compute_return_epc(regs); | ||
425 | value = regs->regs[insn.i_format.rt]; | ||
426 | StoreDW(addr, value, res); | ||
427 | if (res) | ||
428 | goto fault; | ||
429 | break; | ||
430 | #endif /* CONFIG_64BIT */ | ||
431 | |||
432 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
433 | goto sigill; | ||
434 | |||
435 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
436 | |||
437 | case lwc1_op: | ||
438 | case ldc1_op: | ||
439 | case swc1_op: | ||
440 | case sdc1_op: | ||
441 | case cop1x_op: { | ||
442 | void __user *fault_addr = NULL; | ||
443 | |||
444 | die_if_kernel("Unaligned FP access in kernel code", regs); | ||
445 | BUG_ON(!used_math()); | ||
446 | |||
447 | res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, | ||
448 | &fault_addr); | ||
449 | own_fpu(1); /* Restore FPU state. */ | ||
450 | |||
451 | /* Signal if something went wrong. */ | ||
452 | process_fpemu_return(res, fault_addr, 0); | ||
453 | |||
454 | if (res == 0) | ||
455 | break; | ||
456 | return; | ||
457 | } | ||
458 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
459 | |||
460 | #ifdef CONFIG_CPU_HAS_MSA | ||
461 | |||
462 | case msa_op: { | ||
463 | unsigned int wd, preempted; | ||
464 | enum msa_2b_fmt df; | ||
465 | union fpureg *fpr; | ||
466 | |||
467 | if (!cpu_has_msa) | ||
468 | goto sigill; | ||
469 | |||
470 | /* | ||
471 | * If we've reached this point then userland should have taken | ||
472 | * the MSA disabled exception & initialised vector context at | ||
473 | * some point in the past. | ||
474 | */ | ||
475 | BUG_ON(!thread_msa_context_live()); | ||
476 | |||
477 | df = insn.msa_mi10_format.df; | ||
478 | wd = insn.msa_mi10_format.wd; | ||
479 | fpr = ¤t->thread.fpu.fpr[wd]; | ||
480 | |||
481 | switch (insn.msa_mi10_format.func) { | ||
482 | case msa_ld_op: | ||
483 | if (!access_ok(addr, sizeof(*fpr))) | ||
484 | goto sigbus; | ||
485 | |||
486 | do { | ||
487 | /* | ||
488 | * If we have live MSA context keep track of | ||
489 | * whether we get preempted in order to avoid | ||
490 | * the register context we load being clobbered | ||
491 | * by the live context as it's saved during | ||
492 | * preemption. If we don't have live context | ||
493 | * then it can't be saved to clobber the value | ||
494 | * we load. | ||
495 | */ | ||
496 | preempted = test_thread_flag(TIF_USEDMSA); | ||
497 | |||
498 | res = __copy_from_user_inatomic(fpr, addr, | ||
499 | sizeof(*fpr)); | ||
500 | if (res) | ||
501 | goto fault; | ||
502 | |||
503 | /* | ||
504 | * Update the hardware register if it is in use | ||
505 | * by the task in this quantum, in order to | ||
506 | * avoid having to save & restore the whole | ||
507 | * vector context. | ||
508 | */ | ||
509 | preempt_disable(); | ||
510 | if (test_thread_flag(TIF_USEDMSA)) { | ||
511 | write_msa_wr(wd, fpr, df); | ||
512 | preempted = 0; | ||
513 | } | ||
514 | preempt_enable(); | ||
515 | } while (preempted); | ||
516 | break; | ||
517 | |||
518 | case msa_st_op: | ||
519 | if (!access_ok(addr, sizeof(*fpr))) | ||
520 | goto sigbus; | ||
521 | |||
522 | /* | ||
523 | * Update from the hardware register if it is in use by | ||
524 | * the task in this quantum, in order to avoid having to | ||
525 | * save & restore the whole vector context. | ||
526 | */ | ||
527 | preempt_disable(); | ||
528 | if (test_thread_flag(TIF_USEDMSA)) | ||
529 | read_msa_wr(wd, fpr, df); | ||
530 | preempt_enable(); | ||
531 | |||
532 | res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr)); | ||
533 | if (res) | ||
534 | goto fault; | ||
535 | break; | ||
536 | |||
537 | default: | ||
538 | goto sigbus; | ||
539 | } | ||
540 | |||
541 | compute_return_epc(regs); | ||
542 | break; | ||
543 | } | ||
544 | #endif /* CONFIG_CPU_HAS_MSA */ | ||
545 | |||
546 | #ifndef CONFIG_CPU_MIPSR6 | ||
547 | /* | ||
548 | * COP2 is available to the implementor for application-specific use. | ||
549 | * It's up to applications to register a notifier chain and do | ||
550 | * whatever they have to do, including possible sending of signals. | ||
551 | * | ||
552 | * This instruction has been reallocated in Release 6 | ||
553 | */ | ||
554 | case lwc2_op: | ||
555 | cu2_notifier_call_chain(CU2_LWC2_OP, regs); | ||
556 | break; | ||
557 | |||
558 | case ldc2_op: | ||
559 | cu2_notifier_call_chain(CU2_LDC2_OP, regs); | ||
560 | break; | ||
561 | |||
562 | case swc2_op: | ||
563 | cu2_notifier_call_chain(CU2_SWC2_OP, regs); | ||
564 | break; | ||
565 | |||
566 | case sdc2_op: | ||
567 | cu2_notifier_call_chain(CU2_SDC2_OP, regs); | ||
568 | break; | ||
569 | #endif | ||
570 | default: | ||
571 | /* | ||
572 | * Pheeee... We encountered a yet unknown instruction or | ||
573 | * cache coherence problem. Die sucker, die ... | ||
574 | */ | ||
575 | goto sigill; | ||
576 | } | ||
577 | |||
578 | #ifdef CONFIG_DEBUG_FS | ||
579 | unaligned_instructions++; | ||
580 | #endif | ||
581 | |||
582 | return; | ||
583 | |||
584 | fault: | ||
585 | /* roll back jump/branch */ | ||
586 | regs->cp0_epc = origpc; | ||
587 | regs->regs[31] = orig31; | ||
588 | /* Did we have an exception handler installed? */ | ||
589 | if (fixup_exception(regs)) | ||
590 | return; | ||
591 | |||
592 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
593 | force_sig(SIGSEGV); | ||
594 | |||
595 | return; | ||
596 | |||
597 | sigbus: | ||
598 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
599 | force_sig(SIGBUS); | ||
600 | |||
601 | return; | ||
602 | |||
603 | sigill: | ||
604 | die_if_kernel | ||
605 | ("Unhandled kernel unaligned access or invalid instruction", regs); | ||
606 | force_sig(SIGILL); | ||
607 | } | ||
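The LoadW()/StoreW()-style macros used throughout this function come from <asm/unaligned-emul.h> and expand to lwl/lwr pairs (byte accesses on R6) wrapped in exception-table fixups that set res on a fault. A portable sketch of what LoadW conceptually computes for a big-endian CPU; fault reporting and endianness selection are elided:

    static int load_w_sketch(const unsigned char *addr, unsigned long *value)
    {
            unsigned int v = 0;
            int i;

            for (i = 0; i < 4; i++)
                    v = (v << 8) | addr[i];     /* big-endian byte assembly */
            *value = (long)(int)v;              /* LW sign-extends on 64-bit */
            return 0;                           /* the real macro reports faults here */
    }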
608 | |||
609 | /* Recode table from 16-bit register notation to 32-bit GPR. */ | ||
610 | const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 }; | ||
611 | |||
612 | /* Recode table from 16-bit STORE register notation to 32-bit GPR. */ | ||
613 | static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 }; | ||
614 | |||
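The two tables differ only in slot 0: the load encoding maps an rt field of 0 to $16 (s0), while the store encoding reuses slot 0 for $0 so a 16-bit store can write zero to memory. A standalone, hedged illustration using o32 register numbering:

    #include <stdio.h>

    static const int load_map[]  = { 16, 17, 2, 3, 4, 5, 6, 7 };
    static const int store_map[] = {  0, 17, 2, 3, 4, 5, 6, 7 };

    int main(void)
    {
            /* rt field 0: loads target $16 (s0), stores read $0 (zero) */
            printf("rt=0: load -> $%d, store -> $%d\n",
                   load_map[0], store_map[0]);
            return 0;
    }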
615 | static void emulate_load_store_microMIPS(struct pt_regs *regs, | ||
616 | void __user *addr) | ||
617 | { | ||
618 | unsigned long value; | ||
619 | unsigned int res; | ||
620 | int i; | ||
621 | unsigned int reg = 0, rvar; | ||
622 | unsigned long orig31; | ||
623 | u16 __user *pc16; | ||
624 | u16 halfword; | ||
625 | unsigned int word; | ||
626 | unsigned long origpc, contpc; | ||
627 | union mips_instruction insn; | ||
628 | struct mm_decoded_insn mminsn; | ||
629 | |||
630 | origpc = regs->cp0_epc; | ||
631 | orig31 = regs->regs[31]; | ||
632 | |||
633 | mminsn.micro_mips_mode = 1; | ||
634 | |||
635 | /* | ||
636 | * This load never faults. | ||
637 | */ | ||
638 | pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc); | ||
639 | __get_user(halfword, pc16); | ||
640 | pc16++; | ||
641 | contpc = regs->cp0_epc + 2; | ||
642 | word = ((unsigned int)halfword << 16); | ||
643 | mminsn.pc_inc = 2; | ||
644 | |||
645 | if (!mm_insn_16bit(halfword)) { | ||
646 | __get_user(halfword, pc16); | ||
647 | pc16++; | ||
648 | contpc = regs->cp0_epc + 4; | ||
649 | mminsn.pc_inc = 4; | ||
650 | word |= halfword; | ||
651 | } | ||
652 | mminsn.insn = word; | ||
653 | |||
654 | if (get_user(halfword, pc16)) | ||
655 | goto fault; | ||
656 | mminsn.next_pc_inc = 2; | ||
657 | word = ((unsigned int)halfword << 16); | ||
658 | |||
659 | if (!mm_insn_16bit(halfword)) { | ||
660 | pc16++; | ||
661 | if (get_user(halfword, pc16)) | ||
662 | goto fault; | ||
663 | mminsn.next_pc_inc = 4; | ||
664 | word |= halfword; | ||
665 | } | ||
666 | mminsn.next_insn = word; | ||
667 | |||
668 | insn = (union mips_instruction)(mminsn.insn); | ||
669 | if (mm_isBranchInstr(regs, mminsn, &contpc)) | ||
670 | insn = (union mips_instruction)(mminsn.next_insn); | ||
671 | |||
672 | /* Parse instruction to find what to do */ | ||
673 | |||
674 | switch (insn.mm_i_format.opcode) { | ||
675 | |||
676 | case mm_pool32a_op: | ||
677 | switch (insn.mm_x_format.func) { | ||
678 | case mm_lwxs_op: | ||
679 | reg = insn.mm_x_format.rd; | ||
680 | goto loadW; | ||
681 | } | ||
682 | |||
683 | goto sigbus; | ||
684 | |||
685 | case mm_pool32b_op: | ||
686 | switch (insn.mm_m_format.func) { | ||
687 | case mm_lwp_func: | ||
688 | reg = insn.mm_m_format.rd; | ||
689 | if (reg == 31) | ||
690 | goto sigbus; | ||
691 | |||
692 | if (!access_ok(addr, 8)) | ||
693 | goto sigbus; | ||
694 | |||
695 | LoadW(addr, value, res); | ||
696 | if (res) | ||
697 | goto fault; | ||
698 | regs->regs[reg] = value; | ||
699 | addr += 4; | ||
700 | LoadW(addr, value, res); | ||
701 | if (res) | ||
702 | goto fault; | ||
703 | regs->regs[reg + 1] = value; | ||
704 | goto success; | ||
705 | |||
706 | case mm_swp_func: | ||
707 | reg = insn.mm_m_format.rd; | ||
708 | if (reg == 31) | ||
709 | goto sigbus; | ||
710 | |||
711 | if (!access_ok(addr, 8)) | ||
712 | goto sigbus; | ||
713 | |||
714 | value = regs->regs[reg]; | ||
715 | StoreW(addr, value, res); | ||
716 | if (res) | ||
717 | goto fault; | ||
718 | addr += 4; | ||
719 | value = regs->regs[reg + 1]; | ||
720 | StoreW(addr, value, res); | ||
721 | if (res) | ||
722 | goto fault; | ||
723 | goto success; | ||
724 | |||
725 | case mm_ldp_func: | ||
726 | #ifdef CONFIG_64BIT | ||
727 | reg = insn.mm_m_format.rd; | ||
728 | if (reg == 31) | ||
729 | goto sigbus; | ||
730 | |||
731 | if (!access_ok(addr, 16)) | ||
732 | goto sigbus; | ||
733 | |||
734 | LoadDW(addr, value, res); | ||
735 | if (res) | ||
736 | goto fault; | ||
737 | regs->regs[reg] = value; | ||
738 | addr += 8; | ||
739 | LoadDW(addr, value, res); | ||
740 | if (res) | ||
741 | goto fault; | ||
742 | regs->regs[reg + 1] = value; | ||
743 | goto success; | ||
744 | #endif /* CONFIG_64BIT */ | ||
745 | |||
746 | goto sigill; | ||
747 | |||
748 | case mm_sdp_func: | ||
749 | #ifdef CONFIG_64BIT | ||
750 | reg = insn.mm_m_format.rd; | ||
751 | if (reg == 31) | ||
752 | goto sigbus; | ||
753 | |||
754 | if (!access_ok(addr, 16)) | ||
755 | goto sigbus; | ||
756 | |||
757 | value = regs->regs[reg]; | ||
758 | StoreDW(addr, value, res); | ||
759 | if (res) | ||
760 | goto fault; | ||
761 | addr += 8; | ||
762 | value = regs->regs[reg + 1]; | ||
763 | StoreDW(addr, value, res); | ||
764 | if (res) | ||
765 | goto fault; | ||
766 | goto success; | ||
767 | #endif /* CONFIG_64BIT */ | ||
768 | |||
769 | goto sigill; | ||
770 | |||
771 | case mm_lwm32_func: | ||
772 | reg = insn.mm_m_format.rd; | ||
773 | rvar = reg & 0xf; | ||
774 | if ((rvar > 9) || !reg) | ||
775 | goto sigill; | ||
776 | if (reg & 0x10) { | ||
777 | if (!access_ok(addr, 4 * (rvar + 1))) | ||
778 | goto sigbus; | ||
779 | } else { | ||
780 | if (!access_ok(addr, 4 * rvar)) | ||
781 | goto sigbus; | ||
782 | } | ||
783 | if (rvar == 9) | ||
784 | rvar = 8; | ||
785 | for (i = 16; rvar; rvar--, i++) { | ||
786 | LoadW(addr, value, res); | ||
787 | if (res) | ||
788 | goto fault; | ||
789 | addr += 4; | ||
790 | regs->regs[i] = value; | ||
791 | } | ||
792 | if ((reg & 0xf) == 9) { | ||
793 | LoadW(addr, value, res); | ||
794 | if (res) | ||
795 | goto fault; | ||
796 | addr += 4; | ||
797 | regs->regs[30] = value; | ||
798 | } | ||
799 | if (reg & 0x10) { | ||
800 | LoadW(addr, value, res); | ||
801 | if (res) | ||
802 | goto fault; | ||
803 | regs->regs[31] = value; | ||
804 | } | ||
805 | goto success; | ||
806 | |||
807 | case mm_swm32_func: | ||
808 | reg = insn.mm_m_format.rd; | ||
809 | rvar = reg & 0xf; | ||
810 | if ((rvar > 9) || !reg) | ||
811 | goto sigill; | ||
812 | if (reg & 0x10) { | ||
813 | if (!access_ok(addr, 4 * (rvar + 1))) | ||
814 | goto sigbus; | ||
815 | } else { | ||
816 | if (!access_ok(addr, 4 * rvar)) | ||
817 | goto sigbus; | ||
818 | } | ||
819 | if (rvar == 9) | ||
820 | rvar = 8; | ||
821 | for (i = 16; rvar; rvar--, i++) { | ||
822 | value = regs->regs[i]; | ||
823 | StoreW(addr, value, res); | ||
824 | if (res) | ||
825 | goto fault; | ||
826 | addr += 4; | ||
827 | } | ||
828 | if ((reg & 0xf) == 9) { | ||
829 | value = regs->regs[30]; | ||
830 | StoreW(addr, value, res); | ||
831 | if (res) | ||
832 | goto fault; | ||
833 | addr += 4; | ||
834 | } | ||
835 | if (reg & 0x10) { | ||
836 | value = regs->regs[31]; | ||
837 | StoreW(addr, value, res); | ||
838 | if (res) | ||
839 | goto fault; | ||
840 | } | ||
841 | goto success; | ||
842 | |||
843 | case mm_ldm_func: | ||
844 | #ifdef CONFIG_64BIT | ||
845 | reg = insn.mm_m_format.rd; | ||
846 | rvar = reg & 0xf; | ||
847 | if ((rvar > 9) || !reg) | ||
848 | goto sigill; | ||
849 | if (reg & 0x10) { | ||
850 | if (!access_ok(addr, 8 * (rvar + 1))) | ||
851 | goto sigbus; | ||
852 | } else { | ||
853 | if (!access_ok(addr, 8 * rvar)) | ||
854 | goto sigbus; | ||
855 | } | ||
856 | if (rvar == 9) | ||
857 | rvar = 8; | ||
858 | |||
859 | for (i = 16; rvar; rvar--, i++) { | ||
860 | LoadDW(addr, value, res); | ||
861 | if (res) | ||
862 | goto fault; | ||
863 | addr += 4; | ||
864 | regs->regs[i] = value; | ||
865 | } | ||
866 | if ((reg & 0xf) == 9) { | ||
867 | LoadDW(addr, value, res); | ||
868 | if (res) | ||
869 | goto fault; | ||
870 | addr += 8; | ||
871 | regs->regs[30] = value; | ||
872 | } | ||
873 | if (reg & 0x10) { | ||
874 | LoadDW(addr, value, res); | ||
875 | if (res) | ||
876 | goto fault; | ||
877 | regs->regs[31] = value; | ||
878 | } | ||
879 | goto success; | ||
880 | #endif /* CONFIG_64BIT */ | ||
881 | |||
882 | goto sigill; | ||
883 | |||
884 | case mm_sdm_func: | ||
885 | #ifdef CONFIG_64BIT | ||
886 | reg = insn.mm_m_format.rd; | ||
887 | rvar = reg & 0xf; | ||
888 | if ((rvar > 9) || !reg) | ||
889 | goto sigill; | ||
890 | if (reg & 0x10) { | ||
891 | if (!access_ok(addr, 8 * (rvar + 1))) | ||
892 | goto sigbus; | ||
893 | } else { | ||
894 | if (!access_ok(addr, 8 * rvar)) | ||
895 | goto sigbus; | ||
896 | } | ||
897 | if (rvar == 9) | ||
898 | rvar = 8; | ||
899 | |||
900 | for (i = 16; rvar; rvar--, i++) { | ||
901 | value = regs->regs[i]; | ||
902 | StoreDW(addr, value, res); | ||
903 | if (res) | ||
904 | goto fault; | ||
905 | addr += 8; | ||
906 | } | ||
907 | if ((reg & 0xf) == 9) { | ||
908 | value = regs->regs[30]; | ||
909 | StoreDW(addr, value, res); | ||
910 | if (res) | ||
911 | goto fault; | ||
912 | addr += 8; | ||
913 | } | ||
914 | if (reg & 0x10) { | ||
915 | value = regs->regs[31]; | ||
916 | StoreDW(addr, value, res); | ||
917 | if (res) | ||
918 | goto fault; | ||
919 | } | ||
920 | goto success; | ||
921 | #endif /* CONFIG_64BIT */ | ||
922 | |||
923 | goto sigill; | ||
924 | |||
925 | /* LWC2, SWC2, LDC2, SDC2 are not serviced */ | ||
926 | } | ||
927 | |||
928 | goto sigbus; | ||
929 | |||
930 | case mm_pool32c_op: | ||
931 | switch (insn.mm_m_format.func) { | ||
932 | case mm_lwu_func: | ||
933 | reg = insn.mm_m_format.rd; | ||
934 | goto loadWU; | ||
935 | } | ||
936 | |||
937 | /* LL,SC,LLD,SCD are not serviced */ | ||
938 | goto sigbus; | ||
939 | |||
940 | #ifdef CONFIG_MIPS_FP_SUPPORT | ||
941 | case mm_pool32f_op: | ||
942 | switch (insn.mm_x_format.func) { | ||
943 | case mm_lwxc1_func: | ||
944 | case mm_swxc1_func: | ||
945 | case mm_ldxc1_func: | ||
946 | case mm_sdxc1_func: | ||
947 | goto fpu_emul; | ||
948 | } | ||
949 | |||
950 | goto sigbus; | ||
951 | |||
952 | case mm_ldc132_op: | ||
953 | case mm_sdc132_op: | ||
954 | case mm_lwc132_op: | ||
955 | case mm_swc132_op: { | ||
956 | void __user *fault_addr = NULL; | ||
957 | |||
958 | fpu_emul: | ||
959 | /* roll back jump/branch */ | ||
960 | regs->cp0_epc = origpc; | ||
961 | regs->regs[31] = orig31; | ||
962 | |||
963 | die_if_kernel("Unaligned FP access in kernel code", regs); | ||
964 | BUG_ON(!used_math()); | ||
965 | BUG_ON(!is_fpu_owner()); | ||
966 | |||
967 | res = fpu_emulator_cop1Handler(regs, ¤t->thread.fpu, 1, | ||
968 | &fault_addr); | ||
969 | own_fpu(1); /* restore FPU state */ | ||
970 | |||
971 | /* If something went wrong, signal */ | ||
972 | process_fpemu_return(res, fault_addr, 0); | ||
973 | |||
974 | if (res == 0) | ||
975 | goto success; | ||
976 | return; | ||
977 | } | ||
978 | #endif /* CONFIG_MIPS_FP_SUPPORT */ | ||
979 | |||
980 | case mm_lh32_op: | ||
981 | reg = insn.mm_i_format.rt; | ||
982 | goto loadHW; | ||
983 | |||
984 | case mm_lhu32_op: | ||
985 | reg = insn.mm_i_format.rt; | ||
986 | goto loadHWU; | ||
987 | |||
988 | case mm_lw32_op: | ||
989 | reg = insn.mm_i_format.rt; | ||
990 | goto loadW; | ||
991 | |||
992 | case mm_sh32_op: | ||
993 | reg = insn.mm_i_format.rt; | ||
994 | goto storeHW; | ||
995 | |||
996 | case mm_sw32_op: | ||
997 | reg = insn.mm_i_format.rt; | ||
998 | goto storeW; | ||
999 | |||
1000 | case mm_ld32_op: | ||
1001 | reg = insn.mm_i_format.rt; | ||
1002 | goto loadDW; | ||
1003 | |||
1004 | case mm_sd32_op: | ||
1005 | reg = insn.mm_i_format.rt; | ||
1006 | goto storeDW; | ||
1007 | |||
1008 | case mm_pool16c_op: | ||
1009 | switch (insn.mm16_m_format.func) { | ||
1010 | case mm_lwm16_op: | ||
1011 | reg = insn.mm16_m_format.rlist; | ||
1012 | rvar = reg + 1; | ||
1013 | if (!access_ok(addr, 4 * rvar)) | ||
1014 | goto sigbus; | ||
1015 | |||
1016 | for (i = 16; rvar; rvar--, i++) { | ||
1017 | LoadW(addr, value, res); | ||
1018 | if (res) | ||
1019 | goto fault; | ||
1020 | addr += 4; | ||
1021 | regs->regs[i] = value; | ||
1022 | } | ||
1023 | LoadW(addr, value, res); | ||
1024 | if (res) | ||
1025 | goto fault; | ||
1026 | regs->regs[31] = value; | ||
1027 | |||
1028 | goto success; | ||
1029 | |||
1030 | case mm_swm16_op: | ||
1031 | reg = insn.mm16_m_format.rlist; | ||
1032 | rvar = reg + 1; | ||
1033 | if (!access_ok(addr, 4 * rvar)) | ||
1034 | goto sigbus; | ||
1035 | |||
1036 | for (i = 16; rvar; rvar--, i++) { | ||
1037 | value = regs->regs[i]; | ||
1038 | StoreW(addr, value, res); | ||
1039 | if (res) | ||
1040 | goto fault; | ||
1041 | addr += 4; | ||
1042 | } | ||
1043 | value = regs->regs[31]; | ||
1044 | StoreW(addr, value, res); | ||
1045 | if (res) | ||
1046 | goto fault; | ||
1047 | |||
1048 | goto success; | ||
1049 | |||
1050 | } | ||
1051 | |||
1052 | goto sigbus; | ||
1053 | |||
1054 | case mm_lhu16_op: | ||
1055 | reg = reg16to32[insn.mm16_rb_format.rt]; | ||
1056 | goto loadHWU; | ||
1057 | |||
1058 | case mm_lw16_op: | ||
1059 | reg = reg16to32[insn.mm16_rb_format.rt]; | ||
1060 | goto loadW; | ||
1061 | |||
1062 | case mm_sh16_op: | ||
1063 | reg = reg16to32st[insn.mm16_rb_format.rt]; | ||
1064 | goto storeHW; | ||
1065 | |||
1066 | case mm_sw16_op: | ||
1067 | reg = reg16to32st[insn.mm16_rb_format.rt]; | ||
1068 | goto storeW; | ||
1069 | |||
1070 | case mm_lwsp16_op: | ||
1071 | reg = insn.mm16_r5_format.rt; | ||
1072 | goto loadW; | ||
1073 | |||
1074 | case mm_swsp16_op: | ||
1075 | reg = insn.mm16_r5_format.rt; | ||
1076 | goto storeW; | ||
1077 | |||
1078 | case mm_lwgp16_op: | ||
1079 | reg = reg16to32[insn.mm16_r3_format.rt]; | ||
1080 | goto loadW; | ||
1081 | |||
1082 | default: | ||
1083 | goto sigill; | ||
1084 | } | ||
1085 | |||
1086 | loadHW: | ||
1087 | if (!access_ok(addr, 2)) | ||
1088 | goto sigbus; | ||
1089 | |||
1090 | LoadHW(addr, value, res); | ||
1091 | if (res) | ||
1092 | goto fault; | ||
1093 | regs->regs[reg] = value; | ||
1094 | goto success; | ||
1095 | |||
1096 | loadHWU: | ||
1097 | if (!access_ok(addr, 2)) | ||
1098 | goto sigbus; | ||
1099 | |||
1100 | LoadHWU(addr, value, res); | ||
1101 | if (res) | ||
1102 | goto fault; | ||
1103 | regs->regs[reg] = value; | ||
1104 | goto success; | ||
1105 | |||
1106 | loadW: | ||
1107 | if (!access_ok(addr, 4)) | ||
1108 | goto sigbus; | ||
1109 | |||
1110 | LoadW(addr, value, res); | ||
1111 | if (res) | ||
1112 | goto fault; | ||
1113 | regs->regs[reg] = value; | ||
1114 | goto success; | ||
1115 | |||
1116 | loadWU: | ||
1117 | #ifdef CONFIG_64BIT | ||
1118 | /* | ||
1119 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1120 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1121 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
1122 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1123 | * instructions on 32-bit kernels. | ||
1124 | */ | ||
1125 | if (!access_ok(addr, 4)) | ||
1126 | goto sigbus; | ||
1127 | |||
1128 | LoadWU(addr, value, res); | ||
1129 | if (res) | ||
1130 | goto fault; | ||
1131 | regs->regs[reg] = value; | ||
1132 | goto success; | ||
1133 | #endif /* CONFIG_64BIT */ | ||
1134 | |||
1135 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1136 | goto sigill; | ||
1137 | |||
1138 | loadDW: | ||
1139 | #ifdef CONFIG_64BIT | ||
1140 | /* | ||
1141 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1142 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1143 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
1144 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1145 | * instructions on 32-bit kernels. | ||
1146 | */ | ||
1147 | if (!access_ok(addr, 8)) | ||
1148 | goto sigbus; | ||
1149 | |||
1150 | LoadDW(addr, value, res); | ||
1151 | if (res) | ||
1152 | goto fault; | ||
1153 | regs->regs[reg] = value; | ||
1154 | goto success; | ||
1155 | #endif /* CONFIG_64BIT */ | ||
1156 | |||
1157 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1158 | goto sigill; | ||
1159 | |||
1160 | storeHW: | ||
1161 | if (!access_ok(addr, 2)) | ||
1162 | goto sigbus; | ||
1163 | |||
1164 | value = regs->regs[reg]; | ||
1165 | StoreHW(addr, value, res); | ||
1166 | if (res) | ||
1167 | goto fault; | ||
1168 | goto success; | ||
1169 | |||
1170 | storeW: | ||
1171 | if (!access_ok(addr, 4)) | ||
1172 | goto sigbus; | ||
1173 | |||
1174 | value = regs->regs[reg]; | ||
1175 | StoreW(addr, value, res); | ||
1176 | if (res) | ||
1177 | goto fault; | ||
1178 | goto success; | ||
1179 | |||
1180 | storeDW: | ||
1181 | #ifdef CONFIG_64BIT | ||
1182 | /* | ||
1183 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1184 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1185 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
1186 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1187 | * instructions on 32-bit kernels. | ||
1188 | */ | ||
1189 | if (!access_ok(addr, 8)) | ||
1190 | goto sigbus; | ||
1191 | |||
1192 | value = regs->regs[reg]; | ||
1193 | StoreDW(addr, value, res); | ||
1194 | if (res) | ||
1195 | goto fault; | ||
1196 | goto success; | ||
1197 | #endif /* CONFIG_64BIT */ | ||
1198 | |||
1199 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1200 | goto sigill; | ||
1201 | |||
1202 | success: | ||
1203 | regs->cp0_epc = contpc; /* advance or branch */ | ||
1204 | |||
1205 | #ifdef CONFIG_DEBUG_FS | ||
1206 | unaligned_instructions++; | ||
1207 | #endif | ||
1208 | return; | ||
1209 | |||
1210 | fault: | ||
1211 | /* roll back jump/branch */ | ||
1212 | regs->cp0_epc = origpc; | ||
1213 | regs->regs[31] = orig31; | ||
1214 | /* Did we have an exception handler installed? */ | ||
1215 | if (fixup_exception(regs)) | ||
1216 | return; | ||
1217 | |||
1218 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
1219 | force_sig(SIGSEGV); | ||
1220 | |||
1221 | return; | ||
1222 | |||
1223 | sigbus: | ||
1224 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
1225 | force_sig(SIGBUS); | ||
1226 | |||
1227 | return; | ||
1228 | |||
1229 | sigill: | ||
1230 | die_if_kernel | ||
1231 | ("Unhandled kernel unaligned access or invalid instruction", regs); | ||
1232 | force_sig(SIGILL); | ||
1233 | } | ||
1234 | |||
1235 | static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user *addr) | ||
1236 | { | ||
1237 | unsigned long value; | ||
1238 | unsigned int res; | ||
1239 | int reg; | ||
1240 | unsigned long orig31; | ||
1241 | u16 __user *pc16; | ||
1242 | unsigned long origpc; | ||
1243 | union mips16e_instruction mips16inst, oldinst; | ||
1244 | unsigned int opcode; | ||
1245 | int extended = 0; | ||
1246 | |||
1247 | origpc = regs->cp0_epc; | ||
1248 | orig31 = regs->regs[31]; | ||
1249 | pc16 = (unsigned short __user *)msk_isa16_mode(origpc); | ||
1250 | /* | ||
1251 | * This load never faults. | ||
1252 | */ | ||
1253 | __get_user(mips16inst.full, pc16); | ||
1254 | oldinst = mips16inst; | ||
1255 | |||
1256 | /* skip EXTEND instruction */ | ||
1257 | if (mips16inst.ri.opcode == MIPS16e_extend_op) { | ||
1258 | extended = 1; | ||
1259 | pc16++; | ||
1260 | __get_user(mips16inst.full, pc16); | ||
1261 | } else if (delay_slot(regs)) { | ||
1262 | /* skip jump instructions */ | ||
1263 | /* JAL/JALX are 32 bits but have OPCODE in first short int */ | ||
1264 | if (mips16inst.ri.opcode == MIPS16e_jal_op) | ||
1265 | pc16++; | ||
1266 | pc16++; | ||
1267 | if (get_user(mips16inst.full, pc16)) | ||
1268 | goto sigbus; | ||
1269 | } | ||
1270 | |||
1271 | opcode = mips16inst.ri.opcode; | ||
1272 | switch (opcode) { | ||
1273 | case MIPS16e_i64_op: /* I64 or RI64 instruction */ | ||
1274 | switch (mips16inst.i64.func) { /* I64/RI64 func field check */ | ||
1275 | case MIPS16e_ldpc_func: | ||
1276 | case MIPS16e_ldsp_func: | ||
1277 | reg = reg16to32[mips16inst.ri64.ry]; | ||
1278 | goto loadDW; | ||
1279 | |||
1280 | case MIPS16e_sdsp_func: | ||
1281 | reg = reg16to32[mips16inst.ri64.ry]; | ||
1282 | goto writeDW; | ||
1283 | |||
1284 | case MIPS16e_sdrasp_func: | ||
1285 | reg = 29; /* GPRSP */ | ||
1286 | goto writeDW; | ||
1287 | } | ||
1288 | |||
1289 | goto sigbus; | ||
1290 | |||
1291 | case MIPS16e_swsp_op: | ||
1292 | reg = reg16to32[mips16inst.ri.rx]; | ||
1293 | if (extended && cpu_has_mips16e2) | ||
1294 | switch (mips16inst.ri.imm >> 5) { | ||
1295 | case 0: /* SWSP */ | ||
1296 | case 1: /* SWGP */ | ||
1297 | break; | ||
1298 | case 2: /* SHGP */ | ||
1299 | opcode = MIPS16e_sh_op; | ||
1300 | break; | ||
1301 | default: | ||
1302 | goto sigbus; | ||
1303 | } | ||
1304 | break; | ||
1305 | |||
1306 | case MIPS16e_lwpc_op: | ||
1307 | reg = reg16to32[mips16inst.ri.rx]; | ||
1308 | break; | ||
1309 | |||
1310 | case MIPS16e_lwsp_op: | ||
1311 | reg = reg16to32[mips16inst.ri.rx]; | ||
1312 | if (extended && cpu_has_mips16e2) | ||
1313 | switch (mips16inst.ri.imm >> 5) { | ||
1314 | case 0: /* LWSP */ | ||
1315 | case 1: /* LWGP */ | ||
1316 | break; | ||
1317 | case 2: /* LHGP */ | ||
1318 | opcode = MIPS16e_lh_op; | ||
1319 | break; | ||
1320 | case 4: /* LHUGP */ | ||
1321 | opcode = MIPS16e_lhu_op; | ||
1322 | break; | ||
1323 | default: | ||
1324 | goto sigbus; | ||
1325 | } | ||
1326 | break; | ||
1327 | |||
1328 | case MIPS16e_i8_op: | ||
1329 | if (mips16inst.i8.func != MIPS16e_swrasp_func) | ||
1330 | goto sigbus; | ||
1331 | reg = 29; /* GPRSP */ | ||
1332 | break; | ||
1333 | |||
1334 | default: | ||
1335 | reg = reg16to32[mips16inst.rri.ry]; | ||
1336 | break; | ||
1337 | } | ||
1338 | |||
1339 | switch (opcode) { | ||
1340 | |||
1341 | case MIPS16e_lb_op: | ||
1342 | case MIPS16e_lbu_op: | ||
1343 | case MIPS16e_sb_op: | ||
1344 | goto sigbus; | ||
1345 | |||
1346 | case MIPS16e_lh_op: | ||
1347 | if (!access_ok(addr, 2)) | ||
1348 | goto sigbus; | ||
1349 | |||
1350 | LoadHW(addr, value, res); | ||
1351 | if (res) | ||
1352 | goto fault; | ||
1353 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1354 | regs->regs[reg] = value; | ||
1355 | break; | ||
1356 | |||
1357 | case MIPS16e_lhu_op: | ||
1358 | if (!access_ok(addr, 2)) | ||
1359 | goto sigbus; | ||
1360 | |||
1361 | LoadHWU(addr, value, res); | ||
1362 | if (res) | ||
1363 | goto fault; | ||
1364 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1365 | regs->regs[reg] = value; | ||
1366 | break; | ||
1367 | |||
1368 | case MIPS16e_lw_op: | ||
1369 | case MIPS16e_lwpc_op: | ||
1370 | case MIPS16e_lwsp_op: | ||
1371 | if (!access_ok(addr, 4)) | ||
1372 | goto sigbus; | ||
1373 | |||
1374 | LoadW(addr, value, res); | ||
1375 | if (res) | ||
1376 | goto fault; | ||
1377 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1378 | regs->regs[reg] = value; | ||
1379 | break; | ||
1380 | |||
1381 | case MIPS16e_lwu_op: | ||
1382 | #ifdef CONFIG_64BIT | ||
1383 | /* | ||
1384 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1385 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1386 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
1387 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1388 | * instructions on 32-bit kernels. | ||
1389 | */ | ||
1390 | if (!access_ok(addr, 4)) | ||
1391 | goto sigbus; | ||
1392 | |||
1393 | LoadWU(addr, value, res); | ||
1394 | if (res) | ||
1395 | goto fault; | ||
1396 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1397 | regs->regs[reg] = value; | ||
1398 | break; | ||
1399 | #endif /* CONFIG_64BIT */ | ||
1400 | |||
1401 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1402 | goto sigill; | ||
1403 | |||
1404 | case MIPS16e_ld_op: | ||
1405 | loadDW: | ||
1406 | #ifdef CONFIG_64BIT | ||
1407 | /* | ||
1408 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1409 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1410 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
1411 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1412 | * instructions on 32-bit kernels. | ||
1413 | */ | ||
1414 | if (!access_ok(addr, 8)) | ||
1415 | goto sigbus; | ||
1416 | |||
1417 | LoadDW(addr, value, res); | ||
1418 | if (res) | ||
1419 | goto fault; | ||
1420 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1421 | regs->regs[reg] = value; | ||
1422 | break; | ||
1423 | #endif /* CONFIG_64BIT */ | ||
1424 | |||
1425 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1426 | goto sigill; | ||
1427 | |||
1428 | case MIPS16e_sh_op: | ||
1429 | if (!access_ok(addr, 2)) | ||
1430 | goto sigbus; | ||
1431 | |||
1432 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1433 | value = regs->regs[reg]; | ||
1434 | StoreHW(addr, value, res); | ||
1435 | if (res) | ||
1436 | goto fault; | ||
1437 | break; | ||
1438 | |||
1439 | case MIPS16e_sw_op: | ||
1440 | case MIPS16e_swsp_op: | ||
1441 | case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */ | ||
1442 | if (!access_ok(addr, 4)) | ||
1443 | goto sigbus; | ||
1444 | |||
1445 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1446 | value = regs->regs[reg]; | ||
1447 | StoreW(addr, value, res); | ||
1448 | if (res) | ||
1449 | goto fault; | ||
1450 | break; | ||
1451 | |||
1452 | case MIPS16e_sd_op: | ||
1453 | writeDW: | ||
1454 | #ifdef CONFIG_64BIT | ||
1455 | /* | ||
1456 | * A 32-bit kernel might be running on a 64-bit processor. But | ||
1457 | * if we're on a 32-bit processor and an i-cache incoherency | ||
1458 | * or race makes us see a 64-bit instruction here the sdl/sdr | ||
1459 | * would blow up, so for now we don't handle unaligned 64-bit | ||
1460 | * instructions on 32-bit kernels. | ||
1461 | */ | ||
1462 | if (!access_ok(addr, 8)) | ||
1463 | goto sigbus; | ||
1464 | |||
1465 | MIPS16e_compute_return_epc(regs, &oldinst); | ||
1466 | value = regs->regs[reg]; | ||
1467 | StoreDW(addr, value, res); | ||
1468 | if (res) | ||
1469 | goto fault; | ||
1470 | break; | ||
1471 | #endif /* CONFIG_64BIT */ | ||
1472 | |||
1473 | /* Cannot handle 64-bit instructions in 32-bit kernel */ | ||
1474 | goto sigill; | ||
1475 | |||
1476 | default: | ||
1477 | /* | ||
1478 | * Pheeee... We encountered a yet unknown instruction or | ||
1479 | * cache coherence problem. Die sucker, die ... | ||
1480 | */ | ||
1481 | goto sigill; | ||
1482 | } | ||
1483 | |||
1484 | #ifdef CONFIG_DEBUG_FS | ||
1485 | unaligned_instructions++; | ||
1486 | #endif | ||
1487 | |||
1488 | return; | ||
1489 | |||
1490 | fault: | ||
1491 | /* roll back jump/branch */ | ||
1492 | regs->cp0_epc = origpc; | ||
1493 | regs->regs[31] = orig31; | ||
1494 | /* Did we have an exception handler installed? */ | ||
1495 | if (fixup_exception(regs)) | ||
1496 | return; | ||
1497 | |||
1498 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
1499 | force_sig(SIGSEGV); | ||
1500 | |||
1501 | return; | ||
1502 | |||
1503 | sigbus: | ||
1504 | die_if_kernel("Unhandled kernel unaligned access", regs); | ||
1505 | force_sig(SIGBUS); | ||
1506 | |||
1507 | return; | ||
1508 | |||
1509 | sigill: | ||
1510 | die_if_kernel | ||
1511 | ("Unhandled kernel unaligned access or invalid instruction", regs); | ||
1512 | force_sig(SIGILL); | ||
1513 | } | ||
1514 | |||
1515 | asmlinkage void do_ade(struct pt_regs *regs) | ||
1516 | { | ||
1517 | enum ctx_state prev_state; | ||
1518 | unsigned int __user *pc; | ||
1519 | mm_segment_t seg; | ||
1520 | |||
1521 | prev_state = exception_enter(); | ||
1522 | perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, | ||
1523 | 1, regs, regs->cp0_badvaddr); | ||
1524 | /* | ||
1525 | * Did we catch a fault trying to load an instruction? | ||
1526 | */ | ||
1527 | if (regs->cp0_badvaddr == regs->cp0_epc) | ||
1528 | goto sigbus; | ||
1529 | |||
1530 | if (user_mode(regs) && !test_thread_flag(TIF_FIXADE)) | ||
1531 | goto sigbus; | ||
1532 | if (unaligned_action == UNALIGNED_ACTION_SIGNAL) | ||
1533 | goto sigbus; | ||
1534 | |||
1535 | /* | ||
1536 | * Do branch emulation only if we didn't forward the exception. | ||
1537 | * This is all so very ugly ... | ||
1538 | */ | ||
1539 | |||
1540 | /* | ||
1541 | * Are we running in microMIPS mode? | ||
1542 | */ | ||
1543 | if (get_isa16_mode(regs->cp0_epc)) { | ||
1544 | /* | ||
1545 | * Did we catch a fault trying to load an instruction in | ||
1546 | * 16-bit mode? | ||
1547 | */ | ||
1548 | if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc)) | ||
1549 | goto sigbus; | ||
1550 | if (unaligned_action == UNALIGNED_ACTION_SHOW) | ||
1551 | show_registers(regs); | ||
1552 | |||
1553 | if (cpu_has_mmips) { | ||
1554 | seg = get_fs(); | ||
1555 | if (!user_mode(regs)) | ||
1556 | set_fs(KERNEL_DS); | ||
1557 | emulate_load_store_microMIPS(regs, | ||
1558 | (void __user *)regs->cp0_badvaddr); | ||
1559 | set_fs(seg); | ||
1560 | |||
1561 | return; | ||
1562 | } | ||
1563 | |||
1564 | if (cpu_has_mips16) { | ||
1565 | seg = get_fs(); | ||
1566 | if (!user_mode(regs)) | ||
1567 | set_fs(KERNEL_DS); | ||
1568 | emulate_load_store_MIPS16e(regs, | ||
1569 | (void __user *)regs->cp0_badvaddr); | ||
1570 | set_fs(seg); | ||
1571 | |||
1572 | return; | ||
1573 | } | ||
1574 | |||
1575 | goto sigbus; | ||
1576 | } | ||
1577 | |||
1578 | if (unaligned_action == UNALIGNED_ACTION_SHOW) | ||
1579 | show_registers(regs); | ||
1580 | pc = (unsigned int __user *)exception_epc(regs); | ||
1581 | |||
1582 | seg = get_fs(); | ||
1583 | if (!user_mode(regs)) | ||
1584 | set_fs(KERNEL_DS); | ||
1585 | emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc); | ||
1586 | set_fs(seg); | ||
1587 | |||
1588 | return; | ||
1589 | |||
1590 | sigbus: | ||
1591 | die_if_kernel("Kernel unaligned instruction access", regs); | ||
1592 | force_sig(SIGBUS); | ||
1593 | |||
1594 | /* | ||
1595 | * XXX On return from the signal handler we should advance the epc | ||
1596 | */ | ||
1597 | exception_exit(prev_state); | ||
1598 | } | ||
1599 | |||
1600 | #ifdef CONFIG_DEBUG_FS | ||
1601 | static int __init debugfs_unaligned(void) | ||
1602 | { | ||
1603 | debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir, | ||
1604 | &unaligned_instructions); | ||
1605 | debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR, | ||
1606 | mips_debugfs_dir, &unaligned_action); | ||
1607 | return 0; | ||
1608 | } | ||
1609 | arch_initcall(debugfs_unaligned); | ||
1610 | #endif | ||
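The two debugfs_create_u32() calls above expose the counter and the policy knob as files under mips_debugfs_dir. A minimal userspace sketch, assuming debugfs is mounted at /sys/kernel/debug and that mips_debugfs_dir is the "mips" directory (both are assumptions about the running system):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* hypothetical path; depends on the debugfs mount point */
	int fd = open("/sys/kernel/debug/mips/unaligned_action", O_WRONLY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* write a new action value; the numeric mapping follows the
	 * unaligned_action enum earlier in this file */
	if (write(fd, "1", 1) != 1)
		perror("write");
	close(fd);
	return 0;
}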
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c new file mode 100644 index 000000000..6dbe4eab0 --- /dev/null +++ b/arch/mips/kernel/uprobes.c | |||
@@ -0,0 +1,262 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0 | ||
2 | #include <linux/highmem.h> | ||
3 | #include <linux/kdebug.h> | ||
4 | #include <linux/types.h> | ||
5 | #include <linux/notifier.h> | ||
6 | #include <linux/sched.h> | ||
7 | #include <linux/uprobes.h> | ||
8 | |||
9 | #include <asm/branch.h> | ||
10 | #include <asm/cpu-features.h> | ||
11 | #include <asm/ptrace.h> | ||
12 | |||
13 | #include "probes-common.h" | ||
14 | |||
15 | static inline int insn_has_delay_slot(const union mips_instruction insn) | ||
16 | { | ||
17 | return __insn_has_delay_slot(insn); | ||
18 | } | ||
19 | |||
20 | /** | ||
21 | * arch_uprobe_analyze_insn - instruction analysis including validity and fixups. | ||
22 | * @aup: the probepoint information. | ||
23 | * @mm: the probed address space. | ||
24 | * @addr: virtual address at which to install the probepoint | ||
25 | * Return 0 on success or a negative errno on error. | ||
26 | */ | ||
27 | int arch_uprobe_analyze_insn(struct arch_uprobe *aup, | ||
28 | struct mm_struct *mm, unsigned long addr) | ||
29 | { | ||
30 | union mips_instruction inst; | ||
31 | |||
32 | /* | ||
33 | * For the time being this also blocks attempts to use uprobes with | ||
34 | * MIPS16 and microMIPS. | ||
35 | */ | ||
36 | if (addr & 0x03) | ||
37 | return -EINVAL; | ||
38 | |||
39 | inst.word = aup->insn[0]; | ||
40 | |||
41 | if (__insn_is_compact_branch(inst)) { | ||
42 | pr_notice("Uprobes for compact branches are not supported\n"); | ||
43 | return -EINVAL; | ||
44 | } | ||
45 | |||
46 | aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)]; /* for branches, step the delay-slot insn */ | ||
47 | aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* NOP */ | ||
48 | |||
49 | return 0; | ||
50 | } | ||
51 | |||
52 | /** | ||
53 | * is_trap_insn - check if the instruction is a trap variant | ||
54 | * @insn: instruction to be checked. | ||
55 | * Returns true if @insn is a trap variant. | ||
56 | * | ||
57 | * This definition overrides the weak definition in kernel/events/uprobes.c | ||
58 | * and is needed for the case where an architecture has multiple trap | ||
59 | * instructions (like PowerPC or MIPS). We treat BREAK just like the more | ||
60 | * modern conditional trap instructions. | ||
61 | */ | ||
62 | bool is_trap_insn(uprobe_opcode_t *insn) | ||
63 | { | ||
64 | union mips_instruction inst; | ||
65 | |||
66 | inst.word = *insn; | ||
67 | |||
68 | switch (inst.i_format.opcode) { | ||
69 | case spec_op: | ||
70 | switch (inst.r_format.func) { | ||
71 | case break_op: | ||
72 | case teq_op: | ||
73 | case tge_op: | ||
74 | case tgeu_op: | ||
75 | case tlt_op: | ||
76 | case tltu_op: | ||
77 | case tne_op: | ||
78 | return 1; | ||
79 | } | ||
80 | break; | ||
81 | |||
82 | case bcond_op: /* Yes, really ... */ | ||
83 | switch (inst.u_format.rt) { | ||
84 | case teqi_op: | ||
85 | case tgei_op: | ||
86 | case tgeiu_op: | ||
87 | case tlti_op: | ||
88 | case tltiu_op: | ||
89 | case tnei_op: | ||
90 | return 1; | ||
91 | } | ||
92 | break; | ||
93 | } | ||
94 | |||
95 | return 0; | ||
96 | } | ||
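As a quick sanity check of the decode above: a conditional trap such as teq sits in the SPECIAL opcode space with function field 0x34, so is_trap_insn() reports it as a trap variant. A small host-side sketch (the field values are restated here from the standard MIPS32 encoding, as an assumption):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t teq = 0x00000034;	/* teq $zero, $zero */
	unsigned opcode = teq >> 26;	/* 0 == spec_op (SPECIAL) */
	unsigned func = teq & 0x3f;	/* 0x34 == teq_op */

	printf("opcode=%u func=%#x -> trap variant\n", opcode, func);
	return 0;
}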
97 | |||
98 | #define UPROBE_TRAP_NR ULONG_MAX | ||
99 | |||
100 | /* | ||
101 | * arch_uprobe_pre_xol - prepare to execute out of line. | ||
102 | * @auprobe: the probepoint information. | ||
103 | * @regs: reflects the saved user state of current task. | ||
104 | */ | ||
105 | int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs) | ||
106 | { | ||
107 | struct uprobe_task *utask = current->utask; | ||
108 | |||
109 | /* | ||
110 | * Now find the EPC where to resume after the breakpoint has been | ||
111 | * dealt with. This may require emulation of a branch. | ||
112 | */ | ||
113 | aup->resume_epc = regs->cp0_epc + 4; | ||
114 | if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) { | ||
115 | __compute_return_epc_for_insn(regs, | ||
116 | (union mips_instruction) aup->insn[0]); | ||
117 | aup->resume_epc = regs->cp0_epc; | ||
118 | } | ||
119 | utask->autask.saved_trap_nr = current->thread.trap_nr; | ||
120 | current->thread.trap_nr = UPROBE_TRAP_NR; | ||
121 | regs->cp0_epc = current->utask->xol_vaddr; | ||
122 | |||
123 | return 0; | ||
124 | } | ||
125 | |||
126 | int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs) | ||
127 | { | ||
128 | struct uprobe_task *utask = current->utask; | ||
129 | |||
130 | current->thread.trap_nr = utask->autask.saved_trap_nr; | ||
131 | regs->cp0_epc = aup->resume_epc; | ||
132 | |||
133 | return 0; | ||
134 | } | ||
135 | |||
136 | /* | ||
137 | * If the xol insn itself traps and generates a signal (say, | ||
138 | * SIGILL/SIGSEGV/etc), then detect the case where a singlestepped | ||
139 | * instruction jumps back to its own address. It is assumed that anything | ||
140 | * like do_page_fault/do_trap/etc sets thread.trap_nr != -1. | ||
141 | * | ||
142 | * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr, | ||
143 | * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal to | ||
144 | * UPROBE_TRAP_NR == -1 set by arch_uprobe_pre_xol(). | ||
145 | */ | ||
146 | bool arch_uprobe_xol_was_trapped(struct task_struct *tsk) | ||
147 | { | ||
148 | if (tsk->thread.trap_nr != UPROBE_TRAP_NR) | ||
149 | return true; | ||
150 | |||
151 | return false; | ||
152 | } | ||
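A host-side model of that sentinel protocol, with plain variables standing in for current->thread.trap_nr and the saved copy:

#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

#define UPROBE_TRAP_NR ULONG_MAX

int main(void)
{
	unsigned long trap_nr = 3, saved;
	bool trapped;

	saved = trap_nr;		/* arch_uprobe_pre_xol() */
	trap_nr = UPROBE_TRAP_NR;

	trap_nr = 5;			/* a fault handler ran meanwhile */

	trapped = trap_nr != UPROBE_TRAP_NR;
	printf("xol was trapped: %d\n", trapped);

	trap_nr = saved;		/* arch_uprobe_post_xol() */
	return 0;
}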
153 | |||
154 | int arch_uprobe_exception_notify(struct notifier_block *self, | ||
155 | unsigned long val, void *data) | ||
156 | { | ||
157 | struct die_args *args = data; | ||
158 | struct pt_regs *regs = args->regs; | ||
159 | |||
160 | /* regs == NULL is a kernel bug */ | ||
161 | if (WARN_ON(!regs)) | ||
162 | return NOTIFY_DONE; | ||
163 | |||
164 | /* We are only interested in userspace traps */ | ||
165 | if (!user_mode(regs)) | ||
166 | return NOTIFY_DONE; | ||
167 | |||
168 | switch (val) { | ||
169 | case DIE_UPROBE: | ||
170 | if (uprobe_pre_sstep_notifier(regs)) | ||
171 | return NOTIFY_STOP; | ||
172 | break; | ||
173 | case DIE_UPROBE_XOL: | ||
174 | if (uprobe_post_sstep_notifier(regs)) | ||
175 | return NOTIFY_STOP; | ||
176 | default: | ||
177 | break; | ||
178 | } | ||
179 | |||
180 | return NOTIFY_DONE; | ||
181 | } | ||
182 | |||
183 | /* | ||
184 | * This function gets called when XOL instruction either gets trapped or | ||
185 | * the thread has a fatal signal. Reset the instruction pointer to its | ||
186 | * probed address for the potential restart or for post mortem analysis. | ||
187 | */ | ||
188 | void arch_uprobe_abort_xol(struct arch_uprobe *aup, | ||
189 | struct pt_regs *regs) | ||
190 | { | ||
191 | struct uprobe_task *utask = current->utask; | ||
192 | |||
193 | instruction_pointer_set(regs, utask->vaddr); | ||
194 | } | ||
195 | |||
196 | unsigned long arch_uretprobe_hijack_return_addr( | ||
197 | unsigned long trampoline_vaddr, struct pt_regs *regs) | ||
198 | { | ||
199 | unsigned long ra; | ||
200 | |||
201 | ra = regs->regs[31]; | ||
202 | |||
203 | /* Replace the return address with the trampoline address */ | ||
204 | regs->regs[31] = trampoline_vaddr; | ||
205 | |||
206 | return ra; | ||
207 | } | ||
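A host-side model of the hijack, using a mock register file (mock_pt_regs and the addresses below are illustrative, not kernel definitions):

#include <stdio.h>

struct mock_pt_regs { unsigned long regs[32]; };

static unsigned long hijack(unsigned long tramp, struct mock_pt_regs *r)
{
	unsigned long ra = r->regs[31];	/* save the original $ra */

	r->regs[31] = tramp;		/* return now lands on the trampoline */
	return ra;
}

int main(void)
{
	struct mock_pt_regs r = { .regs[31] = 0x400100 };
	unsigned long orig = hijack(0x7f0000, &r);

	printf("orig ra=%#lx, new ra=%#lx\n", orig, r.regs[31]);
	return 0;
}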
208 | |||
209 | /** | ||
210 | * set_swbp - store breakpoint at a given address. | ||
211 | * @auprobe: arch specific probepoint information. | ||
212 | * @mm: the probed process address space. | ||
213 | * @vaddr: the virtual address to insert the opcode. | ||
214 | * | ||
215 | * For mm @mm, store the breakpoint instruction at @vaddr. | ||
216 | * Return 0 (success) or a negative errno. | ||
217 | * | ||
218 | * This version overrides the weak version in kernel/events/uprobes.c. | ||
219 | * It is required to handle MIPS16 and microMIPS. | ||
220 | */ | ||
221 | int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm, | ||
222 | unsigned long vaddr) | ||
223 | { | ||
224 | return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN); | ||
225 | } | ||
226 | |||
227 | void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr, | ||
228 | void *src, unsigned long len) | ||
229 | { | ||
230 | unsigned long kaddr, kstart; | ||
231 | |||
232 | /* Initialize the slot */ | ||
233 | kaddr = (unsigned long)kmap_atomic(page); | ||
234 | kstart = kaddr + (vaddr & ~PAGE_MASK); | ||
235 | memcpy((void *)kstart, src, len); | ||
236 | flush_icache_range(kstart, kstart + len); | ||
237 | kunmap_atomic((void *)kaddr); | ||
238 | } | ||
239 | |||
240 | /** | ||
241 | * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs | ||
242 | * @regs: Reflects the saved state of the task after it has hit a breakpoint | ||
243 | * instruction. | ||
244 | * Return the address of the breakpoint instruction. | ||
245 | * | ||
246 | * This overrides the weak version in kernel/events/uprobes.c. | ||
247 | */ | ||
248 | unsigned long uprobe_get_swbp_addr(struct pt_regs *regs) | ||
249 | { | ||
250 | return instruction_pointer(regs); | ||
251 | } | ||
252 | |||
253 | /* | ||
254 | * See if the instruction can be emulated. | ||
255 | * Returns true if instruction was emulated, false otherwise. | ||
256 | * | ||
257 | * For now we never emulate, so this function just returns false. | ||
258 | */ | ||
259 | bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs) | ||
260 | { | ||
261 | return 0; | ||
262 | } | ||
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c new file mode 100644 index 000000000..242dc5e83 --- /dev/null +++ b/arch/mips/kernel/vdso.c | |||
@@ -0,0 +1,192 @@ | |||
1 | // SPDX-License-Identifier: GPL-2.0-or-later | ||
2 | /* | ||
3 | * Copyright (C) 2015 Imagination Technologies | ||
4 | * Author: Alex Smith <alex.smith@imgtec.com> | ||
5 | */ | ||
6 | |||
7 | #include <linux/binfmts.h> | ||
8 | #include <linux/elf.h> | ||
9 | #include <linux/err.h> | ||
10 | #include <linux/init.h> | ||
11 | #include <linux/ioport.h> | ||
12 | #include <linux/kernel.h> | ||
13 | #include <linux/mm.h> | ||
14 | #include <linux/random.h> | ||
15 | #include <linux/sched.h> | ||
16 | #include <linux/slab.h> | ||
17 | #include <linux/timekeeper_internal.h> | ||
18 | |||
19 | #include <asm/abi.h> | ||
20 | #include <asm/mips-cps.h> | ||
21 | #include <asm/page.h> | ||
22 | #include <asm/vdso.h> | ||
23 | #include <vdso/helpers.h> | ||
24 | #include <vdso/vsyscall.h> | ||
25 | |||
26 | /* Kernel-provided data used by the VDSO. */ | ||
27 | static union mips_vdso_data mips_vdso_data __page_aligned_data; | ||
28 | struct vdso_data *vdso_data = mips_vdso_data.data; | ||
29 | |||
30 | /* | ||
31 | * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as | ||
32 | * what we map and where within the area they are mapped is determined at | ||
33 | * runtime. | ||
34 | */ | ||
35 | static struct page *no_pages[] = { NULL }; | ||
36 | static struct vm_special_mapping vdso_vvar_mapping = { | ||
37 | .name = "[vvar]", | ||
38 | .pages = no_pages, | ||
39 | }; | ||
40 | |||
41 | static void __init init_vdso_image(struct mips_vdso_image *image) | ||
42 | { | ||
43 | unsigned long num_pages, i; | ||
44 | unsigned long data_pfn; | ||
45 | |||
46 | BUG_ON(!PAGE_ALIGNED(image->data)); | ||
47 | BUG_ON(!PAGE_ALIGNED(image->size)); | ||
48 | |||
49 | num_pages = image->size / PAGE_SIZE; | ||
50 | |||
51 | data_pfn = __phys_to_pfn(__pa_symbol(image->data)); | ||
52 | for (i = 0; i < num_pages; i++) | ||
53 | image->mapping.pages[i] = pfn_to_page(data_pfn + i); | ||
54 | } | ||
55 | |||
56 | static int __init init_vdso(void) | ||
57 | { | ||
58 | init_vdso_image(&vdso_image); | ||
59 | |||
60 | #ifdef CONFIG_MIPS32_O32 | ||
61 | init_vdso_image(&vdso_image_o32); | ||
62 | #endif | ||
63 | |||
64 | #ifdef CONFIG_MIPS32_N32 | ||
65 | init_vdso_image(&vdso_image_n32); | ||
66 | #endif | ||
67 | |||
68 | return 0; | ||
69 | } | ||
70 | subsys_initcall(init_vdso); | ||
71 | |||
72 | static unsigned long vdso_base(void) | ||
73 | { | ||
74 | unsigned long base = STACK_TOP; | ||
75 | |||
76 | if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) { | ||
77 | /* Skip the delay slot emulation page */ | ||
78 | base += PAGE_SIZE; | ||
79 | } | ||
80 | |||
81 | if (current->flags & PF_RANDOMIZE) { | ||
82 | base += get_random_int() & (VDSO_RANDOMIZE_SIZE - 1); | ||
83 | base = PAGE_ALIGN(base); | ||
84 | } | ||
85 | |||
86 | return base; | ||
87 | } | ||
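A host-side sketch of that randomization; STACK_TOP and VDSO_RANDOMIZE_SIZE are placeholder assumptions here (the real constants are per-ABI):

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE		0x1000UL
#define PAGE_ALIGN(x)		(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define VDSO_RANDOMIZE_SIZE	(256UL << 10)	/* assumed window size */
#define STACK_TOP		0x7fff8000UL	/* assumed placeholder */

int main(void)
{
	unsigned long base = STACK_TOP;

	base += rand() & (VDSO_RANDOMIZE_SIZE - 1);	/* random offset */
	base = PAGE_ALIGN(base);			/* page-aligned result */
	printf("vdso base candidate: %#lx\n", base);
	return 0;
}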
88 | |||
89 | int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp) | ||
90 | { | ||
91 | struct mips_vdso_image *image = current->thread.abi->vdso; | ||
92 | struct mm_struct *mm = current->mm; | ||
93 | unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn; | ||
94 | struct vm_area_struct *vma; | ||
95 | int ret; | ||
96 | |||
97 | if (mmap_write_lock_killable(mm)) | ||
98 | return -EINTR; | ||
99 | |||
100 | if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) { | ||
101 | /* Map delay slot emulation page */ | ||
102 | base = mmap_region(NULL, STACK_TOP, PAGE_SIZE, | ||
103 | VM_READ | VM_EXEC | | ||
104 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, | ||
105 | 0, NULL); | ||
106 | if (IS_ERR_VALUE(base)) { | ||
107 | ret = base; | ||
108 | goto out; | ||
109 | } | ||
110 | } | ||
111 | |||
112 | /* | ||
113 | * Determine total area size. This includes the VDSO data itself, the | ||
114 | * data page, and the GIC user page if present. Always create a mapping | ||
115 | * for the GIC user area if the GIC is present regardless of whether it | ||
116 | * is the current clocksource, in case it comes into use later on. We | ||
117 | * only map a page even though the total area is 64K, as we only need | ||
118 | * the counter registers at the start. | ||
119 | */ | ||
120 | gic_size = mips_gic_present() ? PAGE_SIZE : 0; | ||
121 | vvar_size = gic_size + PAGE_SIZE; | ||
122 | size = vvar_size + image->size; | ||
123 | |||
124 | /* | ||
125 | * Find a region that's large enough for us to perform the | ||
126 | * colour-matching alignment below. | ||
127 | */ | ||
128 | if (cpu_has_dc_aliases) | ||
129 | size += shm_align_mask + 1; | ||
130 | |||
131 | base = get_unmapped_area(NULL, vdso_base(), size, 0, 0); | ||
132 | if (IS_ERR_VALUE(base)) { | ||
133 | ret = base; | ||
134 | goto out; | ||
135 | } | ||
136 | |||
137 | /* | ||
138 | * If we suffer from dcache aliasing, ensure that the VDSO data page | ||
139 | * mapping is coloured the same as the kernel's mapping of that memory. | ||
140 | * This ensures that when the kernel updates the VDSO data userland | ||
141 | * will observe it without requiring cache invalidations. | ||
142 | */ | ||
143 | if (cpu_has_dc_aliases) { | ||
144 | base = __ALIGN_MASK(base, shm_align_mask); | ||
145 | base += ((unsigned long)vdso_data - gic_size) & shm_align_mask; | ||
146 | } | ||
147 | |||
148 | data_addr = base + gic_size; | ||
149 | vdso_addr = data_addr + PAGE_SIZE; | ||
150 | |||
151 | vma = _install_special_mapping(mm, base, vvar_size, | ||
152 | VM_READ | VM_MAYREAD, | ||
153 | &vdso_vvar_mapping); | ||
154 | if (IS_ERR(vma)) { | ||
155 | ret = PTR_ERR(vma); | ||
156 | goto out; | ||
157 | } | ||
158 | |||
159 | /* Map GIC user page. */ | ||
160 | if (gic_size) { | ||
161 | gic_pfn = virt_to_phys(mips_gic_base + MIPS_GIC_USER_OFS) >> PAGE_SHIFT; | ||
162 | |||
163 | ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size, | ||
164 | pgprot_noncached(PAGE_READONLY)); | ||
165 | if (ret) | ||
166 | goto out; | ||
167 | } | ||
168 | |||
169 | /* Map data page. */ | ||
170 | ret = remap_pfn_range(vma, data_addr, | ||
171 | virt_to_phys(vdso_data) >> PAGE_SHIFT, | ||
172 | PAGE_SIZE, PAGE_READONLY); | ||
173 | if (ret) | ||
174 | goto out; | ||
175 | |||
176 | /* Map VDSO image. */ | ||
177 | vma = _install_special_mapping(mm, vdso_addr, image->size, | ||
178 | VM_READ | VM_EXEC | | ||
179 | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC, | ||
180 | &image->mapping); | ||
181 | if (IS_ERR(vma)) { | ||
182 | ret = PTR_ERR(vma); | ||
183 | goto out; | ||
184 | } | ||
185 | |||
186 | mm->context.vdso = (void *)vdso_addr; | ||
187 | ret = 0; | ||
188 | |||
189 | out: | ||
190 | mmap_write_unlock(mm); | ||
191 | return ret; | ||
192 | } | ||
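The colouring arithmetic in the middle of this function can be checked on its own: after aligning the base to the aliasing span, the added offset makes base + gic_size share its colour bits with the kernel's vdso_data address. A host-side sketch with assumed values:

#include <stdio.h>

#define __ALIGN_MASK(x, mask)	(((x) + (mask)) & ~(mask))

int main(void)
{
	unsigned long shm_align_mask = 0x7fff;	/* assumed 32K aliasing span */
	unsigned long vdso_data = 0x80514000;	/* assumed kernel address */
	unsigned long gic_size = 0x1000;
	unsigned long base = 0x77fd3210;

	base = __ALIGN_MASK(base, shm_align_mask);
	base += (vdso_data - gic_size) & shm_align_mask;

	/* the data page's colour now matches vdso_data's colour */
	printf("data page at %#lx, colour %#lx vs %#lx\n",
	       base + gic_size,
	       (base + gic_size) & shm_align_mask,
	       vdso_data & shm_align_mask);
	return 0;
}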
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S new file mode 100644 index 000000000..09fa4705c --- /dev/null +++ b/arch/mips/kernel/vmlinux.lds.S | |||
@@ -0,0 +1,228 @@ | |||
1 | /* SPDX-License-Identifier: GPL-2.0 */ | ||
2 | #include <asm/asm-offsets.h> | ||
3 | #include <asm/thread_info.h> | ||
4 | |||
5 | #define PAGE_SIZE _PAGE_SIZE | ||
6 | |||
7 | /* | ||
8 | * Put .bss..swapper_pg_dir as the first thing in .bss. This will | ||
9 | * ensure that it has .bss alignment (64K). | ||
10 | */ | ||
11 | #define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir) | ||
12 | |||
13 | /* Cavium Octeon should not have a separate PT_NOTE Program Header. */ | ||
14 | #ifndef CONFIG_CAVIUM_OCTEON_SOC | ||
15 | #define EMITS_PT_NOTE | ||
16 | #endif | ||
17 | |||
18 | #include <asm-generic/vmlinux.lds.h> | ||
19 | |||
20 | #undef mips | ||
21 | #define mips mips | ||
22 | OUTPUT_ARCH(mips) | ||
23 | ENTRY(kernel_entry) | ||
24 | PHDRS { | ||
25 | text PT_LOAD FLAGS(7); /* RWX */ | ||
26 | #ifndef CONFIG_CAVIUM_OCTEON_SOC | ||
27 | note PT_NOTE FLAGS(4); /* R__ */ | ||
28 | #endif /* CAVIUM_OCTEON_SOC */ | ||
29 | } | ||
30 | |||
31 | #ifdef CONFIG_32BIT | ||
32 | #ifdef CONFIG_CPU_LITTLE_ENDIAN | ||
33 | jiffies = jiffies_64; | ||
34 | #else | ||
35 | jiffies = jiffies_64 + 4; | ||
36 | #endif | ||
37 | #else | ||
38 | jiffies = jiffies_64; | ||
39 | #endif | ||
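The 32-bit big-endian case adds 4 because jiffies aliases only the low word of jiffies_64, and big-endian stores that word at byte offset 4. A quick host-side check of the layout (the value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* 0x0000000112345678 laid out big-endian, byte by byte */
	unsigned char be[8] = { 0, 0, 0, 1, 0x12, 0x34, 0x56, 0x78 };
	uint32_t lo = (be[4] << 24) | (be[5] << 16) | (be[6] << 8) | be[7];

	printf("low 32 bits live at offset 4: %#x\n", lo);
	return 0;
}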
40 | |||
41 | SECTIONS | ||
42 | { | ||
43 | #ifdef CONFIG_BOOT_ELF64 | ||
44 | /* Read-only sections, merged into text segment: */ | ||
45 | /* . = 0xc000000000000000; */ | ||
46 | |||
47 | /* This is the value for an Origin kernel, taken from an IRIX kernel. */ | ||
48 | /* . = 0xc00000000001c000; */ | ||
49 | |||
50 | /* Set the vaddr for the text segment to a value | ||
51 | * >= 0xa800 0000 0001 9000 if no symmon is going to be configured | ||
52 | * >= 0xa800 0000 0030 0000 otherwise | ||
53 | */ | ||
54 | |||
55 | /* . = 0xa800000000300000; */ | ||
56 | . = 0xffffffff80300000; | ||
57 | #endif | ||
58 | . = LINKER_LOAD_ADDRESS; | ||
59 | /* read-only */ | ||
60 | _text = .; /* Text and read-only data */ | ||
61 | .text : { | ||
62 | TEXT_TEXT | ||
63 | SCHED_TEXT | ||
64 | CPUIDLE_TEXT | ||
65 | LOCK_TEXT | ||
66 | KPROBES_TEXT | ||
67 | IRQENTRY_TEXT | ||
68 | SOFTIRQENTRY_TEXT | ||
69 | *(.text.*) | ||
70 | *(.fixup) | ||
71 | *(.gnu.warning) | ||
72 | } :text = 0 | ||
73 | _etext = .; /* End of text section */ | ||
74 | |||
75 | EXCEPTION_TABLE(16) | ||
76 | |||
77 | /* Exception table for data bus errors */ | ||
78 | __dbe_table : { | ||
79 | __start___dbe_table = .; | ||
80 | KEEP(*(__dbe_table)) | ||
81 | __stop___dbe_table = .; | ||
82 | } | ||
83 | |||
84 | _sdata = .; /* Start of data section */ | ||
85 | RO_DATA(4096) | ||
86 | |||
87 | /* writeable */ | ||
88 | .data : { /* Data */ | ||
89 | . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */ | ||
90 | |||
91 | INIT_TASK_DATA(THREAD_SIZE) | ||
92 | NOSAVE_DATA | ||
93 | PAGE_ALIGNED_DATA(PAGE_SIZE) | ||
94 | CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | ||
95 | READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | ||
96 | DATA_DATA | ||
97 | CONSTRUCTORS | ||
98 | } | ||
99 | BUG_TABLE | ||
100 | _gp = . + 0x8000; | ||
101 | .lit8 : { | ||
102 | *(.lit8) | ||
103 | } | ||
104 | .lit4 : { | ||
105 | *(.lit4) | ||
106 | } | ||
107 | /* We want the small data sections together, so single-instruction offsets | ||
108 | can access them all, and initialized data all before uninitialized, so | ||
109 | we can shorten the on-disk segment size. */ | ||
110 | .sdata : { | ||
111 | *(.sdata) | ||
112 | } | ||
113 | _edata = .; /* End of data section */ | ||
114 | |||
115 | /* will be freed after init */ | ||
116 | . = ALIGN(PAGE_SIZE); /* Init code and data */ | ||
117 | __init_begin = .; | ||
118 | INIT_TEXT_SECTION(PAGE_SIZE) | ||
119 | INIT_DATA_SECTION(16) | ||
120 | |||
121 | . = ALIGN(4); | ||
122 | .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) { | ||
123 | __mips_machines_start = .; | ||
124 | KEEP(*(.mips.machines.init)) | ||
125 | __mips_machines_end = .; | ||
126 | } | ||
127 | |||
128 | /* .exit.text is discarded at runtime, not link time, to deal with | ||
129 | * references from .rodata | ||
130 | */ | ||
131 | .exit.text : { | ||
132 | EXIT_TEXT | ||
133 | } | ||
134 | .exit.data : { | ||
135 | EXIT_DATA | ||
136 | } | ||
137 | #ifdef CONFIG_SMP | ||
138 | PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT) | ||
139 | #endif | ||
140 | |||
141 | #ifdef CONFIG_MIPS_ELF_APPENDED_DTB | ||
142 | .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) { | ||
143 | *(.appended_dtb) | ||
144 | KEEP(*(.appended_dtb)) | ||
145 | } | ||
146 | #endif | ||
147 | |||
148 | #ifdef CONFIG_RELOCATABLE | ||
149 | . = ALIGN(4); | ||
150 | |||
151 | .data.reloc : { | ||
152 | _relocation_start = .; | ||
153 | /* | ||
154 | * Space for relocation table | ||
155 | * This needs to be filled so that the | ||
156 | * relocs tool can overwrite the content. | ||
157 | * An invalid value is left at the start of the | ||
158 | * section to abort relocation if the table | ||
159 | * has not been filled in. | ||
160 | */ | ||
161 | LONG(0xFFFFFFFF); | ||
162 | FILL(0); | ||
163 | . += CONFIG_RELOCATION_TABLE_SIZE - 4; | ||
164 | _relocation_end = .; | ||
165 | } | ||
166 | #endif | ||
167 | |||
168 | #ifdef CONFIG_MIPS_RAW_APPENDED_DTB | ||
169 | __appended_dtb = .; | ||
170 | /* leave space for appended DTB */ | ||
171 | . += 0x100000; | ||
172 | #endif | ||
173 | /* | ||
174 | * Align to 64K in attempt to eliminate holes before the | ||
175 | * .bss..swapper_pg_dir section at the start of .bss. This | ||
176 | * also satisfies PAGE_SIZE alignment as the largest page size | ||
177 | * allowed is 64K. | ||
178 | */ | ||
179 | . = ALIGN(0x10000); | ||
180 | __init_end = .; | ||
181 | /* freed after init ends here */ | ||
182 | |||
183 | /* | ||
184 | * Force .bss to 64K alignment so that .bss..swapper_pg_dir | ||
185 | * gets that alignment. .sbss should be empty, so there will be | ||
186 | * no holes after __init_end. */ | ||
187 | BSS_SECTION(0, 0x10000, 8) | ||
188 | |||
189 | _end = . ; | ||
190 | |||
191 | /* These mark the ABI of the kernel for debuggers. */ | ||
192 | .mdebug.abi32 : { | ||
193 | KEEP(*(.mdebug.abi32)) | ||
194 | } | ||
195 | .mdebug.abi64 : { | ||
196 | KEEP(*(.mdebug.abi64)) | ||
197 | } | ||
198 | |||
199 | /* This is the MIPS specific mdebug section. */ | ||
200 | .mdebug : { | ||
201 | *(.mdebug) | ||
202 | } | ||
203 | |||
204 | STABS_DEBUG | ||
205 | DWARF_DEBUG | ||
206 | ELF_DETAILS | ||
207 | |||
208 | /* These must appear regardless of . */ | ||
209 | .gptab.sdata : { | ||
210 | *(.gptab.data) | ||
211 | *(.gptab.sdata) | ||
212 | } | ||
213 | .gptab.sbss : { | ||
214 | *(.gptab.bss) | ||
215 | *(.gptab.sbss) | ||
216 | } | ||
217 | |||
218 | /* Sections to be discarded */ | ||
219 | DISCARDS | ||
220 | /DISCARD/ : { | ||
221 | /* ABI crap starts here */ | ||
222 | *(.MIPS.abiflags) | ||
223 | *(.MIPS.options) | ||
224 | *(.options) | ||
225 | *(.pdr) | ||
226 | *(.reginfo) | ||
227 | } | ||
228 | } | ||
diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c new file mode 100644 index 000000000..903c07bdc --- /dev/null +++ b/arch/mips/kernel/vpe-cmp.c | |||
@@ -0,0 +1,180 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
8 | */ | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/export.h> | ||
14 | |||
15 | #include <asm/vpe.h> | ||
16 | |||
17 | static int major; | ||
18 | |||
19 | void cleanup_tc(struct tc *tc) | ||
20 | { | ||
21 | /* nothing to do here on CMP */ | ||
22 | } | ||
23 | |||
24 | static ssize_t store_kill(struct device *dev, struct device_attribute *attr, | ||
25 | const char *buf, size_t len) | ||
26 | { | ||
27 | struct vpe *vpe = get_vpe(aprp_cpu_index()); | ||
28 | struct vpe_notifications *notifier; | ||
29 | |||
30 | list_for_each_entry(notifier, &vpe->notify, list) | ||
31 | notifier->stop(aprp_cpu_index()); | ||
32 | |||
33 | release_progmem(vpe->load_addr); | ||
34 | vpe->state = VPE_STATE_UNUSED; | ||
35 | |||
36 | return len; | ||
37 | } | ||
38 | static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); | ||
39 | |||
40 | static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, | ||
41 | char *buf) | ||
42 | { | ||
43 | struct vpe *vpe = get_vpe(aprp_cpu_index()); | ||
44 | |||
45 | return sprintf(buf, "%d\n", vpe->ntcs); | ||
46 | } | ||
47 | |||
48 | static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, | ||
49 | const char *buf, size_t len) | ||
50 | { | ||
51 | struct vpe *vpe = get_vpe(aprp_cpu_index()); | ||
52 | unsigned long new; | ||
53 | int ret; | ||
54 | |||
55 | ret = kstrtoul(buf, 0, &new); | ||
56 | if (ret < 0) | ||
57 | return ret; | ||
58 | |||
59 | /* APRP can only reserve one TC in a VPE and no more. */ | ||
60 | if (new != 1) | ||
61 | return -EINVAL; | ||
62 | |||
63 | vpe->ntcs = new; | ||
64 | |||
65 | return len; | ||
66 | } | ||
67 | static DEVICE_ATTR_RW(ntcs); | ||
68 | |||
69 | static struct attribute *vpe_attrs[] = { | ||
70 | &dev_attr_kill.attr, | ||
71 | &dev_attr_ntcs.attr, | ||
72 | NULL, | ||
73 | }; | ||
74 | ATTRIBUTE_GROUPS(vpe); | ||
75 | |||
76 | static void vpe_device_release(struct device *cd) | ||
77 | { | ||
78 | } | ||
79 | |||
80 | static struct class vpe_class = { | ||
81 | .name = "vpe", | ||
82 | .owner = THIS_MODULE, | ||
83 | .dev_release = vpe_device_release, | ||
84 | .dev_groups = vpe_groups, | ||
85 | }; | ||
86 | |||
87 | static struct device vpe_device; | ||
88 | |||
89 | int __init vpe_module_init(void) | ||
90 | { | ||
91 | struct vpe *v = NULL; | ||
92 | struct tc *t; | ||
93 | int err; | ||
94 | |||
95 | if (!cpu_has_mipsmt) { | ||
96 | pr_warn("VPE loader: not a MIPS MT capable processor\n"); | ||
97 | return -ENODEV; | ||
98 | } | ||
99 | |||
100 | if (num_possible_cpus() - aprp_cpu_index() < 1) { | ||
101 | pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n" | ||
102 | "Pass maxcpus=<n> argument as kernel argument\n"); | ||
103 | return -ENODEV; | ||
104 | } | ||
105 | |||
106 | major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops); | ||
107 | if (major < 0) { | ||
108 | pr_warn("VPE loader: unable to register character device\n"); | ||
109 | return major; | ||
110 | } | ||
111 | |||
112 | err = class_register(&vpe_class); | ||
113 | if (err) { | ||
114 | pr_err("vpe_class registration failed\n"); | ||
115 | goto out_chrdev; | ||
116 | } | ||
117 | |||
118 | device_initialize(&vpe_device); | ||
119 | vpe_device.class = &vpe_class, | ||
120 | vpe_device.parent = NULL, | ||
121 | dev_set_name(&vpe_device, "vpe_sp"); | ||
122 | vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR); | ||
123 | err = device_add(&vpe_device); | ||
124 | if (err) { | ||
125 | pr_err("Adding vpe_device failed\n"); | ||
126 | goto out_class; | ||
127 | } | ||
128 | |||
129 | t = alloc_tc(aprp_cpu_index()); | ||
130 | if (!t) { | ||
131 | pr_warn("VPE: unable to allocate TC\n"); | ||
132 | err = -ENOMEM; | ||
133 | goto out_dev; | ||
134 | } | ||
135 | |||
136 | /* VPE */ | ||
137 | v = alloc_vpe(aprp_cpu_index()); | ||
138 | if (v == NULL) { | ||
139 | pr_warn("VPE: unable to allocate VPE\n"); | ||
140 | kfree(t); | ||
141 | err = -ENOMEM; | ||
142 | goto out_dev; | ||
143 | } | ||
144 | |||
145 | v->ntcs = 1; | ||
146 | |||
147 | /* add the tc to the list of this vpe's tc's. */ | ||
148 | list_add(&t->tc, &v->tc); | ||
149 | |||
150 | /* TC */ | ||
151 | t->pvpe = v; /* set the parent vpe */ | ||
152 | |||
153 | return 0; | ||
154 | |||
155 | out_dev: | ||
156 | device_del(&vpe_device); | ||
157 | |||
158 | out_class: | ||
159 | put_device(&vpe_device); | ||
160 | class_unregister(&vpe_class); | ||
161 | |||
162 | out_chrdev: | ||
163 | unregister_chrdev(major, VPE_MODULE_NAME); | ||
164 | |||
165 | return err; | ||
166 | } | ||
167 | |||
168 | void __exit vpe_module_exit(void) | ||
169 | { | ||
170 | struct vpe *v, *n; | ||
171 | |||
172 | device_unregister(&vpe_device); | ||
173 | class_unregister(&vpe_class); | ||
174 | unregister_chrdev(major, VPE_MODULE_NAME); | ||
175 | |||
176 | /* No locking needed here */ | ||
177 | list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) | ||
178 | if (v->state != VPE_STATE_UNUSED) | ||
179 | release_vpe(v); | ||
180 | } | ||
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c new file mode 100644 index 000000000..9fd7cd48e --- /dev/null +++ b/arch/mips/kernel/vpe-mt.c | |||
@@ -0,0 +1,521 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
8 | */ | ||
9 | #include <linux/kernel.h> | ||
10 | #include <linux/device.h> | ||
11 | #include <linux/fs.h> | ||
12 | #include <linux/slab.h> | ||
13 | #include <linux/export.h> | ||
14 | |||
15 | #include <asm/mipsregs.h> | ||
16 | #include <asm/mipsmtregs.h> | ||
17 | #include <asm/mips_mt.h> | ||
18 | #include <asm/vpe.h> | ||
19 | |||
20 | static int major; | ||
21 | |||
22 | /* The number of TCs and VPEs physically available on the core */ | ||
23 | static int hw_tcs, hw_vpes; | ||
24 | |||
25 | /* We are prepared, so configure and start the VPE... */ | ||
26 | int vpe_run(struct vpe *v) | ||
27 | { | ||
28 | unsigned long flags, val, dmt_flag; | ||
29 | struct vpe_notifications *notifier; | ||
30 | unsigned int vpeflags; | ||
31 | struct tc *t; | ||
32 | |||
33 | /* check we are the Master VPE */ | ||
34 | local_irq_save(flags); | ||
35 | val = read_c0_vpeconf0(); | ||
36 | if (!(val & VPECONF0_MVP)) { | ||
37 | pr_warn("VPE loader: only Master VPE's are able to config MT\n"); | ||
38 | local_irq_restore(flags); | ||
39 | |||
40 | return -1; | ||
41 | } | ||
42 | |||
43 | dmt_flag = dmt(); | ||
44 | vpeflags = dvpe(); | ||
45 | |||
46 | if (list_empty(&v->tc)) { | ||
47 | evpe(vpeflags); | ||
48 | emt(dmt_flag); | ||
49 | local_irq_restore(flags); | ||
50 | |||
51 | pr_warn("VPE loader: No TC's associated with VPE %d\n", | ||
52 | v->minor); | ||
53 | |||
54 | return -ENOEXEC; | ||
55 | } | ||
56 | |||
57 | t = list_first_entry(&v->tc, struct tc, tc); | ||
58 | |||
59 | /* Put MVPE's into 'configuration state' */ | ||
60 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
61 | |||
62 | settc(t->index); | ||
63 | |||
64 | /* should check it is halted, and not activated */ | ||
65 | if ((read_tc_c0_tcstatus() & TCSTATUS_A) || | ||
66 | !(read_tc_c0_tchalt() & TCHALT_H)) { | ||
67 | evpe(vpeflags); | ||
68 | emt(dmt_flag); | ||
69 | local_irq_restore(flags); | ||
70 | |||
71 | pr_warn("VPE loader: TC %d is already active!\n", | ||
72 | t->index); | ||
73 | |||
74 | return -ENOEXEC; | ||
75 | } | ||
76 | |||
77 | /* | ||
78 | * Write the address we want it to start running from in the TCPC | ||
79 | * register. | ||
80 | */ | ||
81 | write_tc_c0_tcrestart((unsigned long)v->__start); | ||
82 | write_tc_c0_tccontext((unsigned long)0); | ||
83 | |||
84 | /* | ||
85 | * Mark the TC as activated, not interrupt exempt and not dynamically | ||
86 | * allocatable | ||
87 | */ | ||
88 | val = read_tc_c0_tcstatus(); | ||
89 | val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A; | ||
90 | write_tc_c0_tcstatus(val); | ||
91 | |||
92 | write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H); | ||
93 | |||
94 | /* | ||
95 | * The sde-kit passes 'memsize' to __start in $a3, so set something | ||
96 | * here... Or set $a3 to zero and define DFLT_STACK_SIZE and | ||
97 | * DFLT_HEAP_SIZE when you compile your program | ||
98 | */ | ||
99 | mttgpr(6, v->ntcs); | ||
100 | mttgpr(7, physical_memsize); | ||
101 | |||
102 | /* set up VPE1 */ | ||
103 | /* | ||
104 | * bind the TC to VPE 1 as late as possible so we only have the final | ||
105 | * VPE registers to set up, and so an EJTAG probe can trigger on it | ||
106 | */ | ||
107 | write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1); | ||
108 | |||
109 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA)); | ||
110 | |||
111 | back_to_back_c0_hazard(); | ||
112 | |||
113 | /* Set up the XTC bit in vpeconf0 to point at our tc */ | ||
114 | write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC)) | ||
115 | | (t->index << VPECONF0_XTC_SHIFT)); | ||
116 | |||
117 | back_to_back_c0_hazard(); | ||
118 | |||
119 | /* enable this VPE */ | ||
120 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA); | ||
121 | |||
122 | /* clear out any left overs from a previous program */ | ||
123 | write_vpe_c0_status(0); | ||
124 | write_vpe_c0_cause(0); | ||
125 | |||
126 | /* take system out of configuration state */ | ||
127 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
128 | |||
129 | /* | ||
130 | * SMVP kernels manage VPE enable independently, but uniprocessor | ||
131 | * kernels need to turn it on, even if that wasn't the pre-dvpe() state. | ||
132 | */ | ||
133 | #ifdef CONFIG_SMP | ||
134 | evpe(vpeflags); | ||
135 | #else | ||
136 | evpe(EVPE_ENABLE); | ||
137 | #endif | ||
138 | emt(dmt_flag); | ||
139 | local_irq_restore(flags); | ||
140 | |||
141 | list_for_each_entry(notifier, &v->notify, list) | ||
142 | notifier->start(VPE_MODULE_MINOR); | ||
143 | |||
144 | return 0; | ||
145 | } | ||
146 | |||
147 | void cleanup_tc(struct tc *tc) | ||
148 | { | ||
149 | unsigned long flags; | ||
150 | unsigned int mtflags, vpflags; | ||
151 | int tmp; | ||
152 | |||
153 | local_irq_save(flags); | ||
154 | mtflags = dmt(); | ||
155 | vpflags = dvpe(); | ||
156 | /* Put MVPE's into 'configuration state' */ | ||
157 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
158 | |||
159 | settc(tc->index); | ||
160 | tmp = read_tc_c0_tcstatus(); | ||
161 | |||
162 | /* mark not allocated and not dynamically allocatable */ | ||
163 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
164 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
165 | write_tc_c0_tcstatus(tmp); | ||
166 | |||
167 | write_tc_c0_tchalt(TCHALT_H); | ||
168 | mips_ihb(); | ||
169 | |||
170 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
171 | evpe(vpflags); | ||
172 | emt(mtflags); | ||
173 | local_irq_restore(flags); | ||
174 | } | ||
175 | |||
176 | /* module wrapper entry points */ | ||
177 | /* give me a vpe */ | ||
178 | void *vpe_alloc(void) | ||
179 | { | ||
180 | int i; | ||
181 | struct vpe *v; | ||
182 | |||
183 | /* find a vpe */ | ||
184 | for (i = 1; i < MAX_VPES; i++) { | ||
185 | v = get_vpe(i); | ||
186 | if (v != NULL) { | ||
187 | v->state = VPE_STATE_INUSE; | ||
188 | return v; | ||
189 | } | ||
190 | } | ||
191 | return NULL; | ||
192 | } | ||
193 | EXPORT_SYMBOL(vpe_alloc); | ||
194 | |||
195 | /* start running from here */ | ||
196 | int vpe_start(void *vpe, unsigned long start) | ||
197 | { | ||
198 | struct vpe *v = vpe; | ||
199 | |||
200 | v->__start = start; | ||
201 | return vpe_run(v); | ||
202 | } | ||
203 | EXPORT_SYMBOL(vpe_start); | ||
204 | |||
205 | /* halt it for now */ | ||
206 | int vpe_stop(void *vpe) | ||
207 | { | ||
208 | struct vpe *v = vpe; | ||
209 | struct tc *t; | ||
210 | unsigned int evpe_flags; | ||
211 | |||
212 | evpe_flags = dvpe(); | ||
213 | |||
214 | t = list_entry(v->tc.next, struct tc, tc); | ||
215 | if (t != NULL) { | ||
216 | settc(t->index); | ||
217 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
218 | } | ||
219 | |||
220 | evpe(evpe_flags); | ||
221 | |||
222 | return 0; | ||
223 | } | ||
224 | EXPORT_SYMBOL(vpe_stop); | ||
225 | |||
226 | /* I've done with it thank you */ | ||
227 | int vpe_free(void *vpe) | ||
228 | { | ||
229 | struct vpe *v = vpe; | ||
230 | struct tc *t; | ||
231 | unsigned int evpe_flags; | ||
232 | |||
233 | t = list_entry(v->tc.next, struct tc, tc); | ||
234 | if (t == NULL) | ||
235 | return -ENOEXEC; | ||
236 | |||
237 | evpe_flags = dvpe(); | ||
238 | |||
239 | /* Put MVPE's into 'configuration state' */ | ||
240 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
241 | |||
242 | settc(t->index); | ||
243 | write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA); | ||
244 | |||
245 | /* halt the TC */ | ||
246 | write_tc_c0_tchalt(TCHALT_H); | ||
247 | mips_ihb(); | ||
248 | |||
249 | /* mark the TC unallocated */ | ||
250 | write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A); | ||
251 | |||
252 | v->state = VPE_STATE_UNUSED; | ||
253 | |||
254 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
255 | evpe(evpe_flags); | ||
256 | |||
257 | return 0; | ||
258 | } | ||
259 | EXPORT_SYMBOL(vpe_free); | ||
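vpe_alloc(), vpe_start(), vpe_stop() and vpe_free() form the in-kernel client API exported above. A minimal sketch of how a hypothetical kernel-side user might drive it (run_sp_program and start_addr are illustrative names, not part of this file):

#include <linux/errno.h>
#include <asm/vpe.h>

/* illustrative only: drive the exported vpe_* entry points */
static int run_sp_program(unsigned long start_addr)
{
	void *v = vpe_alloc();		/* claim a spare VPE */

	if (!v)
		return -ENODEV;

	if (vpe_start(v, start_addr)) {	/* configure and start it */
		vpe_free(v);
		return -EIO;
	}

	/* ... later, tear it down again ... */
	vpe_stop(v);
	return vpe_free(v);
}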
260 | |||
261 | static ssize_t store_kill(struct device *dev, struct device_attribute *attr, | ||
262 | const char *buf, size_t len) | ||
263 | { | ||
264 | struct vpe *vpe = get_vpe(aprp_cpu_index()); | ||
265 | struct vpe_notifications *notifier; | ||
266 | |||
267 | list_for_each_entry(notifier, &vpe->notify, list) | ||
268 | notifier->stop(aprp_cpu_index()); | ||
269 | |||
270 | release_progmem(vpe->load_addr); | ||
271 | cleanup_tc(get_tc(aprp_cpu_index())); | ||
272 | vpe_stop(vpe); | ||
273 | vpe_free(vpe); | ||
274 | |||
275 | return len; | ||
276 | } | ||
277 | static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill); | ||
278 | |||
279 | static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr, | ||
280 | char *buf) | ||
281 | { | ||
282 | struct vpe *vpe = get_vpe(aprp_cpu_index()); | ||
283 | |||
284 | return sprintf(buf, "%d\n", vpe->ntcs); | ||
285 | } | ||
286 | |||
287 | static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr, | ||
288 | const char *buf, size_t len) | ||
289 | { | ||
290 | struct vpe *vpe = get_vpe(aprp_cpu_index()); | ||
291 | unsigned long new; | ||
292 | int ret; | ||
293 | |||
294 | ret = kstrtoul(buf, 0, &new); | ||
295 | if (ret < 0) | ||
296 | return ret; | ||
297 | |||
298 | if (new == 0 || new > (hw_tcs - aprp_cpu_index())) | ||
299 | return -EINVAL; | ||
300 | |||
301 | vpe->ntcs = new; | ||
302 | |||
303 | return len; | ||
304 | } | ||
305 | static DEVICE_ATTR_RW(ntcs); | ||
306 | |||
307 | static struct attribute *vpe_attrs[] = { | ||
308 | &dev_attr_kill.attr, | ||
309 | &dev_attr_ntcs.attr, | ||
310 | NULL, | ||
311 | }; | ||
312 | ATTRIBUTE_GROUPS(vpe); | ||
313 | |||
314 | static void vpe_device_release(struct device *cd) | ||
315 | { | ||
316 | } | ||
317 | |||
318 | static struct class vpe_class = { | ||
319 | .name = "vpe", | ||
320 | .owner = THIS_MODULE, | ||
321 | .dev_release = vpe_device_release, | ||
322 | .dev_groups = vpe_groups, | ||
323 | }; | ||
324 | |||
325 | static struct device vpe_device; | ||
326 | |||
327 | int __init vpe_module_init(void) | ||
328 | { | ||
329 | unsigned int mtflags, vpflags; | ||
330 | unsigned long flags, val; | ||
331 | struct vpe *v = NULL; | ||
332 | struct tc *t; | ||
333 | int tc, err; | ||
334 | |||
335 | if (!cpu_has_mipsmt) { | ||
336 | pr_warn("VPE loader: not a MIPS MT capable processor\n"); | ||
337 | return -ENODEV; | ||
338 | } | ||
339 | |||
340 | if (vpelimit == 0) { | ||
341 | pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n" | ||
342 | "Pass maxvpes=<n> argument as kernel argument\n"); | ||
343 | |||
344 | return -ENODEV; | ||
345 | } | ||
346 | |||
347 | if (aprp_cpu_index() == 0) { | ||
348 | pr_warn("No TCs reserved for AP/SP, not initialize VPE loader\n" | ||
349 | "Pass maxtcs=<n> argument as kernel argument\n"); | ||
350 | |||
351 | return -ENODEV; | ||
352 | } | ||
353 | |||
354 | major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops); | ||
355 | if (major < 0) { | ||
356 | pr_warn("VPE loader: unable to register character device\n"); | ||
357 | return major; | ||
358 | } | ||
359 | |||
360 | err = class_register(&vpe_class); | ||
361 | if (err) { | ||
362 | pr_err("vpe_class registration failed\n"); | ||
363 | goto out_chrdev; | ||
364 | } | ||
365 | |||
366 | device_initialize(&vpe_device); | ||
367 | vpe_device.class = &vpe_class, | ||
368 | vpe_device.parent = NULL, | ||
369 | dev_set_name(&vpe_device, "vpe1"); | ||
370 | vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR); | ||
371 | err = device_add(&vpe_device); | ||
372 | if (err) { | ||
373 | pr_err("Adding vpe_device failed\n"); | ||
374 | goto out_class; | ||
375 | } | ||
376 | |||
377 | local_irq_save(flags); | ||
378 | mtflags = dmt(); | ||
379 | vpflags = dvpe(); | ||
380 | |||
381 | /* Put MVPE's into 'configuration state' */ | ||
382 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
383 | |||
384 | val = read_c0_mvpconf0(); | ||
385 | hw_tcs = (val & MVPCONF0_PTC) + 1; | ||
386 | hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; | ||
387 | |||
388 | for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) { | ||
389 | /* | ||
390 | * Must re-enable multithreading temporarily or in case we | ||
391 | * reschedule send IPIs or similar we might hang. | ||
392 | */ | ||
393 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
394 | evpe(vpflags); | ||
395 | emt(mtflags); | ||
396 | local_irq_restore(flags); | ||
397 | t = alloc_tc(tc); | ||
398 | if (!t) { | ||
399 | err = -ENOMEM; | ||
400 | goto out_dev; | ||
401 | } | ||
402 | |||
403 | local_irq_save(flags); | ||
404 | mtflags = dmt(); | ||
405 | vpflags = dvpe(); | ||
406 | set_c0_mvpcontrol(MVPCONTROL_VPC); | ||
407 | |||
408 | /* VPE's */ | ||
409 | if (tc < hw_tcs) { | ||
410 | settc(tc); | ||
411 | |||
412 | v = alloc_vpe(tc); | ||
413 | if (v == NULL) { | ||
414 | pr_warn("VPE: unable to allocate VPE\n"); | ||
415 | goto out_reenable; | ||
416 | } | ||
417 | |||
418 | v->ntcs = hw_tcs - aprp_cpu_index(); | ||
419 | |||
420 | /* add the tc to the list of this vpe's tc's. */ | ||
421 | list_add(&t->tc, &v->tc); | ||
422 | |||
423 | /* deactivate all but vpe0 */ | ||
424 | if (tc >= aprp_cpu_index()) { | ||
425 | unsigned long tmp = read_vpe_c0_vpeconf0(); | ||
426 | |||
427 | tmp &= ~VPECONF0_VPA; | ||
428 | |||
429 | /* master VPE */ | ||
430 | tmp |= VPECONF0_MVP; | ||
431 | write_vpe_c0_vpeconf0(tmp); | ||
432 | } | ||
433 | |||
434 | /* disable multi-threading with TC's */ | ||
435 | write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & | ||
436 | ~VPECONTROL_TE); | ||
437 | |||
438 | if (tc >= vpelimit) { | ||
439 | /* | ||
440 | * Set config to be the same as vpe0, | ||
441 | * particularly kseg0 coherency alg | ||
442 | */ | ||
443 | write_vpe_c0_config(read_c0_config()); | ||
444 | } | ||
445 | } | ||
446 | |||
447 | /* TC's */ | ||
448 | t->pvpe = v; /* set the parent vpe */ | ||
449 | |||
450 | if (tc >= aprp_cpu_index()) { | ||
451 | unsigned long tmp; | ||
452 | |||
453 | settc(tc); | ||
454 | |||
455 | /* | ||
456 | * A TC that is bound to any other VPE gets bound to | ||
457 | * VPE0, ideally I'd like to make it homeless but it | ||
458 | * doesn't appear to let me bind a TC to a non-existent | ||
459 | * VPE. Which is perfectly reasonable. | ||
460 | * | ||
461 | * The (un)bound state is visible to an EJTAG probe so | ||
462 | * may notify GDB... | ||
463 | */ | ||
464 | tmp = read_tc_c0_tcbind(); | ||
465 | if (tmp & TCBIND_CURVPE) { | ||
466 | /* TC is bound to a VPE other than VPE0 */ | ||
467 | write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE); | ||
468 | |||
469 | t->pvpe = get_vpe(0); /* set the parent vpe */ | ||
470 | } | ||
471 | |||
472 | /* halt the TC */ | ||
473 | write_tc_c0_tchalt(TCHALT_H); | ||
474 | mips_ihb(); | ||
475 | |||
476 | tmp = read_tc_c0_tcstatus(); | ||
477 | |||
478 | /* mark not activated and not dynamically allocatable */ | ||
479 | tmp &= ~(TCSTATUS_A | TCSTATUS_DA); | ||
480 | tmp |= TCSTATUS_IXMT; /* interrupt exempt */ | ||
481 | write_tc_c0_tcstatus(tmp); | ||
482 | } | ||
483 | } | ||
484 | |||
485 | out_reenable: | ||
486 | /* release config state */ | ||
487 | clear_c0_mvpcontrol(MVPCONTROL_VPC); | ||
488 | |||
489 | evpe(vpflags); | ||
490 | emt(mtflags); | ||
491 | local_irq_restore(flags); | ||
492 | |||
493 | return 0; | ||
494 | |||
495 | out_dev: | ||
496 | device_del(&vpe_device); | ||
497 | |||
498 | out_class: | ||
499 | put_device(&vpe_device); | ||
500 | class_unregister(&vpe_class); | ||
501 | |||
502 | out_chrdev: | ||
503 | unregister_chrdev(major, VPE_MODULE_NAME); | ||
504 | |||
505 | return err; | ||
506 | } | ||
507 | |||
508 | void __exit vpe_module_exit(void) | ||
509 | { | ||
510 | struct vpe *v, *n; | ||
511 | |||
512 | device_unregister(&vpe_device); | ||
513 | class_unregister(&vpe_class); | ||
514 | unregister_chrdev(major, VPE_MODULE_NAME); | ||
515 | |||
516 | /* No locking needed here */ | ||
517 | list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) { | ||
518 | if (v->state != VPE_STATE_UNUSED) | ||
519 | release_vpe(v); | ||
520 | } | ||
521 | } | ||
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c new file mode 100644 index 000000000..d0d832ab3 --- /dev/null +++ b/arch/mips/kernel/vpe.c | |||
@@ -0,0 +1,933 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved. | ||
7 | * Copyright (C) 2013 Imagination Technologies Ltd. | ||
8 | * | ||
9 | * VPE support module for loading a MIPS SP program into VPE1. The SP | ||
10 | * environment is rather simple since there are no TLBs. It needs | ||
11 | * to be relocatable (or partially linked). Initialize your stack in | ||
12 | * the startup-code. The loader looks for the symbol __start and sets | ||
13 | * up the execution to resume from there. To load and run, simply cat | ||
14 | * an SP 'binary' to the /dev/vpe1 device. | ||
15 | */ | ||
16 | #include <linux/kernel.h> | ||
17 | #include <linux/device.h> | ||
18 | #include <linux/fs.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/slab.h> | ||
21 | #include <linux/list.h> | ||
22 | #include <linux/vmalloc.h> | ||
23 | #include <linux/elf.h> | ||
24 | #include <linux/seq_file.h> | ||
25 | #include <linux/syscalls.h> | ||
26 | #include <linux/moduleloader.h> | ||
27 | #include <linux/interrupt.h> | ||
28 | #include <linux/poll.h> | ||
29 | #include <linux/memblock.h> | ||
30 | #include <asm/mipsregs.h> | ||
31 | #include <asm/mipsmtregs.h> | ||
32 | #include <asm/cacheflush.h> | ||
33 | #include <linux/atomic.h> | ||
34 | #include <asm/mips_mt.h> | ||
35 | #include <asm/processor.h> | ||
36 | #include <asm/vpe.h> | ||
37 | |||
38 | #ifndef ARCH_SHF_SMALL | ||
39 | #define ARCH_SHF_SMALL 0 | ||
40 | #endif | ||
41 | |||
42 | /* If this is set, the section belongs in the init part of the module */ | ||
43 | #define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1)) | ||
44 | |||
45 | struct vpe_control vpecontrol = { | ||
46 | .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock), | ||
47 | .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list), | ||
48 | .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock), | ||
49 | .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list) | ||
50 | }; | ||
51 | |||
52 | /* get the vpe associated with this minor */ | ||
53 | struct vpe *get_vpe(int minor) | ||
54 | { | ||
55 | struct vpe *res, *v; | ||
56 | |||
57 | if (!cpu_has_mipsmt) | ||
58 | return NULL; | ||
59 | |||
60 | res = NULL; | ||
61 | spin_lock(&vpecontrol.vpe_list_lock); | ||
62 | list_for_each_entry(v, &vpecontrol.vpe_list, list) { | ||
63 | if (v->minor == VPE_MODULE_MINOR) { | ||
64 | res = v; | ||
65 | break; | ||
66 | } | ||
67 | } | ||
68 | spin_unlock(&vpecontrol.vpe_list_lock); | ||
69 | |||
70 | return res; | ||
71 | } | ||
72 | |||
73 | /* get the tc associated with this index */ | ||
74 | struct tc *get_tc(int index) | ||
75 | { | ||
76 | struct tc *res, *t; | ||
77 | |||
78 | res = NULL; | ||
79 | spin_lock(&vpecontrol.tc_list_lock); | ||
80 | list_for_each_entry(t, &vpecontrol.tc_list, list) { | ||
81 | if (t->index == index) { | ||
82 | res = t; | ||
83 | break; | ||
84 | } | ||
85 | } | ||
86 | spin_unlock(&vpecontrol.tc_list_lock); | ||
87 | |||
88 | return res; | ||
89 | } | ||
90 | |||
91 | /* allocate a vpe and associate it with this minor (or index) */ | ||
92 | struct vpe *alloc_vpe(int minor) | ||
93 | { | ||
94 | struct vpe *v; | ||
95 | |||
96 | v = kzalloc(sizeof(struct vpe), GFP_KERNEL); | ||
97 | if (v == NULL) | ||
98 | goto out; | ||
99 | |||
100 | INIT_LIST_HEAD(&v->tc); | ||
101 | spin_lock(&vpecontrol.vpe_list_lock); | ||
102 | list_add_tail(&v->list, &vpecontrol.vpe_list); | ||
103 | spin_unlock(&vpecontrol.vpe_list_lock); | ||
104 | |||
105 | INIT_LIST_HEAD(&v->notify); | ||
106 | v->minor = VPE_MODULE_MINOR; | ||
107 | |||
108 | out: | ||
109 | return v; | ||
110 | } | ||
111 | |||
112 | /* allocate a tc. At startup only tc0 is running, all other can be halted. */ | ||
113 | struct tc *alloc_tc(int index) | ||
114 | { | ||
115 | struct tc *tc; | ||
116 | |||
117 | tc = kzalloc(sizeof(struct tc), GFP_KERNEL); | ||
118 | if (tc == NULL) | ||
119 | goto out; | ||
120 | |||
121 | INIT_LIST_HEAD(&tc->tc); | ||
122 | tc->index = index; | ||
123 | |||
124 | spin_lock(&vpecontrol.tc_list_lock); | ||
125 | list_add_tail(&tc->list, &vpecontrol.tc_list); | ||
126 | spin_unlock(&vpecontrol.tc_list_lock); | ||
127 | |||
128 | out: | ||
129 | return tc; | ||
130 | } | ||
131 | |||
132 | /* clean up and free everything */ | ||
133 | void release_vpe(struct vpe *v) | ||
134 | { | ||
135 | list_del(&v->list); | ||
136 | if (v->load_addr) | ||
137 | release_progmem(v->load_addr); | ||
138 | kfree(v); | ||
139 | } | ||
140 | |||
141 | /* Find some VPE program space */ | ||
142 | void *alloc_progmem(unsigned long len) | ||
143 | { | ||
144 | void *addr; | ||
145 | |||
146 | #ifdef CONFIG_MIPS_VPE_LOADER_TOM | ||
147 | /* | ||
148 | * This means you must tell Linux to use less memory than you | ||
149 | * physically have, for example by passing a mem= boot argument. | ||
150 | */ | ||
151 | addr = pfn_to_kaddr(max_low_pfn); | ||
152 | memset(addr, 0, len); | ||
153 | #else | ||
154 | /* simply grab some memory for now */ | ||
155 | addr = kzalloc(len, GFP_KERNEL); | ||
156 | #endif | ||
157 | |||
158 | return addr; | ||
159 | } | ||
160 | |||
161 | void release_progmem(void *ptr) | ||
162 | { | ||
163 | #ifndef CONFIG_MIPS_VPE_LOADER_TOM | ||
164 | kfree(ptr); | ||
165 | #endif | ||
166 | } | ||
167 | |||
168 | /* Update size with this section: return offset. */ | ||
169 | static long get_offset(unsigned long *size, Elf_Shdr *sechdr) | ||
170 | { | ||
171 | long ret; | ||
172 | |||
173 | ret = ALIGN(*size, sechdr->sh_addralign ? : 1); | ||
174 | *size = ret + sechdr->sh_size; | ||
175 | return ret; | ||
176 | } | ||
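A host-side sketch of this running-offset layout with a few made-up sections, showing how each offset is the aligned running size:

#include <stdio.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long size = 0;
	struct { unsigned long align, len; } secs[] = {
		{ 16, 0x123 }, { 8, 0x40 }, { 4, 0x9 },
	};
	unsigned int i;

	for (i = 0; i < sizeof(secs) / sizeof(secs[0]); i++) {
		unsigned long off = ALIGN_UP(size, secs[i].align);

		size = off + secs[i].len;	/* the get_offset() update */
		printf("section %u at %#lx, size now %#lx\n", i, off, size);
	}
	return 0;
}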
177 | |||
178 | /* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld | ||
179 | might -- code, read-only data, read-write data, small data. Tally | ||
180 | sizes, and place the offsets into sh_entsize fields: high bit means it | ||
181 | belongs in init. */ | ||
182 | static void layout_sections(struct module *mod, const Elf_Ehdr *hdr, | ||
183 | Elf_Shdr *sechdrs, const char *secstrings) | ||
184 | { | ||
185 | static unsigned long const masks[][2] = { | ||
186 | /* NOTE: all executable code must be the first section | ||
187 | * in this array; otherwise modify the text_size | ||
188 | * finder in the two loops below */ | ||
189 | {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL}, | ||
190 | {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL}, | ||
191 | {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL}, | ||
192 | {ARCH_SHF_SMALL | SHF_ALLOC, 0} | ||
193 | }; | ||
194 | unsigned int m, i; | ||
195 | |||
196 | for (i = 0; i < hdr->e_shnum; i++) | ||
197 | sechdrs[i].sh_entsize = ~0UL; | ||
198 | |||
199 | for (m = 0; m < ARRAY_SIZE(masks); ++m) { | ||
200 | for (i = 0; i < hdr->e_shnum; ++i) { | ||
201 | Elf_Shdr *s = &sechdrs[i]; | ||
202 | |||
203 | if ((s->sh_flags & masks[m][0]) != masks[m][0] | ||
204 | || (s->sh_flags & masks[m][1]) | ||
205 | || s->sh_entsize != ~0UL) | ||
206 | continue; | ||
207 | s->sh_entsize = | ||
208 | get_offset((unsigned long *)&mod->core_layout.size, s); | ||
209 | } | ||
210 | |||
211 | if (m == 0) | ||
212 | mod->core_layout.text_size = mod->core_layout.size; | ||
213 | |||
214 | } | ||
215 | } | ||
216 | |||
217 | /* from module-elf32.c, but subverted a little */ | ||
218 | |||
219 | struct mips_hi16 { | ||
220 | struct mips_hi16 *next; | ||
221 | Elf32_Addr *addr; | ||
222 | Elf32_Addr value; | ||
223 | }; | ||
224 | |||
225 | static struct mips_hi16 *mips_hi16_list; | ||
226 | static unsigned int gp_offs, gp_addr; | ||
227 | |||
228 | static int apply_r_mips_none(struct module *me, uint32_t *location, | ||
229 | Elf32_Addr v) | ||
230 | { | ||
231 | return 0; | ||
232 | } | ||
233 | |||
234 | static int apply_r_mips_gprel16(struct module *me, uint32_t *location, | ||
235 | Elf32_Addr v) | ||
236 | { | ||
237 | int rel; | ||
238 | |||
239 | if (!(*location & 0xffff)) { | ||
240 | rel = (int)v - gp_addr; | ||
241 | } else { | ||
242 | /* .sbss + gp(relative) + offset */ | ||
243 | /* kludge! */ | ||
244 | rel = (int)(short)((int)v + gp_offs + | ||
245 | (int)(short)(*location & 0xffff) - gp_addr); | ||
246 | } | ||
247 | |||
248 | if ((rel > 32768) || (rel < -32768)) { | ||
249 | pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n", | ||
250 | rel); | ||
251 | return -ENOEXEC; | ||
252 | } | ||
253 | |||
254 | *location = (*location & 0xffff0000) | (rel & 0xffff); | ||
255 | |||
256 | return 0; | ||
257 | } | ||
258 | |||
259 | static int apply_r_mips_pc16(struct module *me, uint32_t *location, | ||
260 | Elf32_Addr v) | ||
261 | { | ||
262 | int rel; | ||
263 | rel = (((unsigned int)v - (unsigned int)location)); | ||
264 | rel >>= 2; /* because the offset is in _instructions_ not bytes. */ | ||
265 | rel -= 1; /* and one instruction less due to the branch delay slot. */ | ||
266 | |||
267 | if ((rel > 32768) || (rel < -32768)) { | ||
268 | pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n", | ||
269 | rel); | ||
270 | return -ENOEXEC; | ||
271 | } | ||
272 | |||
273 | *location = (*location & 0xffff0000) | (rel & 0xffff); | ||
274 | |||
275 | return 0; | ||
276 | } | ||
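The arithmetic above is the whole of R_MIPS_PC16: the 16-bit field counts instructions rather than bytes, measured from the delay slot. A host-side sketch with assumed addresses:

#include <stdio.h>

int main(void)
{
	unsigned int location = 0x1000;	/* assumed address of the branch */
	unsigned int v = 0x1020;	/* assumed branch target */
	int rel = (int)(v - location);

	rel >>= 2;	/* byte offset -> instruction count */
	rel -= 1;	/* one less for the branch delay slot */
	printf("encoded offset: %d (%#x)\n", rel, rel & 0xffff);
	return 0;
}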
277 | |||
278 | static int apply_r_mips_32(struct module *me, uint32_t *location, | ||
279 | Elf32_Addr v) | ||
280 | { | ||
281 | *location += v; | ||
282 | |||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static int apply_r_mips_26(struct module *me, uint32_t *location, | ||
287 | Elf32_Addr v) | ||
288 | { | ||
289 | if (v % 4) { | ||
290 | pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n"); | ||
291 | return -ENOEXEC; | ||
292 | } | ||
293 | |||
294 | /* | ||
295 | * Not desperately convinced this is a good check of an overflow condition | ||
296 | * anyway. But it gets in the way of handling undefined weak symbols which | ||
297 | * we want to set to zero. | ||
298 | * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) { | ||
299 | * printk(KERN_ERR | ||
300 | * "module %s: relocation overflow\n", | ||
301 | * me->name); | ||
302 | * return -ENOEXEC; | ||
303 | * } | ||
304 | */ | ||
305 | |||
306 | *location = (*location & ~0x03ffffff) | | ||
307 | ((*location + (v >> 2)) & 0x03ffffff); | ||
308 | return 0; | ||
309 | } | ||
310 | |||
311 | static int apply_r_mips_hi16(struct module *me, uint32_t *location, | ||
312 | Elf32_Addr v) | ||
313 | { | ||
314 | struct mips_hi16 *n; | ||
315 | |||
316 | /* | ||
317 | * We cannot relocate this one now because we don't know the value of | ||
318 | * the carry we need to add. Save the information, and let LO16 do the | ||
319 | * actual relocation. | ||
320 | */ | ||
321 | n = kmalloc(sizeof(*n), GFP_KERNEL); | ||
322 | if (!n) | ||
323 | return -ENOMEM; | ||
324 | |||
325 | n->addr = location; | ||
326 | n->value = v; | ||
327 | n->next = mips_hi16_list; | ||
328 | mips_hi16_list = n; | ||
329 | |||
330 | return 0; | ||
331 | } | ||
332 | |||
333 | static int apply_r_mips_lo16(struct module *me, uint32_t *location, | ||
334 | Elf32_Addr v) | ||
335 | { | ||
336 | unsigned long insnlo = *location; | ||
337 | Elf32_Addr val, vallo; | ||
338 | struct mips_hi16 *l, *next; | ||
339 | |||
340 | /* Sign extend the addend we extract from the lo insn. */ | ||
341 | vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000; | ||
342 | |||
343 | if (mips_hi16_list != NULL) { | ||
344 | |||
345 | l = mips_hi16_list; | ||
346 | while (l != NULL) { | ||
347 | unsigned long insn; | ||
348 | |||
349 | /* | ||
350 | * The value for the HI16 had best be the same. | ||
351 | */ | ||
352 | if (v != l->value) { | ||
353 | pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n"); | ||
354 | goto out_free; | ||
355 | } | ||
356 | |||
357 | /* | ||
358 | * Do the HI16 relocation. Note that we actually don't | ||
359 | * need to know anything about the LO16 itself, except | ||
360 | * where to find the low 16 bits of the addend needed | ||
361 | * by the LO16. | ||
362 | */ | ||
363 | insn = *l->addr; | ||
364 | val = ((insn & 0xffff) << 16) + vallo; | ||
365 | val += v; | ||
366 | |||
367 | /* | ||
368 | * Account for the sign extension that will happen in | ||
369 | * the low bits. | ||
370 | */ | ||
371 | val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff; | ||
372 | |||
373 | insn = (insn & ~0xffff) | val; | ||
374 | *l->addr = insn; | ||
375 | |||
376 | next = l->next; | ||
377 | kfree(l); | ||
378 | l = next; | ||
379 | } | ||
380 | |||
381 | mips_hi16_list = NULL; | ||
382 | } | ||
383 | |||
384 | /* | ||
385 | * Ok, we're done with the HI16 relocs. Now deal with the LO16. | ||
386 | */ | ||
387 | val = v + vallo; | ||
388 | insnlo = (insnlo & ~0xffff) | (val & 0xffff); | ||
389 | *location = insnlo; | ||
390 | |||
391 | return 0; | ||
392 | |||
393 | out_free: | ||
394 | while (l != NULL) { | ||
395 | next = l->next; | ||
396 | kfree(l); | ||
397 | l = next; | ||
398 | } | ||
399 | mips_hi16_list = NULL; | ||
400 | |||
401 | return -ENOEXEC; | ||
402 | } | ||
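The HI16/LO16 pairing exists because the processor sign-extends the low half when it adds it back: whenever the low 16 bits are 0x8000 or above, the high half must be incremented by one to compensate. A self-contained illustration of that carry, using the arbitrary example address 0x12349876:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint32_t addr = 0x12349876;   /* arbitrary example address */

        /* Sign-extend the low half the way `addiu` will at run time. */
        int32_t lo = (int32_t)((addr & 0xffff) ^ 0x8000) - 0x8000;

        /* Compensate in the high half when the low half went negative. */
        uint32_t hi = ((addr >> 16) + ((addr & 0x8000) != 0)) & 0xffff;

        /* lui reg, hi ; addiu reg, reg, lo reconstructs the address. */
        uint32_t rebuilt = (hi << 16) + lo;
        printf("hi=0x%04x lo=%d rebuilt=0x%08x\n",
               (unsigned)hi, (int)lo, (unsigned)rebuilt);
        return 0;
    }

Here the low half 0x9876 sign-extends to -26506, so the high half becomes 0x1235 rather than 0x1234; the pair still reconstructs 0x12349876 exactly.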
403 | |||
404 | static int (*reloc_handlers[]) (struct module *me, uint32_t *location, | ||
405 | Elf32_Addr v) = { | ||
406 | [R_MIPS_NONE] = apply_r_mips_none, | ||
407 | [R_MIPS_32] = apply_r_mips_32, | ||
408 | [R_MIPS_26] = apply_r_mips_26, | ||
409 | [R_MIPS_HI16] = apply_r_mips_hi16, | ||
410 | [R_MIPS_LO16] = apply_r_mips_lo16, | ||
411 | [R_MIPS_GPREL16] = apply_r_mips_gprel16, | ||
412 | [R_MIPS_PC16] = apply_r_mips_pc16 | ||
413 | }; | ||
414 | |||
415 | static char *rstrs[] = { | ||
416 | [R_MIPS_NONE] = "MIPS_NONE", | ||
417 | [R_MIPS_32] = "MIPS_32", | ||
418 | [R_MIPS_26] = "MIPS_26", | ||
419 | [R_MIPS_HI16] = "MIPS_HI16", | ||
420 | [R_MIPS_LO16] = "MIPS_LO16", | ||
421 | [R_MIPS_GPREL16] = "MIPS_GPREL16", | ||
422 | [R_MIPS_PC16] = "MIPS_PC16" | ||
423 | }; | ||
424 | |||
425 | static int apply_relocations(Elf32_Shdr *sechdrs, | ||
426 | const char *strtab, | ||
427 | unsigned int symindex, | ||
428 | unsigned int relsec, | ||
429 | struct module *me) | ||
430 | { | ||
431 | Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr; | ||
432 | Elf32_Sym *sym; | ||
433 | uint32_t *location; | ||
434 | unsigned int i; | ||
435 | Elf32_Addr v; | ||
436 | int res; | ||
437 | |||
438 | for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { | ||
439 | Elf32_Word r_info = rel[i].r_info; | ||
440 | |||
441 | /* This is where to make the change */ | ||
442 | location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr | ||
443 | + rel[i].r_offset; | ||
444 | /* This is the symbol it is referring to */ | ||
445 | sym = (Elf32_Sym *)sechdrs[symindex].sh_addr | ||
446 | + ELF32_R_SYM(r_info); | ||
447 | |||
448 | if (!sym->st_value) { | ||
449 | pr_debug("%s: undefined weak symbol %s\n", | ||
450 | me->name, strtab + sym->st_name); | ||
451 | /* just print the warning, don't barf */ | ||
452 | } | ||
453 | |||
454 | v = sym->st_value; | ||
455 | |||
456 | res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v); | ||
457 | if (res) { | ||
458 | char *r = rstrs[ELF32_R_TYPE(r_info)]; | ||
459 | pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n", | ||
460 | rel[i].r_offset, r ? r : "UNKNOWN", | ||
461 | strtab + sym->st_name); | ||
462 | return res; | ||
463 | } | ||
464 | } | ||
465 | |||
466 | return 0; | ||
467 | } | ||
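Each Elf32_Rel entry packs the symbol index and the relocation type into one r_info word; ELF32_R_SYM and ELF32_R_TYPE are just a shift and a truncation. A tiny sketch with an invented r_info value (the macros are the standard ELF32 definitions, repeated to stay self-contained):

    #include <stdio.h>
    #include <stdint.h>

    #define ELF32_R_SYM(i)  ((i) >> 8)
    #define ELF32_R_TYPE(i) ((unsigned char)(i))

    int main(void)
    {
        /* Invented entry: symbol index 42, type 4 (R_MIPS_26). */
        uint32_t r_info = (42u << 8) | 4;

        printf("sym index %u, type %u\n",
               (unsigned)ELF32_R_SYM(r_info), (unsigned)ELF32_R_TYPE(r_info));
        return 0;
    }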
468 | |||
469 | static inline void save_gp_address(unsigned int secbase, unsigned int rel) | ||
470 | { | ||
471 | gp_addr = secbase + rel; | ||
472 | gp_offs = gp_addr - (secbase & 0xffff0000); | ||
473 | } | ||
474 | /* end module-elf32.c */ | ||
475 | |||
476 | /* Change all symbols so that sh_value encodes the pointer directly. */ | ||
477 | static void simplify_symbols(Elf_Shdr *sechdrs, | ||
478 | unsigned int symindex, | ||
479 | const char *strtab, | ||
480 | const char *secstrings, | ||
481 | unsigned int nsecs, struct module *mod) | ||
482 | { | ||
483 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | ||
484 | unsigned long secbase, bssbase = 0; | ||
485 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | ||
486 | int size; | ||
487 | |||
488 | /* find the .bss section for COMMON symbols */ | ||
489 | for (i = 0; i < nsecs; i++) { | ||
490 | if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) { | ||
491 | bssbase = sechdrs[i].sh_addr; | ||
492 | break; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | for (i = 1; i < n; i++) { | ||
497 | switch (sym[i].st_shndx) { | ||
498 | case SHN_COMMON: | ||
499 | /* Allocate space for the symbol in the .bss section. | ||
500 | st_value is currently size. | ||
501 | We want it to have the address of the symbol. */ | ||
502 | |||
503 | size = sym[i].st_value; | ||
504 | sym[i].st_value = bssbase; | ||
505 | |||
506 | bssbase += size; | ||
507 | break; | ||
508 | |||
509 | case SHN_ABS: | ||
510 | /* Don't need to do anything */ | ||
511 | break; | ||
512 | |||
513 | case SHN_UNDEF: | ||
514 | /* ret = -ENOENT; */ | ||
515 | break; | ||
516 | |||
517 | case SHN_MIPS_SCOMMON: | ||
518 | pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n", | ||
519 | strtab + sym[i].st_name, sym[i].st_shndx); | ||
520 | /* .sbss section */ | ||
521 | break; | ||
522 | |||
523 | default: | ||
524 | secbase = sechdrs[sym[i].st_shndx].sh_addr; | ||
525 | |||
526 | if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0) | ||
527 | save_gp_address(secbase, sym[i].st_value); | ||
528 | |||
529 | sym[i].st_value += secbase; | ||
530 | break; | ||
531 | } | ||
532 | } | ||
533 | } | ||
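COMMON symbols arrive with st_value holding their size; the SHN_COMMON case above bump-allocates them at the current end of .bss. A minimal sketch of that allocation with invented sizes (note that, like the loop above, it does no alignment):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        /* Invented: .bss ends here, two COMMON symbols of 8 and 16 bytes. */
        uint32_t bssbase = 0x80020000;
        uint32_t sizes[] = { 8, 16 };

        for (int i = 0; i < 2; i++) {
            uint32_t addr = bssbase;  /* st_value becomes the assigned address */
            bssbase += sizes[i];      /* the next COMMON symbol lands after it */
            printf("common symbol %d at 0x%08x\n", i, (unsigned)addr);
        }
        return 0;
    }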
534 | |||
535 | #ifdef DEBUG_ELFLOADER | ||
536 | static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex, | ||
537 | const char *strtab, struct module *mod) | ||
538 | { | ||
539 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | ||
540 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | ||
541 | |||
542 | pr_debug("dump_elfsymbols: n %d\n", n); | ||
543 | for (i = 1; i < n; i++) { | ||
544 | pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name, | ||
545 | sym[i].st_value); | ||
546 | } | ||
547 | } | ||
548 | #endif | ||
549 | |||
550 | static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs, | ||
551 | unsigned int symindex, const char *strtab, | ||
552 | struct module *mod) | ||
553 | { | ||
554 | Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr; | ||
555 | unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym); | ||
556 | |||
557 | for (i = 1; i < n; i++) { | ||
558 | if (strcmp(strtab + sym[i].st_name, "__start") == 0) | ||
559 | v->__start = sym[i].st_value; | ||
560 | |||
561 | if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0) | ||
562 | v->shared_ptr = (void *)sym[i].st_value; | ||
563 | } | ||
564 | |||
565 | if ((v->__start == 0) || (v->shared_ptr == NULL)) | ||
566 | return -1; | ||
567 | |||
568 | return 0; | ||
569 | } | ||
570 | |||
571 | /* | ||
572 | * Allocates a VPE with some program code space (the load address), copies | ||
573 | * the contents of the program (p)buffer performing relocations etc., and | ||
574 | * frees it when finished. | ||
575 | */ | ||
576 | static int vpe_elfload(struct vpe *v) | ||
577 | { | ||
578 | Elf_Ehdr *hdr; | ||
579 | Elf_Shdr *sechdrs; | ||
580 | long err = 0; | ||
581 | char *secstrings, *strtab = NULL; | ||
582 | unsigned int len, i, symindex = 0, strindex = 0, relocate = 0; | ||
583 | struct module mod; /* so we can re-use the relocations code */ | ||
584 | |||
585 | memset(&mod, 0, sizeof(struct module)); | ||
586 | strcpy(mod.name, "VPE loader"); | ||
587 | |||
588 | hdr = (Elf_Ehdr *) v->pbuffer; | ||
589 | len = v->plen; | ||
590 | |||
591 | /* Sanity checks against insmoding binaries or wrong arch, | ||
592 | weird elf version */ | ||
593 | if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0 | ||
594 | || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC) | ||
595 | || !elf_check_arch(hdr) | ||
596 | || hdr->e_shentsize != sizeof(*sechdrs)) { | ||
597 | pr_warn("VPE loader: program wrong arch or weird elf version\n"); | ||
598 | |||
599 | return -ENOEXEC; | ||
600 | } | ||
601 | |||
602 | if (hdr->e_type == ET_REL) | ||
603 | relocate = 1; | ||
604 | |||
605 | if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) { | ||
606 | pr_err("VPE loader: program length %u truncated\n", len); | ||
607 | |||
608 | return -ENOEXEC; | ||
609 | } | ||
610 | |||
611 | /* Convenience variables */ | ||
612 | sechdrs = (void *)hdr + hdr->e_shoff; | ||
613 | secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset; | ||
614 | sechdrs[0].sh_addr = 0; | ||
615 | |||
616 | /* And these should exist, but gcc whinges if we don't init them */ | ||
617 | symindex = strindex = 0; | ||
618 | |||
619 | if (relocate) { | ||
620 | for (i = 1; i < hdr->e_shnum; i++) { | ||
621 | if ((sechdrs[i].sh_type != SHT_NOBITS) && | ||
622 | (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) { | ||
623 | pr_err("VPE program length %u truncated\n", | ||
624 | len); | ||
625 | return -ENOEXEC; | ||
626 | } | ||
627 | |||
628 | /* Mark each section's sh_addr with its address in the | ||
629 | temporary image. */ | ||
630 | sechdrs[i].sh_addr = (size_t) hdr + | ||
631 | sechdrs[i].sh_offset; | ||
632 | |||
633 | /* Internal symbols and strings. */ | ||
634 | if (sechdrs[i].sh_type == SHT_SYMTAB) { | ||
635 | symindex = i; | ||
636 | strindex = sechdrs[i].sh_link; | ||
637 | strtab = (char *)hdr + | ||
638 | sechdrs[strindex].sh_offset; | ||
639 | } | ||
640 | } | ||
641 | layout_sections(&mod, hdr, sechdrs, secstrings); | ||
642 | } | ||
643 | |||
644 | v->load_addr = alloc_progmem(mod.core_layout.size); | ||
645 | if (!v->load_addr) | ||
646 | return -ENOMEM; | ||
647 | |||
648 | pr_info("VPE loader: loading to %p\n", v->load_addr); | ||
649 | |||
650 | if (relocate) { | ||
651 | for (i = 0; i < hdr->e_shnum; i++) { | ||
652 | void *dest; | ||
653 | |||
654 | if (!(sechdrs[i].sh_flags & SHF_ALLOC)) | ||
655 | continue; | ||
656 | |||
657 | dest = v->load_addr + sechdrs[i].sh_entsize; | ||
658 | |||
659 | if (sechdrs[i].sh_type != SHT_NOBITS) | ||
660 | memcpy(dest, (void *)sechdrs[i].sh_addr, | ||
661 | sechdrs[i].sh_size); | ||
662 | /* Update sh_addr to point to copy in image. */ | ||
663 | sechdrs[i].sh_addr = (unsigned long)dest; | ||
664 | |||
665 | pr_debug(" section sh_name %s sh_addr 0x%x\n", | ||
666 | secstrings + sechdrs[i].sh_name, | ||
667 | sechdrs[i].sh_addr); | ||
668 | } | ||
669 | |||
670 | /* Fix up syms, so that st_value is a pointer to location. */ | ||
671 | simplify_symbols(sechdrs, symindex, strtab, secstrings, | ||
672 | hdr->e_shnum, &mod); | ||
673 | |||
674 | /* Now do relocations. */ | ||
675 | for (i = 1; i < hdr->e_shnum; i++) { | ||
676 | const char *strtab = (char *)sechdrs[strindex].sh_addr; | ||
677 | unsigned int info = sechdrs[i].sh_info; | ||
678 | |||
679 | /* Not a valid relocation section? */ | ||
680 | if (info >= hdr->e_shnum) | ||
681 | continue; | ||
682 | |||
683 | /* Don't bother with non-allocated sections */ | ||
684 | if (!(sechdrs[info].sh_flags & SHF_ALLOC)) | ||
685 | continue; | ||
686 | |||
687 | if (sechdrs[i].sh_type == SHT_REL) | ||
688 | err = apply_relocations(sechdrs, strtab, | ||
689 | symindex, i, &mod); | ||
690 | else if (sechdrs[i].sh_type == SHT_RELA) | ||
691 | err = apply_relocate_add(sechdrs, strtab, | ||
692 | symindex, i, &mod); | ||
693 | if (err < 0) | ||
694 | return err; | ||
695 | |||
696 | } | ||
697 | } else { | ||
698 | struct elf_phdr *phdr = (struct elf_phdr *) | ||
699 | ((char *)hdr + hdr->e_phoff); | ||
700 | |||
701 | for (i = 0; i < hdr->e_phnum; i++) { | ||
702 | if (phdr->p_type == PT_LOAD) { | ||
703 | memcpy((void *)phdr->p_paddr, | ||
704 | (char *)hdr + phdr->p_offset, | ||
705 | phdr->p_filesz); | ||
706 | memset((void *)phdr->p_paddr + phdr->p_filesz, | ||
707 | 0, phdr->p_memsz - phdr->p_filesz); | ||
708 | } | ||
709 | phdr++; | ||
710 | } | ||
711 | |||
712 | for (i = 0; i < hdr->e_shnum; i++) { | ||
713 | /* Internal symbols and strings. */ | ||
714 | if (sechdrs[i].sh_type == SHT_SYMTAB) { | ||
715 | symindex = i; | ||
716 | strindex = sechdrs[i].sh_link; | ||
717 | strtab = (char *)hdr + | ||
718 | sechdrs[strindex].sh_offset; | ||
719 | |||
720 | /* | ||
721 | * mark symtab's address for when we try | ||
722 | * to find the magic symbols | ||
723 | */ | ||
724 | sechdrs[i].sh_addr = (size_t) hdr + | ||
725 | sechdrs[i].sh_offset; | ||
726 | } | ||
727 | } | ||
728 | } | ||
729 | |||
730 | /* make sure it's physically written out */ | ||
731 | flush_icache_range((unsigned long)v->load_addr, | ||
732 | (unsigned long)v->load_addr + v->len); | ||
733 | |||
734 | if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) { | ||
735 | if (v->__start == 0) { | ||
736 | pr_warn("VPE loader: program does not contain a __start symbol\n"); | ||
737 | return -ENOEXEC; | ||
738 | } | ||
739 | |||
740 | if (v->shared_ptr == NULL) | ||
741 | pr_warn("VPE loader: program does not contain vpe_shared symbol.\n" | ||
742 | " Unable to use AMVP (AP/SP) facilities.\n"); | ||
743 | } | ||
744 | |||
745 | pr_info(" elf loaded\n"); | ||
746 | return 0; | ||
747 | } | ||
748 | |||
749 | static int getcwd(char *buff, int size) | ||
750 | { | ||
751 | mm_segment_t old_fs; | ||
752 | int ret; | ||
753 | |||
754 | old_fs = get_fs(); | ||
755 | set_fs(KERNEL_DS); | ||
756 | |||
757 | ret = sys_getcwd(buff, size); | ||
758 | |||
759 | set_fs(old_fs); | ||
760 | |||
761 | return ret; | ||
762 | } | ||
763 | |||
764 | /* check that the VPE is unused and get ready to load the program */ | ||
765 | static int vpe_open(struct inode *inode, struct file *filp) | ||
766 | { | ||
767 | enum vpe_state state; | ||
768 | struct vpe_notifications *notifier; | ||
769 | struct vpe *v; | ||
770 | int ret; | ||
771 | |||
772 | if (VPE_MODULE_MINOR != iminor(inode)) { | ||
773 | /* assume only 1 device at the moment. */ | ||
774 | pr_warn("VPE loader: only vpe1 is supported\n"); | ||
775 | |||
776 | return -ENODEV; | ||
777 | } | ||
778 | |||
779 | v = get_vpe(aprp_cpu_index()); | ||
780 | if (v == NULL) { | ||
781 | pr_warn("VPE loader: unable to get vpe\n"); | ||
782 | |||
783 | return -ENODEV; | ||
784 | } | ||
785 | |||
786 | state = xchg(&v->state, VPE_STATE_INUSE); | ||
787 | if (state != VPE_STATE_UNUSED) { | ||
788 | pr_debug("VPE loader: tc in use dumping regs\n"); | ||
789 | |||
790 | list_for_each_entry(notifier, &v->notify, list) | ||
791 | notifier->stop(aprp_cpu_index()); | ||
792 | |||
793 | release_progmem(v->load_addr); | ||
794 | cleanup_tc(get_tc(aprp_cpu_index())); | ||
795 | } | ||
796 | |||
797 | /* this of course trashes what was there before... */ | ||
798 | v->pbuffer = vmalloc(P_SIZE); | ||
799 | if (!v->pbuffer) { | ||
800 | pr_warn("VPE loader: unable to allocate memory\n"); | ||
801 | return -ENOMEM; | ||
802 | } | ||
803 | v->plen = P_SIZE; | ||
804 | v->load_addr = NULL; | ||
805 | v->len = 0; | ||
806 | |||
807 | v->cwd[0] = 0; | ||
808 | ret = getcwd(v->cwd, VPE_PATH_MAX); | ||
809 | if (ret < 0) | ||
810 | pr_warn("VPE loader: open, getcwd returned %d\n", ret); | ||
811 | |||
812 | v->shared_ptr = NULL; | ||
813 | v->__start = 0; | ||
814 | |||
815 | return 0; | ||
816 | } | ||
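The xchg() on v->state is the claim step: it atomically marks the VPE in use and, in the same operation, reveals whether a previous user's state must be torn down. A user-space analogue of that claim-or-cleanup idiom using C11 atomics (the enum values mirror the driver's; everything else is illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    enum vpe_state { VPE_STATE_UNUSED, VPE_STATE_INUSE };

    int main(void)
    {
        _Atomic int state = VPE_STATE_UNUSED;

        /* Atomically claim the VPE; the old value says if cleanup is due. */
        int old = atomic_exchange(&state, VPE_STATE_INUSE);
        if (old != VPE_STATE_UNUSED)
            printf("stale state %d: stop notifiers, release progmem\n", old);
        else
            printf("VPE was unused, claimed cleanly\n");
        return 0;
    }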
817 | |||
818 | static int vpe_release(struct inode *inode, struct file *filp) | ||
819 | { | ||
820 | #if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP) | ||
821 | struct vpe *v; | ||
822 | Elf_Ehdr *hdr; | ||
823 | int ret = 0; | ||
824 | |||
825 | v = get_vpe(aprp_cpu_index()); | ||
826 | if (v == NULL) | ||
827 | return -ENODEV; | ||
828 | |||
829 | hdr = (Elf_Ehdr *) v->pbuffer; | ||
830 | if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) { | ||
831 | if (vpe_elfload(v) >= 0) { | ||
832 | vpe_run(v); | ||
833 | } else { | ||
834 | pr_warn("VPE loader: ELF load failed.\n"); | ||
835 | ret = -ENOEXEC; | ||
836 | } | ||
837 | } else { | ||
838 | pr_warn("VPE loader: only elf files are supported\n"); | ||
839 | ret = -ENOEXEC; | ||
840 | } | ||
841 | |||
842 | /* It's good to be able to run the SP and, if it chokes, have a look | ||
843 | at the /dev/rt?. But if we reset the pointer to the shared struct we | ||
844 | lose what has happened. So perhaps if garbage is sent to the vpe | ||
845 | device, use it as a trigger for the reset. Hopefully a nice | ||
846 | executable will be along shortly. */ | ||
847 | if (ret < 0) | ||
848 | v->shared_ptr = NULL; | ||
849 | |||
850 | vfree(v->pbuffer); | ||
851 | v->plen = 0; | ||
852 | |||
853 | return ret; | ||
854 | #else | ||
855 | pr_warn("VPE loader: ELF load failed.\n"); | ||
856 | return -ENOEXEC; | ||
857 | #endif | ||
858 | } | ||
859 | |||
860 | static ssize_t vpe_write(struct file *file, const char __user *buffer, | ||
861 | size_t count, loff_t *ppos) | ||
862 | { | ||
863 | size_t ret = count; | ||
864 | struct vpe *v; | ||
865 | |||
866 | if (iminor(file_inode(file)) != VPE_MODULE_MINOR) | ||
867 | return -ENODEV; | ||
868 | |||
869 | v = get_vpe(aprp_cpu_index()); | ||
870 | |||
871 | if (v == NULL) | ||
872 | return -ENODEV; | ||
873 | |||
874 | if ((count + v->len) > v->plen) { | ||
875 | pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n"); | ||
876 | return -ENOMEM; | ||
877 | } | ||
878 | |||
879 | count -= copy_from_user(v->pbuffer + v->len, buffer, count); | ||
880 | if (!count) | ||
881 | return -EFAULT; | ||
882 | |||
883 | v->len += count; | ||
884 | return ret; | ||
885 | } | ||
886 | |||
887 | const struct file_operations vpe_fops = { | ||
888 | .owner = THIS_MODULE, | ||
889 | .open = vpe_open, | ||
890 | .release = vpe_release, | ||
891 | .write = vpe_write, | ||
892 | .llseek = noop_llseek, | ||
893 | }; | ||
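The character device therefore implements a simple protocol: open claims the VPE and allocates pbuffer, successive writes accumulate the ELF image, and the final close triggers the load and run. A hedged user-space sketch of feeding a program to the loader; the /dev/vpe1 node name is an assumption based on the driver's single-minor convention, and error handling is abbreviated:

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    int main(int argc, char **argv)
    {
        char buf[4096];
        ssize_t n;

        /* Assumed device node; the image is loaded and run on close(). */
        int vpe = open("/dev/vpe1", O_WRONLY);
        int elf = open(argc > 1 ? argv[1] : "program.elf", O_RDONLY);
        if (vpe < 0 || elf < 0) {
            perror("open");
            return 1;
        }

        while ((n = read(elf, buf, sizeof(buf))) > 0)
            if (write(vpe, buf, n) != n) {
                perror("write");
                return 1;
            }

        close(elf);
        close(vpe);   /* vpe_release() runs vpe_elfload() and vpe_run() */
        return 0;
    }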
894 | |||
895 | void *vpe_get_shared(int index) | ||
896 | { | ||
897 | struct vpe *v = get_vpe(index); | ||
898 | |||
899 | if (v == NULL) | ||
900 | return NULL; | ||
901 | |||
902 | return v->shared_ptr; | ||
903 | } | ||
904 | EXPORT_SYMBOL(vpe_get_shared); | ||
905 | |||
906 | int vpe_notify(int index, struct vpe_notifications *notify) | ||
907 | { | ||
908 | struct vpe *v = get_vpe(index); | ||
909 | |||
910 | if (v == NULL) | ||
911 | return -1; | ||
912 | |||
913 | list_add(¬ify->list, &v->notify); | ||
914 | return 0; | ||
915 | } | ||
916 | EXPORT_SYMBOL(vpe_notify); | ||
917 | |||
918 | char *vpe_getcwd(int index) | ||
919 | { | ||
920 | struct vpe *v = get_vpe(index); | ||
921 | |||
922 | if (v == NULL) | ||
923 | return NULL; | ||
924 | |||
925 | return v->cwd; | ||
926 | } | ||
927 | EXPORT_SYMBOL(vpe_getcwd); | ||
928 | |||
929 | module_init(vpe_module_init); | ||
930 | module_exit(vpe_module_exit); | ||
931 | MODULE_DESCRIPTION("MIPS VPE Loader"); | ||
932 | MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); | ||
933 | MODULE_LICENSE("GPL"); | ||
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c new file mode 100644 index 000000000..c9263b95c --- /dev/null +++ b/arch/mips/kernel/watch.c | |||
@@ -0,0 +1,211 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 2008 David Daney | ||
7 | */ | ||
8 | |||
9 | #include <linux/sched.h> | ||
10 | |||
11 | #include <asm/processor.h> | ||
12 | #include <asm/watch.h> | ||
13 | |||
14 | /* | ||
15 | * Install the watch registers for the current thread. A maximum of | ||
16 | * four registers are installed although the machine may have more. | ||
17 | */ | ||
18 | void mips_install_watch_registers(struct task_struct *t) | ||
19 | { | ||
20 | struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264; | ||
21 | unsigned int watchhi = MIPS_WATCHHI_G | /* Trap all ASIDs */ | ||
22 | MIPS_WATCHHI_IRW; /* Clear result bits */ | ||
23 | |||
24 | switch (current_cpu_data.watch_reg_use_cnt) { | ||
25 | default: | ||
26 | BUG(); | ||
27 | case 4: | ||
28 | write_c0_watchlo3(watches->watchlo[3]); | ||
29 | write_c0_watchhi3(watchhi | watches->watchhi[3]); | ||
30 | fallthrough; | ||
31 | case 3: | ||
32 | write_c0_watchlo2(watches->watchlo[2]); | ||
33 | write_c0_watchhi2(watchhi | watches->watchhi[2]); | ||
34 | fallthrough; | ||
35 | case 2: | ||
36 | write_c0_watchlo1(watches->watchlo[1]); | ||
37 | write_c0_watchhi1(watchhi | watches->watchhi[1]); | ||
38 | fallthrough; | ||
39 | case 1: | ||
40 | write_c0_watchlo0(watches->watchlo[0]); | ||
41 | write_c0_watchhi0(watchhi | watches->watchhi[0]); | ||
42 | } | ||
43 | } | ||
44 | |||
45 | /* | ||
46 | * Read back the watchhi registers so the user space debugger has | ||
47 | * access to the I, R, and W bits. A maximum of four registers are | ||
48 | * read although the machine may have more. | ||
49 | */ | ||
50 | void mips_read_watch_registers(void) | ||
51 | { | ||
52 | struct mips3264_watch_reg_state *watches = | ||
53 | ¤t->thread.watch.mips3264; | ||
54 | unsigned int watchhi_mask = MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW; | ||
55 | |||
56 | switch (current_cpu_data.watch_reg_use_cnt) { | ||
57 | default: | ||
58 | BUG(); | ||
59 | case 4: | ||
60 | watches->watchhi[3] = (read_c0_watchhi3() & watchhi_mask); | ||
61 | fallthrough; | ||
62 | case 3: | ||
63 | watches->watchhi[2] = (read_c0_watchhi2() & watchhi_mask); | ||
64 | fallthrough; | ||
65 | case 2: | ||
66 | watches->watchhi[1] = (read_c0_watchhi1() & watchhi_mask); | ||
67 | fallthrough; | ||
68 | case 1: | ||
69 | watches->watchhi[0] = (read_c0_watchhi0() & watchhi_mask); | ||
70 | } | ||
71 | if (current_cpu_data.watch_reg_use_cnt == 1 && | ||
72 | (watches->watchhi[0] & MIPS_WATCHHI_IRW) == 0) { | ||
73 | /* Pathological case of a release 1 architecture that | ||
74 | * doesn't set the condition bits. We assume that | ||
75 | * since we got here, the watch condition was met and | ||
76 | * signal that the conditions requested in watchlo | ||
77 | * were met. */ | ||
78 | watches->watchhi[0] |= (watches->watchlo[0] & MIPS_WATCHHI_IRW); | ||
79 | } | ||
80 | } | ||
81 | |||
82 | /* | ||
83 | * Disable all watch registers. Although only four registers are | ||
84 | * installed, all are cleared to eliminate the possibility of endless | ||
85 | * looping in the watch handler. | ||
86 | */ | ||
87 | void mips_clear_watch_registers(void) | ||
88 | { | ||
89 | switch (current_cpu_data.watch_reg_count) { | ||
90 | default: | ||
91 | BUG(); | ||
92 | case 8: | ||
93 | write_c0_watchlo7(0); | ||
94 | fallthrough; | ||
95 | case 7: | ||
96 | write_c0_watchlo6(0); | ||
97 | fallthrough; | ||
98 | case 6: | ||
99 | write_c0_watchlo5(0); | ||
100 | fallthrough; | ||
101 | case 5: | ||
102 | write_c0_watchlo4(0); | ||
103 | fallthrough; | ||
104 | case 4: | ||
105 | write_c0_watchlo3(0); | ||
106 | fallthrough; | ||
107 | case 3: | ||
108 | write_c0_watchlo2(0); | ||
109 | fallthrough; | ||
110 | case 2: | ||
111 | write_c0_watchlo1(0); | ||
112 | fallthrough; | ||
113 | case 1: | ||
114 | write_c0_watchlo0(0); | ||
115 | } | ||
116 | } | ||
117 | |||
118 | void mips_probe_watch_registers(struct cpuinfo_mips *c) | ||
119 | { | ||
120 | unsigned int t; | ||
121 | |||
122 | if ((c->options & MIPS_CPU_WATCH) == 0) | ||
123 | return; | ||
124 | /* | ||
125 | * Check which of the I,R and W bits are supported, then | ||
126 | * disable the register. | ||
127 | */ | ||
128 | write_c0_watchlo0(MIPS_WATCHLO_IRW); | ||
129 | back_to_back_c0_hazard(); | ||
130 | t = read_c0_watchlo0(); | ||
131 | write_c0_watchlo0(0); | ||
132 | c->watch_reg_masks[0] = t & MIPS_WATCHLO_IRW; | ||
133 | |||
134 | /* Write the mask bits and read them back to determine which | ||
135 | * can be used. */ | ||
136 | c->watch_reg_count = 1; | ||
137 | c->watch_reg_use_cnt = 1; | ||
138 | t = read_c0_watchhi0(); | ||
139 | write_c0_watchhi0(t | MIPS_WATCHHI_MASK); | ||
140 | back_to_back_c0_hazard(); | ||
141 | t = read_c0_watchhi0(); | ||
142 | c->watch_reg_masks[0] |= (t & MIPS_WATCHHI_MASK); | ||
143 | if ((t & MIPS_WATCHHI_M) == 0) | ||
144 | return; | ||
145 | |||
146 | write_c0_watchlo1(MIPS_WATCHLO_IRW); | ||
147 | back_to_back_c0_hazard(); | ||
148 | t = read_c0_watchlo1(); | ||
149 | write_c0_watchlo1(0); | ||
150 | c->watch_reg_masks[1] = t & MIPS_WATCHLO_IRW; | ||
151 | |||
152 | c->watch_reg_count = 2; | ||
153 | c->watch_reg_use_cnt = 2; | ||
154 | t = read_c0_watchhi1(); | ||
155 | write_c0_watchhi1(t | MIPS_WATCHHI_MASK); | ||
156 | back_to_back_c0_hazard(); | ||
157 | t = read_c0_watchhi1(); | ||
158 | c->watch_reg_masks[1] |= (t & MIPS_WATCHHI_MASK); | ||
159 | if ((t & MIPS_WATCHHI_M) == 0) | ||
160 | return; | ||
161 | |||
162 | write_c0_watchlo2(MIPS_WATCHLO_IRW); | ||
163 | back_to_back_c0_hazard(); | ||
164 | t = read_c0_watchlo2(); | ||
165 | write_c0_watchlo2(0); | ||
166 | c->watch_reg_masks[2] = t & MIPS_WATCHLO_IRW; | ||
167 | |||
168 | c->watch_reg_count = 3; | ||
169 | c->watch_reg_use_cnt = 3; | ||
170 | t = read_c0_watchhi2(); | ||
171 | write_c0_watchhi2(t | MIPS_WATCHHI_MASK); | ||
172 | back_to_back_c0_hazard(); | ||
173 | t = read_c0_watchhi2(); | ||
174 | c->watch_reg_masks[2] |= (t & MIPS_WATCHHI_MASK); | ||
175 | if ((t & MIPS_WATCHHI_M) == 0) | ||
176 | return; | ||
177 | |||
178 | write_c0_watchlo3(MIPS_WATCHLO_IRW); | ||
179 | back_to_back_c0_hazard(); | ||
180 | t = read_c0_watchlo3(); | ||
181 | write_c0_watchlo3(0); | ||
182 | c->watch_reg_masks[3] = t & MIPS_WATCHLO_IRW; | ||
183 | |||
184 | c->watch_reg_count = 4; | ||
185 | c->watch_reg_use_cnt = 4; | ||
186 | t = read_c0_watchhi3(); | ||
187 | write_c0_watchhi3(t | MIPS_WATCHHI_MASK); | ||
188 | back_to_back_c0_hazard(); | ||
189 | t = read_c0_watchhi3(); | ||
190 | c->watch_reg_masks[3] |= (t & MIPS_WATCHHI_MASK); | ||
191 | if ((t & MIPS_WATCHHI_M) == 0) | ||
192 | return; | ||
193 | |||
194 | /* We use at most 4, but probe and report up to 8. */ | ||
195 | c->watch_reg_count = 5; | ||
196 | t = read_c0_watchhi4(); | ||
197 | if ((t & MIPS_WATCHHI_M) == 0) | ||
198 | return; | ||
199 | |||
200 | c->watch_reg_count = 6; | ||
201 | t = read_c0_watchhi5(); | ||
202 | if ((t & MIPS_WATCHHI_M) == 0) | ||
203 | return; | ||
204 | |||
205 | c->watch_reg_count = 7; | ||
206 | t = read_c0_watchhi6(); | ||
207 | if ((t & MIPS_WATCHHI_M) == 0) | ||
208 | return; | ||
209 | |||
210 | c->watch_reg_count = 8; | ||
211 | } | ||
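The probe walks the register chain through the M ("more") bit of each watchhi: while M is set, another watch register pair follows. Because the c0 accessors are per-register macros, the kernel must unroll the walk; a hedged sketch of the same logic with a stubbed accessor (the stub pretends the machine has three pairs, and bit 31 matches the architectural M bit):

    #include <stdio.h>

    #define MIPS_WATCHHI_M (1u << 31)   /* "another watch register follows" */

    /* Stub: pretend the machine has three watch register pairs. */
    static unsigned int read_watchhi(int n)
    {
        return n < 2 ? MIPS_WATCHHI_M : 0;
    }

    int main(void)
    {
        int count = 1;   /* register 0 exists if the CPU has watch support */

        /* Follow the M-bit chain exactly as the unrolled kernel code does. */
        while (read_watchhi(count - 1) & MIPS_WATCHHI_M)
            count++;

        printf("watch register pairs: %d\n", count);
        return 0;
    }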