author     2025-03-08 22:04:20 +0800
committer  2025-03-08 22:04:20 +0800
commit     a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a (patch)
tree       84f21bd0bf7071bc5fc7dd989e77d7ceb5476682 /arch/mips/mm/fault.c
download   ohosKernel-a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a.tar.gz
           ohosKernel-a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a.zip
Initial commit: OpenHarmony-v4.0-Release
Diffstat (limited to 'arch/mips/mm/fault.c')
-rw-r--r--  arch/mips/mm/fault.c  332
1 file changed, 332 insertions(+), 0 deletions(-)
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
new file mode 100644
index 000000000..7c871b14e
--- /dev/null
+++ b/arch/mips/mm/fault.c
@@ -0,0 +1,332 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1995 - 2000 by Ralf Baechle
 */
#include <linux/context_tracking.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kprobes.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/branch.h>
#include <asm/mmu_context.h>
#include <asm/ptrace.h>
#include <asm/highmem.h>        /* For VMALLOC_END */
#include <linux/kdebug.h>

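/*
 * When non-zero, unhandled user faults are reported via the ratelimited
 * SIGSEGV diagnostics in __do_page_fault() below.
 */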
int show_unhandled_signals = 1;

/*
 * This routine handles page faults. It determines the address,
 * and the problem, and then passes it off to one of the appropriate
 * routines.
 */
static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
        unsigned long address)
{
        struct vm_area_struct * vma = NULL;
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        const int field = sizeof(unsigned long) * 2;
        int si_code;
        vm_fault_t fault;
        unsigned int flags = FAULT_FLAG_DEFAULT;

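        /*
         * Limit the "sending SIGSEGV" diagnostics below to bursts of 10
         * per 5-second interval, so a fault storm cannot flood the console.
         */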
        static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);

#if 0
        printk("Cpu%d[%s:%d:%0*lx:%ld:%0*lx]\n", raw_smp_processor_id(),
               current->comm, current->pid, field, address, write,
               field, regs->cp0_epc);
#endif

#ifdef CONFIG_KPROBES
        /*
         * This is to notify the fault handler of the kprobes.
         */
        if (notify_die(DIE_PAGE_FAULT, "page fault", regs, -1,
                       current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
                return;
#endif

        si_code = SEGV_MAPERR;

        /*
         * We fault-in kernel-space virtual memory on-demand. The
         * 'reference' page table is init_mm.pgd.
         *
         * NOTE! We MUST NOT take any locks for this case. We may
         * be in an interrupt or a critical region, and should
         * only copy the information from the master page table,
         * nothing more.
         */
#ifdef CONFIG_64BIT
# define VMALLOC_FAULT_TARGET no_context
#else
# define VMALLOC_FAULT_TARGET vmalloc_fault
#endif
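        /*
         * On 64-bit kernels a fault in the vmalloc range is a genuine
         * kernel fault and goes straight to no_context; on 32-bit kernels
         * we first try to lazily sync this task's top-level page-table
         * entries from init_mm (see vmalloc_fault at the bottom).
         */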

        if (unlikely(address >= VMALLOC_START && address <= VMALLOC_END))
                goto VMALLOC_FAULT_TARGET;
#ifdef MODULE_START
        if (unlikely(address >= MODULE_START && address < MODULE_END))
                goto VMALLOC_FAULT_TARGET;
#endif

        /*
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
        if (faulthandler_disabled() || !mm)
                goto bad_area_nosemaphore;

        if (user_mode(regs))
                flags |= FAULT_FLAG_USER;

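        /* Account the fault to perf's software page-fault counter. */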
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
retry:
        mmap_read_lock(mm);
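        /*
         * find_vma() returns the first VMA that ends above the address.
         * An address below vma->vm_start can only be valid as stack
         * growth: VM_GROWSDOWN mappings are extended down to it with
         * expand_stack().
         */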
        vma = find_vma(mm, address);
        if (!vma)
                goto bad_area;
        if (vma->vm_start <= address)
                goto good_area;
        if (!(vma->vm_flags & VM_GROWSDOWN))
                goto bad_area;
        if (expand_stack(vma, address))
                goto bad_area;
/*
 * Ok, we have a good vm_area for this memory access, so
 * we can handle it..
 */
good_area:
        si_code = SEGV_ACCERR;

        if (write) {
                if (!(vma->vm_flags & VM_WRITE))
                        goto bad_area;
                flags |= FAULT_FLAG_WRITE;
        } else {
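                /*
                 * RIXI (Read Inhibit / eXecute Inhibit) lets the CPU
                 * distinguish instruction fetches from data reads: a
                 * fetch (faulting address == EPC) needs VM_EXEC, and a
                 * data read needs VM_READ.  Without RIXI, all we can
                 * check is that the mapping is accessible at all.
                 */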
                if (cpu_has_rixi) {
                        if (address == regs->cp0_epc && !(vma->vm_flags & VM_EXEC)) {
#if 0
                                pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] XI violation\n",
                                          raw_smp_processor_id(),
                                          current->comm, current->pid,
                                          field, address, write,
                                          field, regs->cp0_epc);
#endif
                                goto bad_area;
                        }
                        if (!(vma->vm_flags & VM_READ) &&
                            exception_epc(regs) != address) {
#if 0
                                pr_notice("Cpu%d[%s:%d:%0*lx:%ld:%0*lx] RI violation\n",
                                          raw_smp_processor_id(),
                                          current->comm, current->pid,
                                          field, address, write,
                                          field, regs->cp0_epc);
#endif
                                goto bad_area;
                        }
                } else {
                        if (unlikely(!vma_is_accessible(vma)))
                                goto bad_area;
                }
        }

        /*
         * If for any reason at all we couldn't handle the fault,
         * make sure we exit gracefully rather than endlessly redo
         * the fault.
         */
        fault = handle_mm_fault(vma, address, flags, regs);

        if (fault_signal_pending(fault, regs))
                return;

        if (unlikely(fault & VM_FAULT_ERROR)) {
                if (fault & VM_FAULT_OOM)
                        goto out_of_memory;
                else if (fault & VM_FAULT_SIGSEGV)
                        goto bad_area;
                else if (fault & VM_FAULT_SIGBUS)
                        goto do_sigbus;
                BUG();
        }
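        /*
         * handle_mm_fault() may have dropped mmap_lock and asked for a
         * retry; FAULT_FLAG_TRIED tells the core MM that this is not
         * the first attempt.
         */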
        if (flags & FAULT_FLAG_ALLOW_RETRY) {
                if (fault & VM_FAULT_RETRY) {
                        flags |= FAULT_FLAG_TRIED;

                        /*
                         * No need to mmap_read_unlock(mm) as we would
                         * have already released it in __lock_page_or_retry
                         * in mm/filemap.c.
                         */

                        goto retry;
                }
        }

        mmap_read_unlock(mm);
        return;

/*
 * Something tried to access memory that isn't in our memory map..
 * Fix it, but check if it's kernel or user first..
 */
bad_area:
        mmap_read_unlock(mm);

bad_area_nosemaphore:
        /* User mode accesses just cause a SIGSEGV */
        if (user_mode(regs)) {
                tsk->thread.cp0_badvaddr = address;
                tsk->thread.error_code = write;
                if (show_unhandled_signals &&
                    unhandled_signal(tsk, SIGSEGV) &&
                    __ratelimit(&ratelimit_state)) {
                        pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
                                tsk->comm,
                                write ? "write access to" : "read access from",
                                field, address);
                        pr_info("epc = %0*lx in", field,
                                (unsigned long) regs->cp0_epc);
                        print_vma_addr(KERN_CONT " ", regs->cp0_epc);
                        pr_cont("\n");
                        pr_info("ra = %0*lx in", field,
                                (unsigned long) regs->regs[31]);
                        print_vma_addr(KERN_CONT " ", regs->regs[31]);
                        pr_cont("\n");
                }
                current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
                force_sig_fault(SIGSEGV, si_code, (void __user *)address);
                return;
        }

no_context:
        /* Are we prepared to handle this kernel fault? */
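        /*
         * fixup_exception() searches the exception table for the faulting
         * EPC and, if an entry exists (e.g. for a uaccess helper), resumes
         * execution at its fixup stub instead of oopsing.
         */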
        if (fixup_exception(regs)) {
                current->thread.cp0_baduaddr = address;
                return;
        }

        /*
         * Oops. The kernel tried to access some bad page. We'll have to
         * terminate things with extreme prejudice.
         */
        bust_spinlocks(1);

        printk(KERN_ALERT "CPU %d Unable to handle kernel paging request at "
               "virtual address %0*lx, epc == %0*lx, ra == %0*lx\n",
               raw_smp_processor_id(), field, address, field, regs->cp0_epc,
               field, regs->regs[31]);
        die("Oops", regs);

out_of_memory:
        /*
         * We ran out of memory, call the OOM killer, and return to userspace
         * (which will retry the fault, or kill us if we got oom-killed).
         */
        mmap_read_unlock(mm);
        if (!user_mode(regs))
                goto no_context;
        pagefault_out_of_memory();
        return;

do_sigbus:
        mmap_read_unlock(mm);

        /* Kernel mode? Handle exceptions or die */
        if (!user_mode(regs))
                goto no_context;

        /*
         * Send a sigbus, regardless of whether we were in kernel
         * or user mode.
         */
#if 0
        printk("do_page_fault() #3: sending SIGBUS to %s for "
               "invalid %s\n%0*lx (epc == %0*lx, ra == %0*lx)\n",
               tsk->comm,
               write ? "write access to" : "read access from",
               field, address,
               field, (unsigned long) regs->cp0_epc,
               field, (unsigned long) regs->regs[31]);
#endif
        current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
        tsk->thread.cp0_badvaddr = address;
        force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);

        return;
#ifndef CONFIG_64BIT
vmalloc_fault:
        {
                /*
                 * Synchronize this task's top level page-table
                 * with the 'reference' page table.
                 *
                 * Do _not_ use "tsk" here. We might be inside
                 * an interrupt in the middle of a task switch..
                 */
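                /*
                 * Walk the active page table and init_mm's reference
                 * table in parallel, copying over the missing pgd and
                 * pmd entries.  If any level is absent from the
                 * reference table, the vmalloc access itself was bad.
                 */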
                int offset = pgd_index(address);
                pgd_t *pgd, *pgd_k;
                p4d_t *p4d, *p4d_k;
                pud_t *pud, *pud_k;
                pmd_t *pmd, *pmd_k;
                pte_t *pte_k;

                pgd = (pgd_t *) pgd_current[raw_smp_processor_id()] + offset;
                pgd_k = init_mm.pgd + offset;

                if (!pgd_present(*pgd_k))
                        goto no_context;
                set_pgd(pgd, *pgd_k);

                p4d = p4d_offset(pgd, address);
                p4d_k = p4d_offset(pgd_k, address);
                if (!p4d_present(*p4d_k))
                        goto no_context;

                pud = pud_offset(p4d, address);
                pud_k = pud_offset(p4d_k, address);
                if (!pud_present(*pud_k))
                        goto no_context;

                pmd = pmd_offset(pud, address);
                pmd_k = pmd_offset(pud_k, address);
                if (!pmd_present(*pmd_k))
                        goto no_context;
                set_pmd(pmd, *pmd_k);

                pte_k = pte_offset_kernel(pmd_k, address);
                if (!pte_present(*pte_k))
                        goto no_context;
                return;
        }
#endif
}

asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
        unsigned long write, unsigned long address)
{
        enum ctx_state prev_state;

        prev_state = exception_enter();
        __do_page_fault(regs, write, address);
        exception_exit(prev_state);
}