author     2025-03-08 22:04:20 +0800
committer  2025-03-08 22:04:20 +0800
commit     a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a (patch)
tree       84f21bd0bf7071bc5fc7dd989e77d7ceb5476682 /fs/coredump.c
Initial commit: OpenHarmony-v4.0-Release
Diffstat (limited to 'fs/coredump.c')
-rw-r--r--  fs/coredump.c  1169
1 file changed, 1169 insertions, 0 deletions
diff --git a/fs/coredump.c b/fs/coredump.c
new file mode 100644
index 000000000..21cda2346
--- /dev/null
+++ b/fs/coredump.c
@@ -0,0 +1,1169 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/fdtable.h>
#include <linux/freezer.h>
#include <linux/mm.h>
#include <linux/stat.h>
#include <linux/fcntl.h>
#include <linux/swap.h>
#include <linux/ctype.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/pagemap.h>
#include <linux/perf_event.h>
#include <linux/highmem.h>
#include <linux/spinlock.h>
#include <linux/key.h>
#include <linux/personality.h>
#include <linux/binfmts.h>
#include <linux/coredump.h>
#include <linux/sched/coredump.h>
#include <linux/sched/signal.h>
#include <linux/sched/task_stack.h>
#include <linux/utsname.h>
#include <linux/pid_namespace.h>
#include <linux/module.h>
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/tsacct_kern.h>
#include <linux/cn_proc.h>
#include <linux/audit.h>
#include <linux/tracehook.h>
#include <linux/kmod.h>
#include <linux/fsnotify.h>
#include <linux/fs_struct.h>
#include <linux/pipe_fs_i.h>
#include <linux/oom.h>
#include <linux/compat.h>
#include <linux/fs.h>
#include <linux/path.h>
#include <linux/timekeeping.h>
#include <linux/elf.h>

#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/tlb.h>
#include <asm/exec.h>

#include <trace/events/task.h>
#include "internal.h"

#include <trace/events/sched.h>

static bool dump_vma_snapshot(struct coredump_params *cprm);
static void free_vma_snapshot(struct coredump_params *cprm);

int core_uses_pid;
unsigned int core_pipe_limit;
char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;

struct core_name {
        char *corename;
        int used, size;
};

/* The maximal length of core_pattern is also specified in sysctl.c */

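/*
 * Grow cn->corename to at least @size bytes. On success, cn->size is
 * updated to the usable allocation size reported by ksize(), which may
 * exceed the requested size.
 */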
static int expand_corename(struct core_name *cn, int size)
{
        char *corename = krealloc(cn->corename, size, GFP_KERNEL);

        if (!corename)
                return -ENOMEM;

        if (size > core_name_size) /* racy but harmless */
                core_name_size = size;

        cn->size = ksize(corename);
        cn->corename = corename;
        return 0;
}

static __printf(2, 0) int cn_vprintf(struct core_name *cn, const char *fmt,
                va_list arg)
{
        int free, need;
        va_list arg_copy;

again:
        free = cn->size - cn->used;

        va_copy(arg_copy, arg);
        need = vsnprintf(cn->corename + cn->used, free, fmt, arg_copy);
        va_end(arg_copy);

        if (need < free) {
                cn->used += need;
                return 0;
        }

        if (!expand_corename(cn, cn->size + need - free + 1))
                goto again;

        return -ENOMEM;
}

static __printf(2, 3) int cn_printf(struct core_name *cn, const char *fmt, ...)
{
        va_list arg;
        int ret;

        va_start(arg, fmt);
        ret = cn_vprintf(cn, fmt, arg);
        va_end(arg);

        return ret;
}

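/*
 * Like cn_printf(), but escape the appended component so it can never
 * alter the directory structure of the final core path: "." and ".."
 * components and empty components are rewritten, and every '/' is
 * replaced with '!'.
 */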
static __printf(2, 3)
int cn_esc_printf(struct core_name *cn, const char *fmt, ...)
{
        int cur = cn->used;
        va_list arg;
        int ret;

        va_start(arg, fmt);
        ret = cn_vprintf(cn, fmt, arg);
        va_end(arg);

        if (ret == 0) {
                /*
                 * Ensure that this coredump name component can't cause the
                 * resulting corefile path to consist of a ".." or ".".
                 */
                if ((cn->used - cur == 1 && cn->corename[cur] == '.') ||
                    (cn->used - cur == 2 && cn->corename[cur] == '.'
                                && cn->corename[cur+1] == '.'))
                        cn->corename[cur] = '!';

                /*
                 * Empty names are fishy and could be used to create a "//" in a
                 * corefile name, causing the coredump to happen one directory
                 * level too high. Enforce that all components of the core
                 * pattern are at least one character long.
                 */
                if (cn->used == cur)
                        ret = cn_printf(cn, "!");
        }

        for (; cur < cn->used; ++cur) {
                if (cn->corename[cur] == '/')
                        cn->corename[cur] = '!';
        }
        return ret;
}

static int cn_print_exe_file(struct core_name *cn, bool name_only)
{
        struct file *exe_file;
        char *pathbuf, *path, *ptr;
        int ret;

        exe_file = get_mm_exe_file(current->mm);
        if (!exe_file)
                return cn_esc_printf(cn, "%s (path unknown)", current->comm);

        pathbuf = kmalloc(PATH_MAX, GFP_KERNEL);
        if (!pathbuf) {
                ret = -ENOMEM;
                goto put_exe_file;
        }

        path = file_path(exe_file, pathbuf, PATH_MAX);
        if (IS_ERR(path)) {
                ret = PTR_ERR(path);
                goto free_buf;
        }

        if (name_only) {
                ptr = strrchr(path, '/');
                if (ptr)
                        path = ptr + 1;
        }
        ret = cn_esc_printf(cn, "%s", path);

free_buf:
        kfree(pathbuf);
put_exe_file:
        fput(exe_file);
        return ret;
}

/* format_corename will inspect the pattern parameter, and output a
 * name into corename, which must have space for at least
 * CORENAME_MAX_SIZE bytes plus one byte for the zero terminator.
 */
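/*
 * For example, a core_pattern of "core.%e.%p" expands to something like
 * "core.myapp.1234", while a pattern starting with '|' (for instance
 * "|/usr/bin/helper %p") makes this function return 1 and record
 * NUL-separated helper arguments via *argv/*argc instead of a file name.
 */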
static int format_corename(struct core_name *cn, struct coredump_params *cprm,
                           size_t **argv, int *argc)
{
        const struct cred *cred = current_cred();
        const char *pat_ptr = core_pattern;
        int ispipe = (*pat_ptr == '|');
        bool was_space = false;
        int pid_in_pattern = 0;
        int err = 0;

        cn->used = 0;
        cn->corename = NULL;
        if (expand_corename(cn, core_name_size))
                return -ENOMEM;
        cn->corename[0] = '\0';

        if (ispipe) {
                int argvs = sizeof(core_pattern) / 2;
                (*argv) = kmalloc_array(argvs, sizeof(**argv), GFP_KERNEL);
                if (!(*argv))
                        return -ENOMEM;
                (*argv)[(*argc)++] = 0;
                ++pat_ptr;
                if (!(*pat_ptr))
                        return -ENOMEM;
        }

        /* Repeat as long as we have more pattern to process and more output
           space */
        while (*pat_ptr) {
                /*
                 * Split on spaces before doing template expansion so that
                 * %e and %E don't get split if they have spaces in them
                 */
                if (ispipe) {
                        if (isspace(*pat_ptr)) {
                                if (cn->used != 0)
                                        was_space = true;
                                pat_ptr++;
                                continue;
                        } else if (was_space) {
                                was_space = false;
                                err = cn_printf(cn, "%c", '\0');
                                if (err)
                                        return err;
                                (*argv)[(*argc)++] = cn->used;
                        }
                }
                if (*pat_ptr != '%') {
                        err = cn_printf(cn, "%c", *pat_ptr++);
                } else {
                        switch (*++pat_ptr) {
                        /* single % at the end, drop that */
                        case 0:
                                goto out;
                        /* Double percent, output one percent */
                        case '%':
                                err = cn_printf(cn, "%c", '%');
                                break;
                        /* pid */
                        case 'p':
                                pid_in_pattern = 1;
                                err = cn_printf(cn, "%d",
                                                task_tgid_vnr(current));
                                break;
                        /* global pid */
                        case 'P':
                                err = cn_printf(cn, "%d",
                                                task_tgid_nr(current));
                                break;
                        case 'i':
                                err = cn_printf(cn, "%d",
                                                task_pid_vnr(current));
                                break;
                        case 'I':
                                err = cn_printf(cn, "%d",
                                                task_pid_nr(current));
                                break;
                        /* uid */
                        case 'u':
                                err = cn_printf(cn, "%u",
                                                from_kuid(&init_user_ns,
                                                          cred->uid));
                                break;
                        /* gid */
                        case 'g':
                                err = cn_printf(cn, "%u",
                                                from_kgid(&init_user_ns,
                                                          cred->gid));
                                break;
                        case 'd':
                                err = cn_printf(cn, "%d",
                                                __get_dumpable(cprm->mm_flags));
                                break;
                        /* signal that caused the coredump */
                        case 's':
                                err = cn_printf(cn, "%d",
                                                cprm->siginfo->si_signo);
                                break;
                        /* UNIX time of coredump */
                        case 't': {
                                time64_t time;

                                time = ktime_get_real_seconds();
                                err = cn_printf(cn, "%lld", time);
                                break;
                        }
                        /* hostname */
                        case 'h':
                                down_read(&uts_sem);
                                err = cn_esc_printf(cn, "%s",
                                                    utsname()->nodename);
                                up_read(&uts_sem);
                                break;
                        /* executable, could be changed by prctl PR_SET_NAME etc */
                        case 'e':
                                err = cn_esc_printf(cn, "%s", current->comm);
                                break;
                        /* file name of executable */
                        case 'f':
                                err = cn_print_exe_file(cn, true);
                                break;
                        case 'E':
                                err = cn_print_exe_file(cn, false);
                                break;
                        /* core limit size */
                        case 'c':
                                err = cn_printf(cn, "%lu",
                                                rlimit(RLIMIT_CORE));
                                break;
                        default:
                                break;
                        }
                        ++pat_ptr;
                }

                if (err)
                        return err;
        }

out:
        /* Backward compatibility with core_uses_pid:
         *
         * If core_pattern does not include a %p (as is the default)
         * and core_uses_pid is set, then .%pid will be appended to
         * the filename. Do not do this for piped commands. */
        if (!ispipe && !pid_in_pattern && core_uses_pid) {
                err = cn_printf(cn, ".%d", task_tgid_vnr(current));
                if (err)
                        return err;
        }
        return ispipe;
}

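/*
 * Mark the thread group of @start as coredumping and queue SIGKILL for
 * every other thread in it that still has an mm. Returns the number of
 * threads that were woken up and must be waited for.
 */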
static int zap_process(struct task_struct *start, int exit_code, int flags)
{
        struct task_struct *t;
        int nr = 0;

        /* ignore all signals except SIGKILL, see prepare_signal() */
        start->signal->flags = SIGNAL_GROUP_COREDUMP | flags;
        start->signal->group_exit_code = exit_code;
        start->signal->group_stop_count = 0;

        for_each_thread(start, t) {
                task_clear_jobctl_pending(t, JOBCTL_PENDING_MASK);
                if (t != current && t->mm) {
                        sigaddset(&t->pending.signal, SIGKILL);
                        signal_wake_up(t, 1);
                        nr++;
                }
        }

        return nr;
}

static int zap_threads(struct task_struct *tsk, struct mm_struct *mm,
                        struct core_state *core_state, int exit_code)
{
        struct task_struct *g, *p;
        unsigned long flags;
        int nr = -EAGAIN;

        spin_lock_irq(&tsk->sighand->siglock);
        if (!signal_group_exit(tsk->signal)) {
                mm->core_state = core_state;
                tsk->signal->group_exit_task = tsk;
                nr = zap_process(tsk, exit_code, 0);
                clear_tsk_thread_flag(tsk, TIF_SIGPENDING);
        }
        spin_unlock_irq(&tsk->sighand->siglock);
        if (unlikely(nr < 0))
                return nr;

        tsk->flags |= PF_DUMPCORE;
        if (atomic_read(&mm->mm_users) == nr + 1)
                goto done;
        /*
         * We should find and kill all tasks which use this mm, and we should
         * count them correctly into ->nr_threads. We don't take tasklist
         * lock, but this is safe wrt:
         *
         * fork:
         *      None of sub-threads can fork after zap_process(leader). All
         *      processes which were created before this point should be
         *      visible to zap_threads() because copy_process() adds the new
         *      process to the tail of init_task.tasks list, and lock/unlock
         *      of ->siglock provides a memory barrier.
         *
         * do_exit:
         *      The caller holds mm->mmap_lock. This means that the task which
         *      uses this mm can't pass exit_mm(), so it can't exit or clear
         *      its ->mm.
         *
         * de_thread:
         *      It does list_replace_rcu(&leader->tasks, &current->tasks),
         *      we must see either old or new leader, this does not matter.
         *      However, it can change p->sighand, so lock_task_sighand(p)
         *      must be used. Since p->mm != NULL and we hold ->mmap_lock
         *      it can't fail.
         *
         *      Note also that "g" can be the old leader with ->mm == NULL
         *      and already unhashed and thus removed from ->thread_group.
         *      This is OK, __unhash_process()->list_del_rcu() does not
         *      clear the ->next pointer, we will find the new leader via
         *      next_thread().
         */
        rcu_read_lock();
        for_each_process(g) {
                if (g == tsk->group_leader)
                        continue;
                if (g->flags & PF_KTHREAD)
                        continue;

                for_each_thread(g, p) {
                        if (unlikely(!p->mm))
                                continue;
                        if (unlikely(p->mm == mm)) {
                                lock_task_sighand(p, &flags);
                                nr += zap_process(p, exit_code,
                                                        SIGNAL_GROUP_EXIT);
                                unlock_task_sighand(p, &flags);
                        }
                        break;
                }
        }
        rcu_read_unlock();
done:
        atomic_set(&core_state->nr_threads, nr);
        return nr;
}

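/*
 * Kill the other users of current->mm and wait until they are no longer
 * running, so that each thread's context (including extended register
 * state) is stable in memory before the dump is written.
 */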
static int coredump_wait(int exit_code, struct core_state *core_state)
{
        struct task_struct *tsk = current;
        struct mm_struct *mm = tsk->mm;
        int core_waiters = -EBUSY;

        init_completion(&core_state->startup);
        core_state->dumper.task = tsk;
        core_state->dumper.next = NULL;

        if (mmap_write_lock_killable(mm))
                return -EINTR;

        if (!mm->core_state)
                core_waiters = zap_threads(tsk, mm, core_state, exit_code);
        mmap_write_unlock(mm);

        if (core_waiters > 0) {
                struct core_thread *ptr;

                freezer_do_not_count();
                wait_for_completion(&core_state->startup);
                freezer_count();
                /*
                 * Wait for all the threads to become inactive, so that
                 * all the thread context (extended register state, like
                 * fpu etc) gets copied to the memory.
                 */
                ptr = core_state->dumper.next;
                while (ptr != NULL) {
                        wait_task_inactive(ptr->task, 0);
                        ptr = ptr->next;
                }
        }

        return core_waiters;
}

static void coredump_finish(struct mm_struct *mm, bool core_dumped)
{
        struct core_thread *curr, *next;
        struct task_struct *task;

        spin_lock_irq(&current->sighand->siglock);
        if (core_dumped && !__fatal_signal_pending(current))
                current->signal->group_exit_code |= 0x80;
        current->signal->group_exit_task = NULL;
        current->signal->flags = SIGNAL_GROUP_EXIT;
        spin_unlock_irq(&current->sighand->siglock);

        next = mm->core_state->dumper.next;
        while ((curr = next) != NULL) {
                next = curr->next;
                task = curr->task;
                /*
                 * see exit_mm(), curr->task must not see
                 * ->task == NULL before we read ->next.
                 */
                smp_mb();
                curr->task = NULL;
                wake_up_process(task);
        }

        mm->core_state = NULL;
}

static bool dump_interrupted(void)
{
        /*
         * SIGKILL or freezing() interrupt the coredumping. Perhaps we
         * can do try_to_freeze() and check __fatal_signal_pending(),
         * but then we need to teach dump_write() to restart and clear
         * TIF_SIGPENDING.
         */
        return fatal_signal_pending(current) || freezing(current);
}

static void wait_for_dump_helpers(struct file *file)
{
        struct pipe_inode_info *pipe = file->private_data;

        pipe_lock(pipe);
        pipe->readers++;
        pipe->writers--;
        wake_up_interruptible_sync(&pipe->rd_wait);
        kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
        pipe_unlock(pipe);

        /*
         * We actually want wait_event_freezable() but then we need
         * to clear TIF_SIGPENDING and improve dump_interrupted().
         */
        wait_event_interruptible(pipe->rd_wait, pipe->readers == 1);

        pipe_lock(pipe);
        pipe->readers--;
        pipe->writers++;
        pipe_unlock(pipe);
}

/*
 * umh_pipe_setup
 * helper function to customize the process used
 * to collect the core in userspace. Specifically
 * it sets up a pipe and installs it as fd 0 (stdin)
 * for the process. Returns 0 on success, or a
 * negative error code on failure.
 * Note that it also sets the core limit to 1. This
 * is a special value that we use to trap recursive
 * core dumps
 */
static int umh_pipe_setup(struct subprocess_info *info, struct cred *new)
{
        struct file *files[2];
        struct coredump_params *cp = (struct coredump_params *)info->data;
        int err = create_pipe_files(files, 0);
        if (err)
                return err;

        cp->file = files[1];

        err = replace_fd(0, files[0], 0);
        fput(files[0]);
        /* and disallow core files too */
        current->signal->rlim[RLIMIT_CORE] = (struct rlimit){1, 1};

        return err;
}

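/*
 * do_coredump - write out a core dump for the current task
 * @siginfo: signal that triggered the dump
 *
 * Called from the signal-delivery path when a fatal signal's default
 * action includes a core dump. Resolves core_pattern, either spawning
 * a user-mode helper (pipe patterns) or creating a core file, and then
 * lets the binfmt handler write the actual dump.
 */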
void do_coredump(const kernel_siginfo_t *siginfo)
{
        struct core_state core_state;
        struct core_name cn;
        struct mm_struct *mm = current->mm;
        struct linux_binfmt * binfmt;
        const struct cred *old_cred;
        struct cred *cred;
        int retval = 0;
        int ispipe;
        size_t *argv = NULL;
        int argc = 0;
        struct files_struct *displaced;
        /* require nonrelative corefile path and be extra careful */
        bool need_suid_safe = false;
        bool core_dumped = false;
        static atomic_t core_dump_count = ATOMIC_INIT(0);
        struct coredump_params cprm = {
                .siginfo = siginfo,
                .regs = signal_pt_regs(),
                .limit = rlimit(RLIMIT_CORE),
                /*
                 * We must use the same mm->flags while dumping core to avoid
                 * inconsistency of bit flags, since this flag is not protected
                 * by any locks.
                 */
                .mm_flags = mm->flags,
                .vma_meta = NULL,
        };

        audit_core_dumps(siginfo->si_signo);

        binfmt = mm->binfmt;
        if (!binfmt || !binfmt->core_dump)
                goto fail;
        if (!__get_dumpable(cprm.mm_flags))
                goto fail;

        cred = prepare_creds();
        if (!cred)
                goto fail;
        /*
         * We cannot trust fsuid as being the "true" uid of the process
         * nor do we know its entire history. We only know it was tainted
         * so we dump it as root in mode 2, and only into a controlled
         * environment (pipe handler or fully qualified path).
         */
        if (__get_dumpable(cprm.mm_flags) == SUID_DUMP_ROOT) {
                /* Setuid core dump mode */
                cred->fsuid = GLOBAL_ROOT_UID;  /* Dump root private */
                need_suid_safe = true;
        }

        retval = coredump_wait(siginfo->si_signo, &core_state);
        if (retval < 0)
                goto fail_creds;

        old_cred = override_creds(cred);

        ispipe = format_corename(&cn, &cprm, &argv, &argc);

        if (ispipe) {
                int argi;
                int dump_count;
                char **helper_argv;
                struct subprocess_info *sub_info;

                if (ispipe < 0) {
                        printk(KERN_WARNING "format_corename failed\n");
                        printk(KERN_WARNING "Aborting core\n");
                        goto fail_unlock;
                }

                if (cprm.limit == 1) {
                        /* See umh_pipe_setup() which sets RLIMIT_CORE = 1.
                         *
                         * Normally core limits are irrelevant to pipes, since
                         * we're not writing to the file system, but we use
                         * cprm.limit of 1 here as a special value, this is a
                         * consistent way to catch recursive crashes.
                         * We can still crash if the core_pattern binary sets
                         * RLIM_CORE = !1, but it runs as root, and can do
                         * lots of stupid things.
                         *
                         * Note that we use task_tgid_vnr here to grab the pid
                         * of the process group leader. That way we get the
                         * right pid if a thread in a multi-threaded
                         * core_pattern process dies.
                         */
                        printk(KERN_WARNING
                                "Process %d(%s) has RLIMIT_CORE set to 1\n",
                                task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Aborting core\n");
                        goto fail_unlock;
                }
                cprm.limit = RLIM_INFINITY;

                dump_count = atomic_inc_return(&core_dump_count);
                if (core_pipe_limit && (core_pipe_limit < dump_count)) {
                        printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
                               task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Skipping core dump\n");
                        goto fail_dropcount;
                }

                helper_argv = kmalloc_array(argc + 1, sizeof(*helper_argv),
                                            GFP_KERNEL);
                if (!helper_argv) {
                        printk(KERN_WARNING "%s failed to allocate memory\n",
                               __func__);
                        goto fail_dropcount;
                }
                for (argi = 0; argi < argc; argi++)
                        helper_argv[argi] = cn.corename + argv[argi];
                helper_argv[argi] = NULL;

                retval = -ENOMEM;
                sub_info = call_usermodehelper_setup(helper_argv[0],
                                                helper_argv, NULL, GFP_KERNEL,
                                                umh_pipe_setup, NULL, &cprm);
                if (sub_info)
                        retval = call_usermodehelper_exec(sub_info,
                                                          UMH_WAIT_EXEC);

                kfree(helper_argv);
                if (retval) {
                        printk(KERN_INFO "Core dump to |%s pipe failed\n",
                               cn.corename);
                        goto close_fail;
                }
        } else {
                struct inode *inode;
                int open_flags = O_CREAT | O_RDWR | O_NOFOLLOW |
                                 O_LARGEFILE | O_EXCL;

                if (cprm.limit < binfmt->min_coredump)
                        goto fail_unlock;

                if (need_suid_safe && cn.corename[0] != '/') {
                        printk(KERN_WARNING "Pid %d(%s) can only dump core "
                                "to fully qualified path!\n",
                                task_tgid_vnr(current), current->comm);
                        printk(KERN_WARNING "Skipping core dump\n");
                        goto fail_unlock;
                }

                /*
                 * Unlink the file if it exists unless this is a SUID
                 * binary - in that case, we're running around with root
                 * privs and don't want to unlink another user's coredump.
                 */
                if (!need_suid_safe) {
                        /*
                         * If it doesn't exist, that's fine. If there's some
                         * other problem, we'll catch it at the filp_open().
                         */
                        do_unlinkat(AT_FDCWD, getname_kernel(cn.corename));
                }

                /*
                 * There is a race between unlinking and creating the
                 * file, but if that causes an EEXIST here, that's
                 * fine - another process raced with us while creating
                 * the corefile, and the other process won. To userspace,
                 * what matters is that at least one of the two processes
                 * writes its coredump successfully, not which one.
                 */
                if (need_suid_safe) {
                        /*
                         * Using user namespaces, normal user tasks can change
                         * their current->fs->root to point to arbitrary
                         * directories. Since the intention of the "only dump
                         * with a fully qualified path" rule is to control where
                         * coredumps may be placed using root privileges,
                         * current->fs->root must not be used. Instead, use the
                         * root directory of init_task.
                         */
                        struct path root;

                        task_lock(&init_task);
                        get_fs_root(init_task.fs, &root);
                        task_unlock(&init_task);
                        cprm.file = file_open_root(&root, cn.corename,
                                                   open_flags, 0600);
                        path_put(&root);
                } else {
                        cprm.file = filp_open(cn.corename, open_flags, 0600);
                }
                if (IS_ERR(cprm.file))
                        goto fail_unlock;

                inode = file_inode(cprm.file);
                if (inode->i_nlink > 1)
                        goto close_fail;
                if (d_unhashed(cprm.file->f_path.dentry))
                        goto close_fail;
                /*
                 * AK: actually i see no reason to not allow this for named
                 * pipes etc, but keep the previous behaviour for now.
                 */
                if (!S_ISREG(inode->i_mode))
                        goto close_fail;
                /*
                 * Don't dump core if the filesystem changed owner or mode
                 * of the file during file creation. This is an issue when
                 * a process dumps core while its cwd is e.g. on a vfat
                 * filesystem.
                 */
                if (!uid_eq(inode->i_uid, current_fsuid()))
                        goto close_fail;
                if ((inode->i_mode & 0677) != 0600)
                        goto close_fail;
                if (!(cprm.file->f_mode & FMODE_CAN_WRITE))
                        goto close_fail;
                if (do_truncate(cprm.file->f_path.dentry, 0, 0, cprm.file))
                        goto close_fail;
        }

        /* get us an unshared descriptor table; almost always a no-op */
        retval = unshare_files(&displaced);
        if (retval)
                goto close_fail;
        if (displaced)
                put_files_struct(displaced);
        if (!dump_interrupted()) {
                /*
                 * umh disabled with CONFIG_STATIC_USERMODEHELPER_PATH="" would
                 * have this set to NULL.
                 */
                if (!cprm.file) {
                        pr_info("Core dump to |%s disabled\n", cn.corename);
                        goto close_fail;
                }
                if (!dump_vma_snapshot(&cprm))
                        goto close_fail;

                file_start_write(cprm.file);
                core_dumped = binfmt->core_dump(&cprm);
                file_end_write(cprm.file);
                free_vma_snapshot(&cprm);
        }
        if (ispipe && core_pipe_limit)
                wait_for_dump_helpers(cprm.file);
close_fail:
        if (cprm.file)
                filp_close(cprm.file, NULL);
fail_dropcount:
        if (ispipe)
                atomic_dec(&core_dump_count);
fail_unlock:
        kfree(argv);
        kfree(cn.corename);
        coredump_finish(mm, core_dumped);
        revert_creds(old_cred);
fail_creds:
        put_cred(cred);
fail:
        return;
}

/*
 * Core dumping helper functions. These are the only things you should
 * do on a core-file: use only these functions to write out all the
 * necessary info.
 */
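/*
 * dump_emit - write a buffer to the core file, honouring the core size
 * limit. Returns 1 on success and 0 on failure (limit reached, dump
 * interrupted, or short write), which lets callers chain checks.
 */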
int dump_emit(struct coredump_params *cprm, const void *addr, int nr)
{
        struct file *file = cprm->file;
        loff_t pos = file->f_pos;
        ssize_t n;

        if (cprm->written + nr > cprm->limit)
                return 0;

        if (dump_interrupted())
                return 0;
        n = __kernel_write(file, addr, nr, &pos);
        if (n != nr)
                return 0;
        file->f_pos = pos;
        cprm->written += n;
        cprm->pos += n;

        return 1;
}
EXPORT_SYMBOL(dump_emit);

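/*
 * dump_skip - advance the dump position by @nr bytes: seek forward to
 * leave a sparse hole when the file supports llseek, otherwise emit
 * explicit zero bytes (e.g. when dumping to a pipe).
 */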
int dump_skip(struct coredump_params *cprm, size_t nr)
{
        static char zeroes[PAGE_SIZE];
        struct file *file = cprm->file;

        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
                if (dump_interrupted() ||
                    file->f_op->llseek(file, nr, SEEK_CUR) < 0)
                        return 0;
                cprm->pos += nr;
                return 1;
        } else {
                while (nr > PAGE_SIZE) {
                        if (!dump_emit(cprm, zeroes, PAGE_SIZE))
                                return 0;
                        nr -= PAGE_SIZE;
                }
                return dump_emit(cprm, zeroes, nr);
        }
}
EXPORT_SYMBOL(dump_skip);

#ifdef CONFIG_ELF_CORE
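/*
 * dump_user_range - dump a range of user memory page by page. Pages
 * that are not present (and would read back as zeroes) are skipped so
 * that the resulting core file can stay sparse.
 */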
int dump_user_range(struct coredump_params *cprm, unsigned long start,
                    unsigned long len)
{
        unsigned long addr;

        for (addr = start; addr < start + len; addr += PAGE_SIZE) {
                struct page *page;
                int stop;

                /*
                 * To avoid having to allocate page tables for virtual address
                 * ranges that have never been used yet, and also to make it
                 * easy to generate sparse core files, use a helper that returns
                 * NULL when encountering an empty page table entry that would
                 * otherwise have been filled with the zero page.
                 */
                page = get_dump_page(addr);
                if (page) {
                        void *kaddr = kmap(page);

                        stop = !dump_emit(cprm, kaddr, PAGE_SIZE);
                        kunmap(page);
                        put_page(page);
                } else {
                        stop = !dump_skip(cprm, PAGE_SIZE);
                }
                if (stop)
                        return 0;
        }
        return 1;
}
#endif

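/*
 * dump_align - advance cprm->pos to the next multiple of @align, which
 * must be a power of two. Returns 1 on success (or if already aligned)
 * and 0 on a bad alignment or a failed skip.
 */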
int dump_align(struct coredump_params *cprm, int align)
{
        unsigned mod = cprm->pos & (align - 1);
        if (align & (align - 1))
                return 0;
        return mod ? dump_skip(cprm, align - mod) : 1;
}
EXPORT_SYMBOL(dump_align);

/*
 * Ensures that file size is big enough to contain the current file
 * position. This prevents gdb from complaining about a truncated file
 * if the last "write" to the file was dump_skip.
 */
void dump_truncate(struct coredump_params *cprm)
{
        struct file *file = cprm->file;
        loff_t offset;

        if (file->f_op->llseek && file->f_op->llseek != no_llseek) {
                offset = file->f_op->llseek(file, 0, SEEK_CUR);
                if (i_size_read(file->f_mapping->host) < offset)
                        do_truncate(file->f_path.dentry, offset, 0, file);
        }
}
EXPORT_SYMBOL(dump_truncate);

/*
 * The purpose of always_dump_vma() is to make sure that special kernel mappings
 * that are useful for post-mortem analysis are included in every core dump.
 * In that way we ensure that the core dump is fully interpretable later
 * without matching up the same kernel and hardware config to see what PC values
 * meant. These special mappings include - vDSO, vsyscall, and other
 * architecture specific mappings
 */
static bool always_dump_vma(struct vm_area_struct *vma)
{
        /* Any vsyscall mappings? */
        if (vma == get_gate_vma(vma->vm_mm))
                return true;

        /*
         * Assume that all vmas with a .name op should always be dumped.
         * If this changes, a new vm_ops field can easily be added.
         */
        if (vma->vm_ops && vma->vm_ops->name && vma->vm_ops->name(vma))
                return true;

        /*
         * arch_vma_name() returns non-NULL for special architecture mappings,
         * such as vDSO sections.
         */
        if (arch_vma_name(vma))
                return true;

        return false;
}

#define DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER 1

/*
 * Decide how much of @vma's contents should be included in a core dump.
 */
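/*
 * The MMF_DUMP_* bits tested by FILTER() below are the same bits that
 * userspace toggles through /proc/<pid>/coredump_filter.
 */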
static unsigned long vma_dump_size(struct vm_area_struct *vma,
                                   unsigned long mm_flags)
{
#define FILTER(type)    (mm_flags & (1UL << MMF_DUMP_##type))

        /* always dump the vdso and vsyscall sections */
        if (always_dump_vma(vma))
                goto whole;

        if (vma->vm_flags & VM_DONTDUMP)
                return 0;

        /* support for DAX */
        if (vma_is_dax(vma)) {
                if ((vma->vm_flags & VM_SHARED) && FILTER(DAX_SHARED))
                        goto whole;
                if (!(vma->vm_flags & VM_SHARED) && FILTER(DAX_PRIVATE))
                        goto whole;
                return 0;
        }

        /* Hugetlb memory check */
        if (is_vm_hugetlb_page(vma)) {
                if ((vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_SHARED))
                        goto whole;
                if (!(vma->vm_flags & VM_SHARED) && FILTER(HUGETLB_PRIVATE))
                        goto whole;
                return 0;
        }

        /* Do not dump I/O mapped devices or special mappings */
        if (vma->vm_flags & VM_IO)
                return 0;

        /* By default, dump shared memory if mapped from an anonymous file. */
        if (vma->vm_flags & VM_SHARED) {
                if (file_inode(vma->vm_file)->i_nlink == 0 ?
                    FILTER(ANON_SHARED) : FILTER(MAPPED_SHARED))
                        goto whole;
                return 0;
        }

        /* Dump segments that have been written to. */
        if ((!IS_ENABLED(CONFIG_MMU) || vma->anon_vma) && FILTER(ANON_PRIVATE))
                goto whole;
        if (vma->vm_file == NULL)
                return 0;

        if (FILTER(MAPPED_PRIVATE))
                goto whole;

        /*
         * If this is the beginning of an executable file mapping,
         * dump the first page to aid in determining what was mapped here.
         */
        if (FILTER(ELF_HEADERS) &&
            vma->vm_pgoff == 0 && (vma->vm_flags & VM_READ)) {
                if ((READ_ONCE(file_inode(vma->vm_file)->i_mode) & 0111) != 0)
                        return PAGE_SIZE;

                /*
                 * ELF libraries aren't always executable.
                 * We'll want to check whether the mapping starts with the ELF
                 * magic, but not now - we're holding the mmap lock,
                 * so copy_from_user() doesn't work here.
                 * Use a placeholder instead, and fix it up later in
                 * dump_vma_snapshot().
                 */
                return DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER;
        }

#undef FILTER

        return 0;

whole:
        return vma->vm_end - vma->vm_start;
}

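/*
 * first_vma()/next_vma() walk the task's VMA list and visit the gate
 * VMA (if any) as a final, extra element of the walk.
 */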
static struct vm_area_struct *first_vma(struct task_struct *tsk,
                                        struct vm_area_struct *gate_vma)
{
        struct vm_area_struct *ret = tsk->mm->mmap;

        if (ret)
                return ret;
        return gate_vma;
}

/*
 * Helper function for iterating across a vma list. It ensures that the caller
 * will visit `gate_vma' prior to terminating the search.
 */
static struct vm_area_struct *next_vma(struct vm_area_struct *this_vma,
                                       struct vm_area_struct *gate_vma)
{
        struct vm_area_struct *ret;

        ret = this_vma->vm_next;
        if (ret)
                return ret;
        if (this_vma == gate_vma)
                return NULL;
        return gate_vma;
}

static void free_vma_snapshot(struct coredump_params *cprm)
{
        if (cprm->vma_meta) {
                int i;

                for (i = 0; i < cprm->vma_count; i++) {
                        struct file *file = cprm->vma_meta[i].file;

                        if (file)
                                fput(file);
                }
                kvfree(cprm->vma_meta);
                cprm->vma_meta = NULL;
        }
}

/*
 * Under the mmap_lock, take a snapshot of relevant information about the task's
 * VMAs.
 */
static bool dump_vma_snapshot(struct coredump_params *cprm)
{
        struct vm_area_struct *vma, *gate_vma;
        struct mm_struct *mm = current->mm;
        int i;

        /*
         * Once the stack expansion code is fixed to not change VMA bounds
         * under mmap_lock in read mode, this can be changed to take the
         * mmap_lock in read mode.
         */
        if (mmap_write_lock_killable(mm))
                return false;

        cprm->vma_data_size = 0;
        gate_vma = get_gate_vma(mm);
        cprm->vma_count = mm->map_count + (gate_vma ? 1 : 0);

        cprm->vma_meta = kvmalloc_array(cprm->vma_count, sizeof(*cprm->vma_meta), GFP_KERNEL);
        if (!cprm->vma_meta) {
                mmap_write_unlock(mm);
                return false;
        }

        for (i = 0, vma = first_vma(current, gate_vma); vma != NULL;
             vma = next_vma(vma, gate_vma), i++) {
                struct core_vma_metadata *m = cprm->vma_meta + i;

                m->start = vma->vm_start;
                m->end = vma->vm_end;
                m->flags = vma->vm_flags;
                m->dump_size = vma_dump_size(vma, cprm->mm_flags);
                m->pgoff = vma->vm_pgoff;

                m->file = vma->vm_file;
                if (m->file)
                        get_file(m->file);
        }

        mmap_write_unlock(mm);

        for (i = 0; i < cprm->vma_count; i++) {
                struct core_vma_metadata *m = cprm->vma_meta + i;

                if (m->dump_size == DUMP_SIZE_MAYBE_ELFHDR_PLACEHOLDER) {
                        char elfmag[SELFMAG];

                        if (copy_from_user(elfmag, (void __user *)m->start, SELFMAG) ||
                            memcmp(elfmag, ELFMAG, SELFMAG) != 0) {
                                m->dump_size = 0;
                        } else {
                                m->dump_size = PAGE_SIZE;
                        }
                }

                cprm->vma_data_size += m->dump_size;
        }

        return true;
}