aboutsummaryrefslogtreecommitdiffstats
path: root/arch/mips/oprofile
diff options
context:
space:
mode:
authorWe-unite <3205135446@qq.com>2025-03-08 22:04:20 +0800
committerWe-unite <3205135446@qq.com>2025-03-08 22:04:20 +0800
commita07bb8fd1299070229f0e8f3dcb57ffd5ef9870a (patch)
tree84f21bd0bf7071bc5fc7dd989e77d7ceb5476682 /arch/mips/oprofile
downloadohosKernel-a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a.tar.gz
ohosKernel-a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a.zip
Initial commit: OpenHarmony-v4.0-Release
Diffstat (limited to 'arch/mips/oprofile')
-rw-r--r--arch/mips/oprofile/Makefile18
-rw-r--r--arch/mips/oprofile/backtrace.c177
-rw-r--r--arch/mips/oprofile/common.c147
-rw-r--r--arch/mips/oprofile/op_impl.h41
-rw-r--r--arch/mips/oprofile/op_model_loongson2.c161
-rw-r--r--arch/mips/oprofile/op_model_loongson3.c213
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c479
7 files changed, 1236 insertions, 0 deletions
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
new file mode 100644
index 000000000..e10f216d0
--- /dev/null
+++ b/arch/mips/oprofile/Makefile
@@ -0,0 +1,18 @@
# SPDX-License-Identifier: GPL-2.0
obj-$(CONFIG_OPROFILE) += oprofile.o

# Generic oprofile driver objects shared from drivers/oprofile/.
DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
		oprof.o cpu_buffer.o buffer_sync.o \
		event_buffer.o oprofile_files.o \
		oprofilefs.o oprofile_stats.o \
		timer_int.o )

oprofile-y := $(DRIVER_OBJS) common.o backtrace.o

# CPU-family specific performance counter models.
oprofile-$(CONFIG_CPU_MIPS32) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_MIPS64) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_R10000) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_SB1) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_XLR) += op_model_mipsxx.o
oprofile-$(CONFIG_CPU_LOONGSON2EF) += op_model_loongson2.o
oprofile-$(CONFIG_CPU_LOONGSON64) += op_model_loongson3.o
diff --git a/arch/mips/oprofile/backtrace.c b/arch/mips/oprofile/backtrace.c
new file mode 100644
index 000000000..07d98ba7f
--- /dev/null
+++ b/arch/mips/oprofile/backtrace.c
@@ -0,0 +1,177 @@
1// SPDX-License-Identifier: GPL-2.0
2#include <linux/oprofile.h>
3#include <linux/sched.h>
4#include <linux/mm.h>
5#include <linux/uaccess.h>
6#include <asm/ptrace.h>
7#include <asm/stacktrace.h>
8#include <linux/stacktrace.h>
9#include <linux/kernel.h>
10#include <asm/sections.h>
11#include <asm/inst.h>
12
/* Minimal unwinder state for one stack frame. */
struct stackframe {
	unsigned long sp;	/* stack pointer ($29) */
	unsigned long pc;	/* program counter */
	unsigned long ra;	/* return address ($31) */
};
18
/*
 * Fetch one word from a user-space address into *result.
 * Returns 0 on success, a negative value when the address is not
 * accessible (-1) or the non-faulting copy fails (-3).
 */
static inline int get_mem(unsigned long addr, unsigned long *result)
{
	unsigned long *uaddr = (unsigned long *)addr;

	if (!access_ok(uaddr, sizeof(*uaddr)))
		return -1;

	if (__copy_from_user_inatomic(result, uaddr, sizeof(*uaddr)))
		return -3;

	return 0;
}
28
29/*
30 * These two instruction helpers were taken from process.c
31 */
32static inline int is_ra_save_ins(union mips_instruction *ip)
33{
34 /* sw / sd $ra, offset($sp) */
35 return (ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op)
36 && ip->i_format.rs == 29 && ip->i_format.rt == 31;
37}
38
39static inline int is_sp_move_ins(union mips_instruction *ip)
40{
41 /* addiu/daddiu sp,sp,-imm */
42 if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
43 return 0;
44 if (ip->i_format.opcode == addiu_op || ip->i_format.opcode == daddiu_op)
45 return 1;
46 return 0;
47}
48
49/*
50 * Looks for specific instructions that mark the end of a function.
51 * This usually means we ran into the code area of the previous function.
52 */
53static inline int is_end_of_function_marker(union mips_instruction *ip)
54{
55 /* jr ra */
56 if (ip->r_format.func == jr_op && ip->r_format.rs == 31)
57 return 1;
58 /* lui gp */
59 if (ip->i_format.opcode == lui_op && ip->i_format.rt == 28)
60 return 1;
61 return 0;
62}
63
64/*
65 * TODO for userspace stack unwinding:
66 * - handle cases where the stack is adjusted inside a function
67 * (generally doesn't happen)
68 * - find optimal value for max_instr_check
69 * - try to find a better way to handle leaf functions
70 */
71
72static inline int unwind_user_frame(struct stackframe *old_frame,
73 const unsigned int max_instr_check)
74{
75 struct stackframe new_frame = *old_frame;
76 off_t ra_offset = 0;
77 size_t stack_size = 0;
78 unsigned long addr;
79
80 if (old_frame->pc == 0 || old_frame->sp == 0 || old_frame->ra == 0)
81 return -9;
82
83 for (addr = new_frame.pc; (addr + max_instr_check > new_frame.pc)
84 && (!ra_offset || !stack_size); --addr) {
85 union mips_instruction ip;
86
87 if (get_mem(addr, (unsigned long *) &ip))
88 return -11;
89
90 if (is_sp_move_ins(&ip)) {
91 int stack_adjustment = ip.i_format.simmediate;
92 if (stack_adjustment > 0)
93 /* This marks the end of the previous function,
94 which means we overran. */
95 break;
96 stack_size = (unsigned long) stack_adjustment;
97 } else if (is_ra_save_ins(&ip)) {
98 int ra_slot = ip.i_format.simmediate;
99 if (ra_slot < 0)
100 /* This shouldn't happen. */
101 break;
102 ra_offset = ra_slot;
103 } else if (is_end_of_function_marker(&ip))
104 break;
105 }
106
107 if (!ra_offset || !stack_size)
108 goto done;
109
110 if (ra_offset) {
111 new_frame.ra = old_frame->sp + ra_offset;
112 if (get_mem(new_frame.ra, &(new_frame.ra)))
113 return -13;
114 }
115
116 if (stack_size) {
117 new_frame.sp = old_frame->sp + stack_size;
118 if (get_mem(new_frame.sp, &(new_frame.sp)))
119 return -14;
120 }
121
122 if (new_frame.sp > old_frame->sp)
123 return -2;
124
125done:
126 new_frame.pc = old_frame->ra;
127 *old_frame = new_frame;
128
129 return 0;
130}
131
132static inline void do_user_backtrace(unsigned long low_addr,
133 struct stackframe *frame,
134 unsigned int depth)
135{
136 const unsigned int max_instr_check = 512;
137 const unsigned long high_addr = low_addr + THREAD_SIZE;
138
139 while (depth-- && !unwind_user_frame(frame, max_instr_check)) {
140 oprofile_add_trace(frame->ra);
141 if (frame->sp < low_addr || frame->sp > high_addr)
142 break;
143 }
144}
145
#ifndef CONFIG_KALLSYMS
/* Without kallsyms there is no kernel unwinder; record nothing. */
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth) { }
#else
/* Walk kernel frames via the generic MIPS stack unwinder. */
static inline void do_kernel_backtrace(unsigned long low_addr,
				       struct stackframe *frame,
				       unsigned int depth)
{
	while (depth-- != 0 && frame->pc) {
		frame->pc = unwind_stack_by_address(low_addr, &frame->sp,
						    frame->pc, &frame->ra);
		oprofile_add_trace(frame->ra);
	}
}
#endif
164
165void notrace op_mips_backtrace(struct pt_regs *const regs, unsigned int depth)
166{
167 struct stackframe frame = { .sp = regs->regs[29],
168 .pc = regs->cp0_epc,
169 .ra = regs->regs[31] };
170 const int userspace = user_mode(regs);
171 const unsigned long low_addr = ALIGN(frame.sp, THREAD_SIZE);
172
173 if (userspace)
174 do_user_backtrace(low_addr, &frame, depth);
175 else
176 do_kernel_backtrace(low_addr, &frame, depth);
177}
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
new file mode 100644
index 000000000..d3996c4c6
--- /dev/null
+++ b/arch/mips/oprofile/common.c
@@ -0,0 +1,147 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004, 2005 Ralf Baechle
7 * Copyright (C) 2005 MIPS Technologies, Inc.
8 */
9#include <linux/compiler.h>
10#include <linux/errno.h>
11#include <linux/init.h>
12#include <linux/oprofile.h>
13#include <linux/smp.h>
14#include <asm/cpu-info.h>
15#include <asm/cpu-type.h>
16
17#include "op_impl.h"
18
19extern struct op_mips_model op_model_mipsxx_ops __weak;
20extern struct op_mips_model op_model_loongson2_ops __weak;
21extern struct op_mips_model op_model_loongson3_ops __weak;
22
23static struct op_mips_model *model;
24
25static struct op_counter_config ctr[20];
26
27static int op_mips_setup(void)
28{
29 /* Pre-compute the values to stuff in the hardware registers. */
30 model->reg_setup(ctr);
31
32 /* Configure the registers on all cpus. */
33 on_each_cpu(model->cpu_setup, NULL, 1);
34
35 return 0;
36}
37
38static int op_mips_create_files(struct dentry *root)
39{
40 int i;
41
42 for (i = 0; i < model->num_counters; ++i) {
43 struct dentry *dir;
44 char buf[4];
45
46 snprintf(buf, sizeof buf, "%d", i);
47 dir = oprofilefs_mkdir(root, buf);
48
49 oprofilefs_create_ulong(dir, "enabled", &ctr[i].enabled);
50 oprofilefs_create_ulong(dir, "event", &ctr[i].event);
51 oprofilefs_create_ulong(dir, "count", &ctr[i].count);
52 oprofilefs_create_ulong(dir, "kernel", &ctr[i].kernel);
53 oprofilefs_create_ulong(dir, "user", &ctr[i].user);
54 oprofilefs_create_ulong(dir, "exl", &ctr[i].exl);
55 /* Dummy. */
56 oprofilefs_create_ulong(dir, "unit_mask", &ctr[i].unit_mask);
57 }
58
59 return 0;
60}
61
62static int op_mips_start(void)
63{
64 on_each_cpu(model->cpu_start, NULL, 1);
65
66 return 0;
67}
68
69static void op_mips_stop(void)
70{
71 /* Disable performance monitoring for all counters. */
72 on_each_cpu(model->cpu_stop, NULL, 1);
73}
74
75int __init oprofile_arch_init(struct oprofile_operations *ops)
76{
77 struct op_mips_model *lmodel = NULL;
78 int res;
79
80 switch (boot_cpu_type()) {
81 case CPU_5KC:
82 case CPU_M14KC:
83 case CPU_M14KEC:
84 case CPU_20KC:
85 case CPU_24K:
86 case CPU_25KF:
87 case CPU_34K:
88 case CPU_1004K:
89 case CPU_74K:
90 case CPU_1074K:
91 case CPU_INTERAPTIV:
92 case CPU_PROAPTIV:
93 case CPU_P5600:
94 case CPU_I6400:
95 case CPU_M5150:
96 case CPU_LOONGSON32:
97 case CPU_SB1:
98 case CPU_SB1A:
99 case CPU_R10000:
100 case CPU_R12000:
101 case CPU_R14000:
102 case CPU_R16000:
103 case CPU_XLR:
104 lmodel = &op_model_mipsxx_ops;
105 break;
106
107 case CPU_LOONGSON2EF:
108 lmodel = &op_model_loongson2_ops;
109 break;
110 case CPU_LOONGSON64:
111 lmodel = &op_model_loongson3_ops;
112 break;
113 }
114
115 /*
116 * Always set the backtrace. This allows unsupported CPU types to still
117 * use timer-based oprofile.
118 */
119 ops->backtrace = op_mips_backtrace;
120
121 if (!lmodel)
122 return -ENODEV;
123
124 res = lmodel->init();
125 if (res)
126 return res;
127
128 model = lmodel;
129
130 ops->create_files = op_mips_create_files;
131 ops->setup = op_mips_setup;
132 //ops->shutdown = op_mips_shutdown;
133 ops->start = op_mips_start;
134 ops->stop = op_mips_stop;
135 ops->cpu_type = lmodel->cpu_type;
136
137 printk(KERN_INFO "oprofile: using %s performance monitoring.\n",
138 lmodel->cpu_type);
139
140 return 0;
141}
142
143void oprofile_arch_exit(void)
144{
145 if (model)
146 model->exit();
147}
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
new file mode 100644
index 000000000..a4e758a39
--- /dev/null
+++ b/arch/mips/oprofile/op_impl.h
@@ -0,0 +1,41 @@
1/**
2 * @file arch/mips/oprofile/op_impl.h
3 *
4 * @remark Copyright 2002 OProfile authors
5 * @remark Read the file COPYING
6 *
7 * @author Richard Henderson <rth@twiddle.net>
8 */
9
#ifndef OP_IMPL_H
#define OP_IMPL_H 1

/* Performance counter interrupt hook installed by the active model. */
extern int (*perf_irq)(void);

/* Per-counter configuration as set via oprofilefs. */
struct op_counter_config {
	unsigned long enabled;
	unsigned long event;
	unsigned long count;
	/* Dummies because I am too lazy to hack the userspace tools. */
	unsigned long kernel;
	unsigned long user;
	unsigned long exl;
	unsigned long unit_mask;
};

/* Per-architecture configure and hooks. */
struct op_mips_model {
	void (*reg_setup)(struct op_counter_config *);
	void (*cpu_setup)(void *dummy);
	int (*init)(void);
	void (*exit)(void);
	void (*cpu_start)(void *args);
	void (*cpu_stop)(void *args);
	char *cpu_type;
	unsigned char num_counters;
};

void op_mips_backtrace(struct pt_regs *const regs, unsigned int depth);

#endif
diff --git a/arch/mips/oprofile/op_model_loongson2.c b/arch/mips/oprofile/op_model_loongson2.c
new file mode 100644
index 000000000..b249ec0be
--- /dev/null
+++ b/arch/mips/oprofile/op_model_loongson2.c
@@ -0,0 +1,161 @@
1/*
2 * Loongson2 performance counter driver for oprofile
3 *
4 * Copyright (C) 2009 Lemote Inc.
5 * Author: Yanhua <yanh@lemote.com>
6 * Author: Wu Zhangjin <wuzhangjin@gmail.com>
7 *
8 * This file is subject to the terms and conditions of the GNU General Public
9 * License. See the file "COPYING" in the main directory of this archive
10 * for more details.
11 */
12#include <linux/init.h>
13#include <linux/oprofile.h>
14#include <linux/interrupt.h>
15
16#include <loongson.h> /* LOONGSON2_PERFCNT_IRQ */
17#include "op_impl.h"
18
#define LOONGSON2_CPU_TYPE	"mips/loongson2"

/* The 32-bit counters interrupt when bit 31 is set. */
#define LOONGSON2_PERFCNT_OVERFLOW	(1ULL << 31)

#define LOONGSON2_PERFCTRL_EXL		(1UL << 0)
#define LOONGSON2_PERFCTRL_KERNEL	(1UL << 1)
#define LOONGSON2_PERFCTRL_SUPERVISOR	(1UL << 2)
#define LOONGSON2_PERFCTRL_USER		(1UL << 3)
#define LOONGSON2_PERFCTRL_ENABLE	(1UL << 4)
#define LOONGSON2_PERFCTRL_EVENT(idx, event) \
	(((event) & 0x0f) << ((idx) ? 9 : 5))

/* Both counters live in one 64-bit CP0 register pair. */
#define read_c0_perfctrl()	__read_64bit_c0_register($24, 0)
#define write_c0_perfctrl(val)	__write_64bit_c0_register($24, 0, val)
#define read_c0_perfcnt()	__read_64bit_c0_register($25, 0)
#define write_c0_perfcnt(val)	__write_64bit_c0_register($25, 0, val)
35
36static struct loongson2_register_config {
37 unsigned int ctrl;
38 unsigned long long reset_counter1;
39 unsigned long long reset_counter2;
40 int cnt1_enabled, cnt2_enabled;
41} reg;
42
43static char *oprofid = "LoongsonPerf";
44static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id);
45
/* Clear the control and count registers on the calling CPU. */
static void reset_counters(void *arg)
{
	write_c0_perfctrl(0);
	write_c0_perfcnt(0);
}
51
52static void loongson2_reg_setup(struct op_counter_config *cfg)
53{
54 unsigned int ctrl = 0;
55
56 reg.reset_counter1 = 0;
57 reg.reset_counter2 = 0;
58
59 /*
60 * Compute the performance counter ctrl word.
61 * For now, count kernel and user mode.
62 */
63 if (cfg[0].enabled) {
64 ctrl |= LOONGSON2_PERFCTRL_EVENT(0, cfg[0].event);
65 reg.reset_counter1 = 0x80000000ULL - cfg[0].count;
66 }
67
68 if (cfg[1].enabled) {
69 ctrl |= LOONGSON2_PERFCTRL_EVENT(1, cfg[1].event);
70 reg.reset_counter2 = 0x80000000ULL - cfg[1].count;
71 }
72
73 if (cfg[0].enabled || cfg[1].enabled) {
74 ctrl |= LOONGSON2_PERFCTRL_EXL | LOONGSON2_PERFCTRL_ENABLE;
75 if (cfg[0].kernel || cfg[1].kernel)
76 ctrl |= LOONGSON2_PERFCTRL_KERNEL;
77 if (cfg[0].user || cfg[1].user)
78 ctrl |= LOONGSON2_PERFCTRL_USER;
79 }
80
81 reg.ctrl = ctrl;
82
83 reg.cnt1_enabled = cfg[0].enabled;
84 reg.cnt2_enabled = cfg[1].enabled;
85}
86
87static void loongson2_cpu_setup(void *args)
88{
89 write_c0_perfcnt((reg.reset_counter2 << 32) | reg.reset_counter1);
90}
91
92static void loongson2_cpu_start(void *args)
93{
94 /* Start all counters on current CPU */
95 if (reg.cnt1_enabled || reg.cnt2_enabled)
96 write_c0_perfctrl(reg.ctrl);
97}
98
99static void loongson2_cpu_stop(void *args)
100{
101 /* Stop all counters on current CPU */
102 write_c0_perfctrl(0);
103 memset(&reg, 0, sizeof(reg));
104}
105
106static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
107{
108 uint64_t counter, counter1, counter2;
109 struct pt_regs *regs = get_irq_regs();
110 int enabled;
111
112 /* Check whether the irq belongs to me */
113 enabled = read_c0_perfctrl() & LOONGSON2_PERFCTRL_ENABLE;
114 if (!enabled)
115 return IRQ_NONE;
116 enabled = reg.cnt1_enabled | reg.cnt2_enabled;
117 if (!enabled)
118 return IRQ_NONE;
119
120 counter = read_c0_perfcnt();
121 counter1 = counter & 0xffffffff;
122 counter2 = counter >> 32;
123
124 if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
125 if (reg.cnt1_enabled)
126 oprofile_add_sample(regs, 0);
127 counter1 = reg.reset_counter1;
128 }
129 if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
130 if (reg.cnt2_enabled)
131 oprofile_add_sample(regs, 1);
132 counter2 = reg.reset_counter2;
133 }
134
135 write_c0_perfcnt((counter2 << 32) | counter1);
136
137 return IRQ_HANDLED;
138}
139
140static int __init loongson2_init(void)
141{
142 return request_irq(LOONGSON2_PERFCNT_IRQ, loongson2_perfcount_handler,
143 IRQF_SHARED, "Perfcounter", oprofid);
144}
145
146static void loongson2_exit(void)
147{
148 reset_counters(NULL);
149 free_irq(LOONGSON2_PERFCNT_IRQ, oprofid);
150}
151
152struct op_mips_model op_model_loongson2_ops = {
153 .reg_setup = loongson2_reg_setup,
154 .cpu_setup = loongson2_cpu_setup,
155 .init = loongson2_init,
156 .exit = loongson2_exit,
157 .cpu_start = loongson2_cpu_start,
158 .cpu_stop = loongson2_cpu_stop,
159 .cpu_type = LOONGSON2_CPU_TYPE,
160 .num_counters = 2
161};
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
new file mode 100644
index 000000000..436b1fc99
--- /dev/null
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -0,0 +1,213 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 */
7#include <linux/init.h>
8#include <linux/cpu.h>
9#include <linux/smp.h>
10#include <linux/proc_fs.h>
11#include <linux/oprofile.h>
12#include <linux/spinlock.h>
13#include <linux/interrupt.h>
14#include <linux/uaccess.h>
15#include <irq.h>
16#include <loongson.h>
17#include "op_impl.h"
18
/* The 64-bit counters interrupt when bit 63 is set. */
#define LOONGSON3_PERFCNT_OVERFLOW	(1ULL << 63)

#define LOONGSON3_PERFCTRL_EXL		(1UL << 0)
#define LOONGSON3_PERFCTRL_KERNEL	(1UL << 1)
#define LOONGSON3_PERFCTRL_SUPERVISOR	(1UL << 2)
#define LOONGSON3_PERFCTRL_USER		(1UL << 3)
#define LOONGSON3_PERFCTRL_ENABLE	(1UL << 4)
#define LOONGSON3_PERFCTRL_W		(1UL << 30)
#define LOONGSON3_PERFCTRL_M		(1UL << 31)
#define LOONGSON3_PERFCTRL_EVENT(idx, event) \
	(((event) & (idx ? 0x0f : 0x3f)) << 5)

/* Loongson-3 PerfCount performance counter1 register */
#define read_c0_perflo1()	__read_64bit_c0_register($25, 0)
#define write_c0_perflo1(val)	__write_64bit_c0_register($25, 0, val)
#define read_c0_perfhi1()	__read_64bit_c0_register($25, 1)
#define write_c0_perfhi1(val)	__write_64bit_c0_register($25, 1, val)

/* Loongson-3 PerfCount performance counter2 register */
#define read_c0_perflo2()	__read_64bit_c0_register($25, 2)
#define write_c0_perflo2(val)	__write_64bit_c0_register($25, 2, val)
#define read_c0_perfhi2()	__read_64bit_c0_register($25, 3)
#define write_c0_perfhi2(val)	__write_64bit_c0_register($25, 3, val)
42
/* The perf_irq handler that was installed before ours. */
static int (*save_perf_irq)(void);

/* Register image computed by reg_setup and programmed by the CPU hooks. */
static struct loongson3_register_config {
	unsigned int control1;
	unsigned int control2;
	unsigned long long reset_counter1;
	unsigned long long reset_counter2;
	int ctr1_enable, ctr2_enable;
} reg;
52
/* Disable and clear both counters on the calling CPU. */
static void reset_counters(void *arg)
{
	write_c0_perfhi1(0);
	write_c0_perfhi2(0);
	write_c0_perflo1(0xc0000000);
	write_c0_perflo2(0x40000000);
}
60
61/* Compute all of the registers in preparation for enabling profiling. */
62static void loongson3_reg_setup(struct op_counter_config *ctr)
63{
64 unsigned int control1 = 0;
65 unsigned int control2 = 0;
66
67 reg.reset_counter1 = 0;
68 reg.reset_counter2 = 0;
69 /* Compute the performance counter control word. */
70 /* For now count kernel and user mode */
71 if (ctr[0].enabled) {
72 control1 |= LOONGSON3_PERFCTRL_EVENT(0, ctr[0].event) |
73 LOONGSON3_PERFCTRL_ENABLE;
74 if (ctr[0].kernel)
75 control1 |= LOONGSON3_PERFCTRL_KERNEL;
76 if (ctr[0].user)
77 control1 |= LOONGSON3_PERFCTRL_USER;
78 reg.reset_counter1 = 0x8000000000000000ULL - ctr[0].count;
79 }
80
81 if (ctr[1].enabled) {
82 control2 |= LOONGSON3_PERFCTRL_EVENT(1, ctr[1].event) |
83 LOONGSON3_PERFCTRL_ENABLE;
84 if (ctr[1].kernel)
85 control2 |= LOONGSON3_PERFCTRL_KERNEL;
86 if (ctr[1].user)
87 control2 |= LOONGSON3_PERFCTRL_USER;
88 reg.reset_counter2 = 0x8000000000000000ULL - ctr[1].count;
89 }
90
91 if (ctr[0].enabled)
92 control1 |= LOONGSON3_PERFCTRL_EXL;
93 if (ctr[1].enabled)
94 control2 |= LOONGSON3_PERFCTRL_EXL;
95
96 reg.control1 = control1;
97 reg.control2 = control2;
98 reg.ctr1_enable = ctr[0].enabled;
99 reg.ctr2_enable = ctr[1].enabled;
100}
101
102/* Program all of the registers in preparation for enabling profiling. */
103static void loongson3_cpu_setup(void *args)
104{
105 uint64_t perfcount1, perfcount2;
106
107 perfcount1 = reg.reset_counter1;
108 perfcount2 = reg.reset_counter2;
109 write_c0_perfhi1(perfcount1);
110 write_c0_perfhi2(perfcount2);
111}
112
113static void loongson3_cpu_start(void *args)
114{
115 /* Start all counters on current CPU */
116 reg.control1 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
117 reg.control2 |= (LOONGSON3_PERFCTRL_W|LOONGSON3_PERFCTRL_M);
118
119 if (reg.ctr1_enable)
120 write_c0_perflo1(reg.control1);
121 if (reg.ctr2_enable)
122 write_c0_perflo2(reg.control2);
123}
124
125static void loongson3_cpu_stop(void *args)
126{
127 /* Stop all counters on current CPU */
128 write_c0_perflo1(0xc0000000);
129 write_c0_perflo2(0x40000000);
130 memset(&reg, 0, sizeof(reg));
131}
132
133static int loongson3_perfcount_handler(void)
134{
135 unsigned long flags;
136 uint64_t counter1, counter2;
137 uint32_t cause, handled = IRQ_NONE;
138 struct pt_regs *regs = get_irq_regs();
139
140 cause = read_c0_cause();
141 if (!(cause & CAUSEF_PCI))
142 return handled;
143
144 counter1 = read_c0_perfhi1();
145 counter2 = read_c0_perfhi2();
146
147 local_irq_save(flags);
148
149 if (counter1 & LOONGSON3_PERFCNT_OVERFLOW) {
150 if (reg.ctr1_enable)
151 oprofile_add_sample(regs, 0);
152 counter1 = reg.reset_counter1;
153 }
154 if (counter2 & LOONGSON3_PERFCNT_OVERFLOW) {
155 if (reg.ctr2_enable)
156 oprofile_add_sample(regs, 1);
157 counter2 = reg.reset_counter2;
158 }
159
160 local_irq_restore(flags);
161
162 write_c0_perfhi1(counter1);
163 write_c0_perfhi2(counter2);
164
165 if (!(cause & CAUSEF_TI))
166 handled = IRQ_HANDLED;
167
168 return handled;
169}
170
171static int loongson3_starting_cpu(unsigned int cpu)
172{
173 write_c0_perflo1(reg.control1);
174 write_c0_perflo2(reg.control2);
175 return 0;
176}
177
/* CPU hotplug: quiesce the counters on a CPU going offline. */
static int loongson3_dying_cpu(unsigned int cpu)
{
	write_c0_perflo1(0xc0000000);
	write_c0_perflo2(0x40000000);
	return 0;
}
184
185static int __init loongson3_init(void)
186{
187 on_each_cpu(reset_counters, NULL, 1);
188 cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
189 "mips/oprofile/loongson3:starting",
190 loongson3_starting_cpu, loongson3_dying_cpu);
191 save_perf_irq = perf_irq;
192 perf_irq = loongson3_perfcount_handler;
193
194 return 0;
195}
196
197static void loongson3_exit(void)
198{
199 on_each_cpu(reset_counters, NULL, 1);
200 cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
201 perf_irq = save_perf_irq;
202}
203
204struct op_mips_model op_model_loongson3_ops = {
205 .reg_setup = loongson3_reg_setup,
206 .cpu_setup = loongson3_cpu_setup,
207 .init = loongson3_init,
208 .exit = loongson3_exit,
209 .cpu_start = loongson3_cpu_start,
210 .cpu_stop = loongson3_cpu_stop,
211 .cpu_type = "mips/loongson3",
212 .num_counters = 2
213};
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
new file mode 100644
index 000000000..55d7b7fd1
--- /dev/null
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -0,0 +1,479 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2004, 05, 06 by Ralf Baechle
7 * Copyright (C) 2005 by MIPS Technologies, Inc.
8 */
9#include <linux/cpumask.h>
10#include <linux/oprofile.h>
11#include <linux/interrupt.h>
12#include <linux/smp.h>
13#include <asm/irq_regs.h>
14#include <asm/time.h>
15
16#include "op_impl.h"
17
#define M_PERFCTL_EVENT(event)	(((event) << MIPS_PERFCTRL_EVENT_S) & \
				 MIPS_PERFCTRL_EVENT)
#define M_PERFCTL_VPEID(vpe)	((vpe) << MIPS_PERFCTRL_VPEID_S)

/* Counters interrupt when bit 31 is set. */
#define M_COUNTER_OVERFLOW	(1UL << 31)

static int (*save_perf_irq)(void);
static int perfcount_irq;

/*
 * XLR has only one set of counters per core.  Designate the first
 * hardware thread in the core for setup and init; skip CPUs with a
 * non-zero hardware thread id (4 hwt per core).
 */
#if defined(CONFIG_CPU_XLR) && defined(CONFIG_SMP)
#define oprofile_skip_cpu(c)	((cpu_logical_map(c) & 0x3) != 0)
#else
#define oprofile_skip_cpu(c)	0
#endif
37
#ifdef CONFIG_MIPS_MT_SMP
/* Control bits binding the event to the current VPE. */
#define WHAT		(MIPS_PERFCTRL_MT_EN_VPE | \
			 M_PERFCTL_VPEID(cpu_vpe_id(&current_cpu_data)))
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_vpe_id(&current_cpu_data))

/*
 * The number of bits to shift to convert between counters per core and
 * counters per VPE.  There is no reasonable interface atm to obtain the
 * number of VPEs used by Linux and in the 34K this number is fixed to two
 * anyways so we hardcode a few things here for the moment.  The way it's
 * done here will ensure that oprofile VSMP kernel will run right on a lesser
 * core like a 24K also or with maxcpus=1.
 */
static inline unsigned int vpe_shift(void)
{
	return num_possible_cpus() > 1 ? 1 : 0;
}

#else

#define WHAT		0
#define vpe_id()	0

static inline unsigned int vpe_shift(void)
{
	return 0;
}

#endif
71
/* Convert a per-core counter count to the per-VPE view. */
static inline unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}
76
/* Convert a per-VPE counter count back to the per-core view. */
static inline unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}
81
/*
 * On MT cores each VPE reaches a different physical counter through the
 * same logical index.  These generated accessors route logical counter
 * <n> to register <n> on VPE 0 and to its partner register <np> on
 * VPE 1; any other VPE id is a bug.
 */
#define __define_perf_accessors(r, n, np)				\
									\
static inline unsigned int r_c0_ ## r ## n(void)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		return read_c0_ ## r ## n();				\
	case 1:								\
		return read_c0_ ## r ## np();				\
	default:							\
		BUG();							\
	}								\
	return 0;							\
}									\
									\
static inline void w_c0_ ## r ## n(unsigned int value)			\
{									\
	unsigned int cpu = vpe_id();					\
									\
	switch (cpu) {							\
	case 0:								\
		write_c0_ ## r ## n(value);				\
		return;							\
	case 1:								\
		write_c0_ ## r ## np(value);				\
		return;							\
	default:							\
		BUG();							\
	}								\
	return;								\
}

__define_perf_accessors(perfcntr, 0, 2)
__define_perf_accessors(perfcntr, 1, 3)
__define_perf_accessors(perfcntr, 2, 0)
__define_perf_accessors(perfcntr, 3, 1)

__define_perf_accessors(perfctrl, 0, 2)
__define_perf_accessors(perfctrl, 1, 3)
__define_perf_accessors(perfctrl, 2, 0)
__define_perf_accessors(perfctrl, 3, 1)
125
126struct op_mips_model op_model_mipsxx_ops;
127
128static struct mipsxx_register_config {
129 unsigned int control[4];
130 unsigned int counter[4];
131} reg;
132
133/* Compute all of the registers in preparation for enabling profiling. */
134
135static void mipsxx_reg_setup(struct op_counter_config *ctr)
136{
137 unsigned int counters = op_model_mipsxx_ops.num_counters;
138 int i;
139
140 /* Compute the performance counter control word. */
141 for (i = 0; i < counters; i++) {
142 reg.control[i] = 0;
143 reg.counter[i] = 0;
144
145 if (!ctr[i].enabled)
146 continue;
147
148 reg.control[i] = M_PERFCTL_EVENT(ctr[i].event) |
149 MIPS_PERFCTRL_IE;
150 if (ctr[i].kernel)
151 reg.control[i] |= MIPS_PERFCTRL_K;
152 if (ctr[i].user)
153 reg.control[i] |= MIPS_PERFCTRL_U;
154 if (ctr[i].exl)
155 reg.control[i] |= MIPS_PERFCTRL_EXL;
156 if (boot_cpu_type() == CPU_XLR)
157 reg.control[i] |= XLR_PERFCTRL_ALLTHREADS;
158 reg.counter[i] = 0x80000000 - ctr[i].count;
159 }
160}
161
162/* Program all of the registers in preparation for enabling profiling. */
163
164static void mipsxx_cpu_setup(void *args)
165{
166 unsigned int counters = op_model_mipsxx_ops.num_counters;
167
168 if (oprofile_skip_cpu(smp_processor_id()))
169 return;
170
171 switch (counters) {
172 case 4:
173 w_c0_perfctrl3(0);
174 w_c0_perfcntr3(reg.counter[3]);
175 fallthrough;
176 case 3:
177 w_c0_perfctrl2(0);
178 w_c0_perfcntr2(reg.counter[2]);
179 fallthrough;
180 case 2:
181 w_c0_perfctrl1(0);
182 w_c0_perfcntr1(reg.counter[1]);
183 fallthrough;
184 case 1:
185 w_c0_perfctrl0(0);
186 w_c0_perfcntr0(reg.counter[0]);
187 }
188}
189
190/* Start all counters on current CPU */
191static void mipsxx_cpu_start(void *args)
192{
193 unsigned int counters = op_model_mipsxx_ops.num_counters;
194
195 if (oprofile_skip_cpu(smp_processor_id()))
196 return;
197
198 switch (counters) {
199 case 4:
200 w_c0_perfctrl3(WHAT | reg.control[3]);
201 fallthrough;
202 case 3:
203 w_c0_perfctrl2(WHAT | reg.control[2]);
204 fallthrough;
205 case 2:
206 w_c0_perfctrl1(WHAT | reg.control[1]);
207 fallthrough;
208 case 1:
209 w_c0_perfctrl0(WHAT | reg.control[0]);
210 }
211}
212
213/* Stop all counters on current CPU */
214static void mipsxx_cpu_stop(void *args)
215{
216 unsigned int counters = op_model_mipsxx_ops.num_counters;
217
218 if (oprofile_skip_cpu(smp_processor_id()))
219 return;
220
221 switch (counters) {
222 case 4:
223 w_c0_perfctrl3(0);
224 fallthrough;
225 case 3:
226 w_c0_perfctrl2(0);
227 fallthrough;
228 case 2:
229 w_c0_perfctrl1(0);
230 fallthrough;
231 case 1:
232 w_c0_perfctrl0(0);
233 }
234}
235
236static int mipsxx_perfcount_handler(void)
237{
238 unsigned int counters = op_model_mipsxx_ops.num_counters;
239 unsigned int control;
240 unsigned int counter;
241 int handled = IRQ_NONE;
242
243 if (cpu_has_mips_r2 && !(read_c0_cause() & CAUSEF_PCI))
244 return handled;
245
246 switch (counters) {
247#define HANDLE_COUNTER(n) \
248 case n + 1: \
249 control = r_c0_perfctrl ## n(); \
250 counter = r_c0_perfcntr ## n(); \
251 if ((control & MIPS_PERFCTRL_IE) && \
252 (counter & M_COUNTER_OVERFLOW)) { \
253 oprofile_add_sample(get_irq_regs(), n); \
254 w_c0_perfcntr ## n(reg.counter[n]); \
255 handled = IRQ_HANDLED; \
256 }
257 HANDLE_COUNTER(3)
258 fallthrough;
259 HANDLE_COUNTER(2)
260 fallthrough;
261 HANDLE_COUNTER(1)
262 fallthrough;
263 HANDLE_COUNTER(0)
264 }
265
266 return handled;
267}
268
269static inline int __n_counters(void)
270{
271 if (!cpu_has_perf)
272 return 0;
273 if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
274 return 1;
275 if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
276 return 2;
277 if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
278 return 3;
279
280 return 4;
281}
282
283static inline int n_counters(void)
284{
285 int counters;
286
287 switch (current_cpu_type()) {
288 case CPU_R10000:
289 counters = 2;
290 break;
291
292 case CPU_R12000:
293 case CPU_R14000:
294 case CPU_R16000:
295 counters = 4;
296 break;
297
298 default:
299 counters = __n_counters();
300 }
301
302 return counters;
303}
304
305static void reset_counters(void *arg)
306{
307 int counters = (int)(long)arg;
308 switch (counters) {
309 case 4:
310 w_c0_perfctrl3(0);
311 w_c0_perfcntr3(0);
312 fallthrough;
313 case 3:
314 w_c0_perfctrl2(0);
315 w_c0_perfcntr2(0);
316 fallthrough;
317 case 2:
318 w_c0_perfctrl1(0);
319 w_c0_perfcntr1(0);
320 fallthrough;
321 case 1:
322 w_c0_perfctrl0(0);
323 w_c0_perfcntr0(0);
324 }
325}
326
327static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
328{
329 return mipsxx_perfcount_handler();
330}
331
332static int __init mipsxx_init(void)
333{
334 int counters;
335
336 counters = n_counters();
337 if (counters == 0) {
338 printk(KERN_ERR "Oprofile: CPU has no performance counters\n");
339 return -ENODEV;
340 }
341
342#ifdef CONFIG_MIPS_MT_SMP
343 if (!cpu_has_mipsmt_pertccounters)
344 counters = counters_total_to_per_cpu(counters);
345#endif
346 on_each_cpu(reset_counters, (void *)(long)counters, 1);
347
348 op_model_mipsxx_ops.num_counters = counters;
349 switch (current_cpu_type()) {
350 case CPU_M14KC:
351 op_model_mipsxx_ops.cpu_type = "mips/M14Kc";
352 break;
353
354 case CPU_M14KEC:
355 op_model_mipsxx_ops.cpu_type = "mips/M14KEc";
356 break;
357
358 case CPU_20KC:
359 op_model_mipsxx_ops.cpu_type = "mips/20K";
360 break;
361
362 case CPU_24K:
363 op_model_mipsxx_ops.cpu_type = "mips/24K";
364 break;
365
366 case CPU_25KF:
367 op_model_mipsxx_ops.cpu_type = "mips/25K";
368 break;
369
370 case CPU_1004K:
371 case CPU_34K:
372 op_model_mipsxx_ops.cpu_type = "mips/34K";
373 break;
374
375 case CPU_1074K:
376 case CPU_74K:
377 op_model_mipsxx_ops.cpu_type = "mips/74K";
378 break;
379
380 case CPU_INTERAPTIV:
381 op_model_mipsxx_ops.cpu_type = "mips/interAptiv";
382 break;
383
384 case CPU_PROAPTIV:
385 op_model_mipsxx_ops.cpu_type = "mips/proAptiv";
386 break;
387
388 case CPU_P5600:
389 op_model_mipsxx_ops.cpu_type = "mips/P5600";
390 break;
391
392 case CPU_I6400:
393 op_model_mipsxx_ops.cpu_type = "mips/I6400";
394 break;
395
396 case CPU_M5150:
397 op_model_mipsxx_ops.cpu_type = "mips/M5150";
398 break;
399
400 case CPU_5KC:
401 op_model_mipsxx_ops.cpu_type = "mips/5K";
402 break;
403
404 case CPU_R10000:
405 if ((current_cpu_data.processor_id & 0xff) == 0x20)
406 op_model_mipsxx_ops.cpu_type = "mips/r10000-v2.x";
407 else
408 op_model_mipsxx_ops.cpu_type = "mips/r10000";
409 break;
410
411 case CPU_R12000:
412 case CPU_R14000:
413 op_model_mipsxx_ops.cpu_type = "mips/r12000";
414 break;
415
416 case CPU_R16000:
417 op_model_mipsxx_ops.cpu_type = "mips/r16000";
418 break;
419
420 case CPU_SB1:
421 case CPU_SB1A:
422 op_model_mipsxx_ops.cpu_type = "mips/sb1";
423 break;
424
425 case CPU_LOONGSON32:
426 op_model_mipsxx_ops.cpu_type = "mips/loongson1";
427 break;
428
429 case CPU_XLR:
430 op_model_mipsxx_ops.cpu_type = "mips/xlr";
431 break;
432
433 default:
434 printk(KERN_ERR "Profiling unsupported for this CPU\n");
435
436 return -ENODEV;
437 }
438
439 save_perf_irq = perf_irq;
440 perf_irq = mipsxx_perfcount_handler;
441
442 if (get_c0_perfcount_int)
443 perfcount_irq = get_c0_perfcount_int();
444 else if (cp0_perfcount_irq >= 0)
445 perfcount_irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
446 else
447 perfcount_irq = -1;
448
449 if (perfcount_irq >= 0)
450 return request_irq(perfcount_irq, mipsxx_perfcount_int,
451 IRQF_PERCPU | IRQF_NOBALANCING |
452 IRQF_NO_THREAD | IRQF_NO_SUSPEND |
453 IRQF_SHARED,
454 "Perfcounter", save_perf_irq);
455
456 return 0;
457}
458
459static void mipsxx_exit(void)
460{
461 int counters = op_model_mipsxx_ops.num_counters;
462
463 if (perfcount_irq >= 0)
464 free_irq(perfcount_irq, save_perf_irq);
465
466 counters = counters_per_cpu_to_total(counters);
467 on_each_cpu(reset_counters, (void *)(long)counters, 1);
468
469 perf_irq = save_perf_irq;
470}
471
472struct op_mips_model op_model_mipsxx_ops = {
473 .reg_setup = mipsxx_reg_setup,
474 .cpu_setup = mipsxx_cpu_setup,
475 .init = mipsxx_init,
476 .exit = mipsxx_exit,
477 .cpu_start = mipsxx_cpu_start,
478 .cpu_stop = mipsxx_cpu_stop,
479};