author     2025-03-08 22:04:20 +0800
committer  2025-03-08 22:04:20 +0800
commit     a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a (patch)
tree       84f21bd0bf7071bc5fc7dd989e77d7ceb5476682 /arch/mips/kernel/sync-r4k.c
Initial commit: OpenHarmony-v4.0-Release
Diffstat (limited to 'arch/mips/kernel/sync-r4k.c')
-rw-r--r--   arch/mips/kernel/sync-r4k.c   122
1 file changed, 122 insertions, 0 deletions
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644
index 000000000..abdd7aaa3
--- /dev/null
+++ b/arch/mips/kernel/sync-r4k.c
@@ -0,0 +1,122 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * Count register synchronisation.
 *
 * All CPUs will have their count registers synchronised to the CPU0 next time
 * value. This can cause a small timewarp for CPU0. All other CPUs should
 * not have done anything significant (but they may have had interrupts
 * enabled briefly - prom_smp_finish() should not be responsible for enabling
 * interrupts...)
 */

#include <linux/kernel.h>
#include <linux/irqflags.h>
#include <linux/cpumask.h>

#include <asm/r4k-timer.h>
#include <linux/atomic.h>
#include <asm/barrier.h>
#include <asm/mipsregs.h>

static unsigned int initcount = 0;
static atomic_t count_count_start = ATOMIC_INIT(0);
static atomic_t count_count_stop = ATOMIC_INIT(0);

#define COUNTON 100
#define NR_LOOPS 3

void synchronise_count_master(int cpu)
{
        int i;
        unsigned long flags;

        pr_info("Synchronize counters for CPU %u: ", cpu);

        local_irq_save(flags);

        /*
         * We loop a few times to get a primed instruction cache,
         * then the last pass is more or less synchronised and
         * the master and slaves each set their cycle counters to a known
         * value all at once. This reduces the chance of having random offsets
         * between the processors, and guarantees that the maximum
         * delay between the cycle counters is never bigger than
         * the latency of information-passing (cachelines) between
         * two CPUs.
         */

        for (i = 0; i < NR_LOOPS; i++) {
                /* slaves loop on '!= 2' */
                while (atomic_read(&count_count_start) != 1)
                        mb();
                atomic_set(&count_count_stop, 0);
                smp_wmb();

                /* Let the slave write its count register */
                atomic_inc(&count_count_start);

                /* Count will be initialised to current timer */
                if (i == 1)
                        initcount = read_c0_count();

                /*
                 * Everyone initialises count in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);

                /*
                 * Wait for slave to leave the synchronization point:
                 */
                while (atomic_read(&count_count_stop) != 1)
                        mb();
                atomic_set(&count_count_start, 0);
                smp_wmb();
                atomic_inc(&count_count_stop);
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);

        local_irq_restore(flags);

        /*
         * i386 code reported the skew here, but the
         * count registers were almost certainly out of sync
         * so no point in alarming people
         */
        pr_cont("done.\n");
}

void synchronise_count_slave(int cpu)
{
        int i;
        unsigned long flags;

        local_irq_save(flags);

        /*
         * Not every cpu is online at the time this gets called,
         * so we first wait for the master to say everyone is ready
         */

        for (i = 0; i < NR_LOOPS; i++) {
                atomic_inc(&count_count_start);
                while (atomic_read(&count_count_start) != 2)
                        mb();

                /*
                 * Everyone initialises count in the last loop:
                 */
                if (i == NR_LOOPS-1)
                        write_c0_count(initcount);

                atomic_inc(&count_count_stop);
                while (atomic_read(&count_count_stop) != 2)
                        mb();
        }
        /* Arrange for an interrupt in a short while */
        write_c0_compare(read_c0_count() + COUNTON);

        local_irq_restore(flags);
}
#undef NR_LOOPS
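The two atomic counters above implement a small rendezvous protocol: in each of the NR_LOOPS rounds the slave bumps count_count_start and spins until the master raises it to 2, both CPUs then do that round's work, and count_count_stop is used the same way so the master can reset the state before the next round; only the final round actually writes the Count register. The sketch below is an illustration only, not part of this file: it rebuilds the same handshake in userspace with C11 atomics and POSIX threads (names such as count_start, initcount and slave_count are invented for the example, and a plain int stands in for the Count register), so the barrier pattern can be read in isolation.

/*
 * Userspace illustration of the count_count_start / count_count_stop
 * handshake.  Invented names; not part of sync-r4k.c.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_LOOPS 3

static atomic_int count_start = 0;   /* mirrors count_count_start */
static atomic_int count_stop  = 0;   /* mirrors count_count_stop  */
static int initcount;                /* value everyone will adopt */
static int slave_count;              /* stands in for the slave's Count register */

static void *master(void *unused)
{
        for (int i = 0; i < NR_LOOPS; i++) {
                /* wait for the slave to arrive (it bumps count_start to 1) */
                while (atomic_load(&count_start) != 1)
                        ;
                atomic_store(&count_stop, 0);

                /* release the slave: count_start becomes 2 */
                atomic_fetch_add(&count_start, 1);

                /* second pass: pick the value everyone will adopt */
                if (i == 1)
                        initcount = 12345;

                /* wait for the slave to finish the round, then reset */
                while (atomic_load(&count_stop) != 1)
                        ;
                atomic_store(&count_start, 0);
                atomic_fetch_add(&count_stop, 1);
        }
        return NULL;
}

static void *slave(void *unused)
{
        for (int i = 0; i < NR_LOOPS; i++) {
                /* announce arrival, then spin until the master releases us */
                atomic_fetch_add(&count_start, 1);
                while (atomic_load(&count_start) != 2)
                        ;

                /* last pass: adopt the value chosen by the master */
                if (i == NR_LOOPS - 1)
                        slave_count = initcount;

                /* signal completion and wait for the master to reset */
                atomic_fetch_add(&count_stop, 1);
                while (atomic_load(&count_stop) != 2)
                        ;
        }
        return NULL;
}

int main(void)
{
        pthread_t m, s;

        pthread_create(&m, NULL, master, NULL);
        pthread_create(&s, NULL, slave, NULL);
        pthread_join(m, NULL);
        pthread_join(s, NULL);
        printf("slave adopted %d\n", slave_count);
        return 0;
}

Built with a C11 compiler and -pthread, the program runs the three rendezvous rounds and exits with the slave having adopted the master-chosen value, mirroring how write_c0_count(initcount) is reached on both CPUs in the last pass of the kernel code.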