diff options
Diffstat (limited to 'arch/mips/mm/c-r4k.c')
-rw-r--r-- | arch/mips/mm/c-r4k.c | 2009 |
1 files changed, 2009 insertions, 0 deletions
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c new file mode 100644 index 000000000..96adc3d23 --- /dev/null +++ b/arch/mips/mm/c-r4k.c | |||
@@ -0,0 +1,2009 @@ | |||
1 | /* | ||
2 | * This file is subject to the terms and conditions of the GNU General Public | ||
3 | * License. See the file "COPYING" in the main directory of this archive | ||
4 | * for more details. | ||
5 | * | ||
6 | * Copyright (C) 1996 David S. Miller (davem@davemloft.net) | ||
7 | * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org) | ||
8 | * Copyright (C) 1999, 2000 Silicon Graphics, Inc. | ||
9 | */ | ||
10 | #include <linux/cpu_pm.h> | ||
11 | #include <linux/hardirq.h> | ||
12 | #include <linux/init.h> | ||
13 | #include <linux/highmem.h> | ||
14 | #include <linux/kernel.h> | ||
15 | #include <linux/linkage.h> | ||
16 | #include <linux/preempt.h> | ||
17 | #include <linux/sched.h> | ||
18 | #include <linux/smp.h> | ||
19 | #include <linux/mm.h> | ||
20 | #include <linux/export.h> | ||
21 | #include <linux/bitops.h> | ||
22 | |||
23 | #include <asm/bcache.h> | ||
24 | #include <asm/bootinfo.h> | ||
25 | #include <asm/cache.h> | ||
26 | #include <asm/cacheops.h> | ||
27 | #include <asm/cpu.h> | ||
28 | #include <asm/cpu-features.h> | ||
29 | #include <asm/cpu-type.h> | ||
30 | #include <asm/io.h> | ||
31 | #include <asm/page.h> | ||
32 | #include <asm/r4kcache.h> | ||
33 | #include <asm/sections.h> | ||
34 | #include <asm/mmu_context.h> | ||
35 | #include <asm/war.h> | ||
36 | #include <asm/cacheflush.h> /* for run_uncached() */ | ||
37 | #include <asm/traps.h> | ||
38 | #include <asm/dma-coherence.h> | ||
39 | #include <asm/mips-cps.h> | ||
40 | |||
41 | /* | ||
42 | * Bits describing what cache ops an SMP callback function may perform. | ||
43 | * | ||
44 | * R4K_HIT - Virtual user or kernel address based cache operations. The | ||
45 | * active_mm must be checked before using user addresses, falling | ||
46 | * back to kmap. | ||
47 | * R4K_INDEX - Index based cache operations. | ||
48 | */ | ||
49 | |||
50 | #define R4K_HIT BIT(0) | ||
51 | #define R4K_INDEX BIT(1) | ||
52 | |||
53 | /** | ||
54 | * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core. | ||
55 | * @type: Type of cache operations (R4K_HIT or R4K_INDEX). | ||
56 | * | ||
57 | * Decides whether a cache op needs to be performed on every core in the system. | ||
58 | * This may change depending on the @type of cache operation, as well as the set | ||
59 | * of online CPUs, so preemption should be disabled by the caller to prevent CPU | ||
60 | * hotplug from changing the result. | ||
61 | * | ||
62 | * Returns: 1 if the cache operation @type should be done on every core in | ||
63 | * the system. | ||
64 | * 0 if the cache operation @type is globalized and only needs to | ||
65 | * be performed on a single CPU. | ||
66 | */ | ||
67 | static inline bool r4k_op_needs_ipi(unsigned int type) | ||
68 | { | ||
69 | /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */ | ||
70 | if (type == R4K_HIT && mips_cm_present()) | ||
71 | return false; | ||
72 | |||
73 | /* | ||
74 | * Hardware doesn't globalize the required cache ops, so SMP calls may | ||
75 | * be needed, but only if there are foreign CPUs (non-siblings with | ||
76 | * separate caches). | ||
77 | */ | ||
78 | /* cpu_foreign_map[] undeclared when !CONFIG_SMP */ | ||
79 | #ifdef CONFIG_SMP | ||
80 | return !cpumask_empty(&cpu_foreign_map[0]); | ||
81 | #else | ||
82 | return false; | ||
83 | #endif | ||
84 | } | ||
85 | |||
86 | /* | ||
87 | * Special Variant of smp_call_function for use by cache functions: | ||
88 | * | ||
89 | * o No return value | ||
90 | * o collapses to normal function call on UP kernels | ||
91 | * o collapses to normal function call on systems with a single shared | ||
92 | * primary cache. | ||
93 | * o doesn't disable interrupts on the local CPU | ||
94 | */ | ||
95 | static inline void r4k_on_each_cpu(unsigned int type, | ||
96 | void (*func)(void *info), void *info) | ||
97 | { | ||
98 | preempt_disable(); | ||
99 | if (r4k_op_needs_ipi(type)) | ||
100 | smp_call_function_many(&cpu_foreign_map[smp_processor_id()], | ||
101 | func, info, 1); | ||
102 | func(info); | ||
103 | preempt_enable(); | ||
104 | } | ||
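As an illustrative sketch only (not part of this file), a hypothetical flush helper built on the primitives above would pick R4K_HIT for address-based ops and leave the IPI decision to r4k_on_each_cpu(); the helper and argument names below are invented for the example:

/* Hypothetical example: a range writeback built on r4k_on_each_cpu(). */
struct example_flush_args {
	unsigned long start;
	unsigned long end;
};

static void local_example_flush(void *info)
{
	struct example_flush_args *args = info;

	/* Hit-type (address-based) op, run on whichever CPU this lands on. */
	blast_dcache_range(args->start, args->end);
}

static void example_flush(unsigned long start, unsigned long end)
{
	struct example_flush_args args = { .start = start, .end = end };

	/*
	 * R4K_HIT: with a Coherence Manager the op is globalized and no
	 * IPI is sent; otherwise foreign cores get the SMP call too.
	 */
	r4k_on_each_cpu(R4K_HIT, local_example_flush, &args);
}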
105 | |||
106 | /* | ||
107 | * Must die. | ||
108 | */ | ||
109 | static unsigned long icache_size __read_mostly; | ||
110 | static unsigned long dcache_size __read_mostly; | ||
111 | static unsigned long vcache_size __read_mostly; | ||
112 | static unsigned long scache_size __read_mostly; | ||
113 | |||
114 | /* | ||
115 | * Dummy cache handling routines for machines without boardcaches | ||
116 | */ | ||
117 | static void cache_noop(void) {} | ||
118 | |||
119 | static struct bcache_ops no_sc_ops = { | ||
120 | .bc_enable = (void *)cache_noop, | ||
121 | .bc_disable = (void *)cache_noop, | ||
122 | .bc_wback_inv = (void *)cache_noop, | ||
123 | .bc_inv = (void *)cache_noop | ||
124 | }; | ||
125 | |||
126 | struct bcache_ops *bcops = &no_sc_ops; | ||
127 | |||
128 | #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) | ||
129 | #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) | ||
130 | |||
131 | #define R4600_HIT_CACHEOP_WAR_IMPL \ | ||
132 | do { \ | ||
133 | if (IS_ENABLED(CONFIG_WAR_R4600_V2_HIT_CACHEOP) && \ | ||
134 | cpu_is_r4600_v2_x()) \ | ||
135 | *(volatile unsigned long *)CKSEG1; \ | ||
136 | if (IS_ENABLED(CONFIG_WAR_R4600_V1_HIT_CACHEOP)) \ | ||
137 | __asm__ __volatile__("nop;nop;nop;nop"); \ | ||
138 | } while (0) | ||
139 | |||
140 | static void (*r4k_blast_dcache_page)(unsigned long addr); | ||
141 | |||
142 | static inline void r4k_blast_dcache_page_dc32(unsigned long addr) | ||
143 | { | ||
144 | R4600_HIT_CACHEOP_WAR_IMPL; | ||
145 | blast_dcache32_page(addr); | ||
146 | } | ||
147 | |||
148 | static inline void r4k_blast_dcache_page_dc64(unsigned long addr) | ||
149 | { | ||
150 | blast_dcache64_page(addr); | ||
151 | } | ||
152 | |||
153 | static inline void r4k_blast_dcache_page_dc128(unsigned long addr) | ||
154 | { | ||
155 | blast_dcache128_page(addr); | ||
156 | } | ||
157 | |||
158 | static void r4k_blast_dcache_page_setup(void) | ||
159 | { | ||
160 | unsigned long dc_lsize = cpu_dcache_line_size(); | ||
161 | |||
162 | switch (dc_lsize) { | ||
163 | case 0: | ||
164 | r4k_blast_dcache_page = (void *)cache_noop; | ||
165 | break; | ||
166 | case 16: | ||
167 | r4k_blast_dcache_page = blast_dcache16_page; | ||
168 | break; | ||
169 | case 32: | ||
170 | r4k_blast_dcache_page = r4k_blast_dcache_page_dc32; | ||
171 | break; | ||
172 | case 64: | ||
173 | r4k_blast_dcache_page = r4k_blast_dcache_page_dc64; | ||
174 | break; | ||
175 | case 128: | ||
176 | r4k_blast_dcache_page = r4k_blast_dcache_page_dc128; | ||
177 | break; | ||
178 | default: | ||
179 | break; | ||
180 | } | ||
181 | } | ||
182 | |||
183 | #ifndef CONFIG_EVA | ||
184 | #define r4k_blast_dcache_user_page r4k_blast_dcache_page | ||
185 | #else | ||
186 | |||
187 | static void (*r4k_blast_dcache_user_page)(unsigned long addr); | ||
188 | |||
189 | static void r4k_blast_dcache_user_page_setup(void) | ||
190 | { | ||
191 | unsigned long dc_lsize = cpu_dcache_line_size(); | ||
192 | |||
193 | if (dc_lsize == 0) | ||
194 | r4k_blast_dcache_user_page = (void *)cache_noop; | ||
195 | else if (dc_lsize == 16) | ||
196 | r4k_blast_dcache_user_page = blast_dcache16_user_page; | ||
197 | else if (dc_lsize == 32) | ||
198 | r4k_blast_dcache_user_page = blast_dcache32_user_page; | ||
199 | else if (dc_lsize == 64) | ||
200 | r4k_blast_dcache_user_page = blast_dcache64_user_page; | ||
201 | } | ||
202 | |||
203 | #endif | ||
204 | |||
205 | static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); | ||
206 | |||
207 | static void r4k_blast_dcache_page_indexed_setup(void) | ||
208 | { | ||
209 | unsigned long dc_lsize = cpu_dcache_line_size(); | ||
210 | |||
211 | if (dc_lsize == 0) | ||
212 | r4k_blast_dcache_page_indexed = (void *)cache_noop; | ||
213 | else if (dc_lsize == 16) | ||
214 | r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed; | ||
215 | else if (dc_lsize == 32) | ||
216 | r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed; | ||
217 | else if (dc_lsize == 64) | ||
218 | r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; | ||
219 | else if (dc_lsize == 128) | ||
220 | r4k_blast_dcache_page_indexed = blast_dcache128_page_indexed; | ||
221 | } | ||
222 | |||
223 | void (* r4k_blast_dcache)(void); | ||
224 | EXPORT_SYMBOL(r4k_blast_dcache); | ||
225 | |||
226 | static void r4k_blast_dcache_setup(void) | ||
227 | { | ||
228 | unsigned long dc_lsize = cpu_dcache_line_size(); | ||
229 | |||
230 | if (dc_lsize == 0) | ||
231 | r4k_blast_dcache = (void *)cache_noop; | ||
232 | else if (dc_lsize == 16) | ||
233 | r4k_blast_dcache = blast_dcache16; | ||
234 | else if (dc_lsize == 32) | ||
235 | r4k_blast_dcache = blast_dcache32; | ||
236 | else if (dc_lsize == 64) | ||
237 | r4k_blast_dcache = blast_dcache64; | ||
238 | else if (dc_lsize == 128) | ||
239 | r4k_blast_dcache = blast_dcache128; | ||
240 | } | ||
241 | |||
242 | /* force code alignment (used for CONFIG_WAR_TX49XX_ICACHE_INDEX_INV) */ | ||
243 | #define JUMP_TO_ALIGN(order) \ | ||
244 | __asm__ __volatile__( \ | ||
245 | "b\t1f\n\t" \ | ||
246 | ".align\t" #order "\n\t" \ | ||
247 | "1:\n\t" \ | ||
248 | ) | ||
249 | #define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */ | ||
250 | #define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11) | ||
251 | |||
252 | static inline void blast_r4600_v1_icache32(void) | ||
253 | { | ||
254 | unsigned long flags; | ||
255 | |||
256 | local_irq_save(flags); | ||
257 | blast_icache32(); | ||
258 | local_irq_restore(flags); | ||
259 | } | ||
260 | |||
261 | static inline void tx49_blast_icache32(void) | ||
262 | { | ||
263 | unsigned long start = INDEX_BASE; | ||
264 | unsigned long end = start + current_cpu_data.icache.waysize; | ||
265 | unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit; | ||
266 | unsigned long ws_end = current_cpu_data.icache.ways << | ||
267 | current_cpu_data.icache.waybit; | ||
268 | unsigned long ws, addr; | ||
269 | |||
270 | CACHE32_UNROLL32_ALIGN2; | ||
271 | /* I'm in even chunk. blast odd chunks */ | ||
272 | for (ws = 0; ws < ws_end; ws += ws_inc) | ||
273 | for (addr = start + 0x400; addr < end; addr += 0x400 * 2) | ||
274 | cache_unroll(32, kernel_cache, Index_Invalidate_I, | ||
275 | addr | ws, 32); | ||
276 | CACHE32_UNROLL32_ALIGN; | ||
277 | /* I'm in odd chunk. blast even chunks */ | ||
278 | for (ws = 0; ws < ws_end; ws += ws_inc) | ||
279 | for (addr = start; addr < end; addr += 0x400 * 2) | ||
280 | cache_unroll(32, kernel_cache, Index_Invalidate_I, | ||
281 | addr | ws, 32); | ||
282 | } | ||
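The 0x400 stride above follows from the unroll factor: each cache_unroll(32, ...) covers 32 lines of 32 bytes, i.e. 0x400 bytes, which is also why the loops are preceded by JUMP_TO_ALIGN(10)/(11). Presumably the even/odd split keeps the TX49 workaround from index-invalidating the 1kB chunk of icache the loop itself is executing from. A small arithmetic sketch (illustrative only):

/*
 * Illustrative arithmetic (32-byte lines, unroll factor 32):
 *
 *   chunk          = 32 lines * 32 bytes = 0x400
 *   odd-chunk pass : addr = start + 0x400, start + 0xc00, ... (< end)
 *   even-chunk pass: addr = start,         start + 0x800, ... (< end)
 *
 * Each address is ORed with every way select (ws), so the two passes
 * together invalidate every 0x400-byte chunk of every way exactly once.
 */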
283 | |||
284 | static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page) | ||
285 | { | ||
286 | unsigned long flags; | ||
287 | |||
288 | local_irq_save(flags); | ||
289 | blast_icache32_page_indexed(page); | ||
290 | local_irq_restore(flags); | ||
291 | } | ||
292 | |||
293 | static inline void tx49_blast_icache32_page_indexed(unsigned long page) | ||
294 | { | ||
295 | unsigned long indexmask = current_cpu_data.icache.waysize - 1; | ||
296 | unsigned long start = INDEX_BASE + (page & indexmask); | ||
297 | unsigned long end = start + PAGE_SIZE; | ||
298 | unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit; | ||
299 | unsigned long ws_end = current_cpu_data.icache.ways << | ||
300 | current_cpu_data.icache.waybit; | ||
301 | unsigned long ws, addr; | ||
302 | |||
303 | CACHE32_UNROLL32_ALIGN2; | ||
304 | /* I'm in even chunk. blast odd chunks */ | ||
305 | for (ws = 0; ws < ws_end; ws += ws_inc) | ||
306 | for (addr = start + 0x400; addr < end; addr += 0x400 * 2) | ||
307 | cache_unroll(32, kernel_cache, Index_Invalidate_I, | ||
308 | addr | ws, 32); | ||
309 | CACHE32_UNROLL32_ALIGN; | ||
310 | /* I'm in odd chunk. blast even chunks */ | ||
311 | for (ws = 0; ws < ws_end; ws += ws_inc) | ||
312 | for (addr = start; addr < end; addr += 0x400 * 2) | ||
313 | cache_unroll(32, kernel_cache, Index_Invalidate_I, | ||
314 | addr | ws, 32); | ||
315 | } | ||
316 | |||
317 | static void (* r4k_blast_icache_page)(unsigned long addr); | ||
318 | |||
319 | static void r4k_blast_icache_page_setup(void) | ||
320 | { | ||
321 | unsigned long ic_lsize = cpu_icache_line_size(); | ||
322 | |||
323 | if (ic_lsize == 0) | ||
324 | r4k_blast_icache_page = (void *)cache_noop; | ||
325 | else if (ic_lsize == 16) | ||
326 | r4k_blast_icache_page = blast_icache16_page; | ||
327 | else if (ic_lsize == 32 && current_cpu_type() == CPU_LOONGSON2EF) | ||
328 | r4k_blast_icache_page = loongson2_blast_icache32_page; | ||
329 | else if (ic_lsize == 32) | ||
330 | r4k_blast_icache_page = blast_icache32_page; | ||
331 | else if (ic_lsize == 64) | ||
332 | r4k_blast_icache_page = blast_icache64_page; | ||
333 | else if (ic_lsize == 128) | ||
334 | r4k_blast_icache_page = blast_icache128_page; | ||
335 | } | ||
336 | |||
337 | #ifndef CONFIG_EVA | ||
338 | #define r4k_blast_icache_user_page r4k_blast_icache_page | ||
339 | #else | ||
340 | |||
341 | static void (*r4k_blast_icache_user_page)(unsigned long addr); | ||
342 | |||
343 | static void r4k_blast_icache_user_page_setup(void) | ||
344 | { | ||
345 | unsigned long ic_lsize = cpu_icache_line_size(); | ||
346 | |||
347 | if (ic_lsize == 0) | ||
348 | r4k_blast_icache_user_page = (void *)cache_noop; | ||
349 | else if (ic_lsize == 16) | ||
350 | r4k_blast_icache_user_page = blast_icache16_user_page; | ||
351 | else if (ic_lsize == 32) | ||
352 | r4k_blast_icache_user_page = blast_icache32_user_page; | ||
353 | else if (ic_lsize == 64) | ||
354 | r4k_blast_icache_user_page = blast_icache64_user_page; | ||
355 | } | ||
356 | |||
357 | #endif | ||
358 | |||
359 | static void (* r4k_blast_icache_page_indexed)(unsigned long addr); | ||
360 | |||
361 | static void r4k_blast_icache_page_indexed_setup(void) | ||
362 | { | ||
363 | unsigned long ic_lsize = cpu_icache_line_size(); | ||
364 | |||
365 | if (ic_lsize == 0) | ||
366 | r4k_blast_icache_page_indexed = (void *)cache_noop; | ||
367 | else if (ic_lsize == 16) | ||
368 | r4k_blast_icache_page_indexed = blast_icache16_page_indexed; | ||
369 | else if (ic_lsize == 32) { | ||
370 | if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) && | ||
371 | cpu_is_r4600_v1_x()) | ||
372 | r4k_blast_icache_page_indexed = | ||
373 | blast_icache32_r4600_v1_page_indexed; | ||
374 | else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV)) | ||
375 | r4k_blast_icache_page_indexed = | ||
376 | tx49_blast_icache32_page_indexed; | ||
377 | else if (current_cpu_type() == CPU_LOONGSON2EF) | ||
378 | r4k_blast_icache_page_indexed = | ||
379 | loongson2_blast_icache32_page_indexed; | ||
380 | else | ||
381 | r4k_blast_icache_page_indexed = | ||
382 | blast_icache32_page_indexed; | ||
383 | } else if (ic_lsize == 64) | ||
384 | r4k_blast_icache_page_indexed = blast_icache64_page_indexed; | ||
385 | } | ||
386 | |||
387 | void (* r4k_blast_icache)(void); | ||
388 | EXPORT_SYMBOL(r4k_blast_icache); | ||
389 | |||
390 | static void r4k_blast_icache_setup(void) | ||
391 | { | ||
392 | unsigned long ic_lsize = cpu_icache_line_size(); | ||
393 | |||
394 | if (ic_lsize == 0) | ||
395 | r4k_blast_icache = (void *)cache_noop; | ||
396 | else if (ic_lsize == 16) | ||
397 | r4k_blast_icache = blast_icache16; | ||
398 | else if (ic_lsize == 32) { | ||
399 | if (IS_ENABLED(CONFIG_WAR_R4600_V1_INDEX_ICACHEOP) && | ||
400 | cpu_is_r4600_v1_x()) | ||
401 | r4k_blast_icache = blast_r4600_v1_icache32; | ||
402 | else if (IS_ENABLED(CONFIG_WAR_TX49XX_ICACHE_INDEX_INV)) | ||
403 | r4k_blast_icache = tx49_blast_icache32; | ||
404 | else if (current_cpu_type() == CPU_LOONGSON2EF) | ||
405 | r4k_blast_icache = loongson2_blast_icache32; | ||
406 | else | ||
407 | r4k_blast_icache = blast_icache32; | ||
408 | } else if (ic_lsize == 64) | ||
409 | r4k_blast_icache = blast_icache64; | ||
410 | else if (ic_lsize == 128) | ||
411 | r4k_blast_icache = blast_icache128; | ||
412 | } | ||
413 | |||
414 | static void (* r4k_blast_scache_page)(unsigned long addr); | ||
415 | |||
416 | static void r4k_blast_scache_page_setup(void) | ||
417 | { | ||
418 | unsigned long sc_lsize = cpu_scache_line_size(); | ||
419 | |||
420 | if (scache_size == 0) | ||
421 | r4k_blast_scache_page = (void *)cache_noop; | ||
422 | else if (sc_lsize == 16) | ||
423 | r4k_blast_scache_page = blast_scache16_page; | ||
424 | else if (sc_lsize == 32) | ||
425 | r4k_blast_scache_page = blast_scache32_page; | ||
426 | else if (sc_lsize == 64) | ||
427 | r4k_blast_scache_page = blast_scache64_page; | ||
428 | else if (sc_lsize == 128) | ||
429 | r4k_blast_scache_page = blast_scache128_page; | ||
430 | } | ||
431 | |||
432 | static void (* r4k_blast_scache_page_indexed)(unsigned long addr); | ||
433 | |||
434 | static void r4k_blast_scache_page_indexed_setup(void) | ||
435 | { | ||
436 | unsigned long sc_lsize = cpu_scache_line_size(); | ||
437 | |||
438 | if (scache_size == 0) | ||
439 | r4k_blast_scache_page_indexed = (void *)cache_noop; | ||
440 | else if (sc_lsize == 16) | ||
441 | r4k_blast_scache_page_indexed = blast_scache16_page_indexed; | ||
442 | else if (sc_lsize == 32) | ||
443 | r4k_blast_scache_page_indexed = blast_scache32_page_indexed; | ||
444 | else if (sc_lsize == 64) | ||
445 | r4k_blast_scache_page_indexed = blast_scache64_page_indexed; | ||
446 | else if (sc_lsize == 128) | ||
447 | r4k_blast_scache_page_indexed = blast_scache128_page_indexed; | ||
448 | } | ||
449 | |||
450 | static void (* r4k_blast_scache)(void); | ||
451 | |||
452 | static void r4k_blast_scache_setup(void) | ||
453 | { | ||
454 | unsigned long sc_lsize = cpu_scache_line_size(); | ||
455 | |||
456 | if (scache_size == 0) | ||
457 | r4k_blast_scache = (void *)cache_noop; | ||
458 | else if (sc_lsize == 16) | ||
459 | r4k_blast_scache = blast_scache16; | ||
460 | else if (sc_lsize == 32) | ||
461 | r4k_blast_scache = blast_scache32; | ||
462 | else if (sc_lsize == 64) | ||
463 | r4k_blast_scache = blast_scache64; | ||
464 | else if (sc_lsize == 128) | ||
465 | r4k_blast_scache = blast_scache128; | ||
466 | } | ||
467 | |||
468 | static void (*r4k_blast_scache_node)(long node); | ||
469 | |||
470 | static void r4k_blast_scache_node_setup(void) | ||
471 | { | ||
472 | unsigned long sc_lsize = cpu_scache_line_size(); | ||
473 | |||
474 | if (current_cpu_type() != CPU_LOONGSON64) | ||
475 | r4k_blast_scache_node = (void *)cache_noop; | ||
476 | else if (sc_lsize == 16) | ||
477 | r4k_blast_scache_node = blast_scache16_node; | ||
478 | else if (sc_lsize == 32) | ||
479 | r4k_blast_scache_node = blast_scache32_node; | ||
480 | else if (sc_lsize == 64) | ||
481 | r4k_blast_scache_node = blast_scache64_node; | ||
482 | else if (sc_lsize == 128) | ||
483 | r4k_blast_scache_node = blast_scache128_node; | ||
484 | } | ||
485 | |||
486 | static inline void local_r4k___flush_cache_all(void * args) | ||
487 | { | ||
488 | switch (current_cpu_type()) { | ||
489 | case CPU_LOONGSON2EF: | ||
490 | case CPU_R4000SC: | ||
491 | case CPU_R4000MC: | ||
492 | case CPU_R4400SC: | ||
493 | case CPU_R4400MC: | ||
494 | case CPU_R10000: | ||
495 | case CPU_R12000: | ||
496 | case CPU_R14000: | ||
497 | case CPU_R16000: | ||
498 | /* | ||
499 | * These caches are inclusive caches, that is, if something | ||
500 | * is not cached in the S-cache, we know it also won't be | ||
501 | * in one of the primary caches. | ||
502 | */ | ||
503 | r4k_blast_scache(); | ||
504 | break; | ||
505 | |||
506 | case CPU_LOONGSON64: | ||
507 | /* Use get_ebase_cpunum() for both NUMA=y/n */ | ||
508 | r4k_blast_scache_node(get_ebase_cpunum() >> 2); | ||
509 | break; | ||
510 | |||
511 | case CPU_BMIPS5000: | ||
512 | r4k_blast_scache(); | ||
513 | __sync(); | ||
514 | break; | ||
515 | |||
516 | default: | ||
517 | r4k_blast_dcache(); | ||
518 | r4k_blast_icache(); | ||
519 | break; | ||
520 | } | ||
521 | } | ||
522 | |||
523 | static void r4k___flush_cache_all(void) | ||
524 | { | ||
525 | r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL); | ||
526 | } | ||
527 | |||
528 | /** | ||
529 | * has_valid_asid() - Determine if an mm already has an ASID. | ||
530 | * @mm: Memory map. | ||
531 | * @type: R4K_HIT or R4K_INDEX, type of cache op. | ||
532 | * | ||
533 | * Determines whether @mm already has an ASID on any of the CPUs which cache ops | ||
534 | * of type @type within an r4k_on_each_cpu() call will affect. If | ||
535 | * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the | ||
536 | * scope of the operation is confined to sibling CPUs, otherwise all online CPUs | ||
537 | * will need to be checked. | ||
538 | * | ||
539 | * Must be called in non-preemptive context. | ||
540 | * | ||
541 | * Returns: 1 if the CPUs affected by @type cache ops have an ASID for @mm. | ||
542 | * 0 otherwise. | ||
543 | */ | ||
544 | static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type) | ||
545 | { | ||
546 | unsigned int i; | ||
547 | const cpumask_t *mask = cpu_present_mask; | ||
548 | |||
549 | if (cpu_has_mmid) | ||
550 | return cpu_context(0, mm) != 0; | ||
551 | |||
552 | /* cpu_sibling_map[] undeclared when !CONFIG_SMP */ | ||
553 | #ifdef CONFIG_SMP | ||
554 | /* | ||
555 | * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in | ||
556 | * each foreign core, so we only need to worry about siblings. | ||
557 | * Otherwise we need to worry about all present CPUs. | ||
558 | */ | ||
559 | if (r4k_op_needs_ipi(type)) | ||
560 | mask = &cpu_sibling_map[smp_processor_id()]; | ||
561 | #endif | ||
562 | for_each_cpu(i, mask) | ||
563 | if (cpu_context(i, mm)) | ||
564 | return 1; | ||
565 | return 0; | ||
566 | } | ||
567 | |||
568 | static void r4k__flush_cache_vmap(void) | ||
569 | { | ||
570 | r4k_blast_dcache(); | ||
571 | } | ||
572 | |||
573 | static void r4k__flush_cache_vunmap(void) | ||
574 | { | ||
575 | r4k_blast_dcache(); | ||
576 | } | ||
577 | |||
578 | /* | ||
579 | * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes | ||
580 | * whole caches when vma is executable. | ||
581 | */ | ||
582 | static inline void local_r4k_flush_cache_range(void * args) | ||
583 | { | ||
584 | struct vm_area_struct *vma = args; | ||
585 | int exec = vma->vm_flags & VM_EXEC; | ||
586 | |||
587 | if (!has_valid_asid(vma->vm_mm, R4K_INDEX)) | ||
588 | return; | ||
589 | |||
590 | /* | ||
591 | * If dcache can alias, we must blast it since mapping is changing. | ||
592 | * If executable, we must ensure any dirty lines are written back far | ||
593 | * enough to be visible to icache. | ||
594 | */ | ||
595 | if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) | ||
596 | r4k_blast_dcache(); | ||
597 | /* If executable, blast stale lines from icache */ | ||
598 | if (exec) | ||
599 | r4k_blast_icache(); | ||
600 | } | ||
601 | |||
602 | static void r4k_flush_cache_range(struct vm_area_struct *vma, | ||
603 | unsigned long start, unsigned long end) | ||
604 | { | ||
605 | int exec = vma->vm_flags & VM_EXEC; | ||
606 | |||
607 | if (cpu_has_dc_aliases || exec) | ||
608 | r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma); | ||
609 | } | ||
610 | |||
611 | static inline void local_r4k_flush_cache_mm(void * args) | ||
612 | { | ||
613 | struct mm_struct *mm = args; | ||
614 | |||
615 | if (!has_valid_asid(mm, R4K_INDEX)) | ||
616 | return; | ||
617 | |||
618 | /* | ||
619 | * only flush the primary caches, while R1x000 behaves sanely ... | ||
620 | * only flush the primary caches but R1x000 behave sane ... | ||
621 | * R4000SC and R4400SC indexed S-cache ops also invalidate primary | ||
622 | * caches, so we can bail out early. | ||
623 | */ | ||
624 | if (current_cpu_type() == CPU_R4000SC || | ||
625 | current_cpu_type() == CPU_R4000MC || | ||
626 | current_cpu_type() == CPU_R4400SC || | ||
627 | current_cpu_type() == CPU_R4400MC) { | ||
628 | r4k_blast_scache(); | ||
629 | return; | ||
630 | } | ||
631 | |||
632 | r4k_blast_dcache(); | ||
633 | } | ||
634 | |||
635 | static void r4k_flush_cache_mm(struct mm_struct *mm) | ||
636 | { | ||
637 | if (!cpu_has_dc_aliases) | ||
638 | return; | ||
639 | |||
640 | r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm); | ||
641 | } | ||
642 | |||
643 | struct flush_cache_page_args { | ||
644 | struct vm_area_struct *vma; | ||
645 | unsigned long addr; | ||
646 | unsigned long pfn; | ||
647 | }; | ||
648 | |||
649 | static inline void local_r4k_flush_cache_page(void *args) | ||
650 | { | ||
651 | struct flush_cache_page_args *fcp_args = args; | ||
652 | struct vm_area_struct *vma = fcp_args->vma; | ||
653 | unsigned long addr = fcp_args->addr; | ||
654 | struct page *page = pfn_to_page(fcp_args->pfn); | ||
655 | int exec = vma->vm_flags & VM_EXEC; | ||
656 | struct mm_struct *mm = vma->vm_mm; | ||
657 | int map_coherent = 0; | ||
658 | pmd_t *pmdp; | ||
659 | pte_t *ptep; | ||
660 | void *vaddr; | ||
661 | |||
662 | /* | ||
663 | * If the mm owns no valid ASID yet, it cannot possibly have | ||
664 | * gotten this page into the cache. | ||
665 | */ | ||
666 | if (!has_valid_asid(mm, R4K_HIT)) | ||
667 | return; | ||
668 | |||
669 | addr &= PAGE_MASK; | ||
670 | pmdp = pmd_off(mm, addr); | ||
671 | ptep = pte_offset_kernel(pmdp, addr); | ||
672 | |||
673 | /* | ||
674 | * If the page isn't marked valid, the page cannot possibly be | ||
675 | * in the cache. | ||
676 | */ | ||
677 | if (!(pte_present(*ptep))) | ||
678 | return; | ||
679 | |||
680 | if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) | ||
681 | vaddr = NULL; | ||
682 | else { | ||
683 | /* | ||
684 | * Use kmap_coherent or kmap_atomic to do flushes for | ||
685 | * an ASID other than the current one. | ||
686 | */ | ||
687 | map_coherent = (cpu_has_dc_aliases && | ||
688 | page_mapcount(page) && | ||
689 | !Page_dcache_dirty(page)); | ||
690 | if (map_coherent) | ||
691 | vaddr = kmap_coherent(page, addr); | ||
692 | else | ||
693 | vaddr = kmap_atomic(page); | ||
694 | addr = (unsigned long)vaddr; | ||
695 | } | ||
696 | |||
697 | if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) { | ||
698 | vaddr ? r4k_blast_dcache_page(addr) : | ||
699 | r4k_blast_dcache_user_page(addr); | ||
700 | if (exec && !cpu_icache_snoops_remote_store) | ||
701 | r4k_blast_scache_page(addr); | ||
702 | } | ||
703 | if (exec) { | ||
704 | if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) { | ||
705 | drop_mmu_context(mm); | ||
706 | } else | ||
707 | vaddr ? r4k_blast_icache_page(addr) : | ||
708 | r4k_blast_icache_user_page(addr); | ||
709 | } | ||
710 | |||
711 | if (vaddr) { | ||
712 | if (map_coherent) | ||
713 | kunmap_coherent(); | ||
714 | else | ||
715 | kunmap_atomic(vaddr); | ||
716 | } | ||
717 | } | ||
718 | |||
719 | static void r4k_flush_cache_page(struct vm_area_struct *vma, | ||
720 | unsigned long addr, unsigned long pfn) | ||
721 | { | ||
722 | struct flush_cache_page_args args; | ||
723 | |||
724 | args.vma = vma; | ||
725 | args.addr = addr; | ||
726 | args.pfn = pfn; | ||
727 | |||
728 | r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args); | ||
729 | } | ||
730 | |||
731 | static inline void local_r4k_flush_data_cache_page(void * addr) | ||
732 | { | ||
733 | r4k_blast_dcache_page((unsigned long) addr); | ||
734 | } | ||
735 | |||
736 | static void r4k_flush_data_cache_page(unsigned long addr) | ||
737 | { | ||
738 | if (in_atomic()) | ||
739 | local_r4k_flush_data_cache_page((void *)addr); | ||
740 | else | ||
741 | r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page, | ||
742 | (void *) addr); | ||
743 | } | ||
744 | |||
745 | struct flush_icache_range_args { | ||
746 | unsigned long start; | ||
747 | unsigned long end; | ||
748 | unsigned int type; | ||
749 | bool user; | ||
750 | }; | ||
751 | |||
752 | static inline void __local_r4k_flush_icache_range(unsigned long start, | ||
753 | unsigned long end, | ||
754 | unsigned int type, | ||
755 | bool user) | ||
756 | { | ||
757 | if (!cpu_has_ic_fills_f_dc) { | ||
758 | if (type == R4K_INDEX || | ||
759 | (type & R4K_INDEX && end - start >= dcache_size)) { | ||
760 | r4k_blast_dcache(); | ||
761 | } else { | ||
762 | R4600_HIT_CACHEOP_WAR_IMPL; | ||
763 | if (user) | ||
764 | protected_blast_dcache_range(start, end); | ||
765 | else | ||
766 | blast_dcache_range(start, end); | ||
767 | } | ||
768 | } | ||
769 | |||
770 | if (type == R4K_INDEX || | ||
771 | (type & R4K_INDEX && end - start > icache_size)) | ||
772 | r4k_blast_icache(); | ||
773 | else { | ||
774 | switch (boot_cpu_type()) { | ||
775 | case CPU_LOONGSON2EF: | ||
776 | protected_loongson2_blast_icache_range(start, end); | ||
777 | break; | ||
778 | |||
779 | default: | ||
780 | if (user) | ||
781 | protected_blast_icache_range(start, end); | ||
782 | else | ||
783 | blast_icache_range(start, end); | ||
784 | break; | ||
785 | } | ||
786 | } | ||
787 | } | ||
788 | |||
789 | static inline void local_r4k_flush_icache_range(unsigned long start, | ||
790 | unsigned long end) | ||
791 | { | ||
792 | __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, false); | ||
793 | } | ||
794 | |||
795 | static inline void local_r4k_flush_icache_user_range(unsigned long start, | ||
796 | unsigned long end) | ||
797 | { | ||
798 | __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX, true); | ||
799 | } | ||
800 | |||
801 | static inline void local_r4k_flush_icache_range_ipi(void *args) | ||
802 | { | ||
803 | struct flush_icache_range_args *fir_args = args; | ||
804 | unsigned long start = fir_args->start; | ||
805 | unsigned long end = fir_args->end; | ||
806 | unsigned int type = fir_args->type; | ||
807 | bool user = fir_args->user; | ||
808 | |||
809 | __local_r4k_flush_icache_range(start, end, type, user); | ||
810 | } | ||
811 | |||
812 | static void __r4k_flush_icache_range(unsigned long start, unsigned long end, | ||
813 | bool user) | ||
814 | { | ||
815 | struct flush_icache_range_args args; | ||
816 | unsigned long size, cache_size; | ||
817 | |||
818 | args.start = start; | ||
819 | args.end = end; | ||
820 | args.type = R4K_HIT | R4K_INDEX; | ||
821 | args.user = user; | ||
822 | |||
823 | /* | ||
824 | * Indexed cache ops require an SMP call. | ||
825 | * Consider if that can or should be avoided. | ||
826 | */ | ||
827 | preempt_disable(); | ||
828 | if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) { | ||
829 | /* | ||
830 | * If address-based cache ops don't require an SMP call, then | ||
831 | * use them exclusively for small flushes. | ||
832 | */ | ||
833 | size = end - start; | ||
834 | cache_size = icache_size; | ||
835 | if (!cpu_has_ic_fills_f_dc) { | ||
836 | size *= 2; | ||
837 | cache_size += dcache_size; | ||
838 | } | ||
839 | if (size <= cache_size) | ||
840 | args.type &= ~R4K_INDEX; | ||
841 | } | ||
842 | r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args); | ||
843 | preempt_enable(); | ||
844 | instruction_hazard(); | ||
845 | } | ||
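A worked example of the size heuristic above, with illustrative cache sizes rather than probed values:

/*
 * Worked example (illustrative sizes only):
 *   icache_size = 32kB, dcache_size = 32kB, !cpu_has_ic_fills_f_dc,
 *   hit ops globalized (e.g. by the CM) but index ops would need IPIs.
 *
 *   flush of  8kB: size = 2 *  8kB =  16kB <= 64kB -> drop R4K_INDEX,
 *                  hit-type range ops only, no IPI required.
 *   flush of 64kB: size = 2 * 64kB = 128kB >  64kB -> keep R4K_INDEX,
 *                  whole-cache blasts, with SMP calls to foreign cores.
 */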
846 | |||
847 | static void r4k_flush_icache_range(unsigned long start, unsigned long end) | ||
848 | { | ||
849 | return __r4k_flush_icache_range(start, end, false); | ||
850 | } | ||
851 | |||
852 | static void r4k_flush_icache_user_range(unsigned long start, unsigned long end) | ||
853 | { | ||
854 | return __r4k_flush_icache_range(start, end, true); | ||
855 | } | ||
856 | |||
857 | #ifdef CONFIG_DMA_NONCOHERENT | ||
858 | |||
859 | static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) | ||
860 | { | ||
861 | /* Catch bad driver code */ | ||
862 | if (WARN_ON(size == 0)) | ||
863 | return; | ||
864 | |||
865 | preempt_disable(); | ||
866 | if (cpu_has_inclusive_pcaches) { | ||
867 | if (size >= scache_size) { | ||
868 | if (current_cpu_type() != CPU_LOONGSON64) | ||
869 | r4k_blast_scache(); | ||
870 | else | ||
871 | r4k_blast_scache_node(pa_to_nid(addr)); | ||
872 | } else { | ||
873 | blast_scache_range(addr, addr + size); | ||
874 | } | ||
875 | preempt_enable(); | ||
876 | __sync(); | ||
877 | return; | ||
878 | } | ||
879 | |||
880 | /* | ||
881 | * Either there is no secondary cache, or the available caches don't | ||
882 | * have the subset property, so we have to flush the primary caches | ||
883 | * explicitly. | ||
884 | * If an INDEX-type operation would need an IPI, we have to fall | ||
885 | * back to the HIT-type alternative, as IPIs cannot be used here | ||
886 | * because interrupts may be disabled. | ||
887 | */ | ||
888 | if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { | ||
889 | r4k_blast_dcache(); | ||
890 | } else { | ||
891 | R4600_HIT_CACHEOP_WAR_IMPL; | ||
892 | blast_dcache_range(addr, addr + size); | ||
893 | } | ||
894 | preempt_enable(); | ||
895 | |||
896 | bc_wback_inv(addr, size); | ||
897 | __sync(); | ||
898 | } | ||
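To make the branches above concrete, a rough decision sketch with illustrative sizes (not part of the file):

/*
 * Illustrative decision tree for r4k_dma_cache_wback_inv():
 *
 *   inclusive P-caches, scache_size = 512kB:
 *     size = 1MB  -> blast the whole S-cache (per-node on Loongson64)
 *     size = 4kB  -> blast_scache_range(addr, addr + size)
 *
 *   no inclusive S-cache, dcache_size = 32kB:
 *     size = 64kB, index ops need no IPI -> r4k_blast_dcache()
 *     size = 64kB, index ops need IPI    -> blast_dcache_range()
 *     size = 1kB                         -> blast_dcache_range()
 *     ... then bc_wback_inv(addr, size) for any board cache, __sync().
 */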
899 | |||
900 | static void prefetch_cache_inv(unsigned long addr, unsigned long size) | ||
901 | { | ||
902 | unsigned int linesz = cpu_scache_line_size(); | ||
903 | unsigned long addr0 = addr, addr1; | ||
904 | |||
905 | addr0 &= ~(linesz - 1); | ||
906 | addr1 = (addr0 + size - 1) & ~(linesz - 1); | ||
907 | |||
908 | protected_writeback_scache_line(addr0); | ||
909 | if (likely(addr1 != addr0)) | ||
910 | protected_writeback_scache_line(addr1); | ||
911 | else | ||
912 | return; | ||
913 | |||
914 | addr0 += linesz; | ||
915 | if (likely(addr1 != addr0)) | ||
916 | protected_writeback_scache_line(addr0); | ||
917 | else | ||
918 | return; | ||
919 | |||
920 | addr1 -= linesz; | ||
921 | if (likely(addr1 > addr0)) | ||
922 | protected_writeback_scache_line(addr0); | ||
923 | } | ||
924 | |||
925 | static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) | ||
926 | { | ||
927 | /* Catch bad driver code */ | ||
928 | if (WARN_ON(size == 0)) | ||
929 | return; | ||
930 | |||
931 | preempt_disable(); | ||
932 | |||
933 | if (current_cpu_type() == CPU_BMIPS5000) | ||
934 | prefetch_cache_inv(addr, size); | ||
935 | |||
936 | if (cpu_has_inclusive_pcaches) { | ||
937 | if (size >= scache_size) { | ||
938 | if (current_cpu_type() != CPU_LOONGSON64) | ||
939 | r4k_blast_scache(); | ||
940 | else | ||
941 | r4k_blast_scache_node(pa_to_nid(addr)); | ||
942 | } else { | ||
943 | /* | ||
944 | * There is no clearly documented alignment requirement | ||
945 | * for the cache instruction on MIPS processors, and | ||
946 | * some processors, among them the QED RM5200 and | ||
947 | * RM7000, will throw an address error for cache hit | ||
948 | * ops with insufficient alignment. This is solved by | ||
949 | * aligning the address to the cache line size. | ||
950 | */ | ||
951 | blast_inv_scache_range(addr, addr + size); | ||
952 | } | ||
953 | preempt_enable(); | ||
954 | __sync(); | ||
955 | return; | ||
956 | } | ||
957 | |||
958 | if (!r4k_op_needs_ipi(R4K_INDEX) && size >= dcache_size) { | ||
959 | r4k_blast_dcache(); | ||
960 | } else { | ||
961 | R4600_HIT_CACHEOP_WAR_IMPL; | ||
962 | blast_inv_dcache_range(addr, addr + size); | ||
963 | } | ||
964 | preempt_enable(); | ||
965 | |||
966 | bc_inv(addr, size); | ||
967 | __sync(); | ||
968 | } | ||
969 | #endif /* CONFIG_DMA_NONCOHERENT */ | ||
970 | |||
971 | static void r4k_flush_icache_all(void) | ||
972 | { | ||
973 | if (cpu_has_vtag_icache) | ||
974 | r4k_blast_icache(); | ||
975 | } | ||
976 | |||
977 | struct flush_kernel_vmap_range_args { | ||
978 | unsigned long vaddr; | ||
979 | int size; | ||
980 | }; | ||
981 | |||
982 | static inline void local_r4k_flush_kernel_vmap_range_index(void *args) | ||
983 | { | ||
984 | /* | ||
985 | * Aliases only affect the primary caches so don't bother with | ||
986 | * S-caches or T-caches. | ||
987 | */ | ||
988 | r4k_blast_dcache(); | ||
989 | } | ||
990 | |||
991 | static inline void local_r4k_flush_kernel_vmap_range(void *args) | ||
992 | { | ||
993 | struct flush_kernel_vmap_range_args *vmra = args; | ||
994 | unsigned long vaddr = vmra->vaddr; | ||
995 | int size = vmra->size; | ||
996 | |||
997 | /* | ||
998 | * Aliases only affect the primary caches so don't bother with | ||
999 | * S-caches or T-caches. | ||
1000 | */ | ||
1001 | R4600_HIT_CACHEOP_WAR_IMPL; | ||
1002 | blast_dcache_range(vaddr, vaddr + size); | ||
1003 | } | ||
1004 | |||
1005 | static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) | ||
1006 | { | ||
1007 | struct flush_kernel_vmap_range_args args; | ||
1008 | |||
1009 | args.vaddr = (unsigned long) vaddr; | ||
1010 | args.size = size; | ||
1011 | |||
1012 | if (size >= dcache_size) | ||
1013 | r4k_on_each_cpu(R4K_INDEX, | ||
1014 | local_r4k_flush_kernel_vmap_range_index, NULL); | ||
1015 | else | ||
1016 | r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range, | ||
1017 | &args); | ||
1018 | } | ||
1019 | |||
1020 | static inline void rm7k_erratum31(void) | ||
1021 | { | ||
1022 | const unsigned long ic_lsize = 32; | ||
1023 | unsigned long addr; | ||
1024 | |||
1025 | /* RM7000 erratum #31. The icache is screwed at startup. */ | ||
1026 | write_c0_taglo(0); | ||
1027 | write_c0_taghi(0); | ||
1028 | |||
1029 | for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) { | ||
1030 | __asm__ __volatile__ ( | ||
1031 | ".set push\n\t" | ||
1032 | ".set noreorder\n\t" | ||
1033 | ".set mips3\n\t" | ||
1034 | "cache\t%1, 0(%0)\n\t" | ||
1035 | "cache\t%1, 0x1000(%0)\n\t" | ||
1036 | "cache\t%1, 0x2000(%0)\n\t" | ||
1037 | "cache\t%1, 0x3000(%0)\n\t" | ||
1038 | "cache\t%2, 0(%0)\n\t" | ||
1039 | "cache\t%2, 0x1000(%0)\n\t" | ||
1040 | "cache\t%2, 0x2000(%0)\n\t" | ||
1041 | "cache\t%2, 0x3000(%0)\n\t" | ||
1042 | "cache\t%1, 0(%0)\n\t" | ||
1043 | "cache\t%1, 0x1000(%0)\n\t" | ||
1044 | "cache\t%1, 0x2000(%0)\n\t" | ||
1045 | "cache\t%1, 0x3000(%0)\n\t" | ||
1046 | ".set pop\n" | ||
1047 | : | ||
1048 | : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill_I)); | ||
1049 | } | ||
1050 | } | ||
1051 | |||
1052 | static inline int alias_74k_erratum(struct cpuinfo_mips *c) | ||
1053 | { | ||
1054 | unsigned int imp = c->processor_id & PRID_IMP_MASK; | ||
1055 | unsigned int rev = c->processor_id & PRID_REV_MASK; | ||
1056 | int present = 0; | ||
1057 | |||
1058 | /* | ||
1059 | * Early versions of the 74K do not update the cache tags on a | ||
1060 | * vtag miss/ptag hit which can occur in the case of KSEG0/KUSEG | ||
1061 | * aliases. In this case it is better to treat the cache as always | ||
1062 | * having aliases. Also disable the synonym tag update feature | ||
1063 | * where available. In this case no opportunistic tag update will | ||
1064 | * happen where a load causes a virtual address miss but a physical | ||
1065 | * address hit during a D-cache look-up. | ||
1066 | */ | ||
1067 | switch (imp) { | ||
1068 | case PRID_IMP_74K: | ||
1069 | if (rev <= PRID_REV_ENCODE_332(2, 4, 0)) | ||
1070 | present = 1; | ||
1071 | if (rev == PRID_REV_ENCODE_332(2, 4, 0)) | ||
1072 | write_c0_config6(read_c0_config6() | MTI_CONF6_SYND); | ||
1073 | break; | ||
1074 | case PRID_IMP_1074K: | ||
1075 | if (rev <= PRID_REV_ENCODE_332(1, 1, 0)) { | ||
1076 | present = 1; | ||
1077 | write_c0_config6(read_c0_config6() | MTI_CONF6_SYND); | ||
1078 | } | ||
1079 | break; | ||
1080 | default: | ||
1081 | BUG(); | ||
1082 | } | ||
1083 | |||
1084 | return present; | ||
1085 | } | ||
1086 | |||
1087 | static void b5k_instruction_hazard(void) | ||
1088 | { | ||
1089 | __sync(); | ||
1090 | __sync(); | ||
1091 | __asm__ __volatile__( | ||
1092 | " nop; nop; nop; nop; nop; nop; nop; nop\n" | ||
1093 | " nop; nop; nop; nop; nop; nop; nop; nop\n" | ||
1094 | " nop; nop; nop; nop; nop; nop; nop; nop\n" | ||
1095 | " nop; nop; nop; nop; nop; nop; nop; nop\n" | ||
1096 | : : : "memory"); | ||
1097 | } | ||
1098 | |||
1099 | static char *way_string[] = { NULL, "direct mapped", "2-way", | ||
1100 | "3-way", "4-way", "5-way", "6-way", "7-way", "8-way", | ||
1101 | "9-way", "10-way", "11-way", "12-way", | ||
1102 | "13-way", "14-way", "15-way", "16-way", | ||
1103 | }; | ||
1104 | |||
1105 | static void probe_pcache(void) | ||
1106 | { | ||
1107 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
1108 | unsigned int config = read_c0_config(); | ||
1109 | unsigned int prid = read_c0_prid(); | ||
1110 | int has_74k_erratum = 0; | ||
1111 | unsigned long config1; | ||
1112 | unsigned int lsize; | ||
1113 | |||
1114 | switch (current_cpu_type()) { | ||
1115 | case CPU_R4600: /* QED style two way caches? */ | ||
1116 | case CPU_R4700: | ||
1117 | case CPU_R5000: | ||
1118 | case CPU_NEVADA: | ||
1119 | icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); | ||
1120 | c->icache.linesz = 16 << ((config & CONF_IB) >> 5); | ||
1121 | c->icache.ways = 2; | ||
1122 | c->icache.waybit = __ffs(icache_size/2); | ||
1123 | |||
1124 | dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); | ||
1125 | c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); | ||
1126 | c->dcache.ways = 2; | ||
1127 | c->dcache.waybit= __ffs(dcache_size/2); | ||
1128 | |||
1129 | c->options |= MIPS_CPU_CACHE_CDEX_P; | ||
1130 | break; | ||
1131 | |||
1132 | case CPU_R5500: | ||
1133 | icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); | ||
1134 | c->icache.linesz = 16 << ((config & CONF_IB) >> 5); | ||
1135 | c->icache.ways = 2; | ||
1136 | c->icache.waybit= 0; | ||
1137 | |||
1138 | dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); | ||
1139 | c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); | ||
1140 | c->dcache.ways = 2; | ||
1141 | c->dcache.waybit = 0; | ||
1142 | |||
1143 | c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH; | ||
1144 | break; | ||
1145 | |||
1146 | case CPU_TX49XX: | ||
1147 | icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); | ||
1148 | c->icache.linesz = 16 << ((config & CONF_IB) >> 5); | ||
1149 | c->icache.ways = 4; | ||
1150 | c->icache.waybit= 0; | ||
1151 | |||
1152 | dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); | ||
1153 | c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); | ||
1154 | c->dcache.ways = 4; | ||
1155 | c->dcache.waybit = 0; | ||
1156 | |||
1157 | c->options |= MIPS_CPU_CACHE_CDEX_P; | ||
1158 | c->options |= MIPS_CPU_PREFETCH; | ||
1159 | break; | ||
1160 | |||
1161 | case CPU_R4000PC: | ||
1162 | case CPU_R4000SC: | ||
1163 | case CPU_R4000MC: | ||
1164 | case CPU_R4400PC: | ||
1165 | case CPU_R4400SC: | ||
1166 | case CPU_R4400MC: | ||
1167 | icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); | ||
1168 | c->icache.linesz = 16 << ((config & CONF_IB) >> 5); | ||
1169 | c->icache.ways = 1; | ||
1170 | c->icache.waybit = 0; /* doesn't matter */ | ||
1171 | |||
1172 | dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); | ||
1173 | c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); | ||
1174 | c->dcache.ways = 1; | ||
1175 | c->dcache.waybit = 0; /* does not matter */ | ||
1176 | |||
1177 | c->options |= MIPS_CPU_CACHE_CDEX_P; | ||
1178 | break; | ||
1179 | |||
1180 | case CPU_R10000: | ||
1181 | case CPU_R12000: | ||
1182 | case CPU_R14000: | ||
1183 | case CPU_R16000: | ||
1184 | icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29)); | ||
1185 | c->icache.linesz = 64; | ||
1186 | c->icache.ways = 2; | ||
1187 | c->icache.waybit = 0; | ||
1188 | |||
1189 | dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26)); | ||
1190 | c->dcache.linesz = 32; | ||
1191 | c->dcache.ways = 2; | ||
1192 | c->dcache.waybit = 0; | ||
1193 | |||
1194 | c->options |= MIPS_CPU_PREFETCH; | ||
1195 | break; | ||
1196 | |||
1197 | case CPU_VR4133: | ||
1198 | write_c0_config(config & ~VR41_CONF_P4K); | ||
1199 | fallthrough; | ||
1200 | case CPU_VR4131: | ||
1201 | /* Workaround for cache instruction bug of VR4131 */ | ||
1202 | if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U || | ||
1203 | c->processor_id == 0x0c82U) { | ||
1204 | config |= 0x00400000U; | ||
1205 | if (c->processor_id == 0x0c80U) | ||
1206 | config |= VR41_CONF_BP; | ||
1207 | write_c0_config(config); | ||
1208 | } else | ||
1209 | c->options |= MIPS_CPU_CACHE_CDEX_P; | ||
1210 | |||
1211 | icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); | ||
1212 | c->icache.linesz = 16 << ((config & CONF_IB) >> 5); | ||
1213 | c->icache.ways = 2; | ||
1214 | c->icache.waybit = __ffs(icache_size/2); | ||
1215 | |||
1216 | dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); | ||
1217 | c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); | ||
1218 | c->dcache.ways = 2; | ||
1219 | c->dcache.waybit = __ffs(dcache_size/2); | ||
1220 | break; | ||
1221 | |||
1222 | case CPU_VR41XX: | ||
1223 | case CPU_VR4111: | ||
1224 | case CPU_VR4121: | ||
1225 | case CPU_VR4122: | ||
1226 | case CPU_VR4181: | ||
1227 | case CPU_VR4181A: | ||
1228 | icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); | ||
1229 | c->icache.linesz = 16 << ((config & CONF_IB) >> 5); | ||
1230 | c->icache.ways = 1; | ||
1231 | c->icache.waybit = 0; /* doesn't matter */ | ||
1232 | |||
1233 | dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); | ||
1234 | c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); | ||
1235 | c->dcache.ways = 1; | ||
1236 | c->dcache.waybit = 0; /* does not matter */ | ||
1237 | |||
1238 | c->options |= MIPS_CPU_CACHE_CDEX_P; | ||
1239 | break; | ||
1240 | |||
1241 | case CPU_RM7000: | ||
1242 | rm7k_erratum31(); | ||
1243 | |||
1244 | icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); | ||
1245 | c->icache.linesz = 16 << ((config & CONF_IB) >> 5); | ||
1246 | c->icache.ways = 4; | ||
1247 | c->icache.waybit = __ffs(icache_size / c->icache.ways); | ||
1248 | |||
1249 | dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); | ||
1250 | c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); | ||
1251 | c->dcache.ways = 4; | ||
1252 | c->dcache.waybit = __ffs(dcache_size / c->dcache.ways); | ||
1253 | |||
1254 | c->options |= MIPS_CPU_CACHE_CDEX_P; | ||
1255 | c->options |= MIPS_CPU_PREFETCH; | ||
1256 | break; | ||
1257 | |||
1258 | case CPU_LOONGSON2EF: | ||
1259 | icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); | ||
1260 | c->icache.linesz = 16 << ((config & CONF_IB) >> 5); | ||
1261 | if (prid & 0x3) | ||
1262 | c->icache.ways = 4; | ||
1263 | else | ||
1264 | c->icache.ways = 2; | ||
1265 | c->icache.waybit = 0; | ||
1266 | |||
1267 | dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); | ||
1268 | c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); | ||
1269 | if (prid & 0x3) | ||
1270 | c->dcache.ways = 4; | ||
1271 | else | ||
1272 | c->dcache.ways = 2; | ||
1273 | c->dcache.waybit = 0; | ||
1274 | break; | ||
1275 | |||
1276 | case CPU_LOONGSON64: | ||
1277 | config1 = read_c0_config1(); | ||
1278 | lsize = (config1 >> 19) & 7; | ||
1279 | if (lsize) | ||
1280 | c->icache.linesz = 2 << lsize; | ||
1281 | else | ||
1282 | c->icache.linesz = 0; | ||
1283 | c->icache.sets = 64 << ((config1 >> 22) & 7); | ||
1284 | c->icache.ways = 1 + ((config1 >> 16) & 7); | ||
1285 | icache_size = c->icache.sets * | ||
1286 | c->icache.ways * | ||
1287 | c->icache.linesz; | ||
1288 | c->icache.waybit = 0; | ||
1289 | |||
1290 | lsize = (config1 >> 10) & 7; | ||
1291 | if (lsize) | ||
1292 | c->dcache.linesz = 2 << lsize; | ||
1293 | else | ||
1294 | c->dcache.linesz = 0; | ||
1295 | c->dcache.sets = 64 << ((config1 >> 13) & 7); | ||
1296 | c->dcache.ways = 1 + ((config1 >> 7) & 7); | ||
1297 | dcache_size = c->dcache.sets * | ||
1298 | c->dcache.ways * | ||
1299 | c->dcache.linesz; | ||
1300 | c->dcache.waybit = 0; | ||
1301 | if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >= | ||
1302 | (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) || | ||
1303 | (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R) | ||
1304 | c->options |= MIPS_CPU_PREFETCH; | ||
1305 | break; | ||
1306 | |||
1307 | case CPU_CAVIUM_OCTEON3: | ||
1308 | /* For now lie about the number of ways. */ | ||
1309 | c->icache.linesz = 128; | ||
1310 | c->icache.sets = 16; | ||
1311 | c->icache.ways = 8; | ||
1312 | c->icache.flags |= MIPS_CACHE_VTAG; | ||
1313 | icache_size = c->icache.sets * c->icache.ways * c->icache.linesz; | ||
1314 | |||
1315 | c->dcache.linesz = 128; | ||
1316 | c->dcache.ways = 8; | ||
1317 | c->dcache.sets = 8; | ||
1318 | dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz; | ||
1319 | c->options |= MIPS_CPU_PREFETCH; | ||
1320 | break; | ||
1321 | |||
1322 | default: | ||
1323 | if (!(config & MIPS_CONF_M)) | ||
1324 | panic("Don't know how to probe P-caches on this cpu."); | ||
1325 | |||
1326 | /* | ||
1327 | * We seem to be a MIPS32 or MIPS64 CPU, | ||
1328 | * so let's probe the I-cache ... | ||
1329 | */ | ||
1330 | config1 = read_c0_config1(); | ||
1331 | |||
1332 | lsize = (config1 >> 19) & 7; | ||
1333 | |||
1334 | /* IL == 7 is reserved */ | ||
1335 | if (lsize == 7) | ||
1336 | panic("Invalid icache line size"); | ||
1337 | |||
1338 | c->icache.linesz = lsize ? 2 << lsize : 0; | ||
1339 | |||
1340 | c->icache.sets = 32 << (((config1 >> 22) + 1) & 7); | ||
1341 | c->icache.ways = 1 + ((config1 >> 16) & 7); | ||
1342 | |||
1343 | icache_size = c->icache.sets * | ||
1344 | c->icache.ways * | ||
1345 | c->icache.linesz; | ||
1346 | c->icache.waybit = __ffs(icache_size/c->icache.ways); | ||
1347 | |||
1348 | if (config & MIPS_CONF_VI) | ||
1349 | c->icache.flags |= MIPS_CACHE_VTAG; | ||
1350 | |||
1351 | /* | ||
1352 | * Now probe the MIPS32 / MIPS64 data cache. | ||
1353 | */ | ||
1354 | c->dcache.flags = 0; | ||
1355 | |||
1356 | lsize = (config1 >> 10) & 7; | ||
1357 | |||
1358 | /* DL == 7 is reserved */ | ||
1359 | if (lsize == 7) | ||
1360 | panic("Invalid dcache line size"); | ||
1361 | |||
1362 | c->dcache.linesz = lsize ? 2 << lsize : 0; | ||
1363 | |||
1364 | c->dcache.sets = 32 << (((config1 >> 13) + 1) & 7); | ||
1365 | c->dcache.ways = 1 + ((config1 >> 7) & 7); | ||
1366 | |||
1367 | dcache_size = c->dcache.sets * | ||
1368 | c->dcache.ways * | ||
1369 | c->dcache.linesz; | ||
1370 | c->dcache.waybit = __ffs(dcache_size/c->dcache.ways); | ||
1371 | |||
1372 | c->options |= MIPS_CPU_PREFETCH; | ||
1373 | break; | ||
1374 | } | ||
1375 | |||
1376 | /* | ||
1377 | * Processor configuration sanity check for the R4000SC erratum | ||
1378 | * #5. With page sizes larger than 32kB there is no possibility | ||
1379 | * of getting a VCE exception anymore, so we don't care about | ||
1380 | * this misconfiguration. The case is rather theoretical anyway; | ||
1381 | * presumably no vendor is shipping its hardware in the "bad" | ||
1382 | * configuration. | ||
1383 | */ | ||
1384 | if ((prid & PRID_IMP_MASK) == PRID_IMP_R4000 && | ||
1385 | (prid & PRID_REV_MASK) < PRID_REV_R4400 && | ||
1386 | !(config & CONF_SC) && c->icache.linesz != 16 && | ||
1387 | PAGE_SIZE <= 0x8000) | ||
1388 | panic("Improper R4000SC processor configuration detected"); | ||
1389 | |||
1390 | /* compute a couple of other cache variables */ | ||
1391 | c->icache.waysize = icache_size / c->icache.ways; | ||
1392 | c->dcache.waysize = dcache_size / c->dcache.ways; | ||
1393 | |||
1394 | c->icache.sets = c->icache.linesz ? | ||
1395 | icache_size / (c->icache.linesz * c->icache.ways) : 0; | ||
1396 | c->dcache.sets = c->dcache.linesz ? | ||
1397 | dcache_size / (c->dcache.linesz * c->dcache.ways) : 0; | ||
1398 | |||
1399 | /* | ||
1400 | * R1x000 P-caches are odd in a positive way. They're 32kB 2-way | ||
1401 | * virtually indexed, so they would normally suffer from aliases, | ||
1402 | * but magic in the hardware deals with that for us, so we don't | ||
1403 | * need to take care of it ourselves. | ||
1404 | */ | ||
1405 | switch (current_cpu_type()) { | ||
1406 | case CPU_20KC: | ||
1407 | case CPU_25KF: | ||
1408 | case CPU_I6400: | ||
1409 | case CPU_I6500: | ||
1410 | case CPU_SB1: | ||
1411 | case CPU_SB1A: | ||
1412 | case CPU_XLR: | ||
1413 | c->dcache.flags |= MIPS_CACHE_PINDEX; | ||
1414 | break; | ||
1415 | |||
1416 | case CPU_R10000: | ||
1417 | case CPU_R12000: | ||
1418 | case CPU_R14000: | ||
1419 | case CPU_R16000: | ||
1420 | break; | ||
1421 | |||
1422 | case CPU_74K: | ||
1423 | case CPU_1074K: | ||
1424 | has_74k_erratum = alias_74k_erratum(c); | ||
1425 | fallthrough; | ||
1426 | case CPU_M14KC: | ||
1427 | case CPU_M14KEC: | ||
1428 | case CPU_24K: | ||
1429 | case CPU_34K: | ||
1430 | case CPU_1004K: | ||
1431 | case CPU_INTERAPTIV: | ||
1432 | case CPU_P5600: | ||
1433 | case CPU_PROAPTIV: | ||
1434 | case CPU_M5150: | ||
1435 | case CPU_QEMU_GENERIC: | ||
1436 | case CPU_P6600: | ||
1437 | case CPU_M6250: | ||
1438 | if (!(read_c0_config7() & MIPS_CONF7_IAR) && | ||
1439 | (c->icache.waysize > PAGE_SIZE)) | ||
1440 | c->icache.flags |= MIPS_CACHE_ALIASES; | ||
1441 | if (!has_74k_erratum && (read_c0_config7() & MIPS_CONF7_AR)) { | ||
1442 | /* | ||
1443 | * Effectively physically indexed dcache, | ||
1444 | * thus no virtual aliases. | ||
1445 | */ | ||
1446 | c->dcache.flags |= MIPS_CACHE_PINDEX; | ||
1447 | break; | ||
1448 | } | ||
1449 | fallthrough; | ||
1450 | default: | ||
1451 | if (has_74k_erratum || c->dcache.waysize > PAGE_SIZE) | ||
1452 | c->dcache.flags |= MIPS_CACHE_ALIASES; | ||
1453 | } | ||
1454 | |||
1455 | /* Physically indexed caches don't suffer from virtual aliasing */ | ||
1456 | if (c->dcache.flags & MIPS_CACHE_PINDEX) | ||
1457 | c->dcache.flags &= ~MIPS_CACHE_ALIASES; | ||
1458 | |||
1459 | /* | ||
1460 | * In systems with CM the icache fills from L2 or closer caches, and | ||
1461 | * thus sees remote stores without needing to write them back any | ||
1462 | * further than that. | ||
1463 | */ | ||
1464 | if (mips_cm_present()) | ||
1465 | c->icache.flags |= MIPS_IC_SNOOPS_REMOTE; | ||
1466 | |||
1467 | switch (current_cpu_type()) { | ||
1468 | case CPU_20KC: | ||
1469 | /* | ||
1470 | * Some older 20Kc chips don't have the 'VI' bit in | ||
1471 | * the config register. | ||
1472 | */ | ||
1473 | c->icache.flags |= MIPS_CACHE_VTAG; | ||
1474 | break; | ||
1475 | |||
1476 | case CPU_ALCHEMY: | ||
1477 | case CPU_I6400: | ||
1478 | case CPU_I6500: | ||
1479 | c->icache.flags |= MIPS_CACHE_IC_F_DC; | ||
1480 | break; | ||
1481 | |||
1482 | case CPU_BMIPS5000: | ||
1483 | c->icache.flags |= MIPS_CACHE_IC_F_DC; | ||
1484 | /* Cache aliases are handled in hardware; allow HIGHMEM */ | ||
1485 | c->dcache.flags &= ~MIPS_CACHE_ALIASES; | ||
1486 | break; | ||
1487 | |||
1488 | case CPU_LOONGSON2EF: | ||
1489 | /* | ||
1490 | * LOONGSON2 has a 4-way icache, but when using an indexed cache | ||
1491 | * op, one op will act on all 4 ways. | ||
1492 | */ | ||
1493 | c->icache.ways = 1; | ||
1494 | } | ||
1495 | |||
1496 | pr_info("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n", | ||
1497 | icache_size >> 10, | ||
1498 | c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT", | ||
1499 | way_string[c->icache.ways], c->icache.linesz); | ||
1500 | |||
1501 | pr_info("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n", | ||
1502 | dcache_size >> 10, way_string[c->dcache.ways], | ||
1503 | (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT", | ||
1504 | (c->dcache.flags & MIPS_CACHE_ALIASES) ? | ||
1505 | "cache aliases" : "no aliases", | ||
1506 | c->dcache.linesz); | ||
1507 | } | ||
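A worked example of the alias detection feeding the flags above, using an illustrative configuration rather than probed values:

/*
 * Illustrative alias check (example configuration):
 *
 *   dcache_size = 32kB, ways = 4, linesz = 32
 *   waysize     = 32kB / 4 = 8kB,  PAGE_SIZE = 4kB
 *
 *   waysize > PAGE_SIZE: VA bit 12 selects the set but is not a page
 *   offset bit, so two virtual mappings of one physical page can land
 *   in different sets -> c->dcache.flags |= MIPS_CACHE_ALIASES
 *   (unless the CPU reports or handles aliases in hardware, as above).
 *
 *   A 16kB 4-way dcache (waysize = 4kB) would not set the flag.
 */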
1508 | |||
1509 | static void probe_vcache(void) | ||
1510 | { | ||
1511 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
1512 | unsigned int config2, lsize; | ||
1513 | |||
1514 | if (current_cpu_type() != CPU_LOONGSON64) | ||
1515 | return; | ||
1516 | |||
1517 | config2 = read_c0_config2(); | ||
1518 | if ((lsize = ((config2 >> 20) & 15))) | ||
1519 | c->vcache.linesz = 2 << lsize; | ||
1520 | else | ||
1521 | c->vcache.linesz = lsize; | ||
1522 | |||
1523 | c->vcache.sets = 64 << ((config2 >> 24) & 15); | ||
1524 | c->vcache.ways = 1 + ((config2 >> 16) & 15); | ||
1525 | |||
1526 | vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz; | ||
1527 | |||
1528 | c->vcache.waybit = 0; | ||
1529 | c->vcache.waysize = vcache_size / c->vcache.ways; | ||
1530 | |||
1531 | pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n", | ||
1532 | vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz); | ||
1533 | } | ||
1534 | |||
1535 | /* | ||
1536 | * If you even _breathe_ on this function, look at the gcc output and make sure | ||
1537 | * it does not pop things on and off the stack for the cache sizing loop that | ||
1538 | * executes in KSEG1 space or else you will crash and burn badly. You have | ||
1539 | * been warned. | ||
1540 | */ | ||
1541 | static int probe_scache(void) | ||
1542 | { | ||
1543 | unsigned long flags, addr, begin, end, pow2; | ||
1544 | unsigned int config = read_c0_config(); | ||
1545 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
1546 | |||
1547 | if (config & CONF_SC) | ||
1548 | return 0; | ||
1549 | |||
1550 | begin = (unsigned long) &_stext; | ||
1551 | begin &= ~((4 * 1024 * 1024) - 1); | ||
1552 | end = begin + (4 * 1024 * 1024); | ||
1553 | |||
1554 | /* | ||
1555 | * This is such a bitch, you'd think they would make it easy to do | ||
1556 | * this. Away you daemons of stupidity! | ||
1557 | */ | ||
1558 | local_irq_save(flags); | ||
1559 | |||
1560 | /* Fill each size-multiple cache line with a valid tag. */ | ||
1561 | pow2 = (64 * 1024); | ||
1562 | for (addr = begin; addr < end; addr = (begin + pow2)) { | ||
1563 | unsigned long *p = (unsigned long *) addr; | ||
1564 | __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */ | ||
1565 | pow2 <<= 1; | ||
1566 | } | ||
1567 | |||
1568 | /* Load first line with zero (therefore invalid) tag. */ | ||
1569 | write_c0_taglo(0); | ||
1570 | write_c0_taghi(0); | ||
1571 | __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */ | ||
1572 | cache_op(Index_Store_Tag_I, begin); | ||
1573 | cache_op(Index_Store_Tag_D, begin); | ||
1574 | cache_op(Index_Store_Tag_SD, begin); | ||
1575 | |||
1576 | /* Now search for the wrap around point. */ | ||
1577 | pow2 = (128 * 1024); | ||
1578 | for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { | ||
1579 | cache_op(Index_Load_Tag_SD, addr); | ||
1580 | __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... */ | ||
1581 | if (!read_c0_taglo()) | ||
1582 | break; | ||
1583 | pow2 <<= 1; | ||
1584 | } | ||
1585 | local_irq_restore(flags); | ||
1586 | addr -= begin; | ||
1587 | |||
1588 | scache_size = addr; | ||
1589 | c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22); | ||
1590 | c->scache.ways = 1; | ||
1591 | c->scache.waybit = 0; /* does not matter */ | ||
1592 | |||
1593 | return 1; | ||
1594 | } | ||
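/*
 * Editorial sketch, illustrative only (not part of the original patch and
 * never compiled): a restatement of the wrap-around search above.  After
 * valid tags have been planted at begin + 64K, begin + 128K, ... and a zero
 * (invalid) tag has been stored at the index of 'begin', the first
 * power-of-two offset whose cache index wraps back onto 'begin' reads a zero
 * tag, and that offset is the secondary cache size.  The helper name is
 * hypothetical and the CP0 hazard barriers are omitted for brevity.
 */
#if 0
static unsigned long scache_wrap_size(unsigned long begin, unsigned long end)
{
	unsigned long off;

	for (off = 128 * 1024; begin + off < end; off <<= 1) {
		cache_op(Index_Load_Tag_SD, begin + off);	/* load the tag */
		if (!read_c0_taglo())
			return off;	/* index wrapped onto 'begin' */
	}
	return 0;
}
#endif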
1595 | |||
1596 | static void loongson2_sc_init(void) | ||
1597 | { | ||
1598 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
1599 | |||
1600 | scache_size = 512*1024; | ||
1601 | c->scache.linesz = 32; | ||
1602 | c->scache.ways = 4; | ||
1603 | c->scache.waybit = 0; | ||
1604 | c->scache.waysize = scache_size / (c->scache.ways); | ||
1605 | c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways); | ||
1606 | pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", | ||
1607 | scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); | ||
1608 | |||
1609 | c->options |= MIPS_CPU_INCLUSIVE_CACHES; | ||
1610 | } | ||
1611 | |||
1612 | static void loongson3_sc_init(void) | ||
1613 | { | ||
1614 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
1615 | unsigned int config2, lsize; | ||
1616 | |||
1617 | config2 = read_c0_config2(); | ||
1618 | lsize = (config2 >> 4) & 15; | ||
1619 | if (lsize) | ||
1620 | c->scache.linesz = 2 << lsize; | ||
1621 | else | ||
1622 | c->scache.linesz = 0; | ||
1623 | c->scache.sets = 64 << ((config2 >> 8) & 15); | ||
1624 | c->scache.ways = 1 + (config2 & 15); | ||
1625 | |||
1626 | scache_size = c->scache.sets * | ||
1627 | c->scache.ways * | ||
1628 | c->scache.linesz; | ||
1629 | |||
1630 | /* Loongson-3 has 4 scache banks, while Loongson-2K has only 2 banks */ | ||
1631 | if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R) | ||
1632 | scache_size *= 2; | ||
1633 | else | ||
1634 | scache_size *= 4; | ||
1635 | |||
1636 | c->scache.waybit = 0; | ||
1637 | c->scache.waysize = scache_size / c->scache.ways; | ||
1638 | pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", | ||
1639 | scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); | ||
1640 | if (scache_size) | ||
1641 | c->options |= MIPS_CPU_INCLUSIVE_CACHES; | ||
1642 | return; | ||
1643 | } | ||
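/*
 * Editorial note, illustrative only (not part of the original patch), with
 * assumed Config2 field values: lsize field = 4, sets field = 6 and ways
 * field = 15 give linesz = 2 << 4 = 32 bytes, sets = 64 << 6 = 4096 and
 * ways = 16, i.e. 2 MiB per bank.  The four banks of a Loongson-3 then make
 * scache_size = 8 MiB, while the two banks of a Loongson-2K make it 4 MiB.
 */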
1644 | |||
1645 | extern int r5k_sc_init(void); | ||
1646 | extern int rm7k_sc_init(void); | ||
1647 | extern int mips_sc_init(void); | ||
1648 | |||
1649 | static void setup_scache(void) | ||
1650 | { | ||
1651 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
1652 | unsigned int config = read_c0_config(); | ||
1653 | int sc_present = 0; | ||
1654 | |||
1655 | /* | ||
1656 | * Do the probing thing on R4000SC and R4400SC processors. Other | ||
1657 | * processors don't have a S-cache that would be relevant to the | ||
1658 | * Linux memory management. | ||
1659 | */ | ||
1660 | switch (current_cpu_type()) { | ||
1661 | case CPU_R4000SC: | ||
1662 | case CPU_R4000MC: | ||
1663 | case CPU_R4400SC: | ||
1664 | case CPU_R4400MC: | ||
1665 | sc_present = run_uncached(probe_scache); | ||
1666 | if (sc_present) | ||
1667 | c->options |= MIPS_CPU_CACHE_CDEX_S; | ||
1668 | break; | ||
1669 | |||
1670 | case CPU_R10000: | ||
1671 | case CPU_R12000: | ||
1672 | case CPU_R14000: | ||
1673 | case CPU_R16000: | ||
1674 | scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16); | ||
1675 | c->scache.linesz = 64 << ((config >> 13) & 1); | ||
1676 | c->scache.ways = 2; | ||
1677 | c->scache.waybit = 0; | ||
1678 | sc_present = 1; | ||
1679 | break; | ||
1680 | |||
1681 | case CPU_R5000: | ||
1682 | case CPU_NEVADA: | ||
1683 | #ifdef CONFIG_R5000_CPU_SCACHE | ||
1684 | r5k_sc_init(); | ||
1685 | #endif | ||
1686 | return; | ||
1687 | |||
1688 | case CPU_RM7000: | ||
1689 | #ifdef CONFIG_RM7000_CPU_SCACHE | ||
1690 | rm7k_sc_init(); | ||
1691 | #endif | ||
1692 | return; | ||
1693 | |||
1694 | case CPU_LOONGSON2EF: | ||
1695 | loongson2_sc_init(); | ||
1696 | return; | ||
1697 | |||
1698 | case CPU_LOONGSON64: | ||
1699 | loongson3_sc_init(); | ||
1700 | return; | ||
1701 | |||
1702 | case CPU_CAVIUM_OCTEON3: | ||
1703 | case CPU_XLP: | ||
1704 | /* don't need to worry about L2, fully coherent */ | ||
1705 | return; | ||
1706 | |||
1707 | default: | ||
1708 | if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 | | ||
1709 | MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 | | ||
1710 | MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 | | ||
1711 | MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) { | ||
1712 | #ifdef CONFIG_MIPS_CPU_SCACHE | ||
1713 | if (mips_sc_init()) { | ||
1714 | scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; | ||
1715 | printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", | ||
1716 | scache_size >> 10, | ||
1717 | way_string[c->scache.ways], c->scache.linesz); | ||
1718 | |||
1719 | if (current_cpu_type() == CPU_BMIPS5000) | ||
1720 | c->options |= MIPS_CPU_INCLUSIVE_CACHES; | ||
1721 | } | ||
1722 | |||
1723 | #else | ||
1724 | if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) | ||
1725 | panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); | ||
1726 | #endif | ||
1727 | return; | ||
1728 | } | ||
1729 | sc_present = 0; | ||
1730 | } | ||
1731 | |||
1732 | if (!sc_present) | ||
1733 | return; | ||
1734 | |||
1735 | /* compute a couple of other cache variables */ | ||
1736 | c->scache.waysize = scache_size / c->scache.ways; | ||
1737 | |||
1738 | c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways); | ||
1739 | |||
1740 | printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n", | ||
1741 | scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); | ||
1742 | |||
1743 | c->options |= MIPS_CPU_INCLUSIVE_CACHES; | ||
1744 | } | ||
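/*
 * Editorial note, illustrative only (not part of the original patch), with
 * assumed numbers: an R10000-class part reporting scache_size = 1 MiB with
 * 64-byte lines and 2 ways gets
 *
 *   waysize = 1048576 / 2        = 524288 bytes
 *   sets    = 1048576 / (64 * 2) = 8192
 *
 * from the two divisions above.
 */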
1745 | |||
1746 | void au1x00_fixup_config_od(void) | ||
1747 | { | ||
1748 | /* | ||
1749 | * c0_config.od (bit 19) was write only (and read as 0) | ||
1750 | * on the early revisions of Alchemy SOCs.  It disables bus | ||
1751 | * transaction overlapping and needs to be set to fix various errata. | ||
1752 | */ | ||
1753 | switch (read_c0_prid()) { | ||
1754 | case 0x00030100: /* Au1000 DA */ | ||
1755 | case 0x00030201: /* Au1000 HA */ | ||
1756 | case 0x00030202: /* Au1000 HB */ | ||
1757 | case 0x01030200: /* Au1500 AB */ | ||
1758 | /* | ||
1759 | * The Au1100 errata are actually silent about this bit, so we set it | ||
1760 | * just in case for those revisions that, according to the (now gone) | ||
1761 | * cpu table, require it to be set. | ||
1762 | */ | ||
1763 | case 0x02030200: /* Au1100 AB */ | ||
1764 | case 0x02030201: /* Au1100 BA */ | ||
1765 | case 0x02030202: /* Au1100 BC */ | ||
1766 | set_c0_config(1 << 19); | ||
1767 | break; | ||
1768 | } | ||
1769 | } | ||
1770 | |||
1771 | /* CP0 hazard avoidance. */ | ||
1772 | #define NXP_BARRIER() \ | ||
1773 | __asm__ __volatile__( \ | ||
1774 | ".set noreorder\n\t" \ | ||
1775 | "nop; nop; nop; nop; nop; nop;\n\t" \ | ||
1776 | ".set reorder\n\t") | ||
1777 | |||
1778 | static void nxp_pr4450_fixup_config(void) | ||
1779 | { | ||
1780 | unsigned long config0; | ||
1781 | |||
1782 | config0 = read_c0_config(); | ||
1783 | |||
1784 | /* clear all three cache coherency fields */ | ||
1785 | config0 &= ~(0x7 | (7 << 25) | (7 << 28)); | ||
1786 | config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) | | ||
1787 | ((_page_cachable_default >> _CACHE_SHIFT) << 25) | | ||
1788 | ((_page_cachable_default >> _CACHE_SHIFT) << 28)); | ||
1789 | write_c0_config(config0); | ||
1790 | NXP_BARRIER(); | ||
1791 | } | ||
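/*
 * Editorial note, illustrative only (not part of the original patch): the
 * mask above clears the three cacheability fields of Config at bits [2:0],
 * [27:25] and [30:28] and rewrites each with the CCA taken from
 * _page_cachable_default.  As a hedged example, if that default encodes
 * CCA 3 (cacheable, noncoherent on most MIPS cores), all three fields end
 * up holding the value 3.
 */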
1792 | |||
1793 | static int cca = -1; | ||
1794 | |||
1795 | static int __init cca_setup(char *str) | ||
1796 | { | ||
1797 | get_option(&str, &cca); | ||
1798 | |||
1799 | return 0; | ||
1800 | } | ||
1801 | |||
1802 | early_param("cca", cca_setup); | ||
1803 | |||
1804 | static void coherency_setup(void) | ||
1805 | { | ||
1806 | if (cca < 0 || cca > 7) | ||
1807 | cca = read_c0_config() & CONF_CM_CMASK; | ||
1808 | _page_cachable_default = cca << _CACHE_SHIFT; | ||
1809 | |||
1810 | pr_debug("Using cache attribute %d\n", cca); | ||
1811 | change_c0_config(CONF_CM_CMASK, cca); | ||
1812 | |||
1813 | /* | ||
1814 | * c0_status.cu=0 specifies that updates by the sc instruction use | ||
1815 | * the coherency mode specified by the TLB; 1 means cachable | ||
1816 | * coherent update on write will be used. Not all processors have | ||
1817 | * this bit; some wire it to zero, others, like Toshiba, had the | ||
1818 | * silly idea of putting something else there ... | ||
1819 | */ | ||
1820 | switch (current_cpu_type()) { | ||
1821 | case CPU_R4000PC: | ||
1822 | case CPU_R4000SC: | ||
1823 | case CPU_R4000MC: | ||
1824 | case CPU_R4400PC: | ||
1825 | case CPU_R4400SC: | ||
1826 | case CPU_R4400MC: | ||
1827 | clear_c0_config(CONF_CU); | ||
1828 | break; | ||
1829 | /* | ||
1830 | * We need to catch the early Alchemy SOCs with | ||
1831 | * the write-only c0_config.od bit and set it back to one on: | ||
1832 | * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC; Au1500 AB | ||
1833 | */ | ||
1834 | case CPU_ALCHEMY: | ||
1835 | au1x00_fixup_config_od(); | ||
1836 | break; | ||
1837 | |||
1838 | case PRID_IMP_PR4450: | ||
1839 | nxp_pr4450_fixup_config(); | ||
1840 | break; | ||
1841 | } | ||
1842 | } | ||
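/*
 * Editorial usage note, illustrative only (not part of the original patch):
 * the "cca" early parameter handled above lets the command line override the
 * cache coherency attribute, e.g. booting with
 *
 *   console=ttyS0 cca=3
 *
 * makes coherency_setup() use CCA 3 both for the Config.K0 field and for the
 * default page cacheability.  Values outside 0..7, or omitting the
 * parameter, fall back to whatever Config.K0 currently holds, as the range
 * check above shows.
 */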
1843 | |||
1844 | static void r4k_cache_error_setup(void) | ||
1845 | { | ||
1846 | extern char __weak except_vec2_generic; | ||
1847 | extern char __weak except_vec2_sb1; | ||
1848 | |||
1849 | switch (current_cpu_type()) { | ||
1850 | case CPU_SB1: | ||
1851 | case CPU_SB1A: | ||
1852 | set_uncached_handler(0x100, &except_vec2_sb1, 0x80); | ||
1853 | break; | ||
1854 | |||
1855 | default: | ||
1856 | set_uncached_handler(0x100, &except_vec2_generic, 0x80); | ||
1857 | break; | ||
1858 | } | ||
1859 | } | ||
1860 | |||
1861 | void r4k_cache_init(void) | ||
1862 | { | ||
1863 | extern void build_clear_page(void); | ||
1864 | extern void build_copy_page(void); | ||
1865 | struct cpuinfo_mips *c = ¤t_cpu_data; | ||
1866 | |||
1867 | probe_pcache(); | ||
1868 | probe_vcache(); | ||
1869 | setup_scache(); | ||
1870 | |||
1871 | r4k_blast_dcache_page_setup(); | ||
1872 | r4k_blast_dcache_page_indexed_setup(); | ||
1873 | r4k_blast_dcache_setup(); | ||
1874 | r4k_blast_icache_page_setup(); | ||
1875 | r4k_blast_icache_page_indexed_setup(); | ||
1876 | r4k_blast_icache_setup(); | ||
1877 | r4k_blast_scache_page_setup(); | ||
1878 | r4k_blast_scache_page_indexed_setup(); | ||
1879 | r4k_blast_scache_setup(); | ||
1880 | r4k_blast_scache_node_setup(); | ||
1881 | #ifdef CONFIG_EVA | ||
1882 | r4k_blast_dcache_user_page_setup(); | ||
1883 | r4k_blast_icache_user_page_setup(); | ||
1884 | #endif | ||
1885 | |||
1886 | /* | ||
1887 | * Some MIPS32 and MIPS64 processors have physically indexed caches. | ||
1888 | * This code supports processors with virtually indexed caches and | ||
1889 | * will be unnecessarily inefficient on physically indexed ones. | ||
1890 | */ | ||
1891 | if (c->dcache.linesz && cpu_has_dc_aliases) | ||
1892 | shm_align_mask = max_t(unsigned long, | ||
1893 | c->dcache.sets * c->dcache.linesz - 1, | ||
1894 | PAGE_SIZE - 1); | ||
1895 | else | ||
1896 | shm_align_mask = PAGE_SIZE - 1; | ||
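/*
 * Editorial note, illustrative only (not part of the original patch), with
 * assumed geometry: a 64 KiB, 4-way dcache with 32-byte lines has
 * sets * linesz = 512 * 32 = 16 KiB per way, so with 4 KiB pages the cache
 * is virtually indexed beyond the page offset; shm_align_mask then becomes
 * 0x3fff and shared mappings aligned to 16 KiB always land on the same
 * cache colour.
 */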
1897 | |||
1898 | __flush_cache_vmap = r4k__flush_cache_vmap; | ||
1899 | __flush_cache_vunmap = r4k__flush_cache_vunmap; | ||
1900 | |||
1901 | flush_cache_all = cache_noop; | ||
1902 | __flush_cache_all = r4k___flush_cache_all; | ||
1903 | flush_cache_mm = r4k_flush_cache_mm; | ||
1904 | flush_cache_page = r4k_flush_cache_page; | ||
1905 | flush_cache_range = r4k_flush_cache_range; | ||
1906 | |||
1907 | __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range; | ||
1908 | |||
1909 | flush_icache_all = r4k_flush_icache_all; | ||
1910 | local_flush_data_cache_page = local_r4k_flush_data_cache_page; | ||
1911 | flush_data_cache_page = r4k_flush_data_cache_page; | ||
1912 | flush_icache_range = r4k_flush_icache_range; | ||
1913 | local_flush_icache_range = local_r4k_flush_icache_range; | ||
1914 | __flush_icache_user_range = r4k_flush_icache_user_range; | ||
1915 | __local_flush_icache_user_range = local_r4k_flush_icache_user_range; | ||
1916 | |||
1917 | #ifdef CONFIG_DMA_NONCOHERENT | ||
1918 | #ifdef CONFIG_DMA_MAYBE_COHERENT | ||
1919 | if (coherentio == IO_COHERENCE_ENABLED || | ||
1920 | (coherentio == IO_COHERENCE_DEFAULT && hw_coherentio)) { | ||
1921 | _dma_cache_wback_inv = (void *)cache_noop; | ||
1922 | _dma_cache_wback = (void *)cache_noop; | ||
1923 | _dma_cache_inv = (void *)cache_noop; | ||
1924 | } else | ||
1925 | #endif /* CONFIG_DMA_MAYBE_COHERENT */ | ||
1926 | { | ||
1927 | _dma_cache_wback_inv = r4k_dma_cache_wback_inv; | ||
1928 | _dma_cache_wback = r4k_dma_cache_wback_inv; | ||
1929 | _dma_cache_inv = r4k_dma_cache_inv; | ||
1930 | } | ||
1931 | #endif /* CONFIG_DMA_NONCOHERENT */ | ||
1932 | |||
1933 | build_clear_page(); | ||
1934 | build_copy_page(); | ||
1935 | |||
1936 | /* | ||
1937 | * We want to run CMP kernels on cores with and without coherent | ||
1938 | * caches. Therefore, do not use CONFIG_MIPS_CMP to decide whether | ||
1939 | * or not to flush caches. | ||
1940 | */ | ||
1941 | local_r4k___flush_cache_all(NULL); | ||
1942 | |||
1943 | coherency_setup(); | ||
1944 | board_cache_error_setup = r4k_cache_error_setup; | ||
1945 | |||
1946 | /* | ||
1947 | * Per-CPU overrides | ||
1948 | */ | ||
1949 | switch (current_cpu_type()) { | ||
1950 | case CPU_BMIPS4350: | ||
1951 | case CPU_BMIPS4380: | ||
1952 | /* No IPI is needed because all CPUs share the same D$ */ | ||
1953 | flush_data_cache_page = r4k_blast_dcache_page; | ||
1954 | break; | ||
1955 | case CPU_BMIPS5000: | ||
1956 | /* We lose our superpowers if L2 is disabled */ | ||
1957 | if (c->scache.flags & MIPS_CACHE_NOT_PRESENT) | ||
1958 | break; | ||
1959 | |||
1960 | /* I$ fills from D$ just by emptying the write buffers */ | ||
1961 | flush_cache_page = (void *)b5k_instruction_hazard; | ||
1962 | flush_cache_range = (void *)b5k_instruction_hazard; | ||
1963 | local_flush_data_cache_page = (void *)b5k_instruction_hazard; | ||
1964 | flush_data_cache_page = (void *)b5k_instruction_hazard; | ||
1965 | flush_icache_range = (void *)b5k_instruction_hazard; | ||
1966 | local_flush_icache_range = (void *)b5k_instruction_hazard; | ||
1967 | |||
1968 | |||
1969 | /* Optimization: an L2 flush implicitly flushes the L1 */ | ||
1970 | current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES; | ||
1971 | break; | ||
1972 | case CPU_LOONGSON64: | ||
1973 | /* Loongson-3 maintains cache coherency in hardware */ | ||
1974 | __flush_cache_all = cache_noop; | ||
1975 | __flush_cache_vmap = cache_noop; | ||
1976 | __flush_cache_vunmap = cache_noop; | ||
1977 | __flush_kernel_vmap_range = (void *)cache_noop; | ||
1978 | flush_cache_mm = (void *)cache_noop; | ||
1979 | flush_cache_page = (void *)cache_noop; | ||
1980 | flush_cache_range = (void *)cache_noop; | ||
1981 | flush_icache_all = (void *)cache_noop; | ||
1982 | flush_data_cache_page = (void *)cache_noop; | ||
1983 | local_flush_data_cache_page = (void *)cache_noop; | ||
1984 | break; | ||
1985 | } | ||
1986 | } | ||
1987 | |||
1988 | static int r4k_cache_pm_notifier(struct notifier_block *self, unsigned long cmd, | ||
1989 | void *v) | ||
1990 | { | ||
1991 | switch (cmd) { | ||
1992 | case CPU_PM_ENTER_FAILED: | ||
1993 | case CPU_PM_EXIT: | ||
1994 | coherency_setup(); | ||
1995 | break; | ||
1996 | } | ||
1997 | |||
1998 | return NOTIFY_OK; | ||
1999 | } | ||
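/*
 * Editorial note (not part of the original patch): coherency_setup() is
 * re-run on CPU_PM_EXIT and on a failed CPU_PM_ENTER presumably because the
 * CP0 Config cacheability attribute it programs is per-core state that may
 * not survive the core being powered down and back up.
 */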
2000 | |||
2001 | static struct notifier_block r4k_cache_pm_notifier_block = { | ||
2002 | .notifier_call = r4k_cache_pm_notifier, | ||
2003 | }; | ||
2004 | |||
2005 | int __init r4k_cache_init_pm(void) | ||
2006 | { | ||
2007 | return cpu_pm_register_notifier(&r4k_cache_pm_notifier_block); | ||
2008 | } | ||
2009 | arch_initcall(r4k_cache_init_pm); | ||