Diffstat (limited to 'arch/mips/mm/cache.c')
 arch/mips/mm/cache.c | 242 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 242 insertions(+), 0 deletions(-)
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
new file mode 100644
index 000000000..3e81ba000
--- /dev/null
+++ b/arch/mips/mm/cache.c
@@ -0,0 +1,242 @@
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2007 MIPS Technologies, Inc.
 */
#include <linux/fs.h>
#include <linux/fcntl.h>
#include <linux/kernel.h>
#include <linux/linkage.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/highmem.h>

#include <asm/cacheflush.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
#include <asm/setup.h>

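/*
 * Descriptive note: the flush_* symbols below are function pointers, not
 * functions.  The per-CPU cache_init() routine selected in cpu_cache_init()
 * at the bottom of this file installs the implementation that matches the
 * detected cache hardware, so generic code can call e.g.
 * flush_icache_range() without knowing which CPU family it runs on.
 */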
/* Cache operations. */
void (*flush_cache_all)(void);
void (*__flush_cache_all)(void);
EXPORT_SYMBOL_GPL(__flush_cache_all);
void (*flush_cache_mm)(struct mm_struct *mm);
void (*flush_cache_range)(struct vm_area_struct *vma, unsigned long start,
	unsigned long end);
void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page,
	unsigned long pfn);
void (*flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(flush_icache_range);
void (*local_flush_icache_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(local_flush_icache_range);
void (*__flush_icache_user_range)(unsigned long start, unsigned long end);
void (*__local_flush_icache_user_range)(unsigned long start, unsigned long end);
EXPORT_SYMBOL_GPL(__local_flush_icache_user_range);

void (*__flush_cache_vmap)(void);
void (*__flush_cache_vunmap)(void);

void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
EXPORT_SYMBOL_GPL(__flush_kernel_vmap_range);

/* MIPS specific cache operations */
void (*local_flush_data_cache_page)(void *addr);
void (*flush_data_cache_page)(unsigned long addr);
void (*flush_icache_all)(void);

EXPORT_SYMBOL_GPL(local_flush_data_cache_page);
EXPORT_SYMBOL(flush_data_cache_page);
EXPORT_SYMBOL(flush_icache_all);

#ifdef CONFIG_DMA_NONCOHERENT

/* DMA cache operations. */
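/*
 * Descriptive note: wback_inv writes dirty lines back to memory and then
 * invalidates them (the safe default for bidirectional DMA); wback only
 * writes dirty lines back (the device is about to read the buffer); inv
 * only invalidates (the device has written the buffer, and the CPU must
 * not see stale cached lines).
 */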
void (*_dma_cache_wback_inv)(unsigned long start, unsigned long size);
void (*_dma_cache_wback)(unsigned long start, unsigned long size);
void (*_dma_cache_inv)(unsigned long start, unsigned long size);

#endif /* CONFIG_DMA_NONCOHERENT */

/*
 * We could optimize the case where the cache argument is not BCACHE,
 * but that seems to be a very atypical use ...
 */
SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, bytes,
	unsigned int, cache)
{
	if (bytes == 0)
		return 0;
	if (!access_ok((void __user *) addr, bytes))
		return -EFAULT;

	__flush_icache_user_range(addr, addr + bytes);

	return 0;
}
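
/*
 * Illustrative use (not part of this file): a user-space JIT or other
 * self-modifying program would typically write its instructions to a
 * buffer and then, via the libc wrapper from <sys/cachectl.h>, call
 *
 *	cacheflush(buf, len, BCACHE);
 *
 * before jumping into the buffer, so that the I-cache observes the
 * freshly written code.
 */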

void __flush_dcache_page(struct page *page)
{
	struct address_space *mapping = page_mapping_file(page);
	unsigned long addr;

	if (mapping && !mapping_mapped(mapping)) {
		SetPageDcacheDirty(page);
		return;
	}

	/*
	 * We could delay the flush for the !page_mapping case too.  But that
	 * case is for exec env/arg pages and those are 99% certain to get
	 * faulted into the TLB (and thus flushed) anyway.
	 */
	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		kunmap_atomic((void *)addr);
}

EXPORT_SYMBOL(__flush_dcache_page);

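/*
 * Descriptive note: anonymous pages have no address_space to defer to, so
 * when the kernel mapping of the page may alias the user's mapping (VIPT
 * aliasing), the flush has to happen here.  kmap_coherent() provides a
 * kernel mapping whose cache colour matches vmaddr, so flushing through it
 * also cleans the user-visible lines; that cheaper path is taken when the
 * page is mapped and not already marked dirty in the D-cache.
 */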
void __flush_anon_page(struct page *page, unsigned long vmaddr)
{
	unsigned long addr = (unsigned long) page_address(page);

	if (pages_do_alias(addr, vmaddr)) {
		if (page_mapcount(page) && !Page_dcache_dirty(page)) {
			void *kaddr;

			kaddr = kmap_coherent(page, vmaddr);
			flush_data_cache_page((unsigned long)kaddr);
			kunmap_coherent();
		} else
			flush_data_cache_page(addr);
	}
}

EXPORT_SYMBOL(__flush_anon_page);

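/*
 * Descriptive note: this runs when a pte for the page is installed,
 * completing a flush deferred by __flush_dcache_page() above.  Now that a
 * real user mapping exists, the D-cache is written back if the mapping is
 * executable (so the I-cache sees fresh instructions) or if the kernel and
 * user addresses alias, and the deferred-dirty marker is cleared.
 */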
void __update_cache(unsigned long address, pte_t pte)
{
	struct page *page;
	unsigned long pfn, addr;
	int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;

	pfn = pte_pfn(pte);
	if (unlikely(!pfn_valid(pfn)))
		return;
	page = pfn_to_page(pfn);
	if (Page_dcache_dirty(page)) {
		if (PageHighMem(page))
			addr = (unsigned long)kmap_atomic(page);
		else
			addr = (unsigned long)page_address(page);

		if (exec || pages_do_alias(addr, address & PAGE_MASK))
			flush_data_cache_page(addr);

		if (PageHighMem(page))
			kunmap_atomic((void *)addr);

		ClearPageDcacheDirty(page);
	}
}

unsigned long _page_cachable_default;
EXPORT_SYMBOL(_page_cachable_default);

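/*
 * Descriptive note: protection_map[] is indexed by a vma's
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED bits (bit 3 = shared, bits 2..0 =
 * exec/write/read), giving 16 entries: 0-7 private, 8-15 shared.  With
 * RIXI the table is built from explicit no-exec/no-read bits; note that
 * the private writable entries omit _PAGE_WRITE, so the first write
 * faults and triggers copy-on-write.
 */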
static inline void setup_protection_map(void)
{
	if (cpu_has_rixi) {
		protection_map[0] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[1] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[2] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[3] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[4] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[5] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[6] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[7] = __pgprot(_page_cachable_default | _PAGE_PRESENT);

		protection_map[8] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_NO_READ);
		protection_map[9] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC);
		protection_map[10] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE | _PAGE_NO_READ);
		protection_map[11] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_NO_EXEC | _PAGE_WRITE);
		protection_map[12] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[13] = __pgprot(_page_cachable_default | _PAGE_PRESENT);
		protection_map[14] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);
		protection_map[15] = __pgprot(_page_cachable_default | _PAGE_PRESENT | _PAGE_WRITE);

	} else {
		protection_map[0] = PAGE_NONE;
		protection_map[1] = PAGE_READONLY;
		protection_map[2] = PAGE_COPY;
		protection_map[3] = PAGE_COPY;
		protection_map[4] = PAGE_READONLY;
		protection_map[5] = PAGE_READONLY;
		protection_map[6] = PAGE_COPY;
		protection_map[7] = PAGE_COPY;
		protection_map[8] = PAGE_NONE;
		protection_map[9] = PAGE_READONLY;
		protection_map[10] = PAGE_SHARED;
		protection_map[11] = PAGE_SHARED;
		protection_map[12] = PAGE_READONLY;
		protection_map[13] = PAGE_READONLY;
		protection_map[14] = PAGE_SHARED;
		protection_map[15] = PAGE_SHARED;
	}
}

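/*
 * Descriptive note: the cpu_has_*_cache predicates are mutually exclusive
 * in practice, so exactly one cache_init() routine runs and fills in the
 * function pointers declared at the top of this file.  The local extern
 * declarations are __weak so that configurations which do not build a
 * particular variant still link.
 */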
void cpu_cache_init(void)
{
	if (cpu_has_3k_cache) {
		extern void __weak r3k_cache_init(void);

		r3k_cache_init();
	}
	if (cpu_has_6k_cache) {
		extern void __weak r6k_cache_init(void);

		r6k_cache_init();
	}
	if (cpu_has_4k_cache) {
		extern void __weak r4k_cache_init(void);

		r4k_cache_init();
	}
	if (cpu_has_8k_cache) {
		extern void __weak r8k_cache_init(void);

		r8k_cache_init();
	}
	if (cpu_has_tx39_cache) {
		extern void __weak tx39_cache_init(void);

		tx39_cache_init();
	}

	if (cpu_has_octeon_cache) {
		extern void __weak octeon_cache_init(void);

		octeon_cache_init();
	}

	setup_protection_map();
}

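/*
 * Descriptive note: default policy for mmap of /dev/mem and similar
 * character devices.  Accesses go uncached if the file was opened with
 * O_DSYNC or if the address lies beyond the highest directly-mapped
 * physical address; platforms may override this __weak default.
 */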
int __weak __uncached_access(struct file *file, unsigned long addr)
{
	if (file->f_flags & O_DSYNC)
		return 1;

	return addr >= __pa(high_memory);
}