author	2025-03-08 22:04:20 +0800
committer	2025-03-08 22:04:20 +0800
commit	a07bb8fd1299070229f0e8f3dcb57ffd5ef9870a (patch)
tree	84f21bd0bf7071bc5fc7dd989e77d7ceb5476682 /mm/sparse.c

Initial commit: OpenHarmony-v4.0-Release

Diffstat (limited to 'mm/sparse.c'):
 -rw-r--r--  mm/sparse.c | 974 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 974 insertions(+), 0 deletions(-)

diff --git a/mm/sparse.c b/mm/sparse.c
new file mode 100644
index 000000000..33406ea2e
--- /dev/null
+++ b/mm/sparse.c
@@ -0,0 +1,974 @@
// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
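/*
 * A note on the layout above (a sketch; the exact constants are
 * arch- and config-dependent): with SPARSEMEM_EXTREME, mem_section is
 * a two-level table whose roots are allocated on demand, each root
 * covering SECTIONS_PER_ROOT == PAGE_SIZE / sizeof(struct mem_section)
 * sections, so only roots that contain present sections consume
 * memory. The static variant pays for all NR_SECTION_ROOTS *
 * SECTIONS_PER_ROOT entries up front (SECTIONS_PER_ROOT is 1 there).
 */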

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}
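/*
 * Worked example of the encoding above (illustrative; assuming
 * SECTION_NID_SHIFT == 3): sparse_encode_early_nid(5) stores 5 << 3 =
 * 0x28 in section_mem_map, leaving the low flag bits (such as
 * SECTION_MARKED_PRESENT) free, and sparse_early_nid() recovers
 * 0x28 >> 3 = 5.
 */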

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}
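/*
 * For a feel of the bound above (illustrative numbers): with
 * MAX_PHYSMEM_BITS == 46 and PAGE_SHIFT == 12, max_sparsemem_pfn is
 * 1UL << 34, i.e. pfns covering 64TiB of physical address space; any
 * range an architecture passes in beyond that is clamped.
 */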

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}
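/*
 * Typical use of the iterator above (a sketch, mirroring the callers
 * later in this file):
 *
 *	for_each_present_section_nr(first_present_section_nr(), pnum)
 *		do_something(__nr_to_section(pnum));
 *
 * The walk skips absent sections and stops at
 * __highest_present_section_nr rather than scanning all
 * NR_MEM_SECTIONS slots.
 */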

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif
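/*
 * Worked example for subsection_map_init() (illustrative
 * x86_64-like numbers: 128MiB sections, 2MiB subsections, 4KiB pages,
 * so PAGES_PER_SECTION == 32768 and PAGES_PER_SUBSECTION == 512):
 * registering pfn 0x1000 with nr_pages 0x400 touches a single section
 * and sets subsection_map bits 8 and 9, since
 * subsection_map_index(0x1000) == 8 and
 * subsection_map_index(0x13ff) == 9.
 */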

/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}
/*
 * Mark all memblocks as present using memory_present().
 * This is a convenience function that is useful to mark all of the
 * system's memory as present during initialization.
 */
static void __init memblocks_present(void)
{
	unsigned long start, end;
	int i, nid;

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		memory_present(nid, start, end);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}
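/*
 * Sketch of the identity above: if a section starts at pfn S and its
 * memmap lives at virtual address M, the encoded value is the address
 * M - S * sizeof(struct page) (pointer arithmetic on struct page *).
 * For any pfn P inside the section, coded_mem_map + P then points
 * straight at P's struct page, with no per-lookup subtraction of the
 * section base. The BUG_ON checks that the low bits stay clear so
 * they can double as the SECTION_* flag bits.
 */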

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections, preventing the
	 * page from being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency. Some platforms allow
	 * un-removable sections because they will just gather other
	 * removable sections for dynamic partitioning. Just report the
	 * un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid_raw(size, size, addr,
					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * The pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP, which maps memmap to PMDs.
	 */
	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
					addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}
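/*
 * The three helpers above form a simple bump allocator over one large
 * early allocation: sparse_buffer_init() grabs enough space for every
 * section memmap on a node, sparse_buffer_alloc() rounds the cursor
 * up to the requested alignment, hands out [ptr, ptr + size) and
 * returns any alignment gap to memblock, and sparse_buffer_fini()
 * frees whatever was never handed out. A NULL return from
 * sparse_buffer_alloc() tells the caller to fall back to a direct
 * memblock allocation.
 */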

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end),
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			sparse_buffer_fini();
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_end, pnum_begin, map_count = 1;
	int nid_begin;

	memblocks_present();

	pnum_begin = first_present_section_nr();
	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
			PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);
		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state. This means all its pages
		 * are isolated from the page allocator. If the removing
		 * section's memmap is placed on the same section, it must
		 * not be freed: if it were, the page allocator could hand
		 * it out while it is about to be removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

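/*
 * Example of the subsection bitmap protocol above (illustrative):
 * hot-adding two disjoint subsection ranges of one section succeeds,
 * with fill_subsection_map() accumulating bits via bitmap_or();
 * re-adding either range fails with -EEXIST because the masks
 * intersect; clear_subsection_map() then clears exactly the bits that
 * were set, and warns with -EINVAL if asked to clear bits that are
 * not set. The !VMEMMAP stubs reduce this to whole-section granularity.
 */
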
/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For 1, when the subsection_map is not empty we will not be freeing
 * the usage map, but still need to free the vmemmap range.
 *
 * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections; it simply assumes that memory will never be
	 * referenced. If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug;
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}
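/*
 * For context (a note, not code from this commit): during memory
 * hotplug this is reached via add_memory() -> arch_add_memory() ->
 * __add_pages(), which calls sparse_add_section() once per section
 * span in the hot-added range.
 */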

#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages. But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
		unsigned long nr_pages, unsigned long map_offset,
		struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */