Skip to content

Commit aabef63

Browse files
riteshharjani authored and
gregkh committed
powerpc/fadump: Move fadump_cma_init to setup_arch() after initmem_init()
[ Upstream commit 05b94ca ] During early init CMA_MIN_ALIGNMENT_BYTES can be PAGE_SIZE, since pageblock_order is still zero and it gets initialized later during initmem_init() e.g. setup_arch() -> initmem_init() -> sparse_init() -> set_pageblock_order() One such use case where this causes issue is - early_setup() -> early_init_devtree() -> fadump_reserve_mem() -> fadump_cma_init() This causes CMA memory alignment check to be bypassed in cma_init_reserved_mem(). Then later cma_activate_area() can hit a VM_BUG_ON_PAGE(pfn & ((1 << order) - 1)) if the reserved memory area was not pageblock_order aligned. Fix it by moving the fadump_cma_init() after initmem_init(), where other such cma reservations also gets called. <stack trace> ============== page: refcount:0 mapcount:0 mapping:0000000000000000 index:0x0 pfn:0x10010 flags: 0x13ffff800000000(node=1|zone=0|lastcpupid=0x7ffff) CMA raw: 013ffff800000000 5deadbeef0000100 5deadbeef0000122 0000000000000000 raw: 0000000000000000 0000000000000000 00000000ffffffff 0000000000000000 page dumped because: VM_BUG_ON_PAGE(pfn & ((1 << order) - 1)) ------------[ cut here ]------------ kernel BUG at mm/page_alloc.c:778! 
Call Trace: __free_one_page+0x57c/0x7b0 (unreliable) free_pcppages_bulk+0x1a8/0x2c8 free_unref_page_commit+0x3d4/0x4e4 free_unref_page+0x458/0x6d0 init_cma_reserved_pageblock+0x114/0x198 cma_init_reserved_areas+0x270/0x3e0 do_one_initcall+0x80/0x2f8 kernel_init_freeable+0x33c/0x530 kernel_init+0x34/0x26c ret_from_kernel_user_thread+0x14/0x1c Fixes: 11ac3e8 ("mm: cma: use pageblock_order as the single alignment") Suggested-by: David Hildenbrand <david@redhat.com> Reported-by: Sachin P Bappalige <sachinpb@linux.ibm.com> Acked-by: Hari Bathini <hbathini@linux.ibm.com> Reviewed-by: Madhavan Srinivasan <maddy@linux.ibm.com> Signed-off-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> Signed-off-by: Michael Ellerman <mpe@ellerman.id.au> Link: https://patch.msgid.link/3ae208e48c0d9cefe53d2dc4f593388067405b7d.1729146153.git.ritesh.list@gmail.com Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 6ffdb03 commit aabef63

3 files changed

Lines changed: 12 additions & 7 deletions

File tree

arch/powerpc/include/asm/fadump.h

Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -32,4 +32,11 @@ extern int early_init_dt_scan_fw_dump(unsigned long node, const char *uname,
 				      int depth, void *data);
 extern int fadump_reserve_mem(void);
 #endif
+
+#if defined(CONFIG_FA_DUMP) && defined(CONFIG_CMA)
+void fadump_cma_init(void);
+#else
+static inline void fadump_cma_init(void) { }
+#endif
+
 #endif /* _ASM_POWERPC_FADUMP_H */

arch/powerpc/kernel/fadump.c

Lines changed: 1 addition & 5 deletions
Original file line numberDiff line numberDiff line change
@@ -80,7 +80,7 @@ static struct cma *fadump_cma;
  * But for some reason even if it fails we still have the memory reservation
  * with us and we can still continue doing fadump.
  */
-static void __init fadump_cma_init(void)
+void __init fadump_cma_init(void)
 {
 	unsigned long long base, size;
 	int rc;
@@ -124,8 +124,6 @@ static void __init fadump_cma_init(void)
 		(unsigned long)cma_get_base(fadump_cma) >> 20,
 		fw_dump.reserve_dump_area_size);
 }
-#else
-static void __init fadump_cma_init(void) { }
 #endif /* CONFIG_CMA */

 /* Scan the Firmware Assisted dump configuration details. */
@@ -642,8 +640,6 @@ int __init fadump_reserve_mem(void)

 	pr_info("Reserved %lldMB of memory at %#016llx (System RAM: %lldMB)\n",
 		(size >> 20), base, (memblock_phys_mem_size() >> 20));
-
-		fadump_cma_init();
 	}

 	return ret;

arch/powerpc/kernel/setup-common.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -982,9 +982,11 @@ void __init setup_arch(char **cmdline_p)
 	initmem_init();

 	/*
-	 * Reserve large chunks of memory for use by CMA for KVM and hugetlb. These must
-	 * be called after initmem_init(), so that pageblock_order is initialised.
+	 * Reserve large chunks of memory for use by CMA for fadump, KVM and
+	 * hugetlb. These must be called after initmem_init(), so that
+	 * pageblock_order is initialised.
 	 */
+	fadump_cma_init();
 	kvm_cma_reserve();
 	gigantic_hugetlb_cma_reserve();

0 commit comments

Comments
 (0)