Skip to content

Commit 3ae2f2b

Browse files
kvaneesh authored and gregkh committed
powerpc/vmemmap: Fix memory leak with vmemmap list allocation failures.
[ Upstream commit ccaea15 ]

If we fail to allocate vmemmap list, we don't keep track of allocated vmemmap block buf. Hence on section deactivate we skip vmemmap block buf free. This results in memory leak.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20200731113500.248306-1-aneesh.kumar@linux.ibm.com
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 7120a84 commit 3ae2f2b

1 file changed

Lines changed: 28 additions & 7 deletions

File tree

arch/powerpc/mm/init_64.c

Lines changed: 28 additions & 7 deletions
Original file line number | Diff line number | Diff line change
@@ -162,23 +162,24 @@ static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node)
162162
return next++;
163163
}
164164

165-
static __meminit void vmemmap_list_populate(unsigned long phys,
166-
unsigned long start,
167-
int node)
165+
static __meminit int vmemmap_list_populate(unsigned long phys,
166+
unsigned long start,
167+
int node)
168168
{
169169
struct vmemmap_backing *vmem_back;
170170

171171
vmem_back = vmemmap_list_alloc(node);
172172
if (unlikely(!vmem_back)) {
173-
WARN_ON(1);
174-
return;
173+
pr_debug("vmemap list allocation failed\n");
174+
return -ENOMEM;
175175
}
176176

177177
vmem_back->phys = phys;
178178
vmem_back->virt_addr = start;
179179
vmem_back->list = vmemmap_list;
180180

181181
vmemmap_list = vmem_back;
182+
return 0;
182183
}
183184

184185
static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long start,
@@ -199,6 +200,7 @@ static bool altmap_cross_boundary(struct vmem_altmap *altmap, unsigned long star
199200
int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
200201
struct vmem_altmap *altmap)
201202
{
203+
bool altmap_alloc;
202204
unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift;
203205

204206
/* Align to the page size of the linear mapping. */
@@ -228,13 +230,32 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
228230
p = vmemmap_alloc_block_buf(page_size, node, altmap);
229231
if (!p)
230232
pr_debug("altmap block allocation failed, falling back to system memory");
233+
else
234+
altmap_alloc = true;
231235
}
232-
if (!p)
236+
if (!p) {
233237
p = vmemmap_alloc_block_buf(page_size, node, NULL);
238+
altmap_alloc = false;
239+
}
234240
if (!p)
235241
return -ENOMEM;
236242

237-
vmemmap_list_populate(__pa(p), start, node);
243+
if (vmemmap_list_populate(__pa(p), start, node)) {
244+
/*
245+
* If we don't populate vmemap list, we don't have
246+
* the ability to free the allocated vmemmap
247+
* pages in section_deactivate. Hence free them
248+
* here.
249+
*/
250+
int nr_pfns = page_size >> PAGE_SHIFT;
251+
unsigned long page_order = get_order(page_size);
252+
253+
if (altmap_alloc)
254+
vmem_altmap_free(altmap, nr_pfns);
255+
else
256+
free_pages((unsigned long)p, page_order);
257+
return -ENOMEM;
258+
}
238259

239260
pr_debug(" * %016lx..%016lx allocated at %p\n",
240261
start, start + page_size, p);

0 commit comments

Comments (0)