Skip to content

Commit ad9d55d

Browse files
nivedita76 authored and gregkh committed
x86/kaslr: Initialize mem_limit to the real maximum address
[ Upstream commit 4512869 ]

On 64-bit, the kernel must be placed below MAXMEM (64TiB with 4-level paging or 4PiB with 5-level paging). This is currently not enforced by KASLR, which thus implicitly relies on physical memory being limited to less than 64TiB.

On 32-bit, the limit is KERNEL_IMAGE_SIZE (512MiB). This is enforced by special checks in __process_mem_region().

Initialize mem_limit to the maximum (depending on architecture), instead of ULLONG_MAX, and make sure the command-line arguments can only decrease it. This makes the enforcement explicit on 64-bit, and eliminates the 32-bit specific checks to keep the kernel below 512M.

Check upfront to make sure the minimum address is below the limit before doing any work.

Signed-off-by: Arvind Sankar <nivedita@alum.mit.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Acked-by: Kees Cook <keescook@chromium.org>
Link: https://lore.kernel.org/r/20200727230801.3468620-5-nivedita@alum.mit.edu
Signed-off-by: Sasha Levin <sashal@kernel.org>
1 parent 3fc85e7 commit ad9d55d

1 file changed

Lines changed: 22 additions & 19 deletions

File tree

arch/x86/boot/compressed/kaslr.c

Lines changed: 22 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -87,8 +87,11 @@ static unsigned long get_boot_seed(void)
8787
static bool memmap_too_large;
8888

8989

90-
/* Store memory limit specified by "mem=nn[KMG]" or "memmap=nn[KMG]" */
91-
static unsigned long long mem_limit = ULLONG_MAX;
90+
/*
91+
* Store memory limit: MAXMEM on 64-bit and KERNEL_IMAGE_SIZE on 32-bit.
92+
* It may be reduced by "mem=nn[KMG]" or "memmap=nn[KMG]" command line options.
93+
*/
94+
static unsigned long long mem_limit;
9295

9396
/* Number of immovable memory regions */
9497
static int num_immovable_mem;
@@ -214,7 +217,7 @@ static void mem_avoid_memmap(enum parse_mode mode, char *str)
214217

215218
if (start == 0) {
216219
/* Store the specified memory limit if size > 0 */
217-
if (size > 0)
220+
if (size > 0 && size < mem_limit)
218221
mem_limit = size;
219222

220223
continue;
@@ -302,7 +305,8 @@ static void handle_mem_options(void)
302305
if (mem_size == 0)
303306
goto out;
304307

305-
mem_limit = mem_size;
308+
if (mem_size < mem_limit)
309+
mem_limit = mem_size;
306310
} else if (!strcmp(param, "efi_fake_mem")) {
307311
mem_avoid_memmap(PARSE_EFI, val);
308312
}
@@ -314,7 +318,9 @@ static void handle_mem_options(void)
314318
}
315319

316320
/*
317-
* In theory, KASLR can put the kernel anywhere in the range of [16M, 64T).
321+
* In theory, KASLR can put the kernel anywhere in the range of [16M, MAXMEM)
322+
* on 64-bit, and [16M, KERNEL_IMAGE_SIZE) on 32-bit.
323+
*
318324
* The mem_avoid array is used to store the ranges that need to be avoided
319325
* when KASLR searches for an appropriate random address. We must avoid any
320326
* regions that are unsafe to overlap with during decompression, and other
@@ -614,10 +620,6 @@ static void __process_mem_region(struct mem_vector *entry,
614620
unsigned long start_orig, end;
615621
struct mem_vector cur_entry;
616622

617-
/* On 32-bit, ignore entries entirely above our maximum. */
618-
if (IS_ENABLED(CONFIG_X86_32) && entry->start >= KERNEL_IMAGE_SIZE)
619-
return;
620-
621623
/* Ignore entries entirely below our minimum. */
622624
if (entry->start + entry->size < minimum)
623625
return;
@@ -650,11 +652,6 @@ static void __process_mem_region(struct mem_vector *entry,
650652
/* Reduce size by any delta from the original address. */
651653
region.size -= region.start - start_orig;
652654

653-
/* On 32-bit, reduce region size to fit within max size. */
654-
if (IS_ENABLED(CONFIG_X86_32) &&
655-
region.start + region.size > KERNEL_IMAGE_SIZE)
656-
region.size = KERNEL_IMAGE_SIZE - region.start;
657-
658655
/* Return if region can't contain decompressed kernel */
659656
if (region.size < image_size)
660657
return;
@@ -839,15 +836,16 @@ static void process_e820_entries(unsigned long minimum,
839836
static unsigned long find_random_phys_addr(unsigned long minimum,
840837
unsigned long image_size)
841838
{
839+
/* Bail out early if it's impossible to succeed. */
840+
if (minimum + image_size > mem_limit)
841+
return 0;
842+
842843
/* Check if we had too many memmaps. */
843844
if (memmap_too_large) {
844845
debug_putstr("Aborted memory entries scan (more than 4 memmap= args)!\n");
845846
return 0;
846847
}
847848

848-
/* Make sure minimum is aligned. */
849-
minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
850-
851849
if (process_efi_entries(minimum, image_size))
852850
return slots_fetch_random();
853851

@@ -860,8 +858,6 @@ static unsigned long find_random_virt_addr(unsigned long minimum,
860858
{
861859
unsigned long slots, random_addr;
862860

863-
/* Make sure minimum is aligned. */
864-
minimum = ALIGN(minimum, CONFIG_PHYSICAL_ALIGN);
865861
/* Align image_size for easy slot calculations. */
866862
image_size = ALIGN(image_size, CONFIG_PHYSICAL_ALIGN);
867863

@@ -908,6 +904,11 @@ void choose_random_location(unsigned long input,
908904
/* Prepare to add new identity pagetables on demand. */
909905
initialize_identity_maps();
910906

907+
if (IS_ENABLED(CONFIG_X86_32))
908+
mem_limit = KERNEL_IMAGE_SIZE;
909+
else
910+
mem_limit = MAXMEM;
911+
911912
/* Record the various known unsafe memory ranges. */
912913
mem_avoid_init(input, input_size, *output);
913914

@@ -917,6 +918,8 @@ void choose_random_location(unsigned long input,
917918
* location:
918919
*/
919920
min_addr = min(*output, 512UL << 20);
921+
/* Make sure minimum is aligned. */
922+
min_addr = ALIGN(min_addr, CONFIG_PHYSICAL_ALIGN);
920923

921924
/* Walk available memory entries to find a random address. */
922925
random_addr = find_random_phys_addr(min_addr, output_size);

0 commit comments

Comments (0)