@@ -162,7 +162,7 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
162162 loc = (long )* reloc + phys_offset ;
163163 if (loc < min_addr || loc > max_addr )
164164 error ("64-bit relocation outside of kernel!\n" );
165- * (u64 * )loc += offset - __START_KERNEL ;
165+ * (u64 * )loc += offset ;
166166 }
167167}
168168
@@ -177,7 +177,7 @@ static void kaslr_adjust_got(unsigned long offset)
177177 */
178178 for (entry = (u64 * )vmlinux .got_start ; entry < (u64 * )vmlinux .got_end ; entry ++ ) {
179179 if (* entry )
180- * entry += offset - __START_KERNEL ;
180+ * entry += offset ;
181181 }
182182}
183183
@@ -252,7 +252,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
252252 vmemmap_size = SECTION_ALIGN_UP (pages ) * sizeof (struct page );
253253
254254 /* choose kernel address space layout: 4 or 3 levels. */
255- BUILD_BUG_ON (!IS_ALIGNED (__START_KERNEL , THREAD_SIZE ));
255+ BUILD_BUG_ON (!IS_ALIGNED (TEXT_OFFSET , THREAD_SIZE ));
256256 BUILD_BUG_ON (!IS_ALIGNED (__NO_KASLR_START_KERNEL , THREAD_SIZE ));
257257 BUILD_BUG_ON (__NO_KASLR_END_KERNEL > _REGION1_SIZE );
258258 vsize = get_vmem_size (ident_map_size , vmemmap_size , vmalloc_size , _REGION3_SIZE );
@@ -341,7 +341,8 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
341341 BUILD_BUG_ON (MAX_DCSS_ADDR > (1UL << MAX_PHYSMEM_BITS ));
342342 max_mappable = max (ident_map_size , MAX_DCSS_ADDR );
343343 max_mappable = min (max_mappable , vmemmap_start );
344- __identity_base = round_down (vmemmap_start - max_mappable , rte_size );
344+ if (IS_ENABLED (CONFIG_RANDOMIZE_IDENTITY_BASE ))
345+ __identity_base = round_down (vmemmap_start - max_mappable , rte_size );
345346
346347 return asce_limit ;
347348}
@@ -388,31 +389,25 @@ static void kaslr_adjust_vmlinux_info(long offset)
388389#endif
389390}
390391
391- static void fixup_vmlinux_info (void )
392- {
393- vmlinux .entry -= __START_KERNEL ;
394- kaslr_adjust_vmlinux_info (- __START_KERNEL );
395- }
396-
397392void startup_kernel (void )
398393{
399- unsigned long kernel_size = vmlinux .image_size + vmlinux .bss_size ;
400- unsigned long nokaslr_offset_phys , kaslr_large_page_offset ;
401- unsigned long amode31_lma = 0 ;
394+ unsigned long vmlinux_size = vmlinux .image_size + vmlinux .bss_size ;
395+ unsigned long nokaslr_text_lma , text_lma = 0 , amode31_lma = 0 ;
396+ unsigned long kernel_size = TEXT_OFFSET + vmlinux_size ;
397+ unsigned long kaslr_large_page_offset ;
402398 unsigned long max_physmem_end ;
403399 unsigned long asce_limit ;
404400 unsigned long safe_addr ;
405401 psw_t psw ;
406402
407- fixup_vmlinux_info ();
408403 setup_lpp ();
409404
410405 /*
411406 * Non-randomized kernel physical start address must be _SEGMENT_SIZE
412407 * aligned (see blow).
413408 */
414- nokaslr_offset_phys = ALIGN (mem_safe_offset (), _SEGMENT_SIZE );
415- safe_addr = PAGE_ALIGN (nokaslr_offset_phys + kernel_size );
409+ nokaslr_text_lma = ALIGN (mem_safe_offset (), _SEGMENT_SIZE );
410+ safe_addr = PAGE_ALIGN (nokaslr_text_lma + vmlinux_size );
416411
417412 /*
418413 * Reserve decompressor memory together with decompression heap,
@@ -456,16 +451,27 @@ void startup_kernel(void)
456451 */
457452 kaslr_large_page_offset = __kaslr_offset & ~_SEGMENT_MASK ;
458453 if (kaslr_enabled ()) {
459- unsigned long end = ident_map_size - kaslr_large_page_offset ;
454+ unsigned long size = vmlinux_size + kaslr_large_page_offset ;
460455
461- __kaslr_offset_phys = randomize_within_range (kernel_size , _SEGMENT_SIZE , 0 , end );
456+ text_lma = randomize_within_range (size , _SEGMENT_SIZE , TEXT_OFFSET , ident_map_size );
462457 }
463- if (!__kaslr_offset_phys )
464- __kaslr_offset_phys = nokaslr_offset_phys ;
465- __kaslr_offset_phys |= kaslr_large_page_offset ;
458+ if (!text_lma )
459+ text_lma = nokaslr_text_lma ;
460+ text_lma |= kaslr_large_page_offset ;
461+
462+ /*
463+ * [__kaslr_offset_phys..__kaslr_offset_phys + TEXT_OFFSET] region is
464+ * never accessed via the kernel image mapping as per the linker script:
465+ *
466+ * . = TEXT_OFFSET;
467+ *
468+ * Therefore, this region could be used for something else and does
469+ * not need to be reserved. See how it is skipped in setup_vmem().
470+ */
471+ __kaslr_offset_phys = text_lma - TEXT_OFFSET ;
466472 kaslr_adjust_vmlinux_info (__kaslr_offset_phys );
467- physmem_reserve (RR_VMLINUX , __kaslr_offset_phys , kernel_size );
468- deploy_kernel ((void * )__kaslr_offset_phys );
473+ physmem_reserve (RR_VMLINUX , text_lma , vmlinux_size );
474+ deploy_kernel ((void * )text_lma );
469475
470476 /* vmlinux decompression is done, shrink reserved low memory */
471477 physmem_reserve (RR_DECOMPRESSOR , 0 , (unsigned long )_decompressor_end );
@@ -488,7 +494,7 @@ void startup_kernel(void)
488494 amode31_lma = randomize_within_range (vmlinux .amode31_size , PAGE_SIZE , amode31_min , SZ_2G );
489495 }
490496 if (!amode31_lma )
491- amode31_lma = __kaslr_offset_phys - vmlinux .amode31_size ;
497+ amode31_lma = text_lma - vmlinux .amode31_size ;
492498 physmem_reserve (RR_AMODE31 , amode31_lma , vmlinux .amode31_size );
493499
494500 /*
@@ -504,8 +510,8 @@ void startup_kernel(void)
504510 * - copy_bootdata() must follow setup_vmem() to propagate changes
505511 * to bootdata made by setup_vmem()
506512 */
507- clear_bss_section (__kaslr_offset_phys );
508- kaslr_adjust_relocs (__kaslr_offset_phys , __kaslr_offset_phys + vmlinux .image_size ,
513+ clear_bss_section (text_lma );
514+ kaslr_adjust_relocs (text_lma , text_lma + vmlinux .image_size ,
509515 __kaslr_offset , __kaslr_offset_phys );
510516 kaslr_adjust_got (__kaslr_offset );
511517 setup_vmem (__kaslr_offset , __kaslr_offset + kernel_size , asce_limit );