@@ -109,9 +109,19 @@ static void setup_lpp(void)
109109}
110110
111111#ifdef CONFIG_KERNEL_UNCOMPRESSED
112- unsigned long mem_safe_offset (void )
112+ static unsigned long mem_safe_offset (void )
113113{
114- return vmlinux .default_lma + vmlinux .image_size + vmlinux .bss_size ;
114+ return (unsigned long )_compressed_start ;
115+ }
116+
117+ static void deploy_kernel (void * output )
118+ {
119+ void * uncompressed_start = (void * )_compressed_start ;
120+
121+ if (output == uncompressed_start )
122+ return ;
123+ memmove (output , uncompressed_start , vmlinux .image_size );
124+ memset (uncompressed_start , 0 , vmlinux .image_size );
115125}
116126#endif
117127
@@ -154,18 +164,18 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
154164 rela_end = (Elf64_Rela * ) vmlinux .rela_dyn_end ;
155165 dynsym = (Elf64_Sym * ) vmlinux .dynsym_start ;
156166 for (rela = rela_start ; rela < rela_end ; rela ++ ) {
157- loc = rela -> r_offset + phys_offset ;
167+ loc = rela -> r_offset + phys_offset - __START_KERNEL ;
158168 val = rela -> r_addend ;
159169 r_sym = ELF64_R_SYM (rela -> r_info );
160170 if (r_sym ) {
161171 if (dynsym [r_sym ].st_shndx != SHN_UNDEF )
162- val += dynsym [r_sym ].st_value + offset ;
172+ val += dynsym [r_sym ].st_value + offset - __START_KERNEL ;
163173 } else {
164174 /*
165- * 0 == undefined symbol table index (STN_UNDEF ),
175+ * 0 == undefined symbol table index (STN_UNDEF ),
166176 * used for R_390_RELATIVE, only add KASLR offset
167177 */
168- val += offset ;
178+ val += offset - __START_KERNEL ;
169179 }
170180 r_type = ELF64_R_TYPE (rela -> r_info );
171181 rc = arch_kexec_do_relocs (r_type , (void * ) loc , val , 0 );
@@ -206,7 +216,7 @@ static void kaslr_adjust_relocs(unsigned long min_addr, unsigned long max_addr,
206216 loc = (long )* reloc + phys_offset ;
207217 if (loc < min_addr || loc > max_addr )
208218 error ("64-bit relocation outside of kernel!\n" );
209- * (u64 * )loc += offset ;
219+ * (u64 * )loc += offset - __START_KERNEL ;
210220 }
211221}
212222
@@ -219,7 +229,7 @@ static void kaslr_adjust_got(unsigned long offset)
219229 * reason. Adjust the GOT entries.
220230 */
221231 for (entry = (u64 * )vmlinux .got_start ; entry < (u64 * )vmlinux .got_end ; entry ++ )
222- * entry += offset ;
232+ * entry += offset - __START_KERNEL ;
223233}
224234#endif
225235
@@ -294,6 +304,7 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
294304 vmemmap_size = SECTION_ALIGN_UP (pages ) * sizeof (struct page );
295305
296306 /* choose kernel address space layout: 4 or 3 levels. */
307+ BUILD_BUG_ON (!IS_ALIGNED (__START_KERNEL , THREAD_SIZE ));
297308 BUILD_BUG_ON (!IS_ALIGNED (__NO_KASLR_START_KERNEL , THREAD_SIZE ));
298309 BUILD_BUG_ON (__NO_KASLR_END_KERNEL > _REGION1_SIZE );
299310 vsize = get_vmem_size (ident_map_size , vmemmap_size , vmalloc_size , _REGION3_SIZE );
@@ -383,9 +394,9 @@ static unsigned long setup_kernel_memory_layout(unsigned long kernel_size)
383394/*
384395 * This function clears the BSS section of the decompressed Linux kernel and NOT the decompressor's.
385396 */
386- static void clear_bss_section (unsigned long vmlinux_lma )
397+ static void clear_bss_section (unsigned long kernel_start )
387398{
388- memset ((void * )vmlinux_lma + vmlinux .image_size , 0 , vmlinux .bss_size );
399+ memset ((void * )kernel_start + vmlinux .image_size , 0 , vmlinux .bss_size );
389400}
390401
391402/*
@@ -402,7 +413,7 @@ static void setup_vmalloc_size(void)
402413 vmalloc_size = max (size , vmalloc_size );
403414}
404415
405- static void kaslr_adjust_vmlinux_info (unsigned long offset )
416+ static void kaslr_adjust_vmlinux_info (long offset )
406417{
407418 vmlinux .bootdata_off += offset ;
408419 vmlinux .bootdata_preserved_off += offset ;
@@ -426,24 +437,30 @@ static void kaslr_adjust_vmlinux_info(unsigned long offset)
426437#endif
427438}
428439
440+ static void fixup_vmlinux_info (void )
441+ {
442+ vmlinux .entry -= __START_KERNEL ;
443+ kaslr_adjust_vmlinux_info (- __START_KERNEL );
444+ }
445+
429446void startup_kernel (void )
430447{
431- unsigned long max_physmem_end ;
432- unsigned long vmlinux_lma = 0 ;
448+ unsigned long kernel_size = vmlinux . image_size + vmlinux . bss_size ;
449+ unsigned long nokaslr_offset_phys = mem_safe_offset () ;
433450 unsigned long amode31_lma = 0 ;
434- unsigned long kernel_size ;
451+ unsigned long max_physmem_end ;
435452 unsigned long asce_limit ;
436453 unsigned long safe_addr ;
437- void * img ;
438454 psw_t psw ;
439455
456+ fixup_vmlinux_info ();
440457 setup_lpp ();
441- safe_addr = mem_safe_offset ( );
458+ safe_addr = PAGE_ALIGN ( nokaslr_offset_phys + kernel_size );
442459
443460 /*
444- * Reserve decompressor memory together with decompression heap, buffer and
445- * memory which might be occupied by uncompressed kernel at default 1Mb
446- * position (if KASLR is off or failed).
461+ * Reserve decompressor memory together with decompression heap,
462+ * buffer and memory which might be occupied by uncompressed kernel
463+ * (if KASLR is off or failed).
447464 */
448465 physmem_reserve (RR_DECOMPRESSOR , 0 , safe_addr );
449466 if (IS_ENABLED (CONFIG_BLK_DEV_INITRD ) && parmarea .initrd_size )
@@ -463,7 +480,6 @@ void startup_kernel(void)
463480 max_physmem_end = detect_max_physmem_end ();
464481 setup_ident_map_size (max_physmem_end );
465482 setup_vmalloc_size ();
466- kernel_size = vmlinux .default_lma + vmlinux .image_size + vmlinux .bss_size ;
467483 asce_limit = setup_kernel_memory_layout (kernel_size );
468484 /* got final ident_map_size, physmem allocations could be performed now */
469485 physmem_set_usable_limit (ident_map_size );
@@ -472,32 +488,20 @@ void startup_kernel(void)
472488 rescue_initrd (safe_addr , ident_map_size );
473489 rescue_relocs ();
474490
475- if (kaslr_enabled ()) {
476- vmlinux_lma = randomize_within_range (vmlinux .image_size + vmlinux .bss_size ,
477- THREAD_SIZE , vmlinux .default_lma ,
478- ident_map_size );
479- if (vmlinux_lma ) {
480- __kaslr_offset_phys = vmlinux_lma - vmlinux .default_lma ;
481- kaslr_adjust_vmlinux_info (__kaslr_offset_phys );
482- }
483- }
484- vmlinux_lma = vmlinux_lma ?: vmlinux .default_lma ;
485- physmem_reserve (RR_VMLINUX , vmlinux_lma , vmlinux .image_size + vmlinux .bss_size );
486-
487- if (!IS_ENABLED (CONFIG_KERNEL_UNCOMPRESSED )) {
488- img = decompress_kernel ();
489- memmove ((void * )vmlinux_lma , img , vmlinux .image_size );
490- } else if (__kaslr_offset_phys ) {
491- img = (void * )vmlinux .default_lma ;
492- memmove ((void * )vmlinux_lma , img , vmlinux .image_size );
493- memset (img , 0 , vmlinux .image_size );
494- }
491+ if (kaslr_enabled ())
492+ __kaslr_offset_phys = randomize_within_range (kernel_size , THREAD_SIZE , 0 , ident_map_size );
493+ if (!__kaslr_offset_phys )
494+ __kaslr_offset_phys = nokaslr_offset_phys ;
495+ kaslr_adjust_vmlinux_info (__kaslr_offset_phys );
496+ physmem_reserve (RR_VMLINUX , __kaslr_offset_phys , kernel_size );
497+ deploy_kernel ((void * )__kaslr_offset_phys );
495498
496499 /* vmlinux decompression is done, shrink reserved low memory */
497500 physmem_reserve (RR_DECOMPRESSOR , 0 , (unsigned long )_decompressor_end );
498501 if (kaslr_enabled ())
499502 amode31_lma = randomize_within_range (vmlinux .amode31_size , PAGE_SIZE , 0 , SZ_2G );
500- amode31_lma = amode31_lma ?: vmlinux .default_lma - vmlinux .amode31_size ;
503+ if (!amode31_lma )
504+ amode31_lma = __kaslr_offset_phys - vmlinux .amode31_size ;
501505 physmem_reserve (RR_AMODE31 , amode31_lma , vmlinux .amode31_size );
502506
503507 /*
@@ -513,8 +517,8 @@ void startup_kernel(void)
513517 * - copy_bootdata() must follow setup_vmem() to propagate changes
514518 * to bootdata made by setup_vmem()
515519 */
516- clear_bss_section (vmlinux_lma );
517- kaslr_adjust_relocs (vmlinux_lma , vmlinux_lma + vmlinux .image_size ,
520+ clear_bss_section (__kaslr_offset_phys );
521+ kaslr_adjust_relocs (__kaslr_offset_phys , __kaslr_offset_phys + vmlinux .image_size ,
518522 __kaslr_offset , __kaslr_offset_phys );
519523 kaslr_adjust_got (__kaslr_offset );
520524 free_relocs ();