-
Notifications
You must be signed in to change notification settings - Fork 393
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We'll occasionally send you account related emails.
Already on GitHub? Sign in to your account
[2024] AArch64 support #1088
base: develop
Are you sure you want to change the base?
[2024] AArch64 support #1088
Changes from all commits
e5f4071
1fd3708
490b593
c9e7a3b
584d5f1
156eb51
f63a6f4
f54c2d1
1833dbd
fee620c
720aa60
001e473
8e56198
f46d2a6
8c0c6f6
19e65ea
254a3e7
bc1712f
a2145be
edee718
7b50a34
6d60a08
0e23aee
c10ab69
6a821e7
37512f9
5355126
eb872e6
f994d28
f9f5c0f
66e989b
7040645
65de635
7ad5bb1
bc47e76
8978b38
6cafcc5
dc559fc
25f94df
7aae9c8
680d0e4
ff0bfc3
b4ab6ee
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -4,20 +4,21 @@ | |
|
||
import logging | ||
import os | ||
from typing import Optional, Tuple, Type | ||
from typing import Optional, Tuple, Type, Union | ||
|
||
from volatility3.framework import constants, interfaces | ||
from volatility3.framework import constants, interfaces, exceptions | ||
from volatility3.framework.automagic import symbol_cache, symbol_finder | ||
from volatility3.framework.configuration import requirements | ||
from volatility3.framework.layers import intel, scanners | ||
from volatility3.framework.layers import intel, scanners, arm | ||
from volatility3.framework.symbols import linux | ||
|
||
vollog = logging.getLogger(__name__) | ||
|
||
|
||
class LinuxIntelStacker(interfaces.automagic.StackerLayerInterface): | ||
class LinuxStacker(interfaces.automagic.StackerLayerInterface): | ||
stack_order = 35 | ||
exclusion_list = ["mac", "windows"] | ||
join = interfaces.configuration.path_join | ||
|
||
@classmethod | ||
def stack( | ||
|
@@ -39,11 +40,10 @@ def stack( | |
|
||
# Bail out by default unless we can stack properly | ||
layer = context.layers[layer_name] | ||
join = interfaces.configuration.path_join | ||
|
||
# Never stack on top of an intel layer | ||
# Never stack on top of a linux layer | ||
# FIXME: Find a way to improve this check | ||
if isinstance(layer, intel.Intel): | ||
if isinstance(layer, intel.Intel) or isinstance(layer, arm.AArch64): | ||
return None | ||
|
||
identifiers_path = os.path.join( | ||
|
@@ -59,50 +59,214 @@ def stack( | |
) | ||
return None | ||
|
||
seen_banners = [] | ||
mss = scanners.MultiStringScanner([x for x in linux_banners if x is not None]) | ||
for _, banner in layer.scan( | ||
context=context, scanner=mss, progress_callback=progress_callback | ||
): | ||
dtb = None | ||
# No need to try stackers on the same banner more than once | ||
if banner in seen_banners: | ||
continue | ||
else: | ||
seen_banners.append(banner) | ||
|
||
vollog.debug(f"Identified banner: {repr(banner)}") | ||
|
||
isf_path = linux_banners.get(banner, None) | ||
if isf_path: | ||
table_name = context.symbol_space.free_table_name("LintelStacker") | ||
table_name = context.symbol_space.free_table_name("LinuxStacker") | ||
table = linux.LinuxKernelIntermedSymbols( | ||
context, | ||
"temporary." + table_name, | ||
name=table_name, | ||
isf_url=isf_path, | ||
) | ||
context.symbol_space.append(table) | ||
kaslr_shift, aslr_shift = cls.find_aslr( | ||
context, table_name, layer_name, progress_callback=progress_callback | ||
) | ||
new_layer_name = context.layers.free_layer_name("LinuxLayer") | ||
config_path = cls.join("LinuxHelper", new_layer_name) | ||
context.config[cls.join(config_path, "memory_layer")] = layer_name | ||
context.config[ | ||
cls.join(config_path, LinuxSymbolFinder.banner_config_key) | ||
] = str(banner, "latin-1") | ||
|
||
layer_class: Type = intel.Intel | ||
if "init_top_pgt" in table.symbols: | ||
layer_class = intel.Intel32e | ||
dtb_symbol_name = "init_top_pgt" | ||
elif "init_level4_pgt" in table.symbols: | ||
layer_class = intel.Intel32e | ||
dtb_symbol_name = "init_level4_pgt" | ||
else: | ||
dtb_symbol_name = "swapper_pg_dir" | ||
linux_arch_stackers = [cls.intel_stacker, cls.aarch64_stacker] | ||
for linux_arch_stacker in linux_arch_stackers: | ||
try: | ||
layer = linux_arch_stacker( | ||
context=context, | ||
layer_name=layer_name, | ||
table=table, | ||
table_name=table_name, | ||
config_path=config_path, | ||
new_layer_name=new_layer_name, | ||
banner=banner, | ||
progress_callback=progress_callback, | ||
) | ||
if layer: | ||
return layer | ||
except Exception as e: | ||
vollog.exception(e) | ||
|
||
dtb = cls.virtual_to_physical_address( | ||
table.get_symbol(dtb_symbol_name).address + kaslr_shift | ||
) | ||
vollog.debug("No suitable linux banner could be matched") | ||
return None | ||
|
||
# Build the new layer | ||
new_layer_name = context.layers.free_layer_name("IntelLayer") | ||
config_path = join("IntelHelper", new_layer_name) | ||
context.config[join(config_path, "memory_layer")] = layer_name | ||
context.config[join(config_path, "page_map_offset")] = dtb | ||
context.config[ | ||
join(config_path, LinuxSymbolFinder.banner_config_key) | ||
] = str(banner, "latin-1") | ||
@classmethod | ||
def intel_stacker( | ||
cls, | ||
context: interfaces.context.ContextInterface, | ||
layer_name: str, | ||
table: linux.LinuxKernelIntermedSymbols, | ||
table_name: str, | ||
config_path: str, | ||
new_layer_name: str, | ||
banner: str, | ||
progress_callback: constants.ProgressCallback = None, | ||
) -> Union[intel.Intel, intel.Intel32e, None]: | ||
|
||
layer_class: Type = intel.Intel | ||
if "init_top_pgt" in table.symbols: | ||
layer_class = intel.Intel32e | ||
dtb_symbol_name = "init_top_pgt" | ||
elif "init_level4_pgt" in table.symbols: | ||
layer_class = intel.Intel32e | ||
dtb_symbol_name = "init_level4_pgt" | ||
else: | ||
dtb_symbol_name = "swapper_pg_dir" | ||
|
||
kaslr_shift, aslr_shift = cls.find_aslr( | ||
context, | ||
table_name, | ||
layer_name, | ||
layer_class, | ||
progress_callback=progress_callback, | ||
) | ||
|
||
dtb = cls.virtual_to_physical_address( | ||
table.get_symbol(dtb_symbol_name).address + kaslr_shift | ||
) | ||
|
||
# Build the new layer | ||
context.config[cls.join(config_path, "page_map_offset")] = dtb | ||
|
||
layer = layer_class( | ||
context, | ||
config_path=config_path, | ||
name=new_layer_name, | ||
metadata={"os": "Linux"}, | ||
) | ||
layer.config["kernel_virtual_offset"] = aslr_shift | ||
linux_banner_address = table.get_symbol("linux_banner").address + aslr_shift | ||
test_banner_equality = cls.verify_translation_by_banner( | ||
context=context, | ||
layer=layer, | ||
layer_name=layer_name, | ||
linux_banner_address=linux_banner_address, | ||
target_banner=banner, | ||
) | ||
|
||
if layer and dtb and test_banner_equality: | ||
vollog.debug(f"DTB was found at: 0x{dtb:0x}") | ||
vollog.debug("Intel image found") | ||
return layer | ||
else: | ||
layer.destroy() | ||
|
||
return None | ||
|
||
@classmethod | ||
def aarch64_stacker( | ||
cls, | ||
context: interfaces.context.ContextInterface, | ||
layer_name: str, | ||
table: linux.LinuxKernelIntermedSymbols, | ||
table_name: str, | ||
config_path: str, | ||
new_layer_name: str, | ||
banner: bytes, | ||
progress_callback: constants.ProgressCallback = None, | ||
) -> Optional[arm.AArch64]: | ||
|
||
layer_class = arm.AArch64 | ||
kaslr_shift, aslr_shift = cls.find_aslr( | ||
context, | ||
table_name, | ||
layer_name, | ||
layer_class, | ||
progress_callback=progress_callback, | ||
) | ||
dtb = table.get_symbol("swapper_pg_dir").address + kaslr_shift | ||
context.config[cls.join(config_path, "page_map_offset")] = dtb | ||
context.config[cls.join(config_path, "page_map_offset_kernel")] = dtb | ||
kernel_endianness = table.get_type("pointer").vol.data_format.byteorder | ||
context.config[cls.join(config_path, "kernel_endianness")] = kernel_endianness | ||
|
||
# CREDIT : https://github.com/crash-utility/crash/blob/28891d1127542dbb2d5ba16c575e14e741ed73ef/arm64.c#L941 | ||
kernel_flags = 0 | ||
if "_kernel_flags_le" in table.symbols: | ||
kernel_flags = table.get_symbol("_kernel_flags_le").address | ||
if "_kernel_flags_le_hi32" in table.symbols: | ||
kernel_flags |= table.get_symbol("_kernel_flags_le_hi32").address << 32 | ||
if "_kernel_flags_le_lo32" in table.symbols: | ||
kernel_flags |= table.get_symbol("_kernel_flags_le_lo32").address | ||
|
||
# https://www.kernel.org/doc/Documentation/arm64/booting.txt | ||
page_size_kernel_space_bit = (kernel_flags >> 1) & 3 | ||
page_size_kernel_space_candidates = ( | ||
[4**page_size_kernel_space_bit] | ||
if 1 <= page_size_kernel_space_bit <= 3 | ||
else [4, 16, 64] | ||
) | ||
|
||
linux_banner_address = table.get_symbol("linux_banner").address + aslr_shift | ||
# Linux source : v6.7/source/arch/arm64/include/asm/memory.h#L186 - v5.7/source/arch/arm64/include/asm/memory.h#L160 | ||
va_bits = 0 | ||
if "vabits_actual" in table.symbols: | ||
vabits_actual_phys_addr = ( | ||
table.get_symbol("vabits_actual").address + kaslr_shift | ||
) | ||
# Linux source : v6.7/source/arch/arm64/Kconfig#L1263, VA_BITS | ||
va_bits = int.from_bytes( | ||
context.layers[layer_name].read(vabits_actual_phys_addr, 8), | ||
kernel_endianness, | ||
) | ||
if not va_bits: | ||
""" | ||
Count leftmost bits equal to 1, deduce number of used bits for virtual addressing. | ||
Example : | ||
linux_banner_address = 0xffffffd733aae820 = 0b1111111111111111111111111101011100110011101010101110100000100000 | ||
va_bits = (linux_banner_address ^ (2**64 - 1)).bit_length() + 1 = 39 | ||
""" | ||
va_bits = (linux_banner_address ^ (2**64 - 1)).bit_length() + 1 | ||
|
||
""" | ||
Determining the number of useful bits in virtual addresses (VA_BITS) | ||
is not straightforward, and not available in the kernel symbols. | ||
Calculation by masking works great, but not in every case, due to the AArch64 memory layout, | ||
sometimes pushing kernel addresses "too far" from the TTB1 start. | ||
See https://www.kernel.org/doc/html/v5.5/arm64/memory.html. | ||
Errors are by 1 or 2 bits, so we can try va_bits - {1,2,3}. | ||
Example, assuming the good va_bits value is 39 : | ||
# Case where calculation was correct : 1 iteration | ||
va_bits_candidates = [**39**, 38, 37, 36] | ||
# Case where calculation is off by 1 : 2 iterations | ||
va_bits_candidates = [40, **39**, 38, 37] | ||
""" | ||
va_bits_candidates = [va_bits] + [va_bits + i for i in range(-1, -4, -1)] | ||
for va_bits in va_bits_candidates: | ||
tcr_el1_t1sz = 64 - va_bits | ||
# T1SZ is considered equal to T0SZ | ||
context.config[cls.join(config_path, "tcr_el1_t1sz")] = tcr_el1_t1sz | ||
context.config[cls.join(config_path, "tcr_el1_t0sz")] = tcr_el1_t1sz | ||
|
||
# If "_kernel_flags_le*" aren't in the symbols, we can still do a quick bruteforce on [4,16,64] page sizes | ||
# False positives cannot happen, as translation indexes will be off on a wrong page size | ||
for page_size_kernel_space in page_size_kernel_space_candidates: | ||
# Kernel space page size is considered equal to the user space page size | ||
context.config[cls.join(config_path, "page_size_kernel_space")] = ( | ||
page_size_kernel_space | ||
) | ||
context.config[cls.join(config_path, "page_size_user_space")] = ( | ||
page_size_kernel_space | ||
) | ||
# Build layer | ||
layer = layer_class( | ||
context, | ||
config_path=config_path, | ||
|
@@ -111,18 +275,65 @@ def stack( | |
) | ||
layer.config["kernel_virtual_offset"] = aslr_shift | ||
|
||
if layer and dtb: | ||
vollog.debug(f"DTB was found at: 0x{dtb:0x}") | ||
return layer | ||
vollog.debug("No suitable linux banner could be matched") | ||
test_banner_equality = cls.verify_translation_by_banner( | ||
context=context, | ||
layer=layer, | ||
layer_name=layer_name, | ||
linux_banner_address=linux_banner_address, | ||
target_banner=banner, | ||
) | ||
|
||
if layer and dtb and test_banner_equality: | ||
vollog.debug(f"Kernel DTB was found at: 0x{dtb:0x}") | ||
vollog.debug("AArch64 image found") | ||
return layer | ||
else: | ||
layer.destroy() | ||
|
||
return None | ||
|
||
@classmethod | ||
def verify_translation_by_banner( | ||
cls, | ||
context: interfaces.context.ContextInterface, | ||
layer, | ||
layer_name: str, | ||
linux_banner_address: int, | ||
target_banner: bytes, | ||
) -> bool: | ||
"""Determine if a stacked layer is correct or a false positive, by calling the underlying | ||
_translate method against the linux_banner symbol virtual address. Then, compare it with | ||
the detected banner to verify the correct translation. | ||
""" | ||
|
||
try: | ||
banner_phys_address = layer._translate(linux_banner_address)[0] | ||
banner_value = context.layers[layer_name].read( | ||
banner_phys_address, len(target_banner) | ||
) | ||
except exceptions.InvalidAddressException as e: | ||
vollog.log( | ||
constants.LOGLEVEL_VVVV, | ||
'Cannot translate "linux_banner" symbol virtual address.', | ||
) | ||
return False | ||
|
||
if not banner_value == target_banner: | ||
vollog.log( | ||
constants.LOGLEVEL_VV, | ||
f"Mismatch between scanned and virtually translated linux banner : {target_banner} != {banner_value}.", | ||
) | ||
return False | ||
|
||
return True | ||
|
||
@classmethod | ||
def find_aslr( | ||
cls, | ||
context: interfaces.context.ContextInterface, | ||
symbol_table: str, | ||
layer_name: str, | ||
layer_class, | ||
progress_callback: constants.ProgressCallback = None, | ||
) -> Tuple[int, int]: | ||
"""Determines the offset of the actual DTB in physical space and its | ||
|
@@ -165,9 +376,12 @@ def find_aslr( | |
) | ||
- module.get_symbol("init_files").address | ||
) | ||
kaslr_shift = init_task_address - cls.virtual_to_physical_address( | ||
init_task_json_address | ||
) | ||
if layer_class == arm.AArch64: | ||
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Rather than changing the API, it should be possible to derive this by getting the layer using There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. As the whole But if in the future we have to add some other specific AArch64 code to |
||
kaslr_shift = init_task_address - init_task_json_address | ||
else: | ||
kaslr_shift = init_task_address - cls.virtual_to_physical_address( | ||
init_task_json_address | ||
) | ||
if address_mask: | ||
aslr_shift = aslr_shift & address_mask | ||
|
||
|
@@ -199,5 +413,5 @@ class LinuxSymbolFinder(symbol_finder.SymbolFinder): | |
banner_config_key = "kernel_banner" | ||
operating_system = "linux" | ||
symbol_class = "volatility3.framework.symbols.linux.LinuxKernelIntermedSymbols" | ||
find_aslr = lambda cls, *args: LinuxIntelStacker.find_aslr(*args)[1] | ||
find_aslr = lambda cls, *args: LinuxStacker.find_aslr(*args)[1] | ||
exclusion_list = ["mac", "windows"] |
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
To improve this, why not add an explicit layer property, like
_is_top_layer
insidevolatility3/framework/layers/intel.py#Intel
andvolatility3/framework/layers/arm.py#AArch64
and check with following :If we keep the current implementation, we have to change the Linux, Windows and Mac stacker for each new architecture layer.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Part of the reason is virtualization, it's possible to have an arm layer inside an intel layer (and it's certainly possible to have an intel layer on top an intel layer). You're right, it's not really a scalable solution (hence the FIXME right beneath this line), but it's also not a trivial attribute...
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Which stacker is supposed to do this (the LinuxStacker, WindowsIntelStacker and MacIntelStacker block it), for example if a VM managed by qemu-system-aarch64 on an Intel host was running when the memory was dumped?
Will a "VM (qemu) layer" be available from the globals
context.layers
variable too :There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I don't think there is anything automatic in core that would do that, but it would be nice to have.
There has been this issue from a while ago that talks about that kind of thing.
#464
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Thanks for pointing it out :) So in the current state, as it is not implemented, we will continue to strictly refuse stacking on top of Linux and AArch64. Adding an explicit flag on those two might be a temporary and more scalable solution?
I leave this as a side note, for a potential reader in the future interested in AArch64 hypervisor execution mode (and what it might imply, if treating a layer from the hypervisor point of view) : https://developer.arm.com/documentation/102412/0103/Privilege-and-Exception-levels/Exception-levels