-
Notifications
You must be signed in to change notification settings - Fork 1.6k
cranelift: Add heap support to filetest infrastructure #3154
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Merged
Merged
Changes from all commits
Commits
Show all changes
6 commits
Select commit
Hold shift + click to select a range
6d960ca
cranelift: Add heap support to filetest infrastructure
afonso360 8761828
cranelift: Explicit heap pointer placement in filetest annotations
afonso360 8394020
cranelift: Add documentation about the Heap directive
afonso360 225468f
cranelift: Clarify that heap filetests pointers must be laid out sequ…
afonso360 d4a8dde
cranelift: Use wrapping add when computing bound pointer
afonso360 87a9b00
cranelift: Better error messages when invalid signatures are found fo…
afonso360 File filter
Filter by extension
Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
There are no files selected for viewing
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,170 @@ | ||
| test run | ||
| target x86_64 machinst | ||
| target s390x | ||
| target aarch64 | ||
|
|
||
|
|
||
| ; Round-trips an i32 through a static heap indexed with an i64 address. | ||
| ; The heap base pointer is loaded from vmctx+0, matching the `heap:` | ||
| ; annotation below. | ||
| function %static_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 { | ||
| gv0 = vmctx | ||
| gv1 = load.i64 notrap aligned gv0+0 | ||
| heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64 | ||
|
|
||
| block0(v0: i64, v1: i64, v2: i32): | ||
| v3 = heap_addr.i64 heap0, v1, 4 | ||
| store.i32 v2, v3 | ||
| v4 = load.i32 v3 | ||
| return v4 | ||
| } | ||
| ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8 | ||
| ; run: %static_heap_i64_load_store(0, 1) == 1 | ||
| ; run: %static_heap_i64_load_store(0, -1) == -1 | ||
| ; run: %static_heap_i64_load_store(16, 1) == 1 | ||
| ; run: %static_heap_i64_load_store(16, -1) == -1 | ||
|
|
||
|
|
||
| ; Same round-trip as above, but the heap is indexed with an i32 address | ||
| ; (index_type i32). | ||
| function %static_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 { | ||
| gv0 = vmctx | ||
| gv1 = load.i64 notrap aligned gv0+0 | ||
| heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i32 | ||
|
|
||
| block0(v0: i64, v1: i32, v2: i32): | ||
| v3 = heap_addr.i64 heap0, v1, 4 | ||
| store.i32 v2, v3 | ||
| v4 = load.i32 v3 | ||
| return v4 | ||
| } | ||
| ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8 | ||
| ; run: %static_heap_i32_load_store(0, 1) == 1 | ||
| ; run: %static_heap_i32_load_store(0, -1) == -1 | ||
| ; run: %static_heap_i32_load_store(16, 1) == 1 | ||
| ; run: %static_heap_i32_load_store(16, -1) == -1 | ||
|
|
||
|
|
||
| ; Like %static_heap_i32_load_store, but the static heap is declared | ||
| ; without a `min` size clause. | ||
| function %static_heap_i32_load_store_no_min(i64 vmctx, i32, i32) -> i32 { | ||
| gv0 = vmctx | ||
| gv1 = load.i64 notrap aligned gv0+0 | ||
| heap0 = static gv1, bound 0x1_0000_0000, offset_guard 0, index_type i32 | ||
|
|
||
| block0(v0: i64, v1: i32, v2: i32): | ||
| v3 = heap_addr.i64 heap0, v1, 4 | ||
| store.i32 v2, v3 | ||
| v4 = load.i32 v3 | ||
| return v4 | ||
| } | ||
| ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8 | ||
| ; run: %static_heap_i32_load_store_no_min(0, 1) == 1 | ||
| ; run: %static_heap_i32_load_store_no_min(0, -1) == -1 | ||
| ; run: %static_heap_i32_load_store_no_min(16, 1) == 1 | ||
| ; run: %static_heap_i32_load_store_no_min(16, -1) == -1 | ||
|
|
||
|
|
||
| ; Round-trips an i32 through a dynamic heap (bound loaded from vmctx+8 | ||
| ; via gv2) indexed with an i64 address. | ||
| function %dynamic_heap_i64_load_store(i64 vmctx, i64, i32) -> i32 { | ||
| gv0 = vmctx | ||
| gv1 = load.i64 notrap aligned gv0+0 | ||
| gv2 = load.i64 notrap aligned gv0+8 | ||
| heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i64 | ||
|
|
||
| block0(v0: i64, v1: i64, v2: i32): | ||
| v3 = heap_addr.i64 heap0, v1, 4 | ||
| store.i32 v2, v3 | ||
| v4 = load.i32 v3 | ||
| return v4 | ||
| } | ||
| ; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8 | ||
| ; run: %dynamic_heap_i64_load_store(0, 1) == 1 | ||
| ; run: %dynamic_heap_i64_load_store(0, -1) == -1 | ||
| ; run: %dynamic_heap_i64_load_store(16, 1) == 1 | ||
| ; run: %dynamic_heap_i64_load_store(16, -1) == -1 | ||
|
|
||
|
|
||
| ; Same dynamic-heap round-trip as above, but indexed with an i32 address | ||
| ; (index_type i32). | ||
| function %dynamic_heap_i32_load_store(i64 vmctx, i32, i32) -> i32 { | ||
| gv0 = vmctx | ||
| gv1 = load.i64 notrap aligned gv0+0 | ||
| gv2 = load.i64 notrap aligned gv0+8 | ||
| heap0 = dynamic gv1, bound gv2, offset_guard 0, index_type i32 | ||
|
|
||
| block0(v0: i64, v1: i32, v2: i32): | ||
| v3 = heap_addr.i64 heap0, v1, 4 | ||
| store.i32 v2, v3 | ||
| v4 = load.i32 v3 | ||
| return v4 | ||
| } | ||
| ; heap: dynamic, size=0x1000, ptr=vmctx+0, bound=vmctx+8 | ||
| ; run: %dynamic_heap_i32_load_store(0, 1) == 1 | ||
| ; run: %dynamic_heap_i32_load_store(0, -1) == -1 | ||
| ; run: %dynamic_heap_i32_load_store(16, 1) == 1 | ||
| ; run: %dynamic_heap_i32_load_store(16, -1) == -1 | ||
|
|
||
|
|
||
| ; Stores one argument at offset 0 of each of two heaps — heap0 static | ||
| ; (ptr at vmctx+0), heap1 dynamic (ptr at vmctx+16, bound at vmctx+24) — | ||
| ; then loads both back and returns their sum. Exercises multiple heap | ||
| ; declarations in one function. | ||
| function %multi_heap_load_store(i64 vmctx, i32, i32) -> i32 { | ||
| gv0 = vmctx | ||
| gv1 = load.i64 notrap aligned gv0+0 | ||
| gv2 = load.i64 notrap aligned gv0+16 | ||
| gv3 = load.i64 notrap aligned gv0+24 | ||
| heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64 | ||
| heap1 = dynamic gv2, bound gv3, offset_guard 0, index_type i32 | ||
|
|
||
| block0(v0: i64, v1: i32, v2: i32): | ||
| v3 = iconst.i64 0 | ||
| v4 = iconst.i32 0 | ||
|
|
||
| ; Store lhs in heap0 | ||
| v5 = heap_addr.i64 heap0, v3, 4 | ||
| store.i32 v1, v5 | ||
|
|
||
| ; Store rhs in heap1 | ||
| v6 = heap_addr.i64 heap1, v4, 4 | ||
| store.i32 v2, v6 | ||
|
|
||
|
|
||
| v7 = load.i32 v5 | ||
| v8 = load.i32 v6 | ||
|
|
||
| v9 = iadd.i32 v7, v8 | ||
| return v9 | ||
| } | ||
| ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8 | ||
| ; heap: dynamic, size=0x1000, ptr=vmctx+16, bound=vmctx+24 | ||
| ; run: %multi_heap_load_store(1, 2) == 3 | ||
| ; run: %multi_heap_load_store(4, 5) == 9 | ||
|
|
||
|
|
||
|
|
||
| ; Round-trips an i32 at heap offsets 0 through 3 (see the run lines), | ||
| ; so three of the four accesses are not 4-byte aligned. | ||
| function %static_heap_i64_load_store_unaligned(i64 vmctx, i64, i32) -> i32 { | ||
| gv0 = vmctx | ||
| gv1 = load.i64 notrap aligned gv0+0 | ||
| heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0, index_type i64 | ||
|
|
||
| block0(v0: i64, v1: i64, v2: i32): | ||
| v3 = heap_addr.i64 heap0, v1, 4 | ||
| store.i32 v2, v3 | ||
| v4 = load.i32 v3 | ||
| return v4 | ||
| } | ||
| ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8 | ||
| ; run: %static_heap_i64_load_store_unaligned(0, 1) == 1 | ||
| ; run: %static_heap_i64_load_store_unaligned(0, -1) == -1 | ||
| ; run: %static_heap_i64_load_store_unaligned(1, 1) == 1 | ||
| ; run: %static_heap_i64_load_store_unaligned(1, -1) == -1 | ||
| ; run: %static_heap_i64_load_store_unaligned(2, 1) == 1 | ||
| ; run: %static_heap_i64_load_store_unaligned(2, -1) == -1 | ||
| ; run: %static_heap_i64_load_store_unaligned(3, 1) == 1 | ||
| ; run: %static_heap_i64_load_store_unaligned(3, -1) == -1 | ||
|
|
||
|
|
||
| ; This stores data in the place of the pointer in the vmctx struct, not in the heap itself. | ||
| ; The heap base is computed as gv1 = vmctx + 0 (iadd_imm), i.e. the vmctx | ||
| ; pointer itself, so heap address 0 aliases the first vmctx slot. | ||
| function %static_heap_i64_iadd_imm(i64 vmctx, i32) -> i32 { | ||
| gv0 = vmctx | ||
| gv1 = iadd_imm.i64 gv0, 0 | ||
| heap0 = static gv1, min 0x1000, bound 0x1_0000_0000, offset_guard 0x8000_0000, index_type i64 | ||
|
|
||
| block0(v0: i64, v1: i32): | ||
| v2 = iconst.i64 0 | ||
| v3 = heap_addr.i64 heap0, v2, 4 | ||
| store.i32 v1, v3 | ||
| v4 = load.i32 v3 | ||
| return v4 | ||
| } | ||
| ; heap: static, size=0x1000, ptr=vmctx+0, bound=vmctx+8 | ||
| ; run: %static_heap_i64_iadd_imm(1) == 1 | ||
| ; run: %static_heap_i64_iadd_imm(-1) == -1 |
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
This file contains hidden or bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,111 @@ | ||
| use anyhow::anyhow; | ||
| use cranelift_codegen::data_value::DataValue; | ||
| use cranelift_codegen::ir::Type; | ||
| use cranelift_reader::parse_heap_command; | ||
| use cranelift_reader::{Comment, HeapCommand}; | ||
|
|
||
| /// Stores info about the expected environment for a test function. | ||
| #[derive(Debug, Clone)] | ||
| pub struct RuntestEnvironment { | ||
| /// One entry per `heap:` directive parsed from the test's comments, | ||
| /// in declaration order. | ||
| pub heaps: Vec<HeapCommand>, | ||
| } | ||
|
|
||
| impl RuntestEnvironment { | ||
| /// Parse the environment from a set of comments | ||
| pub fn parse(comments: &[Comment]) -> anyhow::Result<Self> { | ||
| let mut env = RuntestEnvironment { heaps: Vec::new() }; | ||
|
|
||
| for comment in comments.iter() { | ||
| if let Some(heap_command) = parse_heap_command(comment.text)? { | ||
| let heap_index = env.heaps.len() as u64; | ||
| let expected_ptr = heap_index * 16; | ||
| if Some(expected_ptr) != heap_command.ptr_offset.map(|p| p.into()) { | ||
| return Err(anyhow!( | ||
| "Invalid ptr offset, expected vmctx+{}", | ||
| expected_ptr | ||
| )); | ||
| } | ||
|
|
||
| let expected_bound = (heap_index * 16) + 8; | ||
| if Some(expected_bound) != heap_command.bound_offset.map(|p| p.into()) { | ||
| return Err(anyhow!( | ||
| "Invalid bound offset, expected vmctx+{}", | ||
| expected_bound | ||
| )); | ||
| } | ||
|
|
||
| env.heaps.push(heap_command); | ||
| }; | ||
| } | ||
|
|
||
| Ok(env) | ||
| } | ||
|
|
||
| pub fn is_active(&self) -> bool { | ||
| !self.heaps.is_empty() | ||
| } | ||
|
|
||
| /// Allocates a struct to be injected into the test. | ||
| pub fn runtime_struct(&self) -> RuntestContext { | ||
| RuntestContext::new(&self) | ||
| } | ||
| } | ||
|
|
||
| type HeapMemory = Vec<u8>; | ||
|
|
||
| /// A struct that provides info about the environment to the test | ||
| #[derive(Debug, Clone)] | ||
| pub struct RuntestContext { | ||
| /// Store the heap memory alongside the context info so that we don't accidentally deallocate | ||
| /// it too early. | ||
| /// (Each `Vec<u8>` buffer is a separate heap allocation, so the addresses | ||
| /// recorded in `context_struct` stay valid while this struct is alive.) | ||
| heaps: Vec<HeapMemory>, | ||
|
|
||
| /// This is the actual struct that gets passed into the `vmctx` argument of the tests. | ||
| /// It has a specific memory layout that all tests agree with. | ||
| /// | ||
| /// Currently we only have to store heap info, so we store the heap start and end addresses in | ||
| /// a 64 bit slot for each heap. | ||
| /// | ||
| /// ┌────────────┐ | ||
| /// │heap0: start│ | ||
| /// ├────────────┤ | ||
| /// │heap0: end │ | ||
| /// ├────────────┤ | ||
| /// │heap1: start│ | ||
| /// ├────────────┤ | ||
| /// │heap1: end │ | ||
| /// ├────────────┤ | ||
| /// │etc... │ | ||
| /// └────────────┘ | ||
| context_struct: Vec<u64>, | ||
| } | ||
|
|
||
| impl RuntestContext { | ||
| pub fn new(env: &RuntestEnvironment) -> Self { | ||
| let heaps: Vec<HeapMemory> = env | ||
| .heaps | ||
| .iter() | ||
| .map(|cmd| { | ||
| let size: u64 = cmd.size.into(); | ||
| vec![0u8; size as usize] | ||
| }) | ||
| .collect(); | ||
|
|
||
| let context_struct = heaps | ||
| .iter() | ||
| .flat_map(|heap| [heap.as_ptr(), heap.as_ptr().wrapping_add(heap.len())]) | ||
| .map(|p| p as usize as u64) | ||
| .collect(); | ||
|
|
||
| Self { | ||
| heaps, | ||
| context_struct, | ||
| } | ||
| } | ||
|
|
||
| /// Creates a [DataValue] with a target isa pointer type to the context struct. | ||
| pub fn pointer(&self, ty: Type) -> DataValue { | ||
| let ptr = self.context_struct.as_ptr() as usize as i128; | ||
| DataValue::from_integer(ptr, ty).expect("Failed to cast pointer to native target size") | ||
| } | ||
| } |
Oops, something went wrong.
Add this suggestion to a batch that can be applied as a single commit.
This suggestion is invalid because no changes were made to the code.
Suggestions cannot be applied while the pull request is closed.
Suggestions cannot be applied while viewing a subset of changes.
Only one suggestion per line can be applied in a batch.
Add this suggestion to a batch that can be applied as a single commit.
Applying suggestions on deleted lines is not supported.
You must change the existing code in this line in order to create a valid suggestion.
Outdated suggestions cannot be applied.
This suggestion has been applied or marked resolved.
Suggestions cannot be applied from pending reviews.
Suggestions cannot be applied on multi-line comments.
Suggestions cannot be applied while the pull request is queued to merge.
Suggestion cannot be applied right now. Please check back later.
Uh oh!
There was an error while loading. Please reload this page.