 // usize->pointer casts is viable. It seems like a lot of work for us to participate for not much
 // benefit.
 
-use std::ptr::NonNull;
+use std::{cell::RefCell, ptr::NonNull};
 
 use crate::{backend::ir::Target, stats::yjit_alloc_size, utils::IntoUsize};
 
@@ -36,6 +36,12 @@ pub struct VirtualMemory<A: Allocator> {
     /// granularity.
     page_size_bytes: usize,
 
+    /// Mutable parts.
+    mutable: RefCell<VirtualMemoryMut<A>>,
+}
+
+/// Mutable parts of [`VirtualMemory`].
+pub struct VirtualMemoryMut<A: Allocator> {
     /// Number of bytes that have we have allocated physical memory for starting at
     /// [Self::region_start].
     mapped_region_bytes: usize,
@@ -124,9 +130,11 @@ impl<A: Allocator> VirtualMemory<A> {
             region_size_bytes,
             memory_limit_bytes,
             page_size_bytes,
-            mapped_region_bytes: 0,
-            current_write_page: None,
-            allocator,
+            mutable: RefCell::new(VirtualMemoryMut {
+                mapped_region_bytes: 0,
+                current_write_page: None,
+                allocator,
+            }),
         }
     }
 
@@ -137,7 +145,7 @@ impl<A: Allocator> VirtualMemory<A> {
     }
 
     pub fn mapped_end_ptr(&self) -> CodePtr {
-        self.start_ptr().add_bytes(self.mapped_region_bytes)
+        self.start_ptr().add_bytes(self.mutable.borrow().mapped_region_bytes)
     }
 
     pub fn virtual_end_ptr(&self) -> CodePtr {
@@ -146,7 +154,7 @@ impl<A: Allocator> VirtualMemory<A> {
 
     /// Size of the region in bytes that we have allocated physical memory for.
     pub fn mapped_region_size(&self) -> usize {
-        self.mapped_region_bytes
+        self.mutable.borrow().mapped_region_bytes
     }
 
     /// Size of the region in bytes where writes could be attempted.
@@ -161,19 +169,21 @@ impl<A: Allocator> VirtualMemory<A> {
     }
 
     /// Write a single byte. The first write to a page makes it readable.
-    pub fn write_byte(&mut self, write_ptr: CodePtr, byte: u8) -> Result<(), WriteError> {
+    pub fn write_byte(&self, write_ptr: CodePtr, byte: u8) -> Result<(), WriteError> {
+        let mut mutable = self.mutable.borrow_mut();
+
         let page_size = self.page_size_bytes;
         let raw: *mut u8 = write_ptr.raw_ptr(self) as *mut u8;
         let page_addr = (raw as usize / page_size) * page_size;
 
-        if self.current_write_page == Some(page_addr) {
+        if mutable.current_write_page == Some(page_addr) {
             // Writing within the last written to page, nothing to do
         } else {
             // Switching to a different and potentially new page
             let start = self.region_start.as_ptr();
-            let mapped_region_end = start.wrapping_add(self.mapped_region_bytes);
+            let mapped_region_end = start.wrapping_add(mutable.mapped_region_bytes);
             let whole_region_end = start.wrapping_add(self.region_size_bytes);
-            let alloc = &mut self.allocator;
+            let alloc = &mut mutable.allocator;
 
             assert!((start..=whole_region_end).contains(&mapped_region_end));
 
@@ -185,7 +195,7 @@ impl<A: Allocator> VirtualMemory<A> {
                     return Err(FailedPageMapping);
                 }
 
-                self.current_write_page = Some(page_addr);
+                mutable.current_write_page = Some(page_addr);
             } else if (start..whole_region_end).contains(&raw) &&
                     (page_addr + page_size - start as usize) + yjit_alloc_size() < self.memory_limit_bytes {
                 // Writing to a brand new page
@@ -217,9 +227,9 @@ impl<A: Allocator> VirtualMemory<A> {
                         unreachable!("unknown arch");
                     }
                 }
-                self.mapped_region_bytes = self.mapped_region_bytes + alloc_size;
+                mutable.mapped_region_bytes = mutable.mapped_region_bytes + alloc_size;
 
-                self.current_write_page = Some(page_addr);
+                mutable.current_write_page = Some(page_addr);
             } else {
                 return Err(OutOfBounds);
             }
@@ -233,14 +243,16 @@ impl<A: Allocator> VirtualMemory<A> {
 
     /// Make all the code in the region writeable.
     /// Call this during GC before the phase of updating reference fields.
-    pub fn mark_all_writeable(&mut self) {
-        self.current_write_page = None;
+    pub fn mark_all_writeable(&self) {
+        let mut mutable = self.mutable.borrow_mut();
+
+        mutable.current_write_page = None;
 
         let region_start = self.region_start;
-        let mapped_region_bytes: u32 = self.mapped_region_bytes.try_into().unwrap();
+        let mapped_region_bytes: u32 = mutable.mapped_region_bytes.try_into().unwrap();
 
         // Make mapped region executable
-        if !self.allocator.mark_writable(region_start.as_ptr(), mapped_region_bytes) {
+        if !mutable.allocator.mark_writable(region_start.as_ptr(), mapped_region_bytes) {
             panic!("Cannot make memory region writable: {:?}-{:?}",
                 region_start.as_ptr(),
                 unsafe { region_start.as_ptr().add(mapped_region_bytes as usize) }
@@ -250,18 +262,20 @@ impl<A: Allocator> VirtualMemory<A> {
 
     /// Make all the code in the region executable. Call this at the end of a write session.
     /// See [Self] for usual usage flow.
-    pub fn mark_all_executable(&mut self) {
-        self.current_write_page = None;
+    pub fn mark_all_executable(&self) {
+        let mut mutable = self.mutable.borrow_mut();
+
+        mutable.current_write_page = None;
 
         let region_start = self.region_start;
-        let mapped_region_bytes: u32 = self.mapped_region_bytes.try_into().unwrap();
+        let mapped_region_bytes: u32 = mutable.mapped_region_bytes.try_into().unwrap();
 
         // Make mapped region executable
-        self.allocator.mark_executable(region_start.as_ptr(), mapped_region_bytes);
+        mutable.allocator.mark_executable(region_start.as_ptr(), mapped_region_bytes);
     }
 
     /// Free a range of bytes. start_ptr must be memory page-aligned.
-    pub fn free_bytes(&mut self, start_ptr: CodePtr, size: u32) {
+    pub fn free_bytes(&self, start_ptr: CodePtr, size: u32) {
         assert_eq!(start_ptr.raw_ptr(self) as usize % self.page_size_bytes, 0);
 
         // Bounds check the request. We should only free memory we manage.
@@ -274,7 +288,8 @@ impl<A: Allocator> VirtualMemory<A> {
         // code page, it's more appropriate to check the last byte against the virtual region.
         assert!(virtual_region.contains(&last_byte_to_free));
 
-        self.allocator.mark_unused(start_ptr.raw_ptr(self), size);
+        let mut mutable = self.mutable.borrow_mut();
+        mutable.allocator.mark_unused(start_ptr.raw_ptr(self), size);
     }
 }
 
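Taken together, the hunks above are the standard interior-mutability refactor: the fields that previously forced `&mut self` (`mapped_region_bytes`, `current_write_page`, `allocator`) move behind a `RefCell`, so the public methods can take `&self` and hold an exclusive borrow only for the duration of each call. Below is a minimal sketch of the same pattern using hypothetical names (`Memory`, `MemoryMut`, `note_write`), not the real `VirtualMemory` types:

```rust
use std::cell::RefCell;

// Hypothetical stand-ins for VirtualMemory / VirtualMemoryMut, for illustration only.
struct Memory {
    page_size: usize,            // immutable configuration stays on the outer struct
    mutable: RefCell<MemoryMut>, // mutable bookkeeping moves behind a RefCell
}

struct MemoryMut {
    mapped_bytes: usize,
    current_page: Option<usize>,
}

impl Memory {
    // A method that would otherwise need `&mut self` can now take `&self`.
    fn note_write(&self, addr: usize) {
        // Runtime-checked exclusive borrow of the mutable parts.
        let mut m = self.mutable.borrow_mut();
        let page = (addr / self.page_size) * self.page_size;
        if m.current_page != Some(page) {
            m.current_page = Some(page);
            m.mapped_bytes = m.mapped_bytes.max(page + self.page_size);
        }
    } // the borrow ends here; an overlapping borrow_mut() during this call would panic
}

fn main() {
    let mem = Memory {
        page_size: 4096,
        mutable: RefCell::new(MemoryMut { mapped_bytes: 0, current_page: None }),
    };
    mem.note_write(5000); // works through a shared reference
    assert_eq!(mem.mutable.borrow().mapped_bytes, 8192);
}
```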
@@ -403,33 +418,33 @@ pub mod tests {
     #[test]
     #[cfg(target_arch = "x86_64")]
     fn new_memory_is_initialized() {
-        let mut virt = new_dummy_virt_mem();
+        let virt = new_dummy_virt_mem();
 
         virt.write_byte(virt.start_ptr(), 1).unwrap();
         assert!(
-            virt.allocator.memory[..PAGE_SIZE].iter().all(|&byte| byte != 0),
+            virt.mutable.borrow().allocator.memory[..PAGE_SIZE].iter().all(|&byte| byte != 0),
             "Entire page should be initialized",
         );
 
         // Skip a few page
         let three_pages = 3 * PAGE_SIZE;
         virt.write_byte(virt.start_ptr().add_bytes(three_pages), 1).unwrap();
         assert!(
-            virt.allocator.memory[..three_pages].iter().all(|&byte| byte != 0),
+            virt.mutable.borrow().allocator.memory[..three_pages].iter().all(|&byte| byte != 0),
             "Gaps between write requests should be filled",
         );
     }
 
     #[test]
     fn no_redundant_syscalls_when_writing_to_the_same_page() {
-        let mut virt = new_dummy_virt_mem();
+        let virt = new_dummy_virt_mem();
 
         virt.write_byte(virt.start_ptr(), 1).unwrap();
         virt.write_byte(virt.start_ptr(), 0).unwrap();
 
         assert!(
             matches!(
-                virt.allocator.requests[..],
+                virt.mutable.borrow().allocator.requests[..],
                 [MarkWritable { start_idx: 0, length: PAGE_SIZE }],
             )
         );
@@ -438,7 +453,7 @@ pub mod tests {
     #[test]
     fn bounds_checking() {
         use super::WriteError::*;
-        let mut virt = new_dummy_virt_mem();
+        let virt = new_dummy_virt_mem();
 
         let one_past_end = virt.start_ptr().add_bytes(virt.virtual_region_size());
         assert_eq!(Err(OutOfBounds), virt.write_byte(one_past_end, 0));
@@ -451,15 +466,15 @@ pub mod tests {
     fn only_written_to_regions_become_executable() {
         // ... so we catch attempts to read/write/execute never-written-to regions
         const THREE_PAGES: usize = PAGE_SIZE * 3;
-        let mut virt = new_dummy_virt_mem();
+        let virt = new_dummy_virt_mem();
         let page_two_start = virt.start_ptr().add_bytes(PAGE_SIZE * 2);
         virt.write_byte(page_two_start, 1).unwrap();
         virt.mark_all_executable();
 
         assert!(virt.virtual_region_size() > THREE_PAGES);
         assert!(
             matches!(
-                virt.allocator.requests[..],
+                virt.mutable.borrow().allocator.requests[..],
                 [
                     MarkWritable { start_idx: 0, length: THREE_PAGES },
                     MarkExecutable { start_idx: 0, length: THREE_PAGES },
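The tests follow suit: they drop the `mut` bindings and reach the dummy allocator's state through `virt.mutable.borrow()`. The trade-off of the `RefCell` approach is that exclusivity is enforced at runtime rather than at compile time, as the small standalone sketch below illustrates (not part of the patch):

```rust
use std::cell::RefCell;

fn main() {
    let cell = RefCell::new(0u32);

    // First exclusive borrow succeeds.
    let writer = cell.borrow_mut();

    // A second exclusive borrow while the first is alive is refused at runtime;
    // borrow_mut() would panic, try_borrow_mut() reports the error instead.
    assert!(cell.try_borrow_mut().is_err());

    drop(writer);
    // Once the first borrow ends, exclusive borrowing works again.
    assert!(cell.try_borrow_mut().is_ok());
}
```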