@@ -510,22 +510,13 @@ impl fmt::Debug for RegMapping {
     }
 }
 
-/// Bits for chain_depth_return_landing_defer
-const RETURN_LANDING_BIT: u8 = 0b10000000;
-const DEFER_BIT: u8 = 0b01000000;
-const CHAIN_DEPTH_MASK: u8 = 0b00111111; // 63
+/// Maximum value of the chain depth (should fit in 5 bits)
+const CHAIN_DEPTH_MAX: u8 = 0b11111; // 31
 
 /// Code generation context
 /// Contains information we can use to specialize/optimize code
-/// There are a lot of context objects so we try to keep the size small.
 #[derive(Copy, Clone, Default, Eq, Hash, PartialEq, Debug)]
 pub struct Context {
-    // FIXME: decoded_from breaks == on contexts
-    /*
-    // Offset at which this context was previously encoded (zero if not)
-    decoded_from: u32,
-    */
-
     // Number of values currently on the temporary stack
     stack_size: u8,
 
@@ -536,11 +527,15 @@ pub struct Context {
     /// Which stack temps or locals are in a register
     reg_mapping: RegMapping,
 
-    /// Fields packed into u8
-    /// - 1st bit from the left: Whether this code is the target of a JIT-to-JIT Ruby return ([Self::is_return_landing])
-    /// - 2nd bit from the left: Whether the compilation of this code has been deferred ([Self::is_deferred])
-    /// - Last 6 bits (max: 63): Depth of this block in the sidechain (eg: inline-cache chain)
-    chain_depth_and_flags: u8,
+    // Depth of this block in the sidechain (eg: inline-cache chain)
+    // 5 bits, max 31 (CHAIN_DEPTH_MAX)
+    chain_depth: u8,
+
+    // Whether this code is the target of a JIT-to-JIT Ruby return ([Self::is_return_landing])
+    is_return_landing: bool,
+
+    // Whether the compilation of this code has been deferred ([Self::is_deferred])
+    is_deferred: bool,
 
     // Type we track for self
     self_type: Type,
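
For readers comparing the two layouts: the doc comment removed above described how this state used to live in a single packed byte. A minimal, self-contained sketch of that old packing, reusing the constants deleted earlier in this diff (the main function is only there to exercise the bit operations):

    const RETURN_LANDING_BIT: u8 = 0b10000000;
    const DEFER_BIT: u8 = 0b01000000;
    const CHAIN_DEPTH_MASK: u8 = 0b00111111;

    fn main() {
        // Both flags set and a chain depth of 3, packed into one byte
        let packed: u8 = RETURN_LANDING_BIT | DEFER_BIT | 3;

        assert_eq!(packed & CHAIN_DEPTH_MASK, 3);    // chain depth
        assert!((packed & RETURN_LANDING_BIT) != 0); // is_return_landing
        assert!((packed & DEFER_BIT) != 0);          // is_deferred
    }

The new representation trades that one byte for three named fields; the later hunks keep the encoded form compact by writing each flag as a single bit.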
@@ -645,26 +640,35 @@ impl BitVector {
         self.push_uint(val as u64, 8);
     }
 
+    fn push_u5(&mut self, val: u8) {
+        assert!(val <= 0b11111);
+        self.push_uint(val as u64, 5);
+    }
+
     fn push_u4(&mut self, val: u8) {
-        assert!(val < 16);
+        assert!(val <= 0b1111);
         self.push_uint(val as u64, 4);
     }
 
     fn push_u3(&mut self, val: u8) {
-        assert!(val < 8);
+        assert!(val <= 0b111);
         self.push_uint(val as u64, 3);
     }
 
     fn push_u2(&mut self, val: u8) {
-        assert!(val < 4);
+        assert!(val <= 0b11);
         self.push_uint(val as u64, 2);
     }
 
     fn push_u1(&mut self, val: u8) {
-        assert!(val < 2);
+        assert!(val <= 0b1);
         self.push_uint(val as u64, 1);
     }
 
+    fn push_bool(&mut self, val: bool) {
+        self.push_u1(if val { 1 } else { 0 });
+    }
+
     // Push a context encoding opcode
     fn push_op(&mut self, op: CtxOp) {
         self.push_u4(op as u8);
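
A rough, self-contained illustration of what push_bool and push_u5 cost in the encoded stream; the ToyBits type below is hypothetical and only mimics the bit widths, not the actual BitVector layout:

    struct ToyBits {
        bits: Vec<bool>,
    }

    impl ToyBits {
        // Push the low `num_bits` bits of `val`, most-significant bit first
        fn push_uint(&mut self, val: u64, num_bits: usize) {
            for i in (0..num_bits).rev() {
                self.bits.push((val >> i) & 1 == 1);
            }
        }

        fn push_bool(&mut self, val: bool) {
            self.push_uint(val as u64, 1);
        }

        fn push_u5(&mut self, val: u8) {
            assert!(val <= 0b11111);
            self.push_uint(val as u64, 5);
        }
    }

    fn main() {
        let mut bv = ToyBits { bits: Vec::new() };
        bv.push_bool(true);  // one flag costs a single bit
        bv.push_u5(0b10110); // a capped chain depth costs five bits
        assert_eq!(bv.bits.len(), 6);
    }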
@@ -710,6 +714,10 @@ impl BitVector {
         self.read_uint(bit_idx, 8) as u8
     }
 
+    fn read_u5(&self, bit_idx: &mut usize) -> u8 {
+        self.read_uint(bit_idx, 5) as u8
+    }
+
     fn read_u4(&self, bit_idx: &mut usize) -> u8 {
         self.read_uint(bit_idx, 4) as u8
     }
@@ -726,6 +734,10 @@ impl BitVector {
         self.read_uint(bit_idx, 1) as u8
     }
 
+    fn read_bool(&self, bit_idx: &mut usize) -> bool {
+        self.read_u1(bit_idx) != 0
+    }
+
     fn read_op(&self, bit_idx: &mut usize) -> CtxOp {
         unsafe { std::mem::transmute(self.read_u4(bit_idx)) }
     }
@@ -1052,8 +1064,18 @@ impl Context {
             }
         }
 
-        // chain_depth_and_flags: u8,
-        bits.push_u8(self.chain_depth_and_flags);
+        bits.push_bool(self.is_deferred);
+        bits.push_bool(self.is_return_landing);
+
+        // The chain depth is most often 0 or 1
+        if self.chain_depth < 2 {
+            bits.push_u1(0);
+            bits.push_u1(self.chain_depth);
+
+        } else {
+            bits.push_u1(1);
+            bits.push_u5(self.chain_depth);
+        }
 
         // Encode the self type if known
         if self.self_type != Type::Unknown {
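
The "most often 0 or 1" comment is what makes this variable-length scheme pay off: the old packed byte always cost 8 bits (one push_u8), while the new code spends one bit per flag plus either 2 bits (selector 0 and a 1-bit depth) or 6 bits (selector 1 and a 5-bit depth). The common case therefore drops from 8 bits to 4, and even the deepest chain never exceeds the old 8. A small sketch of that cost arithmetic, independent of the surrounding types:

    // Bits spent on the depth portion of the encoding shown above
    fn chain_depth_bit_cost(chain_depth: u8) -> usize {
        assert!(chain_depth <= 0b11111);
        if chain_depth < 2 {
            1 + 1 // selector bit + 1-bit value
        } else {
            1 + 5 // selector bit + 5-bit value
        }
    }

    fn main() {
        assert_eq!(chain_depth_bit_cost(0), 2);  // common case
        assert_eq!(chain_depth_bit_cost(1), 2);  // common case
        assert_eq!(chain_depth_bit_cost(31), 6); // deepest allowed chain
    }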
@@ -1146,8 +1168,14 @@ impl Context {
             }
         }
 
-        // chain_depth_and_flags: u8
-        ctx.chain_depth_and_flags = bits.read_u8(&mut idx);
+        ctx.is_deferred = bits.read_bool(&mut idx);
+        ctx.is_return_landing = bits.read_bool(&mut idx);
+
+        if bits.read_u1(&mut idx) == 0 {
+            ctx.chain_depth = bits.read_u1(&mut idx)
+        } else {
+            ctx.chain_depth = bits.read_u5(&mut idx)
+        }
 
         loop {
             //println!("reading op");
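
Because the bit stream carries no field tags, the decoder has to consume fields in exactly the order and at exactly the widths the encoder wrote them. The pairing implied by the two hunks above, written out side by side:

    // encode (previous hunk)             // decode (this hunk)
    // push_bool(is_deferred)             ctx.is_deferred = read_bool(...)
    // push_bool(is_return_landing)       ctx.is_return_landing = read_bool(...)
    // push_u1(selector)                  read_u1(...) == 0 ?
    // push_u1 or push_u5 (chain_depth)   read_u1 or read_u5 (chain_depth)

Any change on one side, for example widening the depth field, has to be mirrored on the other, or every bit that follows will be misread.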
@@ -2483,39 +2511,39 @@ impl Context {
     }
 
     pub fn get_chain_depth(&self) -> u8 {
-        self.chain_depth_and_flags & CHAIN_DEPTH_MASK
+        self.chain_depth
     }
 
     pub fn reset_chain_depth_and_defer(&mut self) {
-        self.chain_depth_and_flags &= !CHAIN_DEPTH_MASK;
-        self.chain_depth_and_flags &= !DEFER_BIT;
+        self.chain_depth = 0;
+        self.is_deferred = false;
     }
 
     pub fn increment_chain_depth(&mut self) {
-        if self.get_chain_depth() == CHAIN_DEPTH_MASK {
+        if self.get_chain_depth() == CHAIN_DEPTH_MAX {
             panic!("max block version chain depth reached!");
         }
-        self.chain_depth_and_flags += 1;
+        self.chain_depth += 1;
     }
 
     pub fn set_as_return_landing(&mut self) {
-        self.chain_depth_and_flags |= RETURN_LANDING_BIT;
+        self.is_return_landing = true;
     }
 
     pub fn clear_return_landing(&mut self) {
-        self.chain_depth_and_flags &= !RETURN_LANDING_BIT;
+        self.is_return_landing = false;
     }
 
     pub fn is_return_landing(&self) -> bool {
-        self.chain_depth_and_flags & RETURN_LANDING_BIT != 0
+        self.is_return_landing
     }
 
     pub fn mark_as_deferred(&mut self) {
-        self.chain_depth_and_flags |= DEFER_BIT;
+        self.is_deferred = true;
     }
 
     pub fn is_deferred(&self) -> bool {
-        self.chain_depth_and_flags & DEFER_BIT != 0
+        self.is_deferred
     }
 
     /// Get an operand for the adjusted stack pointer address