diff --git a/DEVELOPERS.md b/DEVELOPERS.md
index 2fa954af..844a97de 100644
--- a/DEVELOPERS.md
+++ b/DEVELOPERS.md
@@ -10,6 +10,18 @@ nix develop
 in `rust/` or any subdirectory, and you will be dropped into a BASH shell with the build environment
 set up. This will provide proper versions of non-rust dependencies, as well as the rust environment.
 
+If you receive the error
+
+```
+error: experimental Nix feature 'nix-command' is disabled; use '--extra-experimental-features nix-command' to override
+```
+
+edit your `$HOME/.config/nix/nix.conf` to include the line
+
+```
+extra-experimental-features = nix-command
+```
+
 ## Rust
 
 ### Build
@@ -33,7 +45,7 @@ To run the Vere king with Ares as serf, it's necessary to modify the Vere king t
 arg_c[0] = "/path/to/ares/repo/rust/ares/target/debug/ares";
 ```
 
-Then, it is necessary to follow the [Vere build instrcutions](https://github.com/urbit/vere/blob/develop/INSTALL.md). Afterwards, it's possible to launch Vere with Ares as the serf using the usual commands:
+Then, it is necessary to follow the [Vere build instructions](https://github.com/urbit/vere/blob/develop/INSTALL.md). (You should exit the `nix develop` shell first for such a build.) Afterwards, it's possible to launch Vere with Ares as the serf using the usual commands:
 
 ```bash
 bazel-bin/pkg/vere/urbit -F zod
diff --git a/README.md b/README.md
index d0cc2d65..724d670f 100644
--- a/README.md
+++ b/README.md
@@ -6,7 +6,4 @@ Read the [proposal](docs/proposal/proposal-nock-performance.md) and [hypotheses]
 
 ## Installation
 
-Dependencies:
-* [`libaes_siv`](https://github.com/dfoxfranke/libaes_siv)
-* [`openssl`](https://github.com/openssl/openssl)
-* [`libsecp256k1`](https://github.com/bitcoin-core/secp256k1)
+See the "Run" section of [DEVELOPERS.md](DEVELOPERS.md#run).
diff --git a/rust/ares/src/hamt.rs b/rust/ares/src/hamt.rs
index d7f08a32..0b95d690 100644
--- a/rust/ares/src/hamt.rs
+++ b/rust/ares/src/hamt.rs
@@ -345,18 +345,22 @@ impl Hamt {
             *new_leaf_buffer = (*n, t);
             let split = stem.hypothetical_index(chunk);
             let new_buffer = stack.struct_alloc(stem.size() + 1);
-            copy_nonoverlapping(stem.buffer, new_buffer, split);
+            if split > 0 {
+                copy_nonoverlapping(stem.buffer, new_buffer, split);
+            }
             *new_buffer.add(split) = Entry {
                 leaf: Leaf {
                     len: 1,
                     buffer: new_leaf_buffer,
                 },
             };
-            copy_nonoverlapping(
-                stem.buffer.add(split),
-                new_buffer.add(split + 1),
-                stem.size() - split,
-            );
+            if stem.size() - split > 0 {
+                copy_nonoverlapping(
+                    stem.buffer.add(split),
+                    new_buffer.add(split + 1),
+                    stem.size() - split,
+                );
+            }
             *dest = Stem {
                 bitmap: stem.bitmap | chunk_to_bit(chunk),
                 typemap: stem.typemap & !chunk_to_bit(chunk),
@@ -628,8 +632,17 @@ impl Persist for Hamt {
             let next_chunk = traversal[depth].bitmap.trailing_zeros();
             let next_type = traversal[depth].typemap & (1 << next_chunk) != 0;
             let next_entry = *traversal[depth].buffer;
-            traversal[depth].bitmap >>= next_chunk + 1;
-            traversal[depth].typemap >>= next_chunk + 1;
+            if next_chunk >= 31 {
+                // if next_chunk == 31, then we will try to shift the bitmap by next_chunk+1 = 32 bits.
+                // The datatype is a u32, so this is equivalent to setting it to 0. If we do try
+                // to shift a u32 by 32 bits, then rust's overflow checking will catch it
+                // and crash the process.
+                traversal[depth].bitmap = 0;
+                traversal[depth].typemap = 0;
+            } else {
+                traversal[depth].bitmap >>= next_chunk + 1;
+                traversal[depth].typemap >>= next_chunk + 1;
+            }
             traversal[depth].buffer = traversal[depth].buffer.add(1);
 
             if next_type {
@@ -707,8 +720,17 @@ impl Persist for Hamt {
             let next_type = traversal[depth].typemap & (1 << next_chunk) != 0;
             let next_entry_ptr = traversal[depth].buffer;
 
-            traversal[depth].bitmap >>= next_chunk + 1;
-            traversal[depth].typemap >>= next_chunk + 1;
+            if next_chunk >= 31 {
+                // if next_chunk == 31, then we will try to shift the bitmap by next_chunk+1 = 32 bits.
+                // The datatype is a u32, so this is equivalent to setting it to 0. If we do try
+                // to shift a u32 by 32 bits, then rust's overflow checking will catch it
+                // and crash the process.
+                traversal[depth].bitmap = 0;
+                traversal[depth].typemap = 0;
+            } else {
+                traversal[depth].bitmap >>= next_chunk + 1;
+                traversal[depth].typemap >>= next_chunk + 1;
+            }
             traversal[depth].buffer = traversal[depth].buffer.add(1);
 
             if next_type {
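Note on the two `Persist` hunks above: with overflow checks enabled, `x >> 32` on a `u32` panics, and with them disabled the shift amount is masked so `x >> 32` behaves like `x >> 0`; neither gives the intended "all bits consumed" result, which is what the explicit `next_chunk >= 31` branch provides. A minimal standalone sketch of the same idea, using `checked_shr` instead of a branch (the `advance` helper is illustrative only and not part of the patch):

```rust
/// Illustrative only: `checked_shr` returns `None` once the shift amount
/// reaches the bit width, so the `next_chunk == 31` case collapses to an
/// empty bitmap without a separate branch.
fn advance(bitmap: u32, next_chunk: u32) -> u32 {
    bitmap.checked_shr(next_chunk + 1).unwrap_or(0)
}

fn main() {
    assert_eq!(advance(0b1100, 2), 0b1); // ordinary case: drop bits 0..=2
    assert_eq!(advance(1 << 31, 31), 0); // would otherwise be `x >> 32`
}
```

The patch keeps the explicit branch plus comment, which is equivalent and reads more directly at the call site.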
diff --git a/rust/ares/src/jets.rs b/rust/ares/src/jets.rs
index f9ebe3cd..b91ee310 100644
--- a/rust/ares/src/jets.rs
+++ b/rust/ares/src/jets.rs
@@ -11,6 +11,7 @@ pub mod lute;
 pub mod math;
 pub mod nock;
 pub mod parse;
+pub mod seam;
 pub mod serial;
 pub mod sort;
 pub mod tree;
@@ -30,6 +31,7 @@ use crate::jets::lute::*;
 use crate::jets::math::*;
 use crate::jets::nock::*;
 use crate::jets::parse::*;
+use crate::jets::seam::*;
 use crate::jets::serial::*;
 use crate::jets::sort::*;
 
diff --git a/rust/ares/src/jets/hot.rs b/rust/ares/src/jets/hot.rs
index 97bf5c76..a5e97972 100644
--- a/rust/ares/src/jets/hot.rs
+++ b/rust/ares/src/jets/hot.rs
@@ -99,6 +99,11 @@ pub const URBIT_HOT_STATE: &[HotEntry] = &[
         1,
         jet_turn,
     ),
+    (
+        &[K_139, Left(b"one"), Left(b"two"), Left(b"roll")],
+        1,
+        jet_roll,
+    ),
     (
         &[K_139, Left(b"one"), Left(b"two"), Left(b"zing")],
         1,
@@ -219,6 +224,16 @@ pub const URBIT_HOT_STATE: &[HotEntry] = &[
         1,
         jet_jam,
     ),
+    (
+        &[K_139, Left(b"one"), Left(b"two"), Left(b"by"), Left(b"rep")],
+        1,
+        jet_by_rep,
+    ),
+    (
+        &[K_139, Left(b"one"), Left(b"two"), Left(b"in"), Left(b"rep")],
+        1,
+        jet_by_rep, // +rep:in has the same signature as +rep:by
+    ),
     // (
     //     &[
diff --git a/rust/ares/src/jets/list.rs b/rust/ares/src/jets/list.rs
index 3a2d318f..942483a6 100644
--- a/rust/ares/src/jets/list.rs
+++ b/rust/ares/src/jets/list.rs
@@ -1,11 +1,10 @@
 /** Text processing jets
  */
-use crate::interpreter::{interpret, Context};
+use crate::interpreter::Context;
 use crate::jets::util::{slot, BAIL_FAIL};
 use crate::jets::Result;
 use crate::noun::{Cell, Noun, D, T};
-use bitvec::order::Lsb0;
-use bitvec::slice::BitSlice;
+use crate::site::{site_slam, Site};
 
 crate::gdb!();
 
@@ -30,89 +29,48 @@ pub fn jet_turn(context: &mut Context, subject: Noun) -> Result {
     let sample = slot(subject, 6)?;
     let mut list = slot(sample, 2)?;
     let mut gate = slot(sample, 3)?;
-    let mut gate_battery = slot(gate, 2)?;
-    let gate_context = slot(gate, 7)?;
     let mut res = D(0);
     let mut dest: *mut Noun = &mut res; // Mutable pointer because we cannot guarantee initialized
-    // Since the gate doesn't change, we can do a single jet check and use that through the whole
-    // loop
-    if let Some((jet, _path)) = context
-        .warm
-        .find_jet(&mut context.stack, &mut gate, &mut gate_battery)
-        .filter(|(_jet, mut path)| {
-            // check that 7 is a prefix of the parent battery axis,
-            // to ensure that the sample (axis 6) is not part of the jet match.
-            //
-            // XX TODO this check is pessimized since there could be multiple ways to match the
-            // jet and we only actually match one of them, but we check all of them and run
-            // unjetted if any have an axis outside 7.
-            let axis_7_bits: &BitSlice<u64, Lsb0> = BitSlice::from_element(&7u64);
-            let batteries_list = context.cold.find(&mut context.stack, &mut path);
-            let mut ret = true;
-            for mut batteries in batteries_list {
-                if let Some((_battery, parent_axis)) = batteries.next() {
-                    let parent_axis_prefix_bits = &parent_axis.as_bitslice()[0..3];
-                    if parent_axis_prefix_bits == axis_7_bits {
-                        continue;
-                    } else {
-                        ret = false;
-                        break;
-                    }
-                } else {
-                    ret = false;
-                    break;
-                }
+
+    let site = Site::new(context, &mut gate);
+    loop {
+        if let Ok(list_cell) = list.as_cell() {
+            list = list_cell.tail();
+            unsafe {
+                let (new_cell, new_mem) = Cell::new_raw_mut(&mut context.stack);
+                (*new_mem).head = site_slam(context, &site, list_cell.head());
+                *dest = new_cell.as_noun();
+                dest = &mut (*new_mem).tail;
             }
-            ret
-        })
-    {
-        loop {
-            if let Ok(list_cell) = list.as_cell() {
-                list = list_cell.tail();
-                let element_subject = T(
-                    &mut context.stack,
-                    &[gate_battery, list_cell.head(), gate_context],
-                );
-                unsafe {
-                    let (new_cell, new_mem) = Cell::new_raw_mut(&mut context.stack);
-                    (*new_mem).head = jet(context, element_subject)?;
-                    *dest = new_cell.as_noun();
-                    dest = &mut (*new_mem).tail;
-                }
-            } else {
-                if unsafe { !list.raw_equals(D(0)) } {
-                    return Err(BAIL_FAIL);
-                }
-                unsafe {
-                    *dest = D(0);
-                };
-                return Ok(res);
+        } else {
+            if unsafe { !list.raw_equals(D(0)) } {
+                return Err(BAIL_FAIL);
             }
-            }
-        } else {
-            loop {
-            if let Ok(list_cell) = list.as_cell() {
-                list = list_cell.tail();
-                let element_subject = T(
-                    &mut context.stack,
-                    &[gate_battery, list_cell.head(), gate_context],
-                );
-                unsafe {
-                    let (new_cell, new_mem) = Cell::new_raw_mut(&mut context.stack);
-                    (*new_mem).head = interpret(context, element_subject, gate_battery)?;
-                    *dest = new_cell.as_noun();
-                    dest = &mut (*new_mem).tail;
-                }
-            } else {
-                if unsafe { !list.raw_equals(D(0)) } {
-                    return Err(BAIL_FAIL);
-                }
-                unsafe {
-                    *dest = D(0);
-                };
-                return Ok(res);
+            unsafe {
+                *dest = D(0);
+            };
+            return Ok(res);
         }
+    }
+}
+
+pub fn jet_roll(context: &mut Context, subject: Noun) -> Result {
+    let sample = slot(subject, 6)?;
+    let mut list = slot(sample, 2)?;
+    let mut gate = slot(sample, 3)?;
+    let mut pro = slot(gate, 13)?;
+
+    let site = Site::new(context, &mut gate);
+    loop {
+        if let Ok(list_cell) = list.as_cell() {
+            list = list_cell.tail();
+            let sam = T(&mut context.stack, &[list_cell.head(), pro]);
+            pro = site_slam(context, &site, sam);
+        } else {
+            if unsafe { !list.raw_equals(D(0)) } {
+                return Err(BAIL_FAIL);
             }
+            return Ok(pro);
         }
     }
 }
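Both `jet_turn` and the new `jet_roll` above now share one shape: build a `Site` once before the loop, then `site_slam` the cached target for each element, instead of re-running the warm-state jet check on every iteration. A hypothetical further example in the same style (`jet_each` is invented for illustration and registered nowhere; it uses only `slot`, `Site::new`, and `site_slam` exactly as the patch does):

```rust
use crate::interpreter::Context;
use crate::jets::util::{slot, BAIL_FAIL};
use crate::jets::Result;
use crate::noun::{Noun, D};
use crate::site::{site_slam, Site};

/// Hypothetical jet: slam the gate on every list element and discard the
/// products, returning ~ (contrived, purely to show the calling pattern).
pub fn jet_each(context: &mut Context, subject: Noun) -> Result {
    let sample = slot(subject, 6)?;
    let mut list = slot(sample, 2)?;
    let mut gate = slot(sample, 3)?;

    // One cold/warm-state lookup for the whole loop.
    let site = Site::new(context, &mut gate);
    loop {
        if let Ok(cell) = list.as_cell() {
            list = cell.tail();
            // No per-iteration jet check: dispatch through the cached Site.
            let _ = site_slam(context, &site, cell.head());
        } else {
            // A proper list must terminate in the atom 0.
            if unsafe { !list.raw_equals(D(0)) } {
                return Err(BAIL_FAIL);
            }
            return Ok(D(0));
        }
    }
}
```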
diff --git a/rust/ares/src/jets/seam.rs b/rust/ares/src/jets/seam.rs
new file mode 100644
index 00000000..4c888888
--- /dev/null
+++ b/rust/ares/src/jets/seam.rs
@@ -0,0 +1,34 @@
+/** Map jets. */
+use crate::interpreter::Context;
+use crate::jets::util::slot;
+use crate::jets::Result;
+use crate::noun::{Noun, D, T};
+use crate::site::{site_slam, Site};
+
+crate::gdb!();
+
+fn by_rep(context: &mut Context, tree: Noun, site: &Site, out: &mut Noun) {
+    if unsafe { tree.raw_equals(D(0)) } {
+    } else if let Ok(node) = slot(tree, 2) {
+        let acc = T(&mut context.stack, &[node, *out]);
+        *out = site_slam(context, site, acc);
+
+        if let Ok(left) = slot(tree, 6) {
+            by_rep(context, left, site, out);
+        }
+
+        if let Ok(rite) = slot(tree, 7) {
+            by_rep(context, rite, site, out);
+        }
+    }
+}
+
+pub fn jet_by_rep(context: &mut Context, subject: Noun) -> Result {
+    let tree = slot(subject, 30)?;
+    let mut gate = slot(subject, 6)?;
+    let mut pro = slot(gate, 13)?;
+
+    let site = Site::new(context, &mut gate);
+    by_rep(context, tree, &site, &mut pro);
+    Ok(pro)
+}
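`by_rep` above recurses on the native stack: in a `+map`/`+set` tree noun `[n l r]`, axis 2 is the node, axis 6 the left subtree, and axis 7 the right subtree, and each subtree costs a Rust stack frame. If deeply unbalanced trees ever become a concern, one possible variant, sketched here under the same imports as `seam.rs` and not part of the patch, replaces the recursion with an explicit worklist while keeping the same visit order:

```rust
// Hypothetical alternative to by_rep (illustration only; assumes seam.rs's
// imports). Pushing the right subtree before the left makes the left pop
// first, matching the recursive node-left-right order.
fn by_rep_iterative(context: &mut Context, tree: Noun, site: &Site, out: &mut Noun) {
    let mut work = vec![tree];
    while let Some(t) = work.pop() {
        if unsafe { t.raw_equals(D(0)) } {
            continue;
        }
        if let Ok(node) = slot(t, 2) {
            let acc = T(&mut context.stack, &[node, *out]);
            *out = site_slam(context, site, acc);
            if let Ok(rite) = slot(t, 7) {
                work.push(rite);
            }
            if let Ok(left) = slot(t, 6) {
                work.push(left);
            }
        }
    }
}
```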
diff --git a/rust/ares/src/lib.rs b/rust/ares/src/lib.rs
index e7c37ec9..07563d78 100644
--- a/rust/ares/src/lib.rs
+++ b/rust/ares/src/lib.rs
@@ -13,6 +13,7 @@ pub mod mug;
 pub mod newt;
 pub mod noun;
 pub mod serf;
+pub mod site;
 //pub mod bytecode;
 pub mod persist;
 pub mod serialization;
diff --git a/rust/ares/src/site.rs b/rust/ares/src/site.rs
new file mode 100644
index 00000000..3daa9a43
--- /dev/null
+++ b/rust/ares/src/site.rs
@@ -0,0 +1,70 @@
+/** Call site of a kick (Nock 9), used to cache call targets. */
+use bitvec::order::Lsb0;
+use bitvec::slice::BitSlice;
+
+use crate::interpreter::{interpret, Context};
+use crate::jets::util::slot;
+use crate::jets::Jet;
+use crate::noun::{Noun, D, T};
+
+pub struct Site {
+    pub battery: Noun,    // battery
+    pub context: Noun,    // context
+    pub jet: Option<Jet>, // jet driver
+    pub path: Noun,       // label
+}
+
+impl Site {
+    /// Prepare a locally cached gate to call repeatedly.
+    pub fn new(ctx: &mut Context, core: &mut Noun) -> Site {
+        let mut battery = slot(*core, 2).unwrap();
+        let context = slot(*core, 7).unwrap();
+
+        let warm_result = ctx
+            .warm
+            .find_jet(&mut ctx.stack, core, &mut battery)
+            .filter(|(_jet, mut path)| {
+                // check that 7 is a prefix of the parent battery axis,
+                // to ensure that the sample (axis 6) is not part of the jet match.
+                //
+                // XX TODO this check is pessimized since there could be multiple ways to match the
+                // jet and we only actually match one of them, but we check all of them and run
+                // unjetted if any have an axis outside 7.
+                let axis_7_bits: &BitSlice<u64, Lsb0> = BitSlice::from_element(&7u64);
+                let batteries_list = ctx.cold.find(&mut ctx.stack, &mut path);
+                let mut ret = true;
+                for mut batteries in batteries_list {
+                    if let Some((_battery, parent_axis)) = batteries.next() {
+                        let parent_axis_prefix_bits = &parent_axis.as_bitslice()[0..3];
+                        if parent_axis_prefix_bits == axis_7_bits {
+                            continue;
+                        } else {
+                            ret = false;
+                            break;
+                        }
+                    } else {
+                        ret = false;
+                        break;
+                    }
+                }
+                ret
+            });
+        Site {
+            battery,
+            context,
+            jet: warm_result.map(|(jet, _)| jet),
+            path: warm_result.map(|(_, path)| path).unwrap_or(D(0)),
+        }
+    }
+}
+
+/// Slam a cached call site.
+pub fn site_slam(ctx: &mut Context, site: &Site, sample: Noun) -> Noun {
+    let subject = T(&mut ctx.stack, &[site.battery, sample, site.context]);
+    if site.jet.is_some() {
+        let jet = site.jet.unwrap();
+        jet(ctx, subject).unwrap()
+    } else {
+        interpret(ctx, subject, site.battery).unwrap()
+    }
+}
diff --git a/rust/ares_pma/c-src/btest.c b/rust/ares_pma/c-src/btest.c
index 1d13040d..eed097f3 100644
--- a/rust/ares_pma/c-src/btest.c
+++ b/rust/ares_pma/c-src/btest.c
@@ -318,7 +318,7 @@ int main(int argc, char *argv[])
   assert(SUCC(bt_state_open(state4, "./pmatest4", 0, 0644)));
 
   assert(state4->file_size_p == PMA_INITIAL_SIZE_p + PMA_GROW_SIZE_p * 2);
-  assert(state4->flist->next->hi == state4->file_size_p);
+  assert(state4->flist->hi == state4->file_size_p);
 
   for (size_t i = 0; i < PMA_GROW_SIZE_b * 2; i++)
     assert(t4a_copy[i] == t4a[i]);
diff --git a/rust/ares_pma/c-src/btree.c b/rust/ares_pma/c-src/btree.c
index 73588667..a314e288 100644
--- a/rust/ares_pma/c-src/btree.c
+++ b/rust/ares_pma/c-src/btree.c
@@ -1689,7 +1689,6 @@ _flist_grow(BT_state *state, size_t pages)
 static int
 _flist_new(BT_state *state, size_t size_p)
 #define FLIST_PG_START (BT_META_SECTION_WIDTH / BT_PAGESIZE)
-/* #define FLIST_PG_START ((BT_META_SECTION_WIDTH + BLK_BASE_LEN0_b) / BT_PAGESIZE) */
 {
   BT_flistnode *head = calloc(1, sizeof *head);
   head->next = 0;
@@ -2407,6 +2406,22 @@ _freelist_restore2(BT_state *state, BT_page *node,
   }
 }
 
+static void
+_flist_restore_partitions(BT_state *state)
+{
+  BT_meta *meta = state->meta_pages[state->which];
+  assert(meta->blk_base[0] == BT_NUMMETAS);
+
+  for (size_t i = 0
+         ; i < BT_NUMPARTS && meta->blk_base[i] != 0
+         ; i++) {
+    pgno_t partoff_p = meta->blk_base[i];
+    size_t partlen_p = BLK_BASE_LENS_b[i] / BT_PAGESIZE;
+
+    _flist_record_alloc(state, partoff_p, partoff_p + partlen_p);
+  }
+}
+
 static void
 _freelist_restore(BT_state *state)
 /* restores the mlist, nlist, and mlist */
@@ -2416,7 +2431,11 @@ _freelist_restore(BT_state *state)
   assert(SUCC(_flist_new(state, state->file_size_p)));
   assert(SUCC(_nlist_load(state)));
   assert(SUCC(_mlist_new(state)));
-  /* first record root's allocation */
+
+  /* record node partitions in flist */
+  _flist_restore_partitions(state);
+
+  /* record root's allocation and then handle subtree */
   _nlist_record_alloc(state, root);
   _freelist_restore2(state, root, 1, meta->depth);
 }
@@ -2515,7 +2534,6 @@ _bt_state_load(BT_state *state)
 
   /* map the node segment */
   _bt_state_map_node_segment(state);
 
-  /* new db, so populate metadata */
   if (new) {
     assert(SUCC(_flist_new(state, PMA_GROW_SIZE_p)));
     assert(SUCC(_nlist_new(state)));