Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We'll occasionally send you account related emails.

Already on GitHub? Sign in to your account

🐧 #36

Merged
merged 9 commits into from May 19, 2018
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
5 changes: 1 addition & 4 deletions Cargo.toml
Expand Up @@ -7,11 +7,8 @@ authors = ["Niko Matsakis <niko@alum.mit.edu>"]
assert_cli = "0.5.4"

[dependencies]
abomonation = "0.5.0"
abomonation_derive = "0.3.0"
differential-dataflow = "0.5.0"
datafrog = { git = "https://github.com/frankmcsherry/datafrog", rev = "c963caead" }
failure = "0.1.1"
timely = "0.5.1"
fxhash = "0.2.1"
structopt = "0.2.8"
clap = "2.31.2"
Expand Down
9 changes: 3 additions & 6 deletions src/cli.rs
Expand Up @@ -12,8 +12,8 @@ arg_enum! {
#[derive(Debug, Clone, Copy)]
pub enum Algorithm {
Naive,
TimelyOpt,
LocationInsensitive,
DatafrogOpt,
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

lol, nice

// LocationInsensitive,
}
}

Expand All @@ -35,8 +35,6 @@ pub struct Opt {
output_directory: Option<String>,
#[structopt(raw(required = "true"))]
fact_dirs: Vec<String>,
#[structopt(short = "w", long = "workers", default_value = "1")]
workers: u32,
}

pub fn main(opt: Opt) -> Result<(), Error> {
Expand All @@ -50,9 +48,8 @@ pub fn main(opt: Opt) -> Result<(), Error> {
let result: Result<(Duration, Output), Error> = do catch {
let verbose = opt.verbose | opt.stats;
let algorithm = opt.algorithm;
let workers = opt.workers;
let all_facts = tab_delim::load_tab_delimited_facts(tables, &Path::new(&facts_dir))?;
timed(|| Output::compute(&all_facts, algorithm, verbose, workers))
timed(|| Output::compute(&all_facts, algorithm, verbose))
};

match result {
Expand Down
6 changes: 2 additions & 4 deletions src/facts.rs
@@ -1,5 +1,3 @@
use abomonation_derive::Abomonation;

/// The "facts" which are the basis of the NLL borrow analysis.
#[derive(Clone, Default)]
crate struct AllFacts {
Expand All @@ -26,7 +24,7 @@ crate struct AllFacts {

macro_rules! index_type {
($t: ident) => {
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Abomonation, Debug, Hash)]
#[derive(Ord, PartialOrd, Eq, PartialEq, Clone, Copy, Debug, Hash)]
pub(crate) struct $t {
index: u32,
}
Expand All @@ -44,7 +42,7 @@ macro_rules! index_type {
}

impl $t {
fn index(self) -> usize {
pub(crate) fn index(self) -> usize {
self.into()
}
}
Expand Down
5 changes: 1 addition & 4 deletions src/lib.rs
Expand Up @@ -7,13 +7,10 @@

#![allow(dead_code)]

extern crate abomonation;
extern crate abomonation_derive;
extern crate differential_dataflow;
extern crate datafrog;
extern crate failure;
extern crate fxhash;
extern crate histo;
extern crate timely;
extern crate structopt;

#[macro_use]
Expand Down
294 changes: 294 additions & 0 deletions src/output/datafrog_opt.rs
@@ -0,0 +1,294 @@
// Copyright 2017 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

use std::collections::{BTreeMap, BTreeSet};
use std::time::Instant;

use crate::facts::{AllFacts, Loan, Point, Region};
use crate::output::Output;

use datafrog::{Iteration, Relation};

/// Computes the borrow-check `Output` from `all_facts` using the
/// location-sensitive, optimized datafrog rules.
///
/// The optimization relative to the naive rules: the `subset` and `requires`
/// relations are only carried across a CFG edge `P -> Q` for regions that are
/// live in `Q`; facts held by regions that die on the edge are transferred to
/// the live regions they can reach (`dead_can_reach_live`).
///
/// When `dump_enabled` is set, the intermediate `region_live_at`, `subset`,
/// and `restricts` relations are also recorded into the returned `Output`.
pub(super) fn compute(dump_enabled: bool, mut all_facts: AllFacts) -> Output {
    // Declare that each universal region is live at every point.
    let all_points: BTreeSet<Point> = all_facts
        .cfg_edge
        .iter()
        .map(|&(p, _)| p)
        .chain(all_facts.cfg_edge.iter().map(|&(_, q)| q))
        .collect();

    for &r in &all_facts.universal_region {
        for &p in &all_points {
            all_facts.region_live_at.push((r, p));
        }
    }

    let timer = Instant::now();

    let mut result = Output::new(dump_enabled);

    let borrow_live_at = {
        // Create a new iteration context, ...
        let mut iteration = Iteration::new();

        // .. some variables and different indices for `subset`.
        let subset = iteration.variable::<(Region, Region, Point)>("subset");
        let subset_1 = iteration.variable_indistinct("subset_1");
        let subset_2 = iteration.variable_indistinct("subset_2");
        let subset_r1p = iteration.variable_indistinct("subset_r1p");
        let subset_r2p = iteration.variable_indistinct("subset_r2p");
        let subset_p = iteration.variable_indistinct("subset_p");

        // temporaries as we perform a multi-way join, and more indices.
        //
        // Only `requires` itself needs distinctness enforced; the
        // temporaries and indices can skip it (`variable_indistinct`),
        // which gives a solid (10-15%) perf benefit.
        let requires = iteration.variable::<(Region, Loan, Point)>("requires");
        let requires_1 = iteration.variable_indistinct("requires_1");
        let requires_2 = iteration.variable_indistinct("requires_2");
        let requires_bp = iteration.variable_indistinct("requires_bp");
        let requires_rp = iteration.variable_indistinct("requires_rp");

        let borrow_live_at = iteration.variable::<(Loan, Point)>("borrow_live_at");

        let live_to_dead_regions = iteration.variable::<(Region, Region, Point, Point)>("live_to_dead_regions");
        let live_to_dead_regions_1 = iteration.variable_indistinct("live_to_dead_regions_1");
        let live_to_dead_regions_2 = iteration.variable_indistinct("live_to_dead_regions_2");
        let live_to_dead_regions_p = iteration.variable_indistinct("live_to_dead_regions_p");
        let live_to_dead_regions_r2pq = iteration.variable_indistinct("live_to_dead_regions_r2pq");

        let dead_region_requires = iteration.variable::<(Region, Loan, Point, Point)>("dead_region_requires");
        let dead_region_requires_1 = iteration.variable_indistinct("dead_region_requires_1");
        let dead_region_requires_2 = iteration.variable_indistinct("dead_region_requires_2");
        let dead_region_requires_rpq = iteration.variable_indistinct("dead_region_requires_rpq");

        let dead_can_reach_origins = iteration.variable::<((Region, Point), Point)>("dead_can_reach_origins");
        let dead_can_reach = iteration.variable::<(Region, Region, Point, Point)>("dead_can_reach");
        let dead_can_reach_1 = iteration.variable_indistinct("dead_can_reach_1");
        let dead_can_reach_r2q = iteration.variable_indistinct("dead_can_reach_r2q");

        let dead_can_reach_live = iteration.variable::<((Region, Point, Point), Region)>("dead_can_reach_live");
        let dead_can_reach_live_r1pq = iteration.variable_indistinct("dead_can_reach_live_r1pq");

        // We need both relation and variable forms of this (for join and antijoin).
        let region_live_at_rel = Relation::from(all_facts.region_live_at.iter().map(|&(r, p)| (r, p)));
        let region_live_at_var = iteration.variable::<((Region, Point), ())>("region_live_at");

        let cfg_edge_p = iteration.variable::<(Point, Point)>("cfg_edge_p");

        let killed = all_facts.killed.into();

        // load initial facts.
        subset.insert(all_facts.outlives.into());
        requires.insert(all_facts.borrow_region.into());
        region_live_at_var.insert(Relation::from(all_facts.region_live_at.iter().map(|&(r, p)| ((r, p), ()))));
        cfg_edge_p.insert(all_facts.cfg_edge.into());

        // .. and then start iterating rules!
        while iteration.changed() {
            // remap fields to re-index by the different keys
            subset_r1p.from_map(&subset, |&(r1,r2,p)| ((r1,p),r2));
            subset_r2p.from_map(&subset, |&(r1,r2,p)| ((r2,p),r1));
            subset_p.from_map(&subset, |&(r1,r2,p)| (p,(r1,r2)));

            requires_bp.from_map(&requires, |&(r,b,p)| ((b,p),r));
            requires_rp.from_map(&requires, |&(r,b,p)| ((r,p),b));

            live_to_dead_regions_p.from_map(&live_to_dead_regions, |&(r1,r2,p,q)| (p, (r1,r2,q)));
            live_to_dead_regions_r2pq.from_map(&live_to_dead_regions, |&(r1,r2,p,q)| ((r2,p,q),r1));

            dead_can_reach_r2q.from_map(&dead_can_reach, |&(r1,r2,p,q)| ((r2,q),(r1,p)));
            dead_can_reach_live_r1pq.from_map(&dead_can_reach_live, |&((r1,p,q),r2)| ((r1,p,q),r2));
            dead_region_requires_rpq.from_map(&dead_region_requires, |&(r,b,p,q)| ((r,p,q),b));

            // it's now time ... to datafrog:

            // .decl subset(R1, R2, P)
            //
            // At the point P, R1 <= R2.
            //
            // subset(R1, R2, P) :- outlives(R1, R2, P).
            // -> already loaded; outlives is a static input.

            // .decl requires(R, B, P) -- at the point, things with region R
            // may depend on data from borrow B
            //
            // requires(R, B, P) :- borrow_region(R, B, P).
            // -> already loaded; borrow_region is a static input.

            // .decl live_to_dead_regions(R1, R2, P, Q)
            //
            // The regions `R1` and `R2` are "live to dead"
            // on the edge `P -> Q` if:
            //
            // - In P, `R1` <= `R2`
            // - In Q, `R1` is live but `R2` is dead.
            //
            // In that case, `Q` would like to add all the
            // live things reachable from `R2` to `R1`.
            //
            // live_to_dead_regions(R1, R2, P, Q) :-
            //   subset(R1, R2, P),
            //   cfg_edge(P, Q),
            //   region_live_at(R1, Q),
            //   !region_live_at(R2, Q).
            live_to_dead_regions_1.from_join(&subset_p, &cfg_edge_p, |&p, &(r1,r2), &q| ((r1,q),(r2,p)));
            live_to_dead_regions_2.from_join(&live_to_dead_regions_1, &region_live_at_var, |&(r1,q), &(r2,p), &()| ((r2,q),(r1,p)));
            live_to_dead_regions.from_antijoin(&live_to_dead_regions_2, &region_live_at_rel, |&(r2,q), &(r1,p)| (r1, r2, p, q));

            // .decl dead_region_requires(R, B, P, Q)
            //
            // The region `R` requires the borrow `B`, but the
            // region `R` goes dead along the edge `P -> Q`
            //
            // dead_region_requires(R, B, P, Q) :-
            //   requires(R, B, P),
            //   !killed(B, P),
            //   cfg_edge(P, Q),
            //   !region_live_at(R, Q).
            dead_region_requires_1.from_antijoin(&requires_bp, &killed, |&(b,p),&r| (p,(b,r)));
            dead_region_requires_2.from_join(&dead_region_requires_1, &cfg_edge_p, |&p,&(b,r),&q| ((r,q),(b,p)));
            dead_region_requires.from_antijoin(&dead_region_requires_2, &region_live_at_rel, |&(r,q),&(b,p)| (r, b, p, q));

            // .decl dead_can_reach_origins(R, P, Q)
            //
            // Contains dead regions where we are interested
            // in computing the transitive closure of things they
            // can reach.
            dead_can_reach_origins.from_map(&live_to_dead_regions, |&(_r1, r2, p, q)| ((r2, p), q));
            dead_can_reach_origins.from_map(&dead_region_requires, |&(r, _b, p, q)| ((r, p), q));

            // .decl dead_can_reach(R1, R2, P, Q)
            //
            // Indicates that the region `R1`, which is dead
            // in `Q`, can reach the region `R2` in P.
            //
            // This is effectively the transitive subset
            // relation, but we try to limit it to regions
            // that are dying on the edge P -> Q.
            //
            // dead_can_reach(R1, R2, P, Q) :-
            //   dead_can_reach_origins(R1, P, Q),
            //   subset(R1, R2, P).
            dead_can_reach.from_join(&dead_can_reach_origins, &subset_r1p, |&(r1,p),&q,&r2| (r1,r2,p,q));

            // dead_can_reach(R1, R3, P, Q) :-
            //   dead_can_reach(R1, R2, P, Q),
            //   !region_live_at(R2, Q),
            //   subset(R2, R3, P).
            //
            // This is the "transitive closure" rule, but
            // note that we only apply it with the
            // "intermediate" region R2 is dead at Q.
            dead_can_reach_1.from_antijoin(&dead_can_reach_r2q, &region_live_at_rel, |&(r2,q),&(r1,p)| ((r2,p),(r1,q)));
            dead_can_reach.from_join(&dead_can_reach_1, &subset_r1p, |&(_r2,p),&(r1,q),&r3| (r1,r3,p,q));

            // .decl dead_can_reach_live(R1, R2, P, Q)
            //
            // Indicates that, along the edge `P -> Q`, the
            // dead (in Q) region R1 can reach the live (in Q)
            // region R2 via a subset relation. This is a
            // subset of the full `dead_can_reach` relation
            // where we filter down to those cases where R2 is
            // live in Q.
            dead_can_reach_live.from_join(&dead_can_reach_r2q, &region_live_at_var, |&(r2,q),&(r1,p),&()| ((r1,p,q),r2));

            // subset(R1, R2, Q) :-
            //   subset(R1, R2, P),
            //   cfg_edge(P, Q),
            //   region_live_at(R1, Q),
            //   region_live_at(R2, Q).
            //
            // Carry `R1 <= R2` from P into Q if both `R1` and
            // `R2` are live in Q.
            subset_1.from_join(&subset_p, &cfg_edge_p, |&_p,&(r1,r2),&q| ((r1,q),r2));
            subset_2.from_join(&subset_1, &region_live_at_var, |&(r1,q),&r2,&()| ((r2,q),r1));
            subset.from_join(&subset_2, &region_live_at_var, |&(r2,q),&r1,&()| (r1,r2,q));

            // subset(R1, R3, Q) :-
            //   live_to_dead_regions(R1, R2, P, Q),
            //   dead_can_reach_live(R2, R3, P, Q).
            subset.from_join(&live_to_dead_regions_r2pq, &dead_can_reach_live_r1pq, |&(_r2,_p,q),&r1,&r3| (r1,r3,q));

            // requires(R2, B, Q) :-
            //   dead_region_requires(R1, B, P, Q),
            //   dead_can_reach_live(R1, R2, P, Q).
            //
            // Communicate a `R1 requires B` relation across
            // an edge `P -> Q` where `R1` is dead in Q; in
            // that case, for each region `R2` live in `Q`
            // where `R1 <= R2` in P, we add `R2 requires B`
            // to `Q`.
            requires.from_join(&dead_region_requires_rpq, &dead_can_reach_live_r1pq, |&(_r1,_p,q),&b,&r2| (r2,b,q));

            // requires(R, B, Q) :-
            //   requires(R, B, P),
            //   !killed(B, P),
            //   cfg_edge(P, Q),
            //   region_live_at(R, Q).
            requires_1.from_antijoin(&requires_bp, &killed, |&(b,p),&r| (p,(r,b)));
            requires_2.from_join(&requires_1, &cfg_edge_p, |&_p, &(r,b), &q| ((r,q),b));
            requires.from_join(&requires_2, &region_live_at_var, |&(r,q),&b,&()| (r,b,q));

            // .decl borrow_live_at(B, P) -- true if the restrictions of the borrow B
            // need to be enforced at the point P
            //
            // borrow_live_at(B, P) :- requires(R, B, P), region_live_at(R, P)
            borrow_live_at.from_join(&requires_rp, &region_live_at_var, |&(_r,p), &b, &()| (b, p));
        }

        if dump_enabled {
            for (region, location) in &region_live_at_rel.elements {
                result
                    .region_live_at
                    .entry(*location)
                    .or_insert(vec![])
                    .push(*region);
            }

            let subset = subset.complete();
            for (r1, r2, location) in &subset.elements {
                result
                    .subset
                    .entry(*location)
                    .or_insert(BTreeMap::new())
                    .entry(*r1)
                    .or_insert(BTreeSet::new())
                    .insert(*r2);
                result.region_degrees.update_degrees(*r1, *r2, *location);
            }

            let requires = requires.complete();
            for (region, borrow, location) in &requires.elements {
                result
                    .restricts
                    .entry(*location)
                    .or_insert(BTreeMap::new())
                    .entry(*region)
                    .or_insert(BTreeSet::new())
                    .insert(*borrow);
            }
        }

        borrow_live_at.complete()
    };

    if dump_enabled {
        println!("borrow_live_at is complete: {} tuples, {:?}", borrow_live_at.len(), timer.elapsed());
    }

    for (borrow, location) in &borrow_live_at.elements {
        result
            .borrow_live_at
            .entry(*location)
            .or_insert(Vec::new())
            .push(*borrow);
    }

    result
}