diff: minor cleanup of tokenizer functions #3919

Merged: 2 commits, Jun 19, 2024

cli/src/diff_util.rs (2 changes: 1 addition & 1 deletion)

@@ -726,7 +726,7 @@ fn unified_diff_hunks<'content>(
         lines: vec![],
     };
     let mut show_context_after = false;
-    let diff = Diff::for_tokenizer(&[left_content, right_content], &diff::find_line_ranges);
+    let diff = Diff::for_tokenizer(&[left_content, right_content], diff::find_line_ranges);
     for hunk in diff.hunks() {
         match hunk {
             DiffHunk::Matching(content) => {

cli/src/merge_tools/builtin.rs (2 changes: 1 addition & 1 deletion)

@@ -227,7 +227,7 @@ fn make_diff_sections(
 ) -> Result<Vec<scm_record::Section<'static>>, BuiltinToolError> {
     let diff = Diff::for_tokenizer(
         &[left_contents.as_bytes(), right_contents.as_bytes()],
-        &find_line_ranges,
+        find_line_ranges,
     );
     let mut sections = Vec::new();
     for hunk in diff.hunks() {

lib/src/conflicts.rs (4 changes: 2 additions & 2 deletions)

@@ -259,15 +259,15 @@ pub fn materialize_merge_result(
             output.write_all(&left.0)?;
             continue;
         };
-        let diff1 = Diff::for_tokenizer(&[&left.0, &right1.0], &find_line_ranges)
+        let diff1 = Diff::for_tokenizer(&[&left.0, &right1.0], find_line_ranges)
             .hunks()
             .collect_vec();
         // Check if the diff against the next positive term is better. Since
         // we want to preserve the order of the terms, we don't match against
         // any later positive terms.
         if let Some(right2) = hunk.get_add(add_index + 1) {
             let diff2 =
-                Diff::for_tokenizer(&[&left.0, &right2.0], &find_line_ranges)
+                Diff::for_tokenizer(&[&left.0, &right2.0], find_line_ranges)
                     .hunks()
                     .collect_vec();
             if diff_size(&diff2) < diff_size(&diff1) {

lib/src/diff.rs (51 changes: 19 additions & 32 deletions)

@@ -23,23 +23,13 @@ use std::slice;
 use itertools::Itertools;
 
 pub fn find_line_ranges(text: &[u8]) -> Vec<Range<usize>> {
-    let mut ranges = vec![];
-    let mut start = 0;
-    loop {
-        match text[start..].iter().position(|b| *b == b'\n') {
-            None => {
-                break;
-            }
-            Some(i) => {
-                ranges.push(start..start + i + 1);
-                start += i + 1;
-            }
-        }
-    }
-    if start < text.len() {
-        ranges.push(start..text.len());
-    }
-    ranges
+    text.split_inclusive(|b| *b == b'\n')
+        .scan(0, |total, line| {
+            let start = *total;
+            *total += line.len();
+            Some(start..*total)
+        })
+        .collect()
 }
 
 fn is_word_byte(b: u8) -> bool {
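
The rewritten find_line_ranges leans on the standard library's split_inclusive, which keeps the b'\n' delimiter with each segment and still yields a final segment when the input has no trailing newline, so the old `if start < text.len()` tail case stays covered. A standalone sketch of that equivalence (editor's example, not part of the PR):

// Editor's sketch: the iterator-based find_line_ranges, exercised on
// inputs with and without a trailing newline.
use std::ops::Range;

fn find_line_ranges(text: &[u8]) -> Vec<Range<usize>> {
    text.split_inclusive(|b| *b == b'\n')
        .scan(0, |total, line| {
            let start = *total;
            *total += line.len();
            Some(start..*total)
        })
        .collect()
}

fn main() {
    assert_eq!(find_line_ranges(b"a\nbc\n"), vec![0..2, 2..5]);
    // The final segment is still produced without a trailing '\n',
    // matching the removed `if start < text.len()` branch.
    assert_eq!(find_line_ranges(b"a\nbc"), vec![0..2, 2..4]);
    assert!(find_line_ranges(b"").is_empty());
}
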
@@ -73,13 +63,10 @@ pub fn find_word_ranges(text: &[u8]) -> Vec<Range<usize>> {
 }
 
 pub fn find_nonword_ranges(text: &[u8]) -> Vec<Range<usize>> {
-    let mut ranges = vec![];
-    for (i, b) in text.iter().enumerate() {
-        if !is_word_byte(*b) {
-            ranges.push(i..i + 1);
-        }
-    }
-    ranges
+    text.iter()
+        .positions(|b| !is_word_byte(*b))
+        .map(|i| i..i + 1)
+        .collect()
 }
 
 struct Histogram<'a> {
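
Itertools::positions, used in the new find_nonword_ranges, yields the index of every element that satisfies a predicate. A small illustration, with an assumed ASCII word-byte predicate standing in for jj's is_word_byte (editor's example, not part of the PR):

// Editor's sketch of the Itertools::positions pattern.
use itertools::Itertools;
use std::ops::Range;

// Assumed stand-in; jj's actual is_word_byte may differ.
fn is_word_byte(b: u8) -> bool {
    b.is_ascii_alphanumeric() || b == b'_'
}

fn find_nonword_ranges(text: &[u8]) -> Vec<Range<usize>> {
    text.iter()
        .positions(|b| !is_word_byte(*b))
        .map(|i| i..i + 1)
        .collect()
}

fn main() {
    // The space (index 3) and '!' (index 7) are the only non-word bytes.
    assert_eq!(find_nonword_ranges(b"foo bar!"), vec![3..4, 7..8]);
}
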
@@ -395,7 +382,7 @@ fn intersect_regions(
 impl<'input> Diff<'input> {
     pub fn for_tokenizer(
         inputs: &[&'input [u8]],
-        tokenizer: &impl Fn(&[u8]) -> Vec<Range<usize>>,
+        tokenizer: impl Fn(&[u8]) -> Vec<Range<usize>>,
     ) -> Self {
         assert!(!inputs.is_empty());
         let base_input = inputs[0];
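
This signature change is what lets every call site in the PR drop the leading `&`: an `impl Fn` parameter taken by value accepts plain fn items, closures, and references alike, because `&F` implements `Fn` whenever `F` does. A minimal sketch with illustrative names, not jj's API:

use std::ops::Range;

// Editor's sketch: an `impl Fn` tokenizer parameter taken by value.
fn token_count(input: &[u8], tokenizer: impl Fn(&[u8]) -> Vec<Range<usize>>) -> usize {
    tokenizer(input).len()
}

fn one_token(text: &[u8]) -> Vec<Range<usize>> {
    vec![0..text.len()]
}

fn main() {
    assert_eq!(token_count(b"a\nb\n", one_token), 1); // plain fn item
    assert_eq!(token_count(b"a\nb\n", |_| vec![]), 0); // closure
    assert_eq!(token_count(b"a\nb\n", &one_token), 1); // &F still implements Fn
}
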
@@ -444,7 +431,7 @@ impl<'input> Diff<'input> {
     }
 
     pub fn unrefined(inputs: &[&'input [u8]]) -> Self {
-        Diff::for_tokenizer(inputs, &|_| vec![])
+        Diff::for_tokenizer(inputs, |_| vec![])
     }
 
     // TODO: At least when merging, it's wasteful to refine the diff if e.g. if 2
@@ -454,9 +441,9 @@ impl<'input> Diff<'input> {
     // probably mean that many callers repeat the same code. Perhaps it
     // should be possible to refine a whole diff *or* individual hunks.
     pub fn default_refinement(inputs: &[&'input [u8]]) -> Self {
-        let mut diff = Diff::for_tokenizer(inputs, &find_line_ranges);
-        diff.refine_changed_regions(&find_word_ranges);
-        diff.refine_changed_regions(&find_nonword_ranges);
+        let mut diff = Diff::for_tokenizer(inputs, find_line_ranges);
+        diff.refine_changed_regions(find_word_ranges);
+        diff.refine_changed_regions(find_nonword_ranges);
         diff
     }
 
@@ -475,7 +462,7 @@ impl<'input> Diff<'input> {
 
     /// Uses the given tokenizer to split the changed regions into smaller
     /// regions. Then tries to finds unchanged regions among them.
-    pub fn refine_changed_regions(&mut self, tokenizer: &impl Fn(&[u8]) -> Vec<Range<usize>>) {
+    pub fn refine_changed_regions(&mut self, tokenizer: impl Fn(&[u8]) -> Vec<Range<usize>>) {
         let mut previous = UnchangedRange {
             base_range: 0..0,
             offsets: vec![0; self.other_inputs.len()],
@@ -493,7 +480,7 @@ impl<'input> Diff<'input> {
             slices.push(&self.other_inputs[i][changed_range]);
         }
 
-        let refined_diff = Diff::for_tokenizer(&slices, tokenizer);
+        let refined_diff = Diff::for_tokenizer(&slices, &tokenizer);
 
         for UnchangedRange {
             base_range,
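
Since `tokenizer` is now owned rather than borrowed, the inner call in refine_changed_regions passes `&tokenizer` so the value is not moved; the reborrow works because, again, `&F` implements `Fn` when `F` does. A sketch of the pattern (editor's example, illustrative names):

// Editor's sketch: sharing an owned `impl Fn` with an inner call by reborrowing.
fn outer(tokenizer: impl Fn(&[u8]) -> usize) -> usize {
    let mut total = 0;
    for chunk in [b"ab".as_slice(), b"cde".as_slice()] {
        // `&tokenizer` avoids moving the closure, keeping it available
        // on the next iteration.
        total += inner(chunk, &tokenizer);
    }
    total
}

fn inner(input: &[u8], tokenizer: impl Fn(&[u8]) -> usize) -> usize {
    tokenizer(input)
}

fn main() {
    assert_eq!(outer(|text| text.len()), 5);
}
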
@@ -931,7 +918,7 @@ mod tests {
         // Tests that unchanged regions are compacted when using for_tokenizer()
         let diff = Diff::for_tokenizer(
             &[b"a\nb\nc\nd\ne\nf\ng", b"a\nb\nc\nX\ne\nf\ng"],
-            &find_line_ranges,
+            find_line_ranges,
         );
         assert_eq!(
             diff.hunks().collect_vec(),

lib/src/files.rs (2 changes: 1 addition & 1 deletion)

@@ -165,7 +165,7 @@ pub fn merge(slices: &Merge<&[u8]>) -> MergeResult {
     let num_diffs = slices.removes().len();
     let diff_inputs = slices.removes().chain(slices.adds()).copied().collect_vec();
 
-    let diff = Diff::for_tokenizer(&diff_inputs, &diff::find_line_ranges);
+    let diff = Diff::for_tokenizer(&diff_inputs, diff::find_line_ranges);
     let mut resolved_hunk = ContentHunk(vec![]);
     let mut merge_hunks: Vec<Merge<ContentHunk>> = vec![];
     for diff_hunk in diff.hunks() {