Skip to content

Commit

Permalink
Migrate to latest Rust version
Browse files Browse the repository at this point in the history
  • Loading branch information
kaisellgren committed May 5, 2016
1 parent faf68ab commit 3328c47
Show file tree
Hide file tree
Showing 12 changed files with 96 additions and 104 deletions.
5 changes: 5 additions & 0 deletions Cargo.toml
Expand Up @@ -9,3 +9,8 @@ authors = ["Kai Sellgren <kaisellgren@gmail.com>"]

name = "comp_sci"
path = "src/lib.rs"

[dependencies]
bit-set = "0.4.0"
rand = "0.3.14"
rustc-serialize = "0.3.19"
4 changes: 2 additions & 2 deletions src/algorithms/insertion_sort.rs
Expand Up @@ -6,11 +6,11 @@
/// than `k` places away from its sorted position).
///
/// Insertion sort can also sort sets as it receives them.
pub fn insertion_sort<'a, A: Ord + 'a>(data: &'a mut [A]) {
pub fn insertion_sort<A: Ord>(data: &mut [A]) {
match data.len() {
0 | 1 => (),
size => {
for i in range(1, size) {
for i in 1 .. size {
let mut x = i;
while x > 0 && &data[x - 1] > &data[x] {
data.swap(x, x - 1);
Expand Down
6 changes: 3 additions & 3 deletions src/algorithms/merge_sort.rs
Expand Up @@ -9,7 +9,7 @@ pub fn merge_sort<A: Ord + Clone>(data: &[A]) -> Vec<A> {
1 => vec![data[0].clone()],
size => {
let middle = size / 2;
conquer(divide(data.slice_to(middle)), divide(data.slice_from(middle)))
conquer(divide(&data[..middle]), divide(&data[middle..]))
}
}
}
Expand All @@ -33,8 +33,8 @@ pub fn merge_sort<A: Ord + Clone>(data: &[A]) -> Vec<A> {
}
}

result.push_all(left.slice_from(left_index));
result.push_all(right.slice_from(right_index));
result.extend_from_slice(&left[left_index..]);
result.extend_from_slice(&right[right_index..]);

result
}
Expand Down
20 changes: 10 additions & 10 deletions src/algorithms/murmur.rs
Expand Up @@ -17,17 +17,17 @@ pub fn murmur3_32_seed(data: &[u8], seed: u32) -> u32 {
let length = data.len() as u32;

let n_blocks = length / 4;
for i in range(0, n_blocks) {
let mut k = get_u32(data.slice_from((i * 4) as usize));
k *= C1;
for i in 0 .. n_blocks {
let mut k = get_u32(&data[(i * 4) as usize..]);
k = k.wrapping_mul(C1);
k = (k << R1) | (k >> (32 - R1));
k *= C2;
k = k.wrapping_mul(C2);

hash ^= k;
hash = ((hash << R2) | (hash >> (32 - R2))) * M + N;
hash = ((hash << R2) | (hash >> (32 - R2))).wrapping_mul(M).wrapping_add(N);
}

let tail = data.slice_from((n_blocks * 4) as usize);
let tail = &data[(n_blocks * 4) as usize..];
let remainder = length & 3;
let mut k1 = 0u32;

Expand All @@ -42,17 +42,17 @@ pub fn murmur3_32_seed(data: &[u8], seed: u32) -> u32 {
if remainder >= 1 {
k1 ^= tail[0] as u32;

k1 *= C1;
k1 = k1.wrapping_mul(C1);
k1 = (k1 << R1) | (k1 >> (32 - R1));
k1 *= C2;
k1 = k1.wrapping_mul(C2);
hash ^= k1;
}

hash ^= length;
hash ^= hash >> 16;
hash *= 0x85ebca6b;
hash = hash.wrapping_mul(0x85ebca6b);
hash ^= hash >> 13;
hash *= 0xc2b2ae35;
hash = hash.wrapping_mul(0xc2b2ae35);
hash ^= hash >> 16;

hash
Expand Down
4 changes: 2 additions & 2 deletions src/algorithms/quick_sort.rs
Expand Up @@ -13,8 +13,8 @@ pub fn quick_sort<A: Ord>(data: &mut [A]) {
let pivot = find_pivot(data);
let pivot = partition(data, pivot);

qsort(data.slice_to_mut(pivot));
qsort(data.slice_from_mut(pivot + 1));
qsort(&mut data[..pivot]);
qsort(&mut data[pivot + 1..]);
}
}
}
Expand Down
2 changes: 1 addition & 1 deletion src/algorithms/selection_sort.rs
Expand Up @@ -3,7 +3,7 @@
/// Selection sort is inefficient against large sets. It requires no additional memory.
///
/// The write performance of `O(n)` is better than that of e.g. insertion sort's `O(n^2)`.
pub fn selection_sort<'a, A: Ord + 'a>(data: &'a mut [A]) {
pub fn selection_sort<A: Ord>(data: &mut [A]) {
let (mut i, size) = (0, data.len());

while i < size {
Expand Down
72 changes: 35 additions & 37 deletions src/data_structures/array_list.rs
@@ -1,11 +1,12 @@
use data_structures::heap_array::HeapArray;
use core::raw::Slice as RawSlice;
use std::slice::from_raw_parts;
use std::slice::from_raw_parts_mut;
use std::ops::{Index, IndexMut};
use std::mem;
use std::cmp::max;
use std::iter::range_step;
use std::convert::AsRef;

static DEFAULT_CAPACITY: usize = 10us;
static DEFAULT_CAPACITY: usize = 10usize;

/// An implementation of a growable and mutable array type, which is allocated on the heap.
///
Expand Down Expand Up @@ -62,7 +63,7 @@ impl<A> ArrayList<A> {

self.ensure_enough_capacity();

for i in range(index, self.length) {
for i in index .. self.length {
self.elements.swap(self.length - i, self.length - i - 1);
}

Expand All @@ -80,20 +81,17 @@ impl<A> ArrayList<A> {
);
}

for i in range(index, self.length - 1) {
for i in index .. self.length - 1 {
self.elements.swap(i, i + 1);
}

self.length -= 1;
}

#[inline]
pub fn as_mut_slice<'a>(&'a mut self) -> &'a mut [A] {
pub fn as_mut_slice(&mut self) -> &mut [A] {
unsafe {
mem::transmute(RawSlice {
data: &self.elements[0],
len: self.length,
})
from_raw_parts_mut(&mut self.elements[0], self.length)
}
}

Expand All @@ -108,33 +106,33 @@ impl<A> ArrayList<A> {
}
}

impl<A> AsSlice<A> for ArrayList<A> {
#[inline]
fn as_slice<'a>(&'a self) -> &'a [A] {
impl<A> AsRef<[A]> for ArrayList<A> {
fn as_ref(&self) -> &[A] {
unsafe {
from_raw_parts(&self.elements[0], self.length)
}
}
}

impl<A> AsMut<[A]> for ArrayList<A> {
fn as_mut(&mut self) -> &mut [A] {
unsafe {
mem::transmute(RawSlice {
data: &self.elements[0],
len: self.length,
})
from_raw_parts_mut(&mut self.elements[0], self.length)
}
}
}

impl<A> Index<usize> for ArrayList<A> {
type Output = A;

#[inline]
fn index<'a>(&'a self, index: &usize) -> &'a A {
&self.as_slice()[*index]
fn index(&self, index: usize) -> &A {
&self.as_ref()[index]
}
}

impl<A> IndexMut<usize> for ArrayList<A> {
type Output = A;

#[inline]
fn index_mut<'a>(&'a mut self, index: &usize) -> &'a mut A {
&mut self.as_mut_slice()[*index]
fn index_mut(&mut self, index: usize) -> &mut A {
&mut self.as_mut()[index]
}
}

Expand All @@ -146,16 +144,16 @@ mod tests {
fn basic_tests() {
let mut a = ArrayList::with_capacity(2);

assert_eq!(2us, a.capacity());
assert_eq!(2usize, a.capacity());

a.push(0u8);
a.push(1u8);

assert_eq!(2us, a.capacity());
assert_eq!(2usize, a.capacity());

a.push(2u8);

assert_eq!(4us, a.capacity());
assert_eq!(4usize, a.capacity());
}

#[test]
Expand All @@ -170,11 +168,11 @@ mod tests {

a.remove_at(2);

assert_eq!([1u8, 2u8, 4u8, 5u8], a.as_slice());
assert_eq!([1u8, 2u8, 4u8, 5u8], a.as_ref());

a.remove_at(3);

assert_eq!([1u8, 2u8, 4u8], a.as_slice());
assert_eq!([1u8, 2u8, 4u8], a.as_ref());
}

#[test]
Expand All @@ -191,28 +189,28 @@ mod tests {

assert_eq!(5u8, a[1]);

assert_eq!(5us, a.capacity());
assert_eq!(2us, a.length());
assert_eq!(5usize, a.capacity());
assert_eq!(2usize, a.length());

a.insert(2, 1u8);

assert_eq!(5us, a.capacity());
assert_eq!(3us, a.length());
assert_eq!(5usize, a.capacity());
assert_eq!(3usize, a.length());

a.insert(3, 2u8);

assert_eq!([15u8, 5u8, 1u8, 2u8], a.as_slice());
assert_eq!([15u8, 5u8, 1u8, 2u8], a.as_ref());
}

#[test]
#[should_fail]
#[should_panic]
fn insert_out_of_bounds() {
let mut a = ArrayList::with_capacity(2);
a.insert(1, 0u8);
}

#[test]
#[should_fail]
#[should_panic]
fn remove_at_out_of_bounds() {
let mut a: ArrayList<u8> = ArrayList::with_capacity(2);
a.remove_at(0);
Expand Down
2 changes: 1 addition & 1 deletion src/data_structures/binary_heap.rs
Expand Up @@ -105,7 +105,7 @@ mod tests {
b.iter(|| {
let mut bh = BinaryHeap::new();

for i in range(0u32, 1_000) {
for i in 0u32 .. 1_000 {
bh.push(i);
}
})
Expand Down
17 changes: 8 additions & 9 deletions src/data_structures/bloom_filter.rs
@@ -1,10 +1,9 @@
use std::collections::BitvSet;
use std::num::Float;
use bit_set::BitSet;
use std::cmp::max;
use algorithms::murmur::murmur3_32_seed;

pub struct BloomFilter {
set: BitvSet,
set: BitSet,
expected_length: u32,
hash_count: u32,
}
Expand All @@ -16,12 +15,12 @@ impl BloomFilter {
/// expect to add. The latter is used to choose some optimal internal values to minimize the false-positive
/// rate (which can be estimated with expected_false_positive_rate()).
pub fn with_capacity(capacity: u32, expected_length: u32) -> BloomFilter {
let hash_count = ((capacity / expected_length) as f32 * 2.0.ln()).ceil() as u32;
let hash_count = (capacity / expected_length) as f32 * 2.0f32.ln();

BloomFilter {
set: BitvSet::with_capacity(capacity as usize),
set: BitSet::with_capacity(capacity as usize),
expected_length: expected_length,
hash_count: max(1, hash_count),
hash_count: max(1, hash_count.ceil() as u32),
}
}

Expand All @@ -34,7 +33,7 @@ impl BloomFilter {

/// Pushes a new value to the bloom filter.
pub fn push(&mut self, data: &[u8]) {
let mut hashes = range(0, self.hash_count).map(|i| murmur3_32_seed(data, i) as usize);
let hashes = (0 .. self.hash_count).map(|i| murmur3_32_seed(data, i) as usize);

for hash in hashes {
self.set.insert(hash);
Expand All @@ -48,9 +47,9 @@ impl BloomFilter {

/// Returns false if the data was definitely not added to the bloom filter, and true if it may have been.
pub fn contains(&self, data: &[u8]) -> bool {
let hashes = range(0, self.hash_count).map(|i| murmur3_32_seed(data, i) as usize);
let mut hashes = (0 .. self.hash_count).map(|i| murmur3_32_seed(data, i) as usize);

hashes.all(|hash| self.set.contains(&hash))
hashes.all(|h| self.set.contains(h))
}
}

Expand Down

0 comments on commit 3328c47

Please sign in to comment.