Commit c57172b
refactor/cleanup: replace &vec![..] with &[..] where possible, remove redundant clone [SKIP_CHANGELOG]
alexandermorozov committed Mar 20, 2016
1 parent 4a21001 commit c57172b
Showing 8 changed files with 30 additions and 30 deletions.
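In the new code, a borrowed slice literal does the same job as borrowing a freshly built Vec: a `&Vec<usize>` auto-derefs to `&[usize]`, and `&[1, 30, 30]` (a borrowed fixed-size array coerced to a slice) gets there without the temporary heap allocation that `&vec![1, 30, 30]` makes and immediately drops. A minimal, self-contained sketch of the pattern; `SequentialConfig` and `add_input` below are simplified stand-ins for illustration, not Leaf's actual types:

```rust
// Simplified stand-in for a config type whose methods take a shape as &[usize].
#[derive(Debug, Default)]
struct SequentialConfig {
    inputs: Vec<(String, Vec<usize>)>,
}

impl SequentialConfig {
    // Accepting &[usize] lets callers pass either a slice literal or a &Vec<usize>
    // (which auto-derefs to &[usize]); the slice literal avoids allocating a
    // temporary Vec just to borrow it.
    fn add_input(&mut self, name: &str, shape: &[usize]) {
        self.inputs.push((name.to_owned(), shape.to_vec()));
    }
}

fn main() {
    let mut cfg = SequentialConfig::default();
    cfg.add_input("in", &[1, 30, 30]);       // no temporary Vec allocated
    cfg.add_input("label", &vec![1, 1, 10]); // also compiles, but allocates first
    println!("{:?}", cfg);
}
```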
2 changes: 1 addition & 1 deletion Cargo.toml
@@ -14,7 +14,7 @@ keywords = ["deep-learning", "neural-networks", "machine-learning", "framework"]
license = "MIT OR Apache-2.0"

[dependencies]
collenchyma = { version = "0.0.8", default-features = false, features = ["native"] } # native feature to read/write data into tensors
collenchyma = { version = "0.0.9", default-features = false, features = ["native"] } # native feature to read/write data into tensors
collenchyma-blas = { version = "0.2.0", default-features = false, features = ["native"] } # only compiles with native feature
collenchyma-nn = { version = "0.3.2", default-features = false }

14 changes: 7 additions & 7 deletions benches/network_benches.rs
@@ -72,8 +72,8 @@ mod cuda {
fn bench_mnsit_forward_1(b: &mut Bencher) {
let mut cfg = SequentialConfig::default();
// set up input
cfg.add_input("in", &vec![1, 30, 30]);
cfg.add_input("label", &vec![1, 1, 10]);
cfg.add_input("in", &[1, 30, 30]);
cfg.add_input("label", &[1, 1, 10]);
// set up sigmoid
let mut sig_cfg = LayerConfig::new("sig", LayerType::Sigmoid);
sig_cfg.add_input("in");
@@ -96,7 +96,7 @@ mod cuda {
backend.clone(), &LayerConfig::new("network", LayerType::Sequential(cfg)));

let _ = timeit_loops!(10, {
-let inp = SharedTensor::<f32>::new(backend.device(), &vec![1, 30, 30]).unwrap();
+let inp = SharedTensor::<f32>::new(backend.device(), &[1, 30, 30]).unwrap();
let inp_lock = Arc::new(RwLock::new(inp));

network.forward(&[inp_lock]);
@@ -120,7 +120,7 @@ mod cuda {
fn alexnet_forward(b: &mut Bencher) {
let mut cfg = SequentialConfig::default();
// Layer: data
cfg.add_input("data", &vec![128, 3, 224, 224]);
cfg.add_input("data", &[128, 3, 224, 224]);
// Layer: conv1
let conv1_layer_cfg = ConvolutionConfig {
num_output: 64,
@@ -260,7 +260,7 @@ mod cuda {

let func = || {
let forward_time = timeit_loops!(1, {
-let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 112, 112]).unwrap();
+let inp = SharedTensor::<f32>::new(backend.device(), &[128, 3, 112, 112]).unwrap();

let inp_lock = Arc::new(RwLock::new(inp));
network.forward(&[inp_lock]);
@@ -277,7 +277,7 @@ mod cuda {
// let _ = env_logger::init();
let mut cfg = SequentialConfig::default();
// Layer: data
cfg.add_input("data", &vec![128, 3, 112, 112]);
cfg.add_input("data", &[128, 3, 112, 112]);
// Layer: conv1
let conv1_layer_cfg = ConvolutionConfig {
num_output: 32,
@@ -416,7 +416,7 @@ mod cuda {
backend.clone(), &LayerConfig::new("network", LayerType::Sequential(cfg)));

let mut func = || {
-let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 112, 112]).unwrap();
+let inp = SharedTensor::<f32>::new(backend.device(), &[128, 3, 112, 112]).unwrap();

let inp_lock = Arc::new(RwLock::new(inp));
network.forward(&[inp_lock]);
12 changes: 6 additions & 6 deletions examples/benchmarks.rs
@@ -120,7 +120,7 @@ fn bench_alexnet() {
#[cfg(all(feature="cuda", not(feature="native")))]
fn bench_alexnet() {
let mut cfg = SequentialConfig::default();
cfg.add_input("data", &vec![128, 3, 224, 224]);
cfg.add_input("data", &[128, 3, 224, 224]);

let conv1_layer_cfg = ConvolutionConfig { num_output: 64, filter_shape: vec![11], padding: vec![2], stride: vec![4] };
cfg.add_layer(LayerConfig::new("conv1", conv1_layer_cfg));
@@ -160,7 +160,7 @@ fn bench_alexnet() {
let func = || {
let forward_time = timeit_loops!(1, {
{
-let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 224, 224]).unwrap();
+let inp = SharedTensor::<f32>::new(backend.device(), &[128, 3, 224, 224]).unwrap();

let inp_lock = Arc::new(RwLock::new(inp));
network.forward(&[inp_lock.clone()]);
@@ -202,7 +202,7 @@ fn bench_overfeat() {
#[cfg(all(feature="cuda", not(feature="native")))]
fn bench_overfeat() {
let mut cfg = SequentialConfig::default();
cfg.add_input("data", &vec![128, 3, 231, 231]);
cfg.add_input("data", &[128, 3, 231, 231]);

let conv1_layer_cfg = ConvolutionConfig { num_output: 96, filter_shape: vec![11], padding: vec![0], stride: vec![4] };
cfg.add_layer(LayerConfig::new("conv1", conv1_layer_cfg));
@@ -242,7 +242,7 @@ fn bench_overfeat() {
let func = || {
let forward_time = timeit_loops!(1, {
{
-let inp = SharedTensor::<f32>::new(backend.device(), &vec![128, 3, 231, 231]).unwrap();
+let inp = SharedTensor::<f32>::new(backend.device(), &[128, 3, 231, 231]).unwrap();

let inp_lock = Arc::new(RwLock::new(inp));
network.forward(&[inp_lock.clone()]);
@@ -284,7 +284,7 @@ fn bench_vgg_a() {
#[cfg(all(feature="cuda", not(feature="native")))]
fn bench_vgg_a() {
let mut cfg = SequentialConfig::default();
cfg.add_input("data", &vec![64, 3, 224, 224]);
cfg.add_input("data", &[64, 3, 224, 224]);

let conv1_layer_cfg = ConvolutionConfig { num_output: 64, filter_shape: vec![3], padding: vec![1], stride: vec![1] };
cfg.add_layer(LayerConfig::new("conv1", conv1_layer_cfg));
@@ -339,7 +339,7 @@ fn bench_vgg_a() {
let func = || {
let forward_time = timeit_loops!(1, {
{
-let inp = SharedTensor::<f32>::new(backend.device(), &vec![64, 3, 224, 224]).unwrap();
+let inp = SharedTensor::<f32>::new(backend.device(), &[64, 3, 224, 224]).unwrap();

let inp_lock = Arc::new(RwLock::new(inp));
network.forward(&[inp_lock.clone()]);
14 changes: 7 additions & 7 deletions src/layer.rs
@@ -203,8 +203,8 @@ impl<B: IBackend> Layer<B> {
}

let backend: Rc<IBackend<F=B::F>> = self.backend.clone();
-blob_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA
-blob_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA
+blob_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &[1,1,1]).unwrap())); // [1,1,1] for CUDA
+blob_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &[1,1,1]).unwrap())); // [1,1,1] for CUDA
}
self.output_blob_names.push(blob_name.clone());
self.output_blobs_data.push(blob_data.clone());
@@ -227,8 +227,8 @@ impl<B: IBackend> Layer<B> {
info!("{} -> {}", self.name, blob_name);

let backend: Rc<IBackend<F=B::F>> = self.backend.clone();
-let output_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA
-let output_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &vec![1,1,1]).unwrap())); // [1,1,1] for CUDA
+let output_data = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &[1,1,1]).unwrap())); // [1,1,1] for CUDA
+let output_gradient = Arc::new(RwLock::new(SharedTensor::new(backend.device(), &[1,1,1]).unwrap())); // [1,1,1] for CUDA
self.output_blobs_data.push(output_data);
self.output_blobs_gradient.push(output_gradient);
}
@@ -460,7 +460,7 @@ impl<B: IBackend> Layer<B> {

let forward_time = timeit_loops!(1, {
if self.is_using_in_place() {
-self.worker.forward(&self.backend, &vec![], &self.weights_data, &mut self.output_blobs_data);
+self.worker.forward(&self.backend, &[], &self.weights_data, &mut self.output_blobs_data);
} else {
self.worker.forward(&self.backend, &self.input_blobs_data, &self.weights_data, &mut self.output_blobs_data);
}
@@ -498,8 +498,8 @@ impl<B: IBackend> Layer<B> {
if self.is_using_in_place() {
self.worker.backward_input(&self.backend,
&self.weights_data,
-&vec![],
-&vec![],
+&[],
+&[],
&self.input_blobs_data,
&mut self.input_blobs_gradient)
} else {
6 changes: 3 additions & 3 deletions src/layers/common/convolution.rs
@@ -252,12 +252,12 @@ mod tests {
stride: vec![4],
};
let layer = Convolution::<Backend<Cuda>>::from_config(&cfg);
-let num_spatial_dims = layer.num_spatial_dims(&vec![1, 3, 224, 224]);
+let num_spatial_dims = layer.num_spatial_dims(&[1, 3, 224, 224]);
assert_eq!(2, num_spatial_dims);
assert_eq!(vec![11, 11], layer.spatial_filter_dims(2));
assert_eq!(vec![2, 2], layer.padding_dims(2));
assert_eq!(vec![4, 4], layer.stride_dims(2));
-assert_eq!(vec![64, 3, 11, 11], layer.calculate_filter_shape(&vec![1, 3, 224, 224]));
-assert_eq!(vec![1, 64, 55, 55], layer.calculate_output_shape(&vec![1, 3, 224, 224]));
+assert_eq!(vec![64, 3, 11, 11], layer.calculate_filter_shape(&[1, 3, 224, 224]));
+assert_eq!(vec![1, 64, 55, 55], layer.calculate_output_shape(&[1, 3, 224, 224]));
}
}
2 changes: 1 addition & 1 deletion src/layers/common/sequential.rs
@@ -43,7 +43,7 @@ impl<B: IBackend + LayerOps<f32> + 'static> Sequential<B> {
pub fn from_config(backend: Rc<B>, config: &SequentialConfig) -> Sequential<B> {
let mut layer = Self::empty();

-layer.init_layers(backend, &config.clone());
+layer.init_layers(backend, config);

layer
}
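The "remove redundant clone" half of the commit is this one-line change: `init_layers` only needs to read the config, and `config` is already a `&SequentialConfig`, so `&config.clone()` builds a full copy just to borrow it for the duration of the call. A hedged sketch of the before/after, using simplified stand-in types rather than Leaf's real `Sequential`:

```rust
// Simplified illustration of the clone removal; not the actual Leaf types.
#[derive(Clone, Default)]
struct SequentialConfig {
    layer_names: Vec<String>,
}

struct Sequential {
    layer_names: Vec<String>,
}

impl Sequential {
    // Borrowing the config is enough: the method only reads it.
    fn init_layers(&mut self, config: &SequentialConfig) {
        self.layer_names = config.layer_names.clone();
    }

    fn from_config(config: &SequentialConfig) -> Sequential {
        let mut layer = Sequential { layer_names: Vec::new() };
        // Before: layer.init_layers(&config.clone()); // clones, then borrows the temporary clone
        // After:  just pass the existing borrow through.
        layer.init_layers(config);
        layer
    }
}

fn main() {
    let cfg = SequentialConfig { layer_names: vec!["sigmoid".to_owned()] };
    let net = Sequential::from_config(&cfg);
    assert_eq!(net.layer_names, cfg.layer_names);
}
```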
2 changes: 1 addition & 1 deletion src/util.rs
@@ -59,7 +59,7 @@ pub fn write_batch_sample<T: NumCast + ::std::marker::Copy>(tensor: &mut SharedT
/// Create a Collenchyma SharedTensor for a scalar value.
pub fn native_scalar<T: NumCast + ::std::marker::Copy>(scalar: T) -> SharedTensor<T> {
let native = native_backend();
-let mut shared_scalar = SharedTensor::<T>::new(native.device(), &vec![1]).unwrap();
+let mut shared_scalar = SharedTensor::<T>::new(native.device(), &1).unwrap();
write_to_memory(shared_scalar.get_mut(native.device()).unwrap(), &[scalar]);

shared_scalar
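The util.rs change works because the tensor constructor is generic over its shape description: a bare `&1` can describe a rank-1 tensor of length 1 just as `&vec![1]` did, without the allocation. A rough sketch of how such an API can accept either form, using a hypothetical `IntoShape` trait as a stand-in for collenchyma's conversion trait (whose real impls are not shown in this diff):

```rust
// Hypothetical shape-conversion trait; collenchyma's actual trait and its
// implementations are not part of this commit, so this is only a sketch.
trait IntoShape {
    fn into_shape(&self) -> Vec<usize>;
}

impl IntoShape for usize {
    fn into_shape(&self) -> Vec<usize> {
        vec![*self] // a bare `1` describes a rank-1 tensor of length 1
    }
}

impl IntoShape for [usize] {
    fn into_shape(&self) -> Vec<usize> {
        self.to_vec()
    }
}

// Stand-in for a constructor that only needs the shape, not a concrete type.
fn new_tensor<D: IntoShape + ?Sized>(desc: &D) -> Vec<usize> {
    desc.into_shape()
}

fn main() {
    assert_eq!(new_tensor(&1usize), vec![1usize]);       // the `&1` form used in the commit
    assert_eq!(new_tensor(&[1usize][..]), vec![1usize]); // the slice form it replaces
    println!("both describe the same shape");
}
```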
8 changes: 4 additions & 4 deletions tests/layer_specs.rs
@@ -60,7 +60,7 @@ mod layer_spec {
#[test]
fn can_create_single_layer_sequential_layer() {
let mut model = SequentialConfig::default();
model.add_input("data", &vec![28, 28]);
model.add_input("data", &[28, 28]);
model.add_layer(LayerConfig::new("sigmoid", LayerType::Sigmoid));

Layer::from_config(cuda_backend(), &LayerConfig::new("model", LayerType::Sequential(model)));
@@ -69,7 +69,7 @@ mod layer_spec {
#[test]
fn can_create_simple_network_sequential_layer() {
let mut model = SequentialConfig::default();
model.add_input("data", &vec![1, 784]);
model.add_input("data", &[1, 784]);
model.add_layer(LayerConfig::new("linear1", LinearConfig { output_size: 1568 }));
model.add_layer(LayerConfig::new("sigmoid", LayerType::Sigmoid));
model.add_layer(LayerConfig::new("linear2", LinearConfig { output_size: 10 }));
@@ -83,12 +83,12 @@ mod layer_spec {
let cuda_backend = cuda_backend();

let mut normal_model = SequentialConfig::default();
-normal_model.add_input("data", &vec![3]);
+normal_model.add_input("data", &[3]);
normal_model.add_layer(LayerConfig::new("sigmoid", LayerType::Sigmoid));
let mut normal_network = Layer::from_config(cuda_backend.clone(), &LayerConfig::new("normal_model", LayerType::Sequential(normal_model)));

let mut reshape_model = SequentialConfig::default();
-reshape_model.add_input("data", &vec![3]);
+reshape_model.add_input("data", &[3]);
reshape_model.add_layer(LayerConfig::new("reshape", ReshapeConfig { shape: vec![1, 1, 3] }));
reshape_model.add_layer(LayerConfig::new("sigmoid", LayerType::Sigmoid));
let mut reshape_network = Layer::from_config(cuda_backend.clone(), &LayerConfig::new("reshape_model", LayerType::Sequential(reshape_model)));
