chore/update: update to collenchyma with SharedTensor
hobofan committed Dec 11, 2015
1 parent 9530054 commit 631b2b6
Showing 3 changed files with 71 additions and 71 deletions.
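In short: the previous collenchyma API encoded tensor rank in the type (`SharedMemory<T, TensorR1>` for vectors, `TensorR0` for scalar results), whereas the new `SharedTensor<T>` carries its shape at runtime, so every argument uses the same type. Taking `asum` from `src/library.rs` below as an example, the signature change looks like this (signatures copied from the diff, shown side by side for comparison):

```rust
// Before this commit: rank is encoded in the type (vector vs. scalar result).
fn asum(&self, x: &mut SharedMemory<F, TensorR1>, result: &mut SharedMemory<F, TensorR0>)
    -> Result<(), ::collenchyma::error::Error>;

// After this commit: one runtime-shaped tensor type for every argument.
fn asum(&self, x: &mut SharedTensor<F>, result: &mut SharedTensor<F>)
    -> Result<(), ::collenchyma::error::Error>;
```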
64 changes: 32 additions & 32 deletions src/helper.rs
@@ -4,8 +4,8 @@
macro_rules! iblas_asum_for {
($t:ident, $b:ty) => (
fn asum(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- result: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ result: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match result.add_device(self.device()) { _ => () }
@@ -18,8 +18,8 @@ macro_rules! iblas_asum_for {
}

fn asum_plain(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- result: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ result: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
<$b as IOperationAsum<$t>>::compute(&self,
macro_rules! iblas_axpy_for {
($t:ident, $b:ty) => (
fn axpy(&self,
- a: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- y: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>
+ a: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ y: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
match a.add_device(self.device()) { _ => try!(a.sync(self.device())) }
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
}

fn axpy_plain(&self,
- a: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- y: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>
+ a: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ y: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
<$b as IOperationAxpy<$t>>::compute(&self,
macro_rules! iblas_copy_for {
($t:ident, $b:ty) => (
fn copy(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- y: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ y: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match y.add_device(self.device()) { _ => () }
}

fn copy_plain(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- y: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ y: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
<$b as IOperationCopy<$t>>::compute(&self,
macro_rules! iblas_dot_for {
($t:ident, $b:ty) => (
fn dot(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- y: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- result: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ y: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ result: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match y.add_device(self.device()) { _ => try!(y.sync(self.device())) }
}

fn dot_plain(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- y: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- result: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ y: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ result: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
<$b as IOperationDot<$t>>::compute(&self,
macro_rules! iblas_nrm2_for {
($t:ident, $b:ty) => (
fn nrm2(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- result: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ result: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match result.add_device(self.device()) { _ => () }
}

fn nrm2_plain(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- result: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ result: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
<$b as IOperationNrm2<$t>>::compute(&self,
macro_rules! iblas_scale_for {
($t:ident, $b:ty) => (
fn scale(&self,
- a: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>
+ a: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
match a.add_device(self.device()) { _ => try!(a.sync(self.device())) }
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
}

fn scale_plain(&self,
- a: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR0>,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>
+ a: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
<$b as IOperationScale<$t>>::compute(&self,
macro_rules! iblas_swap_for {
($t:ident, $b:ty) => (
fn swap(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- y: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ y: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match y.add_device(self.device()) { _ => try!(y.sync(self.device())) }
}

fn swap_plain(&self,
- x: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>,
- y: &mut ::collenchyma::shared_memory::SharedMemory<$t, ::collenchyma::shared_memory::TensorR1>
+ x: &mut ::collenchyma::tensor::SharedTensor<$t>,
+ y: &mut ::collenchyma::tensor::SharedTensor<$t>
) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
<$b as IOperationSwap<$t>>::compute(&self,
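The macros in `helper.rs` expand to the concrete method bodies; a backend crate invokes them inside its `IBlas` impl to override the managed defaults defined in `library.rs`. A rough sketch of that invocation pattern, where `Backend<Native>` is a placeholder type chosen for illustration (an assumption, not taken from this commit):

```rust
// Hypothetical backend impl; only the macro names come from this diff.
impl IBlas<f32> for Backend<Native> {
    iblas_asum_for!(f32, Backend<Native>);
    iblas_axpy_for!(f32, Backend<Native>);
    iblas_copy_for!(f32, Backend<Native>);
    iblas_dot_for!(f32, Backend<Native>);
    iblas_nrm2_for!(f32, Backend<Native>);
    iblas_scale_for!(f32, Backend<Native>);
    iblas_swap_for!(f32, Backend<Native>);

    // A real implementation would also provide the device()/binary()
    // accessors that the generated methods and the trait defaults rely on.
}
```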
30 changes: 15 additions & 15 deletions src/library.rs
@@ -4,7 +4,7 @@ use super::binary::IBlasBinary;
use super::operation::*;
use collenchyma::plugin::numeric_helpers::Float;
use collenchyma::binary::IBinary;
- use collenchyma::shared_memory::{SharedMemory, TensorR0, TensorR1};
+ use collenchyma::tensor::SharedTensor;
use collenchyma::device::DeviceType;
use collenchyma::plugin::Error as LibError;

@@ -19,7 +19,7 @@ pub trait IBlas<F: Float> {
/// This is a Level 1 BLAS operation.
///
/// For a no-memory managed version see `asum_plain`.
- fn asum(&self, x: &mut SharedMemory<F, TensorR1>, result: &mut SharedMemory<F, TensorR0>) -> Result<(), ::collenchyma::error::Error> {
+ fn asum(&self, x: &mut SharedTensor<F>, result: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match result.add_device(self.device()) { _ => () }
Ok(try!(
@@ -38,7 +38,7 @@ pub trait IBlas<F: Float> {
/// *Attention*:<br/>
/// For a correct computation result, you need to manage the memory allocation and synchronization yourself.<br/>
/// For a memory managed version see `asum`.
- fn asum_plain(&self, x: &mut SharedMemory<F, TensorR1>, result: &mut SharedMemory<F, TensorR0>) -> Result<(), ::collenchyma::error::Error> {
+ fn asum_plain(&self, x: &mut SharedTensor<F>, result: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
self.binary().asum().compute(
try!(x.get(self.device()).ok_or(LibError::MissingMemoryForDevice("Unable to resolve memory for `x`"))),
@@ -53,7 +53,7 @@ pub trait IBlas<F: Float> {
/// This is a Level 1 BLAS operation.
///
/// For a no-memory managed version see `axpy_plain`.
- fn axpy(&self, a: &mut SharedMemory<F, TensorR0>, x: &mut SharedMemory<F, TensorR1>, y: &mut SharedMemory<F, TensorR1>) -> Result<(), ::collenchyma::error::Error> {
+ fn axpy(&self, a: &mut SharedTensor<F>, x: &mut SharedTensor<F>, y: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
match a.add_device(self.device()) { _ => try!(a.sync(self.device())) }
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match y.add_device(self.device()) { _ => try!(y.sync(self.device())) }
@@ -74,7 +74,7 @@ pub trait IBlas<F: Float> {
/// *Attention*:<br/>
/// For a correct computation result, you need to manage the memory allocation and synchronization yourself.<br/>
/// For a memory managed version see `axpy`.
- fn axpy_plain(&self, a: &mut SharedMemory<F, TensorR0>, x: &mut SharedMemory<F, TensorR1>, y: &mut SharedMemory<F, TensorR1>) -> Result<(), ::collenchyma::error::Error> {
+ fn axpy_plain(&self, a: &mut SharedTensor<F>, x: &mut SharedTensor<F>, y: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
self.binary().axpy().compute(
try!(a.get(self.device()).ok_or(LibError::MissingMemoryForDevice("Unable to resolve memory for `a`"))),
@@ -90,7 +90,7 @@ pub trait IBlas<F: Float> {
/// This is a Level 1 BLAS operation.
///
/// For a no-memory managed version see `copy_plain`.
- fn copy(&self, x: &mut SharedMemory<F, TensorR1>, y: &mut SharedMemory<F, TensorR1>) -> Result<(), ::collenchyma::error::Error> {
+ fn copy(&self, x: &mut SharedTensor<F>, y: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match y.add_device(self.device()) { _ => () }
Ok(try!(
@@ -109,7 +109,7 @@ pub trait IBlas<F: Float> {
/// *Attention*:<br/>
/// For a correct computation result, you need to manage the memory allocation and synchronization yourself.<br/>
/// For a memory managed version see `copy`.
- fn copy_plain(&self, x: &mut SharedMemory<F, TensorR1>, y: &mut SharedMemory<F, TensorR1>) -> Result<(), ::collenchyma::error::Error> {
+ fn copy_plain(&self, x: &mut SharedTensor<F>, y: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
self.binary().copy().compute(
try!(x.get(self.device()).ok_or(LibError::MissingMemoryForDevice("Unable to resolve memory for `x`"))),
@@ -125,7 +125,7 @@ pub trait IBlas<F: Float> {
/// This is a Level 1 BLAS operation.
///
/// For a no-memory managed version see `dot_plain`.
- fn dot(&self, x: &mut SharedMemory<F, TensorR1>, y: &mut SharedMemory<F, TensorR1>, result: &mut SharedMemory<F, TensorR0>) -> Result<(), ::collenchyma::error::Error> {
+ fn dot(&self, x: &mut SharedTensor<F>, y: &mut SharedTensor<F>, result: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match y.add_device(self.device()) { _ => try!(y.sync(self.device())) }
match result.add_device(self.device()) { _ => () }
@@ -147,7 +147,7 @@ pub trait IBlas<F: Float> {
/// *Attention*:<br/>
/// For a correct computation result, you need to manage the memory allocation and synchronization yourself.<br/>
/// For a memory managed version see `dot`.
- fn dot_plain(&self, x: &mut SharedMemory<F, TensorR1>, y: &mut SharedMemory<F, TensorR1>, result: &mut SharedMemory<F, TensorR0>) -> Result<(), ::collenchyma::error::Error> {
+ fn dot_plain(&self, x: &mut SharedTensor<F>, y: &mut SharedTensor<F>, result: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
self.binary().dot().compute(
try!(x.get(self.device()).ok_or(LibError::MissingMemoryForDevice("Unable to resolve memory for `x`"))),
@@ -163,7 +163,7 @@ pub trait IBlas<F: Float> {
/// This is a Level 1 BLAS operation.
///
/// For a no-memory managed version see `nrm2_plain`.
- fn nrm2(&self, x: &mut SharedMemory<F, TensorR1>, result: &mut SharedMemory<F, TensorR0>) -> Result<(), ::collenchyma::error::Error> {
+ fn nrm2(&self, x: &mut SharedTensor<F>, result: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match result.add_device(self.device()) { _ => () }
Ok(try!(
@@ -182,7 +182,7 @@ pub trait IBlas<F: Float> {
/// *Attention*:<br/>
/// For a correct computation result, you need to manage the memory allocation and synchronization yourself.<br/>
/// For a memory managed version see `nrm2`.
- fn nrm2_plain(&self, x: &mut SharedMemory<F, TensorR1>, result: &mut SharedMemory<F, TensorR0>) -> Result<(), ::collenchyma::error::Error> {
+ fn nrm2_plain(&self, x: &mut SharedTensor<F>, result: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
self.binary().nrm2().compute(
try!(x.get(self.device()).ok_or(LibError::MissingMemoryForDevice("Unable to resolve memory for `x`"))),
@@ -197,7 +197,7 @@ pub trait IBlas<F: Float> {
/// This is a Level 1 BLAS operation.
///
/// For a no-memory managed version see `scale_plain`.
- fn scale(&self, a: &mut SharedMemory<F, TensorR0>, x: &mut SharedMemory<F, TensorR1>) -> Result<(), ::collenchyma::error::Error> {
+ fn scale(&self, a: &mut SharedTensor<F>, x: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
match a.add_device(self.device()) { _ => try!(a.sync(self.device())) }
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
Ok(try!(
@@ -216,7 +216,7 @@ pub trait IBlas<F: Float> {
/// *Attention*:<br/>
/// For a correct computation result, you need to manage the memory allocation and synchronization yourself.<br/>
/// For a memory managed version see `scale`.
- fn scale_plain(&self, a: &mut SharedMemory<F, TensorR0>, x: &mut SharedMemory<F, TensorR1>) -> Result<(), ::collenchyma::error::Error> {
+ fn scale_plain(&self, a: &mut SharedTensor<F>, x: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
self.binary().scale().compute(
try!(a.get(self.device()).ok_or(LibError::MissingMemoryForDevice("Unable to resolve memory for `a`"))),
@@ -231,7 +231,7 @@ pub trait IBlas<F: Float> {
/// This is a Level 1 BLAS operation.
///
/// For a no-memory managed version see `swap_plain`.
- fn swap(&self, x: &mut SharedMemory<F, TensorR1>, y: &mut SharedMemory<F, TensorR1>) -> Result<(), ::collenchyma::error::Error> {
+ fn swap(&self, x: &mut SharedTensor<F>, y: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
match x.add_device(self.device()) { _ => try!(x.sync(self.device())) }
match y.add_device(self.device()) { _ => try!(y.sync(self.device())) }
Ok(try!(
@@ -250,7 +250,7 @@ pub trait IBlas<F: Float> {
/// *Attention*:<br/>
/// For a correct computation result, you need to manage the memory allocation and synchronization yourself.<br/>
/// For a memory managed version see `swap`.
- fn swap_plain(&self, x: &mut SharedMemory<F, TensorR1>, y: &mut SharedMemory<F, TensorR1>) -> Result<(), ::collenchyma::error::Error> {
+ fn swap_plain(&self, x: &mut SharedTensor<F>, y: &mut SharedTensor<F>) -> Result<(), ::collenchyma::error::Error> {
Ok(try!(
self.binary().swap().compute(
try!(x.get_mut(self.device()).ok_or(LibError::MissingMemoryForDevice("Unable to resolve memory for `x`"))),
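As the doc comments above spell out, every operation comes in a memory-managed flavor (e.g. `asum`) and a plain flavor (`asum_plain`) that leaves device placement and synchronization to the caller. A rough caller-side sketch of both flavors under the new `SharedTensor` signatures; the function name, generic setup, and backend construction are assumptions for illustration, only the method calls and the `add_device`/`sync` pattern come from this diff:

```rust
extern crate collenchyma;

use collenchyma::tensor::SharedTensor;
// Assumed to be in scope: the IBlas trait from src/library.rs of this crate.

// `B` is any backend that implements IBlas<f32>; `device()` is the accessor
// the trait's default methods above rely on.
fn sum_of_abs<B: IBlas<f32>>(backend: &B,
                             x: &mut SharedTensor<f32>,
                             result: &mut SharedTensor<f32>)
                             -> Result<(), collenchyma::error::Error> {
    // Managed flavor: syncs `x` to the backend's device and registers `result` there.
    try!(backend.asum(x, result));

    // Plain flavor: the caller handles device placement and synchronization first,
    // mirroring the add_device/sync pattern used inside the managed methods.
    match x.add_device(backend.device()) { _ => try!(x.sync(backend.device())) }
    match result.add_device(backend.device()) { _ => () }
    try!(backend.asum_plain(x, result));

    Ok(())
}
```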
