From 128250323879ac83fba4ee466df24ef6434dadb5 Mon Sep 17 00:00:00 2001
From: Eduardo Bart
Date: Thu, 14 Sep 2017 15:35:34 -0300
Subject: [PATCH] In-place division/multiplication by scalar, closes #20

---
 src/arraymancer/data_structure.nim    |  8 ++++----
 src/arraymancer/operators_blas_l1.nim | 10 +++++++++-
 src/arraymancer/ufunc.nim             |  7 +++++++
 tests/test_operators_blas.nim         | 12 +++++++++++-
 4 files changed, 31 insertions(+), 6 deletions(-)

diff --git a/src/arraymancer/data_structure.nim b/src/arraymancer/data_structure.nim
index 6fbbc4406..309a24333 100644
--- a/src/arraymancer/data_structure.nim
+++ b/src/arraymancer/data_structure.nim
@@ -29,10 +29,10 @@ type
     ## - ``strides``: Numbers of items to skip to get the next item along a dimension.
     ## - ``offset``: Offset to get the first item of the Tensor. Note: offset can be negative, in particular for slices.
     ## - ``data``: A sequence that holds the actual data
-    shape: seq[int]
-    strides: seq[int]
-    offset: int
-    data: seq[T] # Perf note: seq are always deep copied on "var" assignement.
+    shape*: seq[int]
+    strides*: seq[int]
+    offset*: int
+    data*: seq[T] # Perf note: seqs are always deep-copied on "var" assignment.
 
   CudaTensor*[T: SomeReal] = object
     ## Tensor data structure, stored on Nvidia GPU (Cuda)
diff --git a/src/arraymancer/operators_blas_l1.nim b/src/arraymancer/operators_blas_l1.nim
index 8920a74c2..88b59679f 100644
--- a/src/arraymancer/operators_blas_l1.nim
+++ b/src/arraymancer/operators_blas_l1.nim
@@ -92,7 +92,15 @@ proc `*`*[T: SomeNumber](t: Tensor[T], a: T): Tensor[T] {.noSideEffect, inline.}
   ## Element-wise multiplication by a scalar
   a * t
 
+proc `*=`*[T: SomeNumber](t: var Tensor[T], a: T) {.noSideEffect.} =
+  ## Element-wise multiplication by a scalar (in-place)
+  t.apply(proc(x: T): T = a * x)
+
 proc `/`*[T: SomeNumber](t: Tensor[T], a: T): Tensor[T] {.noSideEffect.} =
   ## Element-wise division by a scalar
   proc f(x: T): T = x / a
-  return t.fmap(f)
\ No newline at end of file
+  return t.fmap(f)
+
+proc `/=`*[T: SomeNumber](t: var Tensor[T], a: T) {.noSideEffect.} =
+  ## Element-wise division by a scalar (in-place)
+  t.apply(proc(x: T): T = x / a)
diff --git a/src/arraymancer/ufunc.nim b/src/arraymancer/ufunc.nim
index 29cd3a98a..da6289234 100644
--- a/src/arraymancer/ufunc.nim
+++ b/src/arraymancer/ufunc.nim
@@ -35,6 +35,13 @@ proc fmap*[T, U](t: Tensor[T], g: T -> U): Tensor[U] {.noSideEffect.}=
     result.data[i] = g(val)
     inc i
 
+proc apply*[T](t: var Tensor[T], g: T -> T) {.noSideEffect.}=
+  ## Map a unary function T -> T on the Tensor, in-place
+  var i = 0 # TODO: use pairs/enumerate instead - pending https://forum.nim-lang.org/t/2972
+  for val in t:
+    t.data[i] = g(val)
+    inc i
+
 proc fmap2*[T, U, V](t1: Tensor[T], t2: Tensor[U], g: (T,U) -> V): Tensor[V] {.noSideEffect.}=
   ## Map a binary function (T,U) -> V on Tensor[T]
   ## It applies the function to each matching elements
diff --git a/tests/test_operators_blas.nim b/tests/test_operators_blas.nim
index 94d1e5241..fca16620c 100644
--- a/tests/test_operators_blas.nim
+++ b/tests/test_operators_blas.nim
@@ -163,6 +163,17 @@ suite "BLAS (Basic Linear Algebra Subprograms)":
     let ufl_expected = @[2'f64, 6, -10].toTensor()
     check: ufl_expected / 2 == u_float
 
+  test "Multiplication/division by scalar (inplace)":
+    var u_int = @[1, 3, -5].toTensor()
+    let u_expected = @[2, 6, -10].toTensor()
+    u_int *= 2
+    check: u_int == u_expected
+
+    var u_float = @[1'f64, 3, -5].toTensor()
+    let ufl_expected = @[2'f64, 6, -10].toTensor()
+    u_float *= 2.0'f64
+    check: u_float == ufl_expected
+
   test "Tensor addition and substraction":
     let u_int = @[1, 3, -5].toTensor()
     let v_int = @[1, 1, 1].toTensor()
@@ -172,7 +183,6 @@ suite "BLAS (Basic Linear Algebra Subprograms)":
     check: u_int + v_int == expected_add
     check: u_int - v_int == expected_sub
 
-
   test "Tensor negative":
     let u_int = @[-1, 0, 2].toTensor()
     let expected_add = @[1, 0, -2].toTensor()
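-- 

A minimal usage sketch of the new in-place operators, not part of the patch itself. It assumes the patch above is applied and that `import arraymancer` exposes `toTensor` and tensor `==`, as in the test suite:

  import arraymancer

  # Sketch only: exercises the new `*=` and `/=` on an owned float tensor.
  var t = @[1.0'f64, 3.0, -5.0].toTensor()

  t *= 2.0'f64   # in-place element-wise multiplication by a scalar
  doAssert t == @[2.0'f64, 6.0, -10.0].toTensor()

  t /= 2.0'f64   # in-place element-wise division by a scalar
  doAssert t == @[1.0'f64, 3.0, -5.0].toTensor()

Both operators mutate the tensor through `apply`, so no intermediate tensor is allocated, unlike the existing `*` and `/` which build a new result via `fmap`.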