support complex tensor for cpu backend (#350)
* support complex matrix for cpu backend

* add complex matrix product
chimez authored and mratsim committed Jun 7, 2019
1 parent e026622 commit a49ce4a
Showing 23 changed files with 366 additions and 79 deletions.
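
For orientation, here is a minimal sketch of what this commit enables on the CPU backend. It is hypothetical usage, assuming the umbrella arraymancer module re-exports the procs changed below and that toTensor accepts seqs of Complex:

import arraymancer
import complex

# Complex tensors now work with the ordinary CPU constructors and operators.
let a = @[@[complex(1.0, 2.0), complex(0.0, -1.0)],
          @[complex(3.0, 0.0), complex(2.0, 2.0)]].toTensor
let b = ones[Complex[float64]](2, 2)

echo a + b   # element-wise addition
echo a * b   # matrix-matrix product, dispatched to BLAS gemm
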
1 change: 1 addition & 0 deletions src/ml/metrics/common_error_functions.nim
@@ -13,6 +13,7 @@
# limitations under the License.

import ../../tensor/tensor
import complex except Complex64, Complex32


# ####################################################
5 changes: 3 additions & 2 deletions src/tensor/aggregate.nim
@@ -20,6 +20,7 @@ import ./backend/memory_optimization_hints,
./math_functions,
./accessors,
math
import complex except Complex64, Complex32

# ### Standard aggregate functions
# TODO consider using stats from Nim standard lib: https://nim-lang.org/docs/stats.html#standardDeviation,RunningStat
@@ -56,11 +57,11 @@ proc mean*[T: SomeInteger](t: Tensor[T], axis: int): Tensor[T] {.noInit,inline.}
## Warning ⚠: Since input is integer, output will also be integer (using integer division)
t.sum(axis) div t.shape[axis].T

proc mean*[T: SomeFloat](t: Tensor[T]): T {.inline.}=
proc mean*[T: SomeFloat|Complex[float32]|Complex[float64]](t: Tensor[T]): T {.inline.}=
## Compute the mean of all elements
t.sum / t.size.T

proc mean*[T: SomeFloat](t: Tensor[T], axis: int): Tensor[T] {.noInit,inline.}=
proc mean*[T: SomeFloat|Complex[float32]|Complex[float64]](t: Tensor[T], axis: int): Tensor[T] {.noInit,inline.}=
## Compute the mean along an axis
t.sum(axis) / t.shape[axis].T

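A quick sketch of the widened mean, under the same assumptions as the example above (tc is a made-up tensor):

import arraymancer
import complex

let tc = @[complex(1.0, 1.0), complex(3.0, -1.0)].toTensor
echo tc.mean()   # (2.0, 0.0): the complex sum divided by the element count
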
13 changes: 7 additions & 6 deletions src/tensor/init_cpu.nim
@@ -21,6 +21,7 @@ import ../private/[functional, nested_containers, sequninit],
sequtils,
random,
math
include ./private/p_complex

proc newTensorUninit*[T](shape: varargs[int]): Tensor[T] {.noSideEffect,noInit, inline.} =
## Creates a new Tensor on Cpu backend
@@ -108,7 +109,7 @@ proc toTensor*(s:string): auto {.noSideEffect.} =
## This proc handles strings explicitly, as otherwise they are interpreted as sequences of chars
toTensorCpu(s)

proc zeros*[T: SomeNumber](shape: varargs[int]): Tensor[T] {.noInit,noSideEffect, inline.} =
proc zeros*[T: SomeNumber|Complex[float32]|Complex[float64]](shape: varargs[int]): Tensor[T] {.noInit,noSideEffect, inline.} =
## Creates a new Tensor filled with 0
##
## Input:
@@ -119,7 +120,7 @@ proc zeros*[T: SomeNumber](shape: varargs[int]): Tensor[T] {.noInit,noSideEffect
tensorCpu(shape, result)
result.storage.Fdata = newSeq[T](result.size)

proc zeros*[T: SomeNumber](shape: MetadataArray): Tensor[T] {.noInit,noSideEffect, inline.} =
proc zeros*[T: SomeNumber|Complex[float32]|Complex[float64]](shape: MetadataArray): Tensor[T] {.noInit,noSideEffect, inline.} =
## Creates a new Tensor filled with 0
##
## Input:
@@ -130,7 +131,7 @@ proc zeros*[T: SomeNumber](shape: MetadataArray): Tensor[T] {.noInit,noSideEffec
tensorCpu(shape, result)
result.storage.Fdata = newSeq[T](result.size)

proc zeros_like*[T: SomeNumber](t: Tensor[T]): Tensor[T] {.noInit,noSideEffect, inline.} =
proc zeros_like*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T]): Tensor[T] {.noInit,noSideEffect, inline.} =
## Creates a new Tensor filled with 0 with the same shape as the input
## Input:
## - A Tensor to take the shape from
@@ -139,7 +140,7 @@ proc zeros_like*[T: SomeNumber](t: Tensor[T]): Tensor[T] {.noInit,noSideEffect,
## - A zero-ed Tensor of the same shape
result = zeros[T](t.shape)

proc ones*[T: SomeNumber](shape: varargs[int]): Tensor[T] {.noInit, inline, noSideEffect.} =
proc ones*[T: SomeNumber|Complex[float32]|Complex[float64]](shape: varargs[int]): Tensor[T] {.noInit, inline, noSideEffect.} =
## Creates a new Tensor filled with 1
## Input:
## - Shape of the Tensor
@@ -148,7 +149,7 @@ proc ones*[T: SomeNumber](shape: varargs[int]): Tensor[T] {.noInit, inline, noSi
## - A one-ed Tensor of the same shape
newTensorWith[T](shape, 1.T)

proc ones*[T: SomeNumber](shape: MetadataArray): Tensor[T] {.noInit, inline, noSideEffect.} =
proc ones*[T: SomeNumber|Complex[float32]|Complex[float64]](shape: MetadataArray): Tensor[T] {.noInit, inline, noSideEffect.} =
## Creates a new Tensor filled with 1
## Input:
## - Shape of the Tensor
@@ -157,7 +158,7 @@ proc ones*[T: SomeNumber](shape: MetadataArray): Tensor[T] {.noInit, inline, noS
## - A one-ed Tensor of the same shape
newTensorWith[T](shape, 1.T)

proc ones_like*[T: SomeNumber](t: Tensor[T]): Tensor[T] {.noInit, inline, noSideEffect.} =
proc ones_like*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T]): Tensor[T] {.noInit, inline, noSideEffect.} =
## Creates a new Tensor with the same shape as the input, filled with 1
## Input:
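
The widened constructors in use, again as a hedged sketch:

import arraymancer
import complex

let z = zeros[Complex[float64]](2, 3)   # 2x3 tensor of 0+0i
let o = ones[Complex[float32]](3)       # length-3 vector of 1+0i
let zl = z.zeros_like                   # zeroed tensor with z's shape
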
17 changes: 14 additions & 3 deletions src/tensor/math_functions.nim
@@ -16,6 +16,7 @@ import ./data_structure,
./init_cpu,
./higher_order_applymap,
./ufunc
import complex except Complex64, Complex32

# Non-operator math functions

@@ -43,11 +44,11 @@ proc melwise_div*[T: SomeFloat](a: var Tensor[T], b: Tensor[T]) =
## Element-wise division (in-place)
a.apply2_inline(b, x / y)

proc reciprocal*[T: SomeFloat](t: Tensor[T]): Tensor[T] {.noInit.} =
proc reciprocal*[T: SomeFloat|Complex[float32]|Complex[float64]](t: Tensor[T]): Tensor[T] {.noInit.} =
## Return a tensor with the reciprocal 1/x of all elements
t.map_inline(1.T/x)

proc mreciprocal*[T: SomeFloat](t: var Tensor[T]) =
proc mreciprocal*[T: SomeFloat|Complex[float32]|Complex[float64]](t: var Tensor[T]) =
## Apply the reciprocal 1/x in-place to all elements of the Tensor
t.apply_inline(1.T/x)

@@ -64,12 +65,22 @@ proc `-`*[T: SomeNumber](t: Tensor[T]): Tensor[T] {.noInit.} =
t.map_inline(-x)

# Built-in nim function that doesn't work with makeUniversal
proc abs*[T](t: Tensor[T]): Tensor[T] {.noInit.} =
proc abs*[T: SomeNumber](t: Tensor[T]): Tensor[T] {.noInit.} =
## Return a Tensor with absolute values of all elements
t.map_inline(abs(x))

# complex abs -> float
proc abs*(t: Tensor[Complex[float64]]): Tensor[float64] {.noInit.} =
## Return a Tensor[float64] with the absolute values (modulus) of all elements
t.map_inline(abs(x))

proc abs*(t: Tensor[Complex[float32]]): Tensor[float32] {.noInit.} =
## Return a Tensor[float32] with the absolute values (modulus) of all elements
t.map_inline(abs(x))

proc mabs*[T](t: var Tensor[T]) =
## Apply the absolute value in-place to all elements of the Tensor
# FIXME: how to convert a Tensor[Complex] to Tensor[float] in place?
t.apply_inline(abs(x))

proc clamp*[T](t: Tensor[T], min, max: T): Tensor[T] {.noInit.} =
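
Note the asymmetry the new overloads introduce: abs on a complex tensor returns a real tensor holding the modulus of each element, which is why the in-place mabs keeps its FIXME. A sketch under the same assumptions as the earlier examples:

import arraymancer
import complex

let c = @[complex(3.0, 4.0), complex(0.0, -2.0)].toTensor
echo c.abs          # Tensor[float64]: [5.0, 2.0] (the element type changes)
echo c.reciprocal   # element-wise 1/z, still Tensor[Complex[float64]]
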
20 changes: 11 additions & 9 deletions src/tensor/operators_blas_l1.nim
@@ -16,6 +16,7 @@ import ./private/p_checks,
./data_structure,
./accessors, ./higher_order_applymap,
nimblas
import complex except Complex32, Complex64

# ####################################################################
# BLAS Level 1 (Vector dot product, Addition, Scalar to Vector/Matrix)
@@ -25,6 +26,7 @@ import ./private/p_checks,

proc dot*[T: SomeFloat](a, b: Tensor[T]): T {.noSideEffect.} =
## Vector to Vector dot (scalar) product
# TODO: for complex vectors, BLAS only provides the dotu and dotc products
when compileOption("boundChecks"): check_dot_prod(a,b)
return dot(a.shape[0], a.get_offset_ptr, a.strides[0], b.get_offset_ptr, b.strides[0])

@@ -39,37 +41,37 @@ proc dot*[T: SomeInteger](a, b: Tensor[T]): T {.noSideEffect.} =
# # Tensor-Tensor linear algebra
# # shape checks are done in map2 proc

proc `+`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noInit.} =
proc `+`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noInit.} =
## Tensor addition
map2_inline(a, b, x + y)

proc `-`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noInit.} =
proc `-`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noInit.} =
## Tensor subtraction
map2_inline(a, b, x - y)

# #########################################################
# # Tensor-Tensor in-place linear algebra

proc `+=`*[T: SomeNumber](a: var Tensor[T], b: Tensor[T]) =
proc `+=`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: var Tensor[T], b: Tensor[T]) =
## Tensor in-place addition
a.apply2_inline(b, x + y)

proc `-=`*[T: SomeNumber](a: var Tensor[T], b: Tensor[T]) =
proc `-=`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: var Tensor[T], b: Tensor[T]) =
## Tensor in-place subtraction
a.apply2_inline(b, x - y)

# #########################################################
# # Tensor-scalar linear algebra

proc `*`*[T: SomeNumber](a: T, t: Tensor[T]): Tensor[T] {.noInit.} =
proc `*`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: T, t: Tensor[T]): Tensor[T] {.noInit.} =
## Element-wise multiplication by a scalar
t.map_inline(x * a)

proc `*`*[T: SomeNumber](t: Tensor[T], a: T): Tensor[T] {.noInit.} =
proc `*`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noInit.} =
## Element-wise multiplication by a scalar
a * t

proc `/`*[T: SomeFloat](t: Tensor[T], a: T): Tensor[T] {.noInit.} =
proc `/`*[T: SomeFloat|Complex[float32]|Complex[float64]](t: Tensor[T], a: T): Tensor[T] {.noInit.} =
## Element-wise division by a scalar
t.map_inline(x / a)

@@ -80,11 +82,11 @@ proc `div`*[T: SomeInteger](t: Tensor[T], a: T): Tensor[T] {.noInit.} =
# #########################################################
# # Tensor-scalar in-place linear algebra

proc `*=`*[T: SomeNumber](t: var Tensor[T], a: T) =
proc `*=`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: var Tensor[T], a: T) =
## Element-wise multiplication by a scalar (in-place)
t.apply_inline(x * a)

proc `/=`*[T: SomeFloat](t: var Tensor[T], a: T) =
proc `/=`*[T: SomeFloat|Complex[float32]|Complex[float64]](t: var Tensor[T], a: T) =
## Element-wise division by a scalar (in-place)
t.apply_inline(x / a)

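A hedged sketch of the level-1 operators on complex tensors (same assumptions as the earlier examples):

import arraymancer
import complex

let u = @[complex(1.0, 0.0), complex(0.0, 1.0)].toTensor
let v = @[complex(2.0, 0.0), complex(0.0, -1.0)].toTensor
echo u + v                   # [3+0i, 0+0i]
echo u * complex(0.0, 2.0)   # tensor-scalar product: [0+2i, -2+0i]
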
9 changes: 5 additions & 4 deletions src/tensor/operators_blas_l2l3.nim
@@ -20,8 +20,9 @@ import ./private/p_checks,
./fallback/naive_l2_gemv,
./data_structure,
./init_cpu
from complex import Complex

proc gemv*[T: SomeFloat](
proc gemv*[T: SomeFloat|Complex](
alpha: T,
A: Tensor[T],
x: Tensor[T],
@@ -55,7 +56,7 @@ proc gemv*[T: SomeInteger](

naive_gemv_fallback(alpha, A, x, beta, y)

proc gemm*[T: SomeFloat](
proc gemm*[T: SomeFloat|Complex](
alpha: T, A, B: Tensor[T],
beta: T, C: var Tensor[T]) {.inline.}=
# Matrix: C = alpha A matmul B + beta C
@@ -85,10 +86,10 @@ proc gemm*[T: SomeNumber](
C: var Tensor[T]) {.inline.}=
gemm(1.T, A, B, 0.T, C)

proc `*`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noInit.} =
proc `*`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noInit.} =
## Matrix multiplication (Matrix-Matrix and Matrix-Vector)
##
## Float operations use optimized BLAS like OpenBLAS, Intel MKL or BLIS.
## Float and complex operations use optimized BLAS like OpenBLAS, Intel MKL or BLIS.

if a.rank == 2 and b.rank == 2:
result = newTensorUninit[T](a.shape[0], b.shape[1])
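
And the headline feature, the complex matrix product. A sketch; the * below is the operator widened above, which forwards to BLAS gemv/gemm for complex element types:

import arraymancer
import complex

let m = @[@[complex(1.0, 0.0), complex(0.0, 1.0)],
          @[complex(0.0, -1.0), complex(1.0, 0.0)]].toTensor
let x = @[complex(1.0, 0.0), complex(1.0, 0.0)].toTensor
echo m * x   # matrix-vector product (gemv): [1+1i, 1-1i]
echo m * m   # matrix-matrix product (gemm): [[2+0i, 0+2i], [0-2i, 2+0i]]
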
41 changes: 21 additions & 20 deletions src/tensor/operators_broadcasted.nim
@@ -16,22 +16,23 @@ import ./data_structure,
./higher_order_applymap,
./shapeshifting,
./math
import complex except Complex64, Complex32

# #########################################################
# # Broadcasting Tensor-Tensor
# # And element-wise multiplication (Hadamard) and division

proc `.+`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noInit,inline.} =
proc `.+`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noInit,inline.} =
## Broadcasted addition for tensors of incompatible but broadcastable shape.
let (tmp_a, tmp_b) = broadcast2(a, b)
result = tmp_a + tmp_b

proc `.-`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noInit,inline.} =
proc `.-`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noInit,inline.} =
## Broadcasted subtraction for tensors of incompatible but broadcastable shape.
let (tmp_a, tmp_b) = broadcast2(a, b)
result = tmp_a - tmp_b

proc `.*`*[T: SomeNumber](a, b: Tensor[T]): Tensor[T] {.noInit.} =
proc `.*`*[T: SomeNumber|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noInit.} =
## Element-wise multiplication (Hadamard product).
##
## And broadcasted element-wise multiplication.
@@ -46,7 +47,7 @@ proc `./`*[T: SomeInteger](a, b: Tensor[T]): Tensor[T] {.noInit.} =
let (tmp_a, tmp_b) = broadcast2(a, b)
result = map2_inline(tmp_a, tmp_b, x div y)

proc `./`*[T: SomeFloat](a, b: Tensor[T]): Tensor[T] {.noInit.} =
proc `./`*[T: SomeFloat|Complex[float32]|Complex[float64]](a, b: Tensor[T]): Tensor[T] {.noInit.} =
## Tensor element-wise division for real and complex numbers.
##
## And broadcasted element-wise division.
@@ -56,7 +57,7 @@ proc `./`*[T: SomeFloat](a, b: Tensor[T]): Tensor[T] {.noInit.} =
# ##############################################
# # Broadcasting in-place Tensor-Tensor

proc `.+=`*[T: SomeNumber](a: var Tensor[T], b: Tensor[T]) =
proc `.+=`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: var Tensor[T], b: Tensor[T]) =
## Tensor broadcasted in-place addition.
##
## Only the right hand side tensor can be broadcasted.
@@ -65,7 +66,7 @@ proc `.+=`*[T: SomeNumber](a: var Tensor[T], b: Tensor[T]) =
let tmp_b = b.broadcast(a.shape)
apply2_inline(a, tmp_b, x + y)

proc `.-=`*[T: SomeNumber](a: var Tensor[T], b: Tensor[T]) =
proc `.-=`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: var Tensor[T], b: Tensor[T]) =
## Tensor broadcasted in-place subtraction.
##
## Only the right hand side tensor can be broadcasted.
@@ -74,7 +75,7 @@ proc `.-=`*[T: SomeNumber](a: var Tensor[T], b: Tensor[T]) =
let tmp_b = b.broadcast(a.shape)
apply2_inline(a, tmp_b, x - y)

proc `.*=`*[T: SomeNumber](a: var Tensor[T], b: Tensor[T]) =
proc `.*=`*[T: SomeNumber|Complex[float32]|Complex[float64]](a: var Tensor[T], b: Tensor[T]) =
## Tensor broadcasted in-place multiplication (Hadamard product)
##
## Only the right hand side tensor can be broadcasted
@@ -92,7 +93,7 @@ proc `./=`*[T: SomeInteger](a: var Tensor[T], b: Tensor[T]) =
let tmp_b = b.broadcast(a.shape)
apply2_inline(a, tmp_b, x div y)

proc `./=`*[T: SomeFloat](a: var Tensor[T], b: Tensor[T]) =
proc `./=`*[T: SomeFloat|Complex[float32]|Complex[float64]](a: var Tensor[T], b: Tensor[T]) =
## Tensor broadcasted in-place division.
##
## Only the right hand side tensor can be broadcasted.
@@ -105,61 +106,61 @@ proc `./=`*[T: SomeFloat](a: var Tensor[T], b: Tensor[T]) =
# ##############################################
# # Broadcasting Tensor-Scalar and Scalar-Tensor

proc `.+`*[T: SomeNumber](val: T, t: Tensor[T]): Tensor[T] {.noInit.} =
proc `.+`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, t: Tensor[T]): Tensor[T] {.noInit.} =
## Broadcasted addition for scalar + tensor.
result = t.map_inline(x + val)

proc `.+`*[T: SomeNumber](t: Tensor[T], val: T): Tensor[T] {.noInit.} =
proc `.+`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], val: T): Tensor[T] {.noInit.} =
## Broadcasted addition for tensor + scalar.
result = t.map_inline(x + val)

proc `.-`*[T: SomeNumber](val: T, t: Tensor[T]): Tensor[T] {.noInit.} =
proc `.-`*[T: SomeNumber|Complex[float32]|Complex[float64]](val: T, t: Tensor[T]): Tensor[T] {.noInit.} =
## Broadcasted subtraction for scalar - tensor.
result = t.map_inline(val - x)

proc `.-`*[T: SomeNumber](t: Tensor[T], val: T): Tensor[T] {.noInit.} =
proc `.-`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: Tensor[T], val: T): Tensor[T] {.noInit.} =
## Broadcasted subtraction for tensor - scalar.
result = t.map_inline(x - val)

proc `./`*[T: SomeInteger](val: T, t: Tensor[T]): Tensor[T] {.noInit.} =
## Broadcasted division of an integer by a tensor of integers.
result = t.map_inline(val div x)

proc `./`*[T: SomeFloat](val: T, t: Tensor[T]): Tensor[T] {.noInit.} =
proc `./`*[T: SomeFloat|Complex[float32]|Complex[float64]](val: T, t: Tensor[T]): Tensor[T] {.noInit.} =
## Broadcasted division of a float or complex scalar by a tensor.
result = t.map_inline(val / x)

proc `./`*[T: SomeInteger](t: Tensor[T], val: T): Tensor[T] {.noInit.} =
## Broadcasted division of tensor of integers by an integer.
result = t.map_inline(x div val)

proc `./`*[T: SomeFloat](t: Tensor[T], val: T): Tensor[T] {.noInit.} =
proc `./`*[T: SomeFloat|Complex[float32]|Complex[float64]](t: Tensor[T], val: T): Tensor[T] {.noInit.} =
## Broadcasted division of a tensor by a float or complex scalar.
result = t.map_inline(x / val)

proc `.^`*[T: SomeFloat](t: Tensor[T], exponent: T): Tensor[T] {.noInit.} =
proc `.^`*[T: SomeFloat|Complex[float32]|Complex[float64]](t: Tensor[T], exponent: T): Tensor[T] {.noInit.} =
## Compute element-wise exponentiation
result = t.map_inline pow(x, exponent)

# #####################################
# # Broadcasting in-place Tensor-Scalar

proc `.+=`*[T: SomeNumber](t: var Tensor[T], val: T) =
proc `.+=`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: var Tensor[T], val: T) =
## Tensor in-place addition with a broadcasted scalar.
t.apply_inline(x + val)

proc `.-=`*[T: SomeNumber](t: var Tensor[T], val: T) =
proc `.-=`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: var Tensor[T], val: T) =
## Tensor in-place subtraction with a broadcasted scalar.
t.apply_inline(x - val)

proc `.^=`*[T: SomeFloat](t: var Tensor[T], exponent: T) =
proc `.^=`*[T: SomeFloat|Complex[float32]|Complex[float64]](t: var Tensor[T], exponent: T) =
## Compute in-place element-wise exponentiation
t.apply_inline pow(x, exponent)

proc `.*=`*[T: SomeNumber](t: var Tensor[T], val: T) =
proc `.*=`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: var Tensor[T], val: T) =
## Tensor in-place multiplication with a broadcasted scalar.
t.apply_inline(x * val)

proc `./=`*[T: SomeNumber](t: var Tensor[T], val: T) =
proc `./=`*[T: SomeNumber|Complex[float32]|Complex[float64]](t: var Tensor[T], val: T) =
## Tensor in-place division with a broadcasted scalar.
t.apply_inline(x / val)
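
Finally, a sketch of the broadcasted operators on complex tensors (same assumptions; .^ relies on the complex pow from Nim's complex module):

import arraymancer
import complex

var t = ones[Complex[float64]](2, 2)
t .+= complex(0.0, 1.0)       # broadcasted in-place scalar add: every element 1+1i
echo t .* t                   # Hadamard product: every element (1+1i)^2 = 0+2i
echo t .^ complex(2.0, 0.0)   # element-wise power, same values as above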
