From f809e7f2806020f4c49ca45f000e478fe53fd2ba Mon Sep 17 00:00:00 2001
From: Angel Ezquerra
Date: Sat, 30 Dec 2023 15:22:00 +0100
Subject: [PATCH] Add bounds checks for fold and reduce axis inline operations
 (#608)

Also fixes a small typo in ast_utils.nim.
---
 src/arraymancer/private/ast_utils.nim              | 2 +-
 src/arraymancer/tensor/higher_order_foldreduce.nim | 9 +++++++++
 2 files changed, 10 insertions(+), 1 deletion(-)

diff --git a/src/arraymancer/private/ast_utils.nim b/src/arraymancer/private/ast_utils.nim
index 60668c3c6..52d6e3f06 100644
--- a/src/arraymancer/private/ast_utils.nim
+++ b/src/arraymancer/private/ast_utils.nim
@@ -66,7 +66,7 @@ proc replaceSymsByIdents*(ast: NimNode): NimNode =
       return node
     of nnkLiterals:
       return node
-    of nnkHiddenStdConv: # see `test_fancy_indexing,nim` why needed
+    of nnkHiddenStdConv: # see `test_fancy_indexing.nim` why needed
       expectKind(node[1], nnkSym)
       return ident($node[1])
     else:
diff --git a/src/arraymancer/tensor/higher_order_foldreduce.nim b/src/arraymancer/tensor/higher_order_foldreduce.nim
index 848485da6..a41bf38fc 100644
--- a/src/arraymancer/tensor/higher_order_foldreduce.nim
+++ b/src/arraymancer/tensor/higher_order_foldreduce.nim
@@ -44,6 +44,11 @@ template fold_inline*[T](arg: Tensor[T], op_initial, op_middle, op_final: untype
 template reduce_axis_inline*[T](arg: Tensor[T], reduction_axis: int, op: untyped): untyped =
   let z = arg # ensure that if t is the result of a function it is not called multiple times
   var reduced: type(z)
+  when compileOption("boundChecks"):
+    if arg.rank <= reduction_axis:
+      raise newException(IndexDefect, "Input tensor rank (" & $arg.rank &
+        ") must be greater than reduction axis (" & $reduction_axis &
+        ") when executing reduce_axis_inline: " & astToStr(op))
   let weight = z.size div z.shape[reduction_axis]
   omp_parallel_reduce_blocks(reduced, block_offset, block_size, z.shape[reduction_axis], weight, op) do:
     x = z.atAxisIndex(reduction_axis, block_offset).clone()
@@ -55,6 +60,10 @@ template reduce_axis_inline*[T](arg: Tensor[T], reduction_axis: int, op: untyped
 template fold_axis_inline*[T](arg: Tensor[T], accumType: typedesc, fold_axis: int, op_initial, op_middle, op_final: untyped): untyped =
   let z = arg # ensure that if t is the result of a function it is not called multiple times
   var reduced: accumType
+  when compileOption("boundChecks"):
+    if arg.rank <= fold_axis:
+      raise newException(IndexDefect, "Input tensor rank (" & $arg.rank &
+        ") must be greater than fold axis (" & $fold_axis & ") when executing fold_axis_inline")
   let weight = z.size div z.shape[fold_axis]
   omp_parallel_reduce_blocks(reduced, block_offset, block_size, z.shape[fold_axis], weight, op_final) do:
     let y {.inject.} = z.atAxisIndex(fold_axis, block_offset).clone()
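
Note for reviewers (not part of the patch): below is a minimal sketch of how the new checks are expected to surface to library users. It assumes that axis-wise reductions such as `sum(t, axis)` go through `reduce_axis_inline`, that bound checks are enabled (the default outside `-d:danger` / `--boundChecks:off`), and that defects are catchable (i.e. `--panics:on` is not used).

import arraymancer

let t = [[1, 2, 3], [4, 5, 6]].toTensor()  # rank-2 tensor, shape [2, 3]

# Valid axes for a rank-2 tensor are 0 and 1.
echo t.sum(axis = 0)                       # reduction over axis 0 -> shape [1, 3]: [[5, 7, 9]]

# Axis 2 is out of bounds. With this patch the reduction fails early with a
# descriptive IndexDefect instead of a less obvious error deeper in the
# shape-indexing code.
try:
  discard t.sum(axis = 2)
except IndexDefect as e:
  echo e.msg
  # Expected to print something along the lines of:
  #   Input tensor rank (2) must be greater than reduction axis (2)
  #   when executing reduce_axis_inline: ...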