diff --git a/CMakeLists.txt b/CMakeLists.txt
index f327938f48933..a07202f6b722d 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -60,6 +60,8 @@ if (TI_BUILD_TESTS)
   include(cmake/TaichiTests.cmake)
 endif()
 
+include_directories(${PROJECT_SOURCE_DIR}/external/eigen)
+
 message("C++ Flags: ${CMAKE_CXX_FLAGS}")
 message("Build type: ${CMAKE_BUILD_TYPE}")
 
diff --git a/misc/sparse_matrix.py b/misc/sparse_matrix.py
new file mode 100644
index 0000000000000..5d800f27dcf37
--- /dev/null
+++ b/misc/sparse_matrix.py
@@ -0,0 +1,64 @@
+import taichi as ti
+
+ti.init(arch=ti.x64)
+
+n = 8
+
+K = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+f = ti.SparseMatrixBuilder(n, 1, max_num_triplets=100)
+
+
+@ti.kernel
+def fill(A: ti.sparse_matrix_builder(), b: ti.sparse_matrix_builder(),
+         interval: ti.i32):
+    for i in range(n):
+        if i > 0:
+            A[i - 1, i] += -1.0
+            A[i, i] += 1
+        if i < n - 1:
+            A[i + 1, i] += -1.0
+            A[i, i] += 1.0
+
+        if i % interval == 0:
+            b[i, 0] += 1.0
+
+
+fill(K, f, 3)
+
+print(">>>> K.print_triplets()")
+K.print_triplets()
+
+A = K.build()
+
+print(">>>> A = K.build()")
+print(A)
+
+print(">>>> Summation: C = A + A")
+C = A + A
+print(C)
+
+print(">>>> Subtraction: D = A - A")
+D = A - A
+print(D)
+
+print(">>>> Multiplication with a scalar on the right: E = A * 3.0")
+E = A * 3.0
+print(E)
+
+print(">>>> Multiplication with a scalar on the left: E = 3.0 * A")
+E = 3.0 * A
+print(E)
+
+print(">>>> Transpose: F = A.transpose()")
+F = A.transpose()
+print(F)
+
+print(">>>> Matrix multiplication: G = E @ A")
+G = E @ A
+print(G)
+
+print(">>>> Element-wise multiplication: H = E * A")
+H = E * A
+print(H)
+
+print(f">>>> Element Access: A[0,0] = {A[0,0]}")
diff --git a/python/taichi/lang/__init__.py b/python/taichi/lang/__init__.py
index b56e24f476f8a..b4238d965532d 100644
--- a/python/taichi/lang/__init__.py
+++ b/python/taichi/lang/__init__.py
@@ -8,7 +8,8 @@
 from taichi.lang.enums import Layout
 from taichi.lang.exception import InvalidOperationError
 from taichi.lang.impl import *
-from taichi.lang.kernel_arguments import any_arr, ext_arr, template
+from taichi.lang.kernel_arguments import (any_arr, ext_arr,
+                                          sparse_matrix_builder, template)
 from taichi.lang.kernel_impl import (KernelArgError, KernelDefError,
                                      data_oriented, func, kernel, pyfunc)
 from taichi.lang.matrix import Matrix, Vector
@@ -16,6 +17,7 @@
 from taichi.lang.ops import *
 from taichi.lang.quant_impl import quant
 from taichi.lang.runtime_ops import async_flush, sync
+from taichi.lang.sparse_matrix import SparseMatrix, SparseMatrixBuilder
 from taichi.lang.transformer import TaichiSyntaxError
 from taichi.lang.type_factory_impl import type_factory
 from taichi.lang.util import (has_pytorch, is_taichi_class, python_scope,
diff --git a/python/taichi/lang/kernel_arguments.py b/python/taichi/lang/kernel_arguments.py
index 39a650008d829..af13fb104ddc3 100644
--- a/python/taichi/lang/kernel_arguments.py
+++ b/python/taichi/lang/kernel_arguments.py
@@ -1,8 +1,10 @@
+from taichi.core.primitive_types import u64
 from taichi.core.util import ti_core as _ti_core
 from taichi.lang.enums import Layout
 from taichi.lang.expr import Expr
 from taichi.lang.ext_array import AnyArray, ExtArray
 from taichi.lang.snode import SNode
+from taichi.lang.sparse_matrix import SparseMatrixBuilder
 from taichi.lang.util import cook_dtype, to_taichi_type
 
 
@@ -117,6 +119,39 @@ def extract(self, x):
     """
 
 
+class SparseMatrixEntry:
+    def __init__(self, ptr, i, j):
+        self.ptr = ptr
+        self.i = i
+        self.j = j
+
+    def augassign(self, value, op):
+        from taichi.lang.impl import call_internal, ti_float
+        if op == 'Add':
+            call_internal("insert_triplet", self.ptr, self.i, self.j,
+                          ti_float(value))
+        elif op == 'Sub':
+            call_internal("insert_triplet", self.ptr, self.i, self.j,
+                          -ti_float(value))
+        else:
+            assert False, "Only operations '+=' and '-=' are supported on sparse matrices."
+
+
+class SparseMatrixProxy:
+    is_taichi_class = True
+
+    def __init__(self, ptr):
+        self.ptr = ptr
+
+    def subscript(self, i, j):
+        return SparseMatrixEntry(self.ptr, i, j)
+
+
+sparse_matrix_builder = SparseMatrixBuilder
+"""Alias for :class:`~taichi.lang.sparse_matrix.SparseMatrixBuilder`.
+"""
+
+
 def decl_scalar_arg(dtype):
     dtype = cook_dtype(dtype)
     arg_id = _ti_core.decl_arg(dtype, False)
@@ -129,6 +164,13 @@ def decl_ext_arr_arg(dtype, dim):
     return ExtArray(_ti_core.make_external_tensor_expr(dtype, dim, arg_id))
 
 
+def decl_sparse_matrix():
+    ptr_type = cook_dtype(u64)
+    # Treat the sparse matrix argument as a scalar since we only need to pass in the base pointer
+    arg_id = _ti_core.decl_arg(ptr_type, False)
+    return SparseMatrixProxy(_ti_core.make_arg_load_expr(arg_id, ptr_type))
+
+
 def decl_any_arr_arg(dtype, dim, element_shape, layout):
     dtype = cook_dtype(dtype)
     arg_id = _ti_core.decl_arg(dtype, True)
diff --git a/python/taichi/lang/kernel_impl.py b/python/taichi/lang/kernel_impl.py
index ee602ecab2d22..a41478479c50b 100644
--- a/python/taichi/lang/kernel_impl.py
+++ b/python/taichi/lang/kernel_impl.py
@@ -10,7 +10,8 @@
 from taichi.lang import impl, util
 from taichi.lang.ast_checker import KernelSimplicityASTChecker
 from taichi.lang.exception import TaichiSyntaxError
-from taichi.lang.kernel_arguments import any_arr, ext_arr, template
+from taichi.lang.kernel_arguments import (any_arr, ext_arr,
+                                          sparse_matrix_builder, template)
 from taichi.lang.ndarray import ScalarNdarray
 from taichi.lang.shell import _shell_pop_print, oinspect
 from taichi.lang.transformer import ASTTransformerTotal
@@ -368,6 +369,8 @@ def extract_arguments(self):
                 pass
             elif id(annotation) in primitive_types.type_ids:
                 pass
+            elif isinstance(annotation, sparse_matrix_builder):
+                pass
             else:
                 _taichi_skip_traceback = 1
                 raise KernelDefError(
@@ -483,6 +486,9 @@ def func__(*args):
                 if not isinstance(v, int):
                     raise KernelArgError(i, needed.to_string(), provided)
                 launch_ctx.set_arg_int(actual_argument_slot, int(v))
+            elif isinstance(needed, sparse_matrix_builder):
+                # Pass only the base pointer of the ti.sparse_matrix_builder() argument
+                launch_ctx.set_arg_int(actual_argument_slot, v.get_addr())
             elif (isinstance(needed, (any_arr, ext_arr)) and self.match_ext_arr(v)) or \
                     (isinstance(needed, any_arr) and isinstance(v, ScalarNdarray)):
                 if isinstance(v, ScalarNdarray):
diff --git a/python/taichi/lang/sparse_matrix.py b/python/taichi/lang/sparse_matrix.py
new file mode 100644
index 0000000000000..bad4e400cb29e
--- /dev/null
+++ b/python/taichi/lang/sparse_matrix.py
@@ -0,0 +1,73 @@
+class SparseMatrix:
+    def __init__(self, n=None, m=None, sm=None):
+        if sm is None:
+            self.n = n
+            self.m = m if m else n
+            from taichi.core.util import ti_core as _ti_core
+            self.matrix = _ti_core.create_sparse_matrix(n, m)
+        else:
+            self.n = sm.num_rows()
+            self.m = sm.num_cols()
+            self.matrix = sm
+
+    def __add__(self, other):
+        assert self.n == other.n and self.m == other.m, f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})"
+        sm = self.matrix + other.matrix
+        return SparseMatrix(sm=sm)
+
+    def __sub__(self, other):
+        assert self.n == other.n and self.m == other.m, f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})"
+        sm = self.matrix - other.matrix
+        return SparseMatrix(sm=sm)
+
+    def __mul__(self, other):
+        if isinstance(other, float):
+            sm = self.matrix * other
+            return SparseMatrix(sm=sm)
+        elif isinstance(other, SparseMatrix):
+            assert self.n == other.n and self.m == other.m, f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})"
+            sm = self.matrix * other.matrix
+            return SparseMatrix(sm=sm)
+
+    def __rmul__(self, other):
+        if isinstance(other, float):
+            sm = other * self.matrix
+            return SparseMatrix(sm=sm)
+
+    def transpose(self):
+        sm = self.matrix.transpose()
+        return SparseMatrix(sm=sm)
+
+    def __matmul__(self, other):
+        assert self.m == other.n, f"Dimension mismatch between sparse matrices ({self.n}, {self.m}) and ({other.n}, {other.m})"
+        sm = self.matrix.matmul(other.matrix)
+        return SparseMatrix(sm=sm)
+
+    def __getitem__(self, indices):
+        return self.matrix.get_element(indices[0], indices[1])
+
+    def __str__(self):
+        return self.matrix.to_string()
+
+    def __repr__(self):
+        return self.matrix.to_string()
+
+
+class SparseMatrixBuilder:
+    def __init__(self, num_rows=None, num_cols=None, max_num_triplets=0):
+        self.num_rows = num_rows
+        self.num_cols = num_cols if num_cols else num_rows
+        if num_rows is not None:
+            from taichi.core.util import ti_core as _ti_core
+            self.ptr = _ti_core.create_sparse_matrix_builder(
+                num_rows, self.num_cols, max_num_triplets)
+
+    def get_addr(self):
+        return self.ptr.get_addr()
+
+    def print_triplets(self):
+        self.ptr.print_triplets()
+
+    def build(self):
+        sm = self.ptr.build()
+        return SparseMatrix(sm=sm)
diff --git a/python/taichi/lang/stmt_builder.py b/python/taichi/lang/stmt_builder.py
index cb0d891c4f5f4..9fdab853ee557 100644
--- a/python/taichi/lang/stmt_builder.py
+++ b/python/taichi/lang/stmt_builder.py
@@ -555,6 +555,13 @@ def transform_as_kernel():
                 arg_init.value.args[0] = dt
                 arg_init.value.args[1] = parse_expr("{}".format(array_dim))
                 arg_decls.append(arg_init)
+            elif isinstance(ctx.func.argument_annotations[i],
+                            ti.sparse_matrix_builder):
+                arg_init = parse_stmt(
+                    'x = ti.lang.kernel_arguments.decl_sparse_matrix()')
+                arg_init.targets[0].id = arg.arg
+                ctx.create_variable(arg.arg)
+                arg_decls.append(arg_init)
             elif isinstance(ctx.func.argument_annotations[i], ti.any_arr):
                 arg_init = parse_stmt(
                     'x = ti.lang.kernel_arguments.decl_any_arr_arg(0, 0, 0, 0)'
diff --git a/taichi/program/kernel.cpp b/taichi/program/kernel.cpp
index 5f7cfd43b9a57..b4d26c8f114a6 100644
--- a/taichi/program/kernel.cpp
+++ b/taichi/program/kernel.cpp
@@ -162,7 +162,7 @@ Kernel::LaunchContextBuilder::LaunchContextBuilder(Kernel *kernel)
 
 void Kernel::LaunchContextBuilder::set_arg_float(int arg_id, float64 d) {
   TI_ASSERT_INFO(!kernel_->args[arg_id].is_external_array,
-                 "Assigning scalar value to external(numpy) array argument is "
+                 "Assigning scalar value to external (numpy) array argument is "
                  "not allowed.");
 
   ActionRecorder::get_instance().record(
@@ -198,7 +198,7 @@ void Kernel::LaunchContextBuilder::set_arg_float(int arg_id, float64 d) {
 
 void Kernel::LaunchContextBuilder::set_arg_int(int arg_id, int64 d) {
   TI_ASSERT_INFO(!kernel_->args[arg_id].is_external_array,
-                 "Assigning scalar value to external(numpy) array argument is "
+                 "Assigning scalar value to external (numpy) array argument is "
                  "not allowed.");
 
   ActionRecorder::get_instance().record(
@@ -242,7 +242,7 @@ void Kernel::LaunchContextBuilder::set_arg_external_array(int arg_id,
                                                            uint64 size) {
   TI_ASSERT_INFO(
       kernel_->args[arg_id].is_external_array,
-      "Assigning external(numpy) array to scalar argument is not allowed.");
+      "Assigning external (numpy) array to scalar argument is not allowed.");
 
   ActionRecorder::get_instance().record(
       "set_kernel_arg_ext_ptr",
@@ -256,7 +256,7 @@ void Kernel::LaunchContextBuilder::set_arg_external_array(int arg_id,
 
 void Kernel::LaunchContextBuilder::set_arg_raw(int arg_id, uint64 d) {
   TI_ASSERT_INFO(!kernel_->args[arg_id].is_external_array,
-                 "Assigning scalar value to external(numpy) array argument is "
+                 "Assigning scalar value to external (numpy) array argument is "
                  "not allowed.");
 
   if (!kernel_->is_evaluator) {
diff --git a/taichi/program/program.cpp b/taichi/program/program.cpp
index 5e82e8a715c5e..a93734c1341e9 100644
--- a/taichi/program/program.cpp
+++ b/taichi/program/program.cpp
@@ -24,6 +24,7 @@
 #include "taichi/program/snode_expr_utils.h"
 #include "taichi/util/statistics.h"
 #include "taichi/math/arithmetic.h"
+
 #if defined(TI_WITH_CC)
 #include "taichi/backends/cc/struct_cc.h"
 #include "taichi/backends/cc/cc_layout.h"
diff --git a/taichi/program/program.h b/taichi/program/program.h
index 25fa22d235603..35486ed672235 100644
--- a/taichi/program/program.h
+++ b/taichi/program/program.h
@@ -31,6 +31,7 @@
 #include "taichi/system/memory_pool.h"
 #include "taichi/system/threading.h"
 #include "taichi/system/unified_allocator.h"
+#include "taichi/program/sparse_matrix.h"
 
 namespace taichi {
 namespace lang {
diff --git a/taichi/program/sparse_matrix.cpp b/taichi/program/sparse_matrix.cpp
new file mode 100644
index 0000000000000..1abb0ccf8516c
--- /dev/null
+++ b/taichi/program/sparse_matrix.cpp
@@ -0,0 +1,113 @@
+#include "taichi/program/sparse_matrix.h"
+
+#include <string>
+
+#include "Eigen/Dense"
+#include "Eigen/SparseLU"
+
+namespace taichi {
+namespace lang {
+
+SparseMatrixBuilder::SparseMatrixBuilder(int rows,
+                                         int cols,
+                                         int max_num_triplets)
+    : rows_(rows), cols_(cols), max_num_triplets_(max_num_triplets) {
+  data_.reserve(max_num_triplets * 3);
+  data_base_ptr_ = get_data_base_ptr();
+}
+
+void *SparseMatrixBuilder::get_data_base_ptr() {
+  return data_.data();
+}
+
+void SparseMatrixBuilder::print_triplets() {
+  fmt::print("n={}, m={}, num_triplets={} (max={})", rows_, cols_,
+             num_triplets_, max_num_triplets_);
+  for (int64 i = 0; i < num_triplets_; i++) {
+    fmt::print("({}, {}) val={}", data_[i * 3], data_[i * 3 + 1],
+               taichi_union_cast<float32>(data_[i * 3 + 2]));
+  }
+  fmt::print("\n");
+}
+
+SparseMatrix SparseMatrixBuilder::build() {
+  TI_ASSERT(built_ == false);
+  built_ = true;
+  using T = Eigen::Triplet<float32>;
+  std::vector<T> triplets;
+  for (int i = 0; i < num_triplets_; i++) {
+    triplets.push_back(T(data_[i * 3], data_[i * 3 + 1],
+                         taichi_union_cast<float32>(data_[i * 3 + 2])));
+  }
+  SparseMatrix sm(rows_, cols_);
+  sm.get_matrix().setFromTriplets(triplets.begin(), triplets.end());
+  return sm;
+}
+
+SparseMatrix::SparseMatrix(Eigen::SparseMatrix<float32> &matrix) {
+  this->matrix_ = matrix;
+}
+
+SparseMatrix::SparseMatrix(int rows, int cols) : matrix_(rows, cols) {
+}
+
+const std::string SparseMatrix::to_string() const {
+  Eigen::IOFormat clean_fmt(4, 0, ", ", "\n", "[", "]");
+  // Note that the code below first converts the sparse matrix into a dense one.
+  // https://stackoverflow.com/questions/38553335/how-can-i-print-in-console-a-formatted-sparse-matrix-with-eigen
+  std::ostringstream ostr;
+  ostr << Eigen::MatrixXf(matrix_).format(clean_fmt);
+  return ostr.str();
+}
+
+const int SparseMatrix::num_rows() const {
+  return matrix_.rows();
+}
+const int SparseMatrix::num_cols() const {
+  return matrix_.cols();
+}
+
+Eigen::SparseMatrix<float32> &SparseMatrix::get_matrix() {
+  return matrix_;
+}
+
+SparseMatrix operator+(const SparseMatrix &sm1, const SparseMatrix &sm2) {
+  Eigen::SparseMatrix<float32> res(sm1.matrix_ + sm2.matrix_);
+  return SparseMatrix(res);
+}
+
+SparseMatrix operator-(const SparseMatrix &sm1, const SparseMatrix &sm2) {
+  Eigen::SparseMatrix<float32> res(sm1.matrix_ - sm2.matrix_);
+  return SparseMatrix(res);
+}
+
+SparseMatrix operator*(float scale, const SparseMatrix &sm) {
+  Eigen::SparseMatrix<float32> res(scale * sm.matrix_);
+  return SparseMatrix(res);
+}
+
+SparseMatrix operator*(const SparseMatrix &sm, float scale) {
+  return scale * sm;
+}
+
+SparseMatrix operator*(const SparseMatrix &sm1, const SparseMatrix &sm2) {
+  Eigen::SparseMatrix<float32> res(sm1.matrix_.cwiseProduct(sm2.matrix_));
+  return SparseMatrix(res);
+}
+
+SparseMatrix SparseMatrix::matmul(const SparseMatrix &sm) {
+  Eigen::SparseMatrix<float32> res(matrix_ * sm.matrix_);
+  return SparseMatrix(res);
+}
+
+SparseMatrix SparseMatrix::transpose() {
+  Eigen::SparseMatrix<float32> res(matrix_.transpose());
+  return SparseMatrix(res);
+}
+
+float32 SparseMatrix::get_element(int row, int col) {
+  return matrix_.coeff(row, col);
+}
+
+}  // namespace lang
+}  // namespace taichi
diff --git a/taichi/program/sparse_matrix.h b/taichi/program/sparse_matrix.h
new file mode 100644
index 0000000000000..58e2de8a91b08
--- /dev/null
+++ b/taichi/program/sparse_matrix.h
@@ -0,0 +1,60 @@
+#pragma once
+
+#include "taichi/common/core.h"
+#include "taichi/inc/constants.h"
+#include "Eigen/Sparse"
+
+namespace taichi {
+namespace lang {
+
+class SparseMatrix;
+
+class SparseMatrixBuilder {
+ public:
+  SparseMatrixBuilder(int rows, int cols, int max_num_triplets);
+
+  void *get_data_base_ptr();
+
+  void print_triplets();
+
+  SparseMatrix build();
+
+ private:
+  uint64 num_triplets_{0};
+  void *data_base_ptr_{nullptr};
+  std::vector<uint32> data_;
+  int rows_{0};
+  int cols_{0};
+  uint64 max_num_triplets_{0};
+  bool built_{false};
+};
+
+class SparseMatrix {
+ public:
+  SparseMatrix() = delete;
+  SparseMatrix(int rows, int cols);
+  SparseMatrix(Eigen::SparseMatrix<float32> &matrix);
+
+  const int num_rows() const;
+  const int num_cols() const;
+  const std::string to_string() const;
+  Eigen::SparseMatrix<float32> &get_matrix();
+  float32 get_element(int row, int col);
+
+  friend SparseMatrix operator+(const SparseMatrix &sm1,
+                                const SparseMatrix &sm2);
+  friend SparseMatrix operator-(const SparseMatrix &sm1,
+                                const SparseMatrix &sm2);
+  friend SparseMatrix operator*(float scale, const SparseMatrix &sm);
+  friend SparseMatrix operator*(const SparseMatrix &sm, float scale);
+  friend SparseMatrix operator*(const SparseMatrix &sm1,
+                                const SparseMatrix &sm2);
+  SparseMatrix matmul(const SparseMatrix &sm);
+  SparseMatrix transpose();
+
+ private:
+  Eigen::SparseMatrix<float32> matrix_;
+};
+
+}  // namespace lang
+}  // namespace taichi
diff --git a/taichi/python/export_lang.cpp b/taichi/python/export_lang.cpp
index b254e1656e218..9de2e18792785 100644
--- a/taichi/python/export_lang.cpp
+++ b/taichi/python/export_lang.cpp
@@ -23,6 +23,7 @@
 #include "taichi/util/action_recorder.h"
 #include "taichi/system/timeline.h"
 #include "taichi/python/snode_registry.h"
+#include "taichi/program/sparse_matrix.h"
 
 #if defined(TI_WITH_CUDA)
 #include "taichi/backends/cuda/cuda_context.h"
@@ -942,6 +943,39 @@ void export_lang(py::module &m) {
         return program->add_snode_tree(registry->finalize(root));
       },
       py::return_value_policy::reference);
+
+  py::class_<SparseMatrixBuilder>(m, "SparseMatrixBuilder")
+      .def("print_triplets", &SparseMatrixBuilder::print_triplets)
+      .def("build", &SparseMatrixBuilder::build)
+      .def("get_addr", [](SparseMatrixBuilder *mat) { return uint64(mat); });
+
+  m.def("create_sparse_matrix_builder",
+        [](int n, int m, uint64 max_num_entries) {
+          TI_ERROR_IF(!arch_is_cpu(get_current_program().config.arch),
+                      "SparseMatrix only supports CPU for now.");
+          return SparseMatrixBuilder(n, m, max_num_entries);
+        });
+
+  py::class_<SparseMatrix>(m, "SparseMatrix")
+      .def("to_string", &SparseMatrix::to_string)
+      .def(py::self + py::self, py::return_value_policy::reference_internal)
+      .def(py::self - py::self, py::return_value_policy::reference_internal)
+      .def(float() * py::self, py::return_value_policy::reference_internal)
+      .def(py::self * float(), py::return_value_policy::reference_internal)
+      .def(py::self * py::self, py::return_value_policy::reference_internal)
+      .def("matmul", &SparseMatrix::matmul,
+           py::return_value_policy::reference_internal)
+      .def("transpose", &SparseMatrix::transpose,
+           py::return_value_policy::reference_internal)
+      .def("get_element", &SparseMatrix::get_element)
+      .def("num_rows", &SparseMatrix::num_rows)
+      .def("num_cols", &SparseMatrix::num_cols);
+
+  m.def("create_sparse_matrix", [](int n, int m) {
+    TI_ERROR_IF(!arch_is_cpu(get_current_program().config.arch),
+                "SparseMatrix only supports CPU for now.");
+    return SparseMatrix(n, m);
+  });
 }
 
 TI_NAMESPACE_END
diff --git a/taichi/runtime/llvm/internal_functions.h b/taichi/runtime/llvm/internal_functions.h
index 7e6a7a0dbf644..ec620c9055622 100644
--- a/taichi/runtime/llvm/internal_functions.h
+++ b/taichi/runtime/llvm/internal_functions.h
@@ -20,6 +20,23 @@ i32 refresh_counter(Context *context) {
   return 0;
 }
 
+i32 insert_triplet(Context *context,
+                   int64 base_ptr_,
+                   int i,
+                   int j,
+                   float value) {
+  auto base_ptr = (int64 *)base_ptr_;
+
+  int64 *num_triplets = base_ptr;
+  auto data_base_ptr = *(int32 **)(base_ptr + 1);
+
+  auto triplet_id = atomic_add_i64(num_triplets, 1);
+  data_base_ptr[triplet_id * 3] = i;
+  data_base_ptr[triplet_id * 3 + 1] = j;
+  data_base_ptr[triplet_id * 3 + 2] = taichi_union_cast<int32>(value);
+  return 0;
+}
+
 i32 test_internal_func_args(Context *context, float32 i, float32 j, int32 k) {
   return static_cast<int>((i + j) * k);
 }
diff --git a/taichi/transforms/detect_read_only.cpp b/taichi/transforms/detect_read_only.cpp
index 8e42df309911f..fb5d5573a2795 100644
--- a/taichi/transforms/detect_read_only.cpp
+++ b/taichi/transforms/detect_read_only.cpp
@@ -30,7 +30,7 @@ class ExternalPtrAccessVisitor : public BasicStmtVisitor {
   using BasicStmtVisitor::visit;
 
   ExternalPtrAccessVisitor(std::unordered_map &map)
-      : map_(map), BasicStmtVisitor() {
+      : BasicStmtVisitor(), map_(map) {
   }
 
   void visit(GlobalLoadStmt *stmt) override {
diff --git a/tests/python/test_sparse_matrix.py b/tests/python/test_sparse_matrix.py
new file mode 100644
index 0000000000000..ebfe544f09ae7
--- /dev/null
+++ b/tests/python/test_sparse_matrix.py
@@ -0,0 +1,183 @@
+import taichi as ti
+
+
+@ti.test(arch=ti.cpu)
+def test_sparse_matrix_builder():
+    n = 8
+    Abuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+
+    @ti.kernel
+    def fill(Abuilder: ti.sparse_matrix_builder()):
+        for i, j in ti.ndrange(n, n):
+            Abuilder[i, j] += i + j
+
+    fill(Abuilder)
+    A = Abuilder.build()
+    for i in range(n):
+        for j in range(n):
+            assert A[i, j] == i + j
+
+
+@ti.test(arch=ti.cpu)
+def test_sparse_matrix_element_access():
+    n = 8
+    Abuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+
+    @ti.kernel
+    def fill(Abuilder: ti.sparse_matrix_builder()):
+        for i in range(n):
+            Abuilder[i, i] += i
+
+    fill(Abuilder)
+    A = Abuilder.build()
+    for i in range(n):
+        assert A[i, i] == i
+
+
+@ti.test(arch=ti.cpu)
+def test_sparse_matrix_addition():
+    n = 8
+    Abuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+    Bbuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+
+    @ti.kernel
+    def fill(Abuilder: ti.sparse_matrix_builder(),
+             Bbuilder: ti.sparse_matrix_builder()):
+        for i, j in ti.ndrange(n, n):
+            Abuilder[i, j] += i + j
+            Bbuilder[i, j] += i - j
+
+    fill(Abuilder, Bbuilder)
+    A = Abuilder.build()
+    B = Bbuilder.build()
+    C = A + B
+    for i in range(n):
+        for j in range(n):
+            assert C[i, j] == 2 * i
+
+
+@ti.test(arch=ti.cpu)
+def test_sparse_matrix_subtraction():
+    n = 8
+    Abuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+    Bbuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+
+    @ti.kernel
+    def fill(Abuilder: ti.sparse_matrix_builder(),
+             Bbuilder: ti.sparse_matrix_builder()):
+        for i, j in ti.ndrange(n, n):
+            Abuilder[i, j] += i + j
+            Bbuilder[i, j] += i - j
+
+    fill(Abuilder, Bbuilder)
+    A = Abuilder.build()
+    B = Bbuilder.build()
+    C = A - B
+    for i in range(n):
+        for j in range(n):
+            assert C[i, j] == 2 * j
+
+
+@ti.test(arch=ti.cpu)
+def test_sparse_matrix_scalar_multiplication():
+    n = 8
+    Abuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+
+    @ti.kernel
+    def fill(Abuilder: ti.sparse_matrix_builder()):
+        for i, j in ti.ndrange(n, n):
+            Abuilder[i, j] += i + j
+
+    fill(Abuilder)
+    A = Abuilder.build()
+    B = A * 3.0
+    for i in range(n):
+        for j in range(n):
+            assert B[i, j] == 3 * (i + j)
+
+
+@ti.test(arch=ti.cpu)
+def test_sparse_matrix_transpose():
+    n = 8
+    Abuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+
+    @ti.kernel
+    def fill(Abuilder: ti.sparse_matrix_builder()):
+        for i, j in ti.ndrange(n, n):
+            Abuilder[i, j] += i + j
+
+    fill(Abuilder)
+    A = Abuilder.build()
+    B = A.transpose()
+    for i in range(n):
+        for j in range(n):
+            assert B[i, j] == A[j, i]
+
+
+@ti.test(arch=ti.cpu)
+def test_sparse_matrix_elementwise_multiplication():
+    n = 8
+    Abuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+    Bbuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+
+    @ti.kernel
+    def fill(Abuilder: ti.sparse_matrix_builder(),
+             Bbuilder: ti.sparse_matrix_builder()):
+        for i, j in ti.ndrange(n, n):
+            Abuilder[i, j] += i + j
+            Bbuilder[i, j] += i - j
+
+    fill(Abuilder, Bbuilder)
+    A = Abuilder.build()
+    B = Bbuilder.build()
+    C = A * B
+    for i in range(n):
+        for j in range(n):
+            assert C[i, j] == (i + j) * (i - j)
+
+
+@ti.test(arch=ti.cpu)
+def test_sparse_matrix_multiplication():
+    n = 2
+    Abuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+    Bbuilder = ti.SparseMatrixBuilder(n, n, max_num_triplets=100)
+
+    @ti.kernel
+    def fill(Abuilder: ti.sparse_matrix_builder(),
+             Bbuilder: ti.sparse_matrix_builder()):
+        for i, j in ti.ndrange(n, n):
+            Abuilder[i, j] += i + j
+            Bbuilder[i, j] += i - j
+
+    fill(Abuilder, Bbuilder)
+    A = Abuilder.build()
+    B = Bbuilder.build()
+    C = A @ B
+    assert C[0, 0] == 1.0
+    assert C[0, 1] == 0.0
+    assert C[1, 0] == 2.0
+    assert C[1, 1] == -1.0
+
+
+@ti.test(arch=ti.cpu)
+def test_sparse_matrix_nonsymmetric_multiplication():
+    n, k, m = 2, 3, 4
+    Abuilder = ti.SparseMatrixBuilder(n, k, max_num_triplets=100)
+    Bbuilder = ti.SparseMatrixBuilder(k, m, max_num_triplets=100)
+
+    @ti.kernel
+    def fill(Abuilder: ti.sparse_matrix_builder(),
+             Bbuilder: ti.sparse_matrix_builder()):
+        for i, j in ti.ndrange(n, k):
+            Abuilder[i, j] += i + j
+        for i, j in ti.ndrange(k, m):
+            Bbuilder[i, j] -= i + j
+
+    fill(Abuilder, Bbuilder)
+    A = Abuilder.build()
+    B = Bbuilder.build()
+    C = A @ B
+    GT = [[-5, -8, -11, -14], [-8, -14, -20, -26]]
+    for i in range(n):
+        for j in range(m):
+            assert C[i, j] == GT[i][j]