diff --git a/.vscode/c_cpp_properties.json b/.vscode/c_cpp_properties.json
new file mode 100644
index 0000000..4d71147
--- /dev/null
+++ b/.vscode/c_cpp_properties.json
@@ -0,0 +1,21 @@
+{
+  "configurations": [
+    {
+      "name": "Mac",
+      "includePath": [
+        "${workspaceFolder}/**",
+
+        "/Library/Frameworks/R.framework/Resources/include",
+        "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/usr/include",
+        "/usr/local/include"
+      ],
+      "macFrameworkPath": [
+        "/Library/Developer/CommandLineTools/SDKs/MacOSX.sdk/System/Library/Frameworks"
+      ],
+      "compilerPath": "/usr/bin/clang",
+      "cStandard": "c99",
+      "cppStandard": "c++11"
+    }
+  ],
+  "version": 4
+}
diff --git a/NAMESPACE b/NAMESPACE
index 5df00aa..418d923 100644
--- a/NAMESPACE
+++ b/NAMESPACE
@@ -25,6 +25,7 @@ export(sparse_is_na)
 export(sparse_logical)
 export(sparse_mean)
 export(sparse_median)
+export(sparse_multiplication)
 export(sparse_multiplication_scalar)
 export(sparse_positions)
 export(sparse_replace_na)
diff --git a/NEWS.md b/NEWS.md
index 0dd59c0..ce984ad 100644
--- a/NEWS.md
+++ b/NEWS.md
@@ -8,6 +8,8 @@
 * Helper function `sparse_replace_na()` has been added. (#91)
 
+* Added the arithmetic function `sparse_multiplication()`. (#93)
+
 # sparsevctrs 0.2.0
 
 ## New Functions
diff --git a/R/arithmatic.R b/R/arithmatic.R
index 972a7d9..dbd2836 100644
--- a/R/arithmatic.R
+++ b/R/arithmatic.R
@@ -147,3 +147,86 @@ sparse_subtraction_scalar <- function(x, val) {
 
   res
 }
+
+#' Vector arithmetic with sparse vectors
+#'
+#' Perform arithmetic operations on sparse vectors while trying to avoid
+#' destroying sparsity.
+#'
+#' @param x A numeric vector.
+#' @param y A numeric vector.
+#'
+#' @details
+#'
+#' Note that this function works with both sparse and dense vectors for both `x`
+#' and `y`, returning a sparse or dense vector according to the input.
+#'
+#' For `sparse_multiplication()` the class of the resulting vector depends on
+#' the classes of `x` and `y`. If both `x` and `y` are integer vectors then an
+#' integer vector is returned, otherwise a double vector is returned.
+#'
+#' `sparse_multiplication()` will return a non-sparse vector if both `x` and `y`
+#' are non-sparse. Otherwise a sparse vector is returned.
+#'
+#' `sparse_multiplication()` will destroy the sparsity of sparse vectors with
+#' non-zero `default` values.
+#'
+#' @return A numeric vector; sparse unless both `x` and `y` are dense.
+#'
+#' @examples
+#' x_sparse <- sparse_double(c(pi, 5, 0.1), c(2, 5, 10), 10)
+#'
+#' sparse_multiplication(x_sparse, x_sparse)
+#' @name sparse-arithmatic
+NULL
+
+#' @rdname sparse-arithmatic
+#' @export
+sparse_multiplication <- function(x, y) {
+  if (!is.numeric(x)) {
+    cli::cli_abort("{.arg x} must be numeric, not {.obj_type_friendly {x}}.")
+  }
+  if (!is.numeric(y)) {
+    cli::cli_abort("{.arg y} must be numeric, not {.obj_type_friendly {y}}.")
+  }
+
+  if (length(x) != length(y)) {
+    x_len <- length(x)
+    y_len <- length(y)
+    cli::cli_abort(
+      "{.arg x} ({x_len}) and {.arg y} ({y_len}) must be the same length."
+    )
+  }
+
+  x_class <- class(x)
+  y_class <- class(y)
+
+  if (x_class != y_class) {
+    if (x_class == "integer") {
+      if (is_sparse_vector(x)) {
+        x <- as_sparse_double(x, default = sparse_default(x))
+      } else {
+        x <- as.double(x)
+      }
+    } else {
+      if (is_sparse_vector(y)) {
+        y <- as_sparse_double(y, default = sparse_default(y))
+      } else {
+        y <- as.double(y)
+      }
+    }
+  }
+
+  x_default <- sparse_default(x)
+  y_default <- sparse_default(y)
+
+  if (!is.na(x_default) && x_default != 0) {
+    x <- x[]
+  }
+
+  if (!is.na(y_default) && y_default != 0) {
+    y <- y[]
+  }
+
+  .Call(ffi_sparse_multiplication, x, y)
+}
diff --git a/_pkgdown.yml b/_pkgdown.yml
index 4273d7e..9ac5fe8 100644
--- a/_pkgdown.yml
+++ b/_pkgdown.yml
@@ -43,6 +43,7 @@ reference:
     contents:
       - type-predicates
      - sparse-arithmatic-scalar
+      - sparse-arithmatic
      - extractors
      - has_sparse_elements
      - sparsevctrs_options
diff --git a/man/sparse-arithmatic.Rd b/man/sparse-arithmatic.Rd
new file mode 100644
index 0000000..731f77e
--- /dev/null
+++ b/man/sparse-arithmatic.Rd
@@ -0,0 +1,40 @@
+% Generated by roxygen2: do not edit by hand
+% Please edit documentation in R/arithmatic.R
+\name{sparse-arithmatic}
+\alias{sparse-arithmatic}
+\alias{sparse_multiplication}
+\title{Vector arithmetic with sparse vectors}
+\usage{
+sparse_multiplication(x, y)
+}
+\arguments{
+\item{x}{A numeric vector.}
+
+\item{y}{A numeric vector.}
+}
+\value{
+A numeric vector; sparse unless both \code{x} and \code{y} are dense.
+}
+\description{
+Perform arithmetic operations on sparse vectors while trying to avoid
+destroying sparsity.
+}
+\details{
+Note that this function works with both sparse and dense vectors for both \code{x}
+and \code{y}, returning a sparse or dense vector according to the input.
+
+For \code{sparse_multiplication()} the class of the resulting vector depends on
+the classes of \code{x} and \code{y}. If both \code{x} and \code{y} are integer vectors then an
+integer vector is returned, otherwise a double vector is returned.
+
+\code{sparse_multiplication()} will return a non-sparse vector if both \code{x} and \code{y}
+are non-sparse. Otherwise a sparse vector is returned.
+
+\code{sparse_multiplication()} will destroy the sparsity of sparse vectors with
+non-zero \code{default} values.
+}
+\examples{
+x_sparse <- sparse_double(c(pi, 5, 0.1), c(2, 5, 10), 10)
+
+sparse_multiplication(x_sparse, x_sparse)
+}
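The documentation above pins down three user-visible rules: mixed integer/double input promotes to double, the result stays sparse as long as at least one input is sparse (with a zero `default`), and only positions that survive the multiplication are stored. A quick sketch of those rules at the R level, assuming a build with this patch installed:

```r
library(sparsevctrs)

x_int <- sparse_integer(c(2L, 4L), c(1, 3), 5)  # sparse integer
y_dbl <- sparse_double(c(0.5, 3), c(3, 5), 5)   # sparse double

res <- sparse_multiplication(x_int, y_dbl)
res                    # 0 0 2 0 0: position 3 is the only one stored in both
is_sparse_vector(res)  # TRUE: at least one input is sparse
is.double(res)         # TRUE: integer * double promotes to double
```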
diff --git a/src/init.c b/src/init.c
index 91132f6..7d71b05 100644
--- a/src/init.c
+++ b/src/init.c
@@ -2,6 +2,7 @@
 #include "sparse-extractors.h"
 #include "sparse-utils.h"
 #include "sparse-dummy.h"
+#include "sparse-arithmatic.h"
 
 // Defined in altrep-sparse-double.c
 extern SEXP ffi_altrep_new_sparse_double(SEXP);
@@ -37,6 +38,7 @@ static const R_CallMethodDef CallEntries[] = {
     {"ffi_is_sparse_vector", (DL_FUNC) &ffi_is_sparse_vector, 1},
     {"ffi_sparse_dummy", (DL_FUNC) &ffi_sparse_dummy, 4},
     {"ffi_sparse_dummy_na", (DL_FUNC) &ffi_sparse_dummy_na, 4},
+    {"ffi_sparse_multiplication", (DL_FUNC) &ffi_sparse_multiplication, 2},
     {NULL, NULL, 0}
 };
diff --git a/src/sparse-arithmatic.c b/src/sparse-arithmatic.c
new file mode 100644
index 0000000..bddbab1
--- /dev/null
+++ b/src/sparse-arithmatic.c
@@ -0,0 +1,760 @@
+#include "sparse-arithmatic.h"
+#include "sparse-utils.h"
+
+// Defined in altrep-sparse-integer.c
+extern SEXP ffi_altrep_new_sparse_integer(SEXP);
+extern void sparsevctrs_init_altrep_sparse_integer(DllInfo*);
+
+// Build a length-`len` sparse integer vector with no stored values
+SEXP empty_sparse_integer(R_xlen_t len) {
+  SEXP out = PROTECT(Rf_allocVector(VECSXP, 4));
+
+  SEXP out_val = Rf_allocVector(INTSXP, 0);
+  SET_VECTOR_ELT(out, 0, out_val);
+
+  SEXP out_pos = Rf_allocVector(INTSXP, 0);
+  SET_VECTOR_ELT(out, 1, out_pos);
+
+  SEXP out_length = Rf_ScalarInteger((int) len);
+  SET_VECTOR_ELT(out, 2, out_length);
+
+  SEXP out_default = Rf_ScalarInteger(0);
+  SET_VECTOR_ELT(out, 3, out_default);
+
+  SEXP altrep = ffi_altrep_new_sparse_integer(out);
+
+  UNPROTECT(1);
+  return altrep;
+}
+
+// Defined in altrep-sparse-double.c
+extern SEXP ffi_altrep_new_sparse_double(SEXP);
+extern void sparsevctrs_init_altrep_sparse_double(DllInfo*);
+
+// Build a length-`len` sparse double vector with no stored values
+SEXP empty_sparse_double(R_xlen_t len) {
+  SEXP out = PROTECT(Rf_allocVector(VECSXP, 4));
+
+  SEXP out_val = Rf_allocVector(REALSXP, 0);
+  SET_VECTOR_ELT(out, 0, out_val);
+
+  SEXP out_pos = Rf_allocVector(INTSXP, 0);
+  SET_VECTOR_ELT(out, 1, out_pos);
+
+  SEXP out_length = Rf_ScalarInteger((int) len);
+  SET_VECTOR_ELT(out, 2, out_length);
+
+  SEXP out_default = Rf_ScalarReal(0);
+  SET_VECTOR_ELT(out, 3, out_default);
+
+  SEXP altrep = ffi_altrep_new_sparse_double(out);
+
+  UNPROTECT(1);
+  return altrep;
+}
+
+// For positions stored in both x and y, return a list of two integer vectors
+// holding the 0-based indices into x's and y's value vectors. Returns
+// R_NilValue when there is no overlap.
+SEXP find_overlap(SEXP x, SEXP y) {
+  SEXP x_pos = extract_pos(x);
+  SEXP y_pos = extract_pos(y);
+  R_xlen_t x_len = Rf_length(x_pos);
+  R_xlen_t y_len = Rf_length(y_pos);
+
+  // special case if x or y have length zero
+  if (x_len == 0 || y_len == 0) {
+    return R_NilValue;
+  }
+
+  SEXP x_matches = PROTECT(Rf_allocVector(INTSXP, x_len));
+  SEXP y_matches = PROTECT(Rf_allocVector(INTSXP, y_len));
+  for (R_xlen_t i = 0; i < x_len; i++) {
+    SET_INTEGER_ELT(x_matches, i, 0);
+  }
+  for (R_xlen_t i = 0; i < y_len; i++) {
+    SET_INTEGER_ELT(y_matches, i, 0);
+  }
+
+  const int* x_pos_v = INTEGER_RO(x_pos);
+  const int* y_pos_v = INTEGER_RO(y_pos);
+
+  R_xlen_t i = 0, j = 0, n = 0;
+
+  // Two-pointer scan; both position vectors are sorted ascending
+  while (i < x_len && j < y_len) {
+    if (x_pos_v[i] < y_pos_v[j]) {
+      i++;
+    } else if (x_pos_v[i] > y_pos_v[j]) {
+      j++;
+    } else {
+      n++;
+      SET_INTEGER_ELT(x_matches, i, 1);
+      SET_INTEGER_ELT(y_matches, j, 1);
+
+      i++;
+      j++;
+    }
+  }
+
+  if (n == 0) {
+    UNPROTECT(2);
+    return R_NilValue;
+  }
+
+  SEXP out = PROTECT(Rf_allocVector(VECSXP, 2));
+
+  SEXP out_x = Rf_allocVector(INTSXP, n);
+  SET_VECTOR_ELT(out, 0, out_x);
+  int* v_out_x = INTEGER(out_x);
+
+  SEXP out_y = Rf_allocVector(INTSXP, n);
+  SET_VECTOR_ELT(out, 1, out_y);
+  int* v_out_y = INTEGER(out_y);
+
+  R_xlen_t loc = 0;
+  for (R_xlen_t i = 0; i < x_len; i++) {
+    if (INTEGER_ELT(x_matches, i) == 1) {
+      v_out_x[loc] = i;
+      loc++;
+    }
+  }
+  loc = 0;
+  for (R_xlen_t i = 0; i < y_len; i++) {
+    if (INTEGER_ELT(y_matches, i) == 1) {
+      v_out_y[loc] = i;
+      loc++;
+    }
+  }
+
+  UNPROTECT(3);
+  return out;
+}
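+
+// ---------------------------------------------------------------------------
+// NA bookkeeping
+//
+// Base R arithmetic forces one case on top of the plain overlap: 0 * NA is
+// NA, so a stored NA that lines up with an implicit default 0 in the other
+// vector must still end up in the result. For example (length 6): if x
+// stores NA at position 4 and y stores nothing there, the product stores NA
+// at position 4 even though the positions do not overlap. The helpers below
+// find exactly those positions.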
+
+// Returns TRUE if `x` occurs in the integer vector `table`. Linear scan;
+// the position vectors used here are expected to be short.
+Rboolean int_match(int x, SEXP table) {
+  R_xlen_t table_len = Rf_length(table);
+
+  Rboolean out = FALSE;
+
+  for (R_xlen_t i = 0; i < table_len; i++) {
+    if (INTEGER_ELT(table, i) == x) {
+      out = TRUE;
+      break;
+    }
+  }
+  return out;
+}
+
+// Find stored NAs in x (resp. y) whose position is not stored in the other
+// vector. Returns R_NilValue when either vector stores no values.
+SEXP find_nas_with_no_overlap(SEXP x, SEXP y) {
+  SEXP x_pos = extract_pos(x);
+  SEXP x_val = extract_val(x);
+  SEXP y_pos = extract_pos(y);
+  SEXP y_val = extract_val(y);
+  R_xlen_t x_len = Rf_length(x_pos);
+  R_xlen_t y_len = Rf_length(y_pos);
+
+  // special case if x or y have length zero
+  if (x_len == 0 || y_len == 0) {
+    return R_NilValue;
+  }
+
+  SEXP x_matches = PROTECT(Rf_allocVector(INTSXP, x_len));
+  SEXP y_matches = PROTECT(Rf_allocVector(INTSXP, y_len));
+
+  // Flag stored NAs in x
+  for (R_xlen_t i = 0; i < x_len; i++) {
+    if (Rf_isInteger(x_val)) {
+      if (INTEGER_ELT(x_val, i) == NA_INTEGER) {
+        SET_INTEGER_ELT(x_matches, i, 1);
+      } else {
+        SET_INTEGER_ELT(x_matches, i, 0);
+      }
+    } else {
+      if (R_IsNA(REAL_ELT(x_val, i))) {
+        SET_INTEGER_ELT(x_matches, i, 1);
+      } else {
+        SET_INTEGER_ELT(x_matches, i, 0);
+      }
+    }
+  }
+
+  // Flag stored NAs in y
+  for (R_xlen_t i = 0; i < y_len; i++) {
+    if (Rf_isInteger(y_val)) {
+      if (INTEGER_ELT(y_val, i) == NA_INTEGER) {
+        SET_INTEGER_ELT(y_matches, i, 1);
+      } else {
+        SET_INTEGER_ELT(y_matches, i, 0);
+      }
+    } else {
+      if (R_IsNA(REAL_ELT(y_val, i))) {
+        SET_INTEGER_ELT(y_matches, i, 1);
+      } else {
+        SET_INTEGER_ELT(y_matches, i, 0);
+      }
+    }
+  }
+
+  const int* x_pos_v = INTEGER_RO(x_pos);
+  const int* y_pos_v = INTEGER_RO(y_pos);
+
+  R_xlen_t i = 0, j = 0;
+
+  // Unflag NAs at overlapping positions; those are handled by the regular
+  // overlap multiplication
+  while (i < x_len && j < y_len) {
+    if (x_pos_v[i] < y_pos_v[j]) {
+      i++;
+    } else if (x_pos_v[i] > y_pos_v[j]) {
+      j++;
+    } else {
+      SET_INTEGER_ELT(x_matches, i, 0);
+      SET_INTEGER_ELT(y_matches, j, 0);
+
+      i++;
+      j++;
+    }
+  }
+
+  SEXP out = PROTECT(Rf_allocVector(VECSXP, 2));
+  R_xlen_t n_x = 0;
+  for (R_xlen_t i = 0; i < x_len; i++) {
+    if (INTEGER_ELT(x_matches, i) == 1) {
+      n_x++;
+    }
+  }
+  R_xlen_t n_y = 0;
+  for (R_xlen_t i = 0; i < y_len; i++) {
+    if (INTEGER_ELT(y_matches, i) == 1) {
+      n_y++;
+    }
+  }
+
+  SEXP out_x = Rf_allocVector(INTSXP, n_x);
+  SET_VECTOR_ELT(out, 0, out_x);
+  int* v_out_x = INTEGER(out_x);
+
+  SEXP out_y = Rf_allocVector(INTSXP, n_y);
+  SET_VECTOR_ELT(out, 1, out_y);
+  int* v_out_y = INTEGER(out_y);
+
+  R_xlen_t loc = 0;
+  for (R_xlen_t i = 0; i < x_len; i++) {
+    if (INTEGER_ELT(x_matches, i) == 1) {
+      v_out_x[loc] = i;
+      loc++;
+    }
+  }
+  loc = 0;
+  for (R_xlen_t i = 0; i < y_len; i++) {
+    if (INTEGER_ELT(y_matches, i) == 1) {
+      v_out_y[loc] = i;
+      loc++;
+    }
+  }
+
+  UNPROTECT(3);
+  return out;
+}
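+
+// ---------------------------------------------------------------------------
+// Double kernels
+//
+// sparse * sparse keeps the overlapping positions plus the lone stored NAs
+// found above. Worked example (length 6):
+//   x: values c(1.5, NA) at positions c(2, 4)
+//   y: values c(2, 3)    at positions c(2, 6)
+//   result: values c(3, NA) at positions c(2, 4); position 6 drops (3 * 0).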
+
+SEXP multiplication_doubles_sparse_sparse(SEXP x, SEXP y) {
+  SEXP overlap = PROTECT(find_overlap(x, y));
+  SEXP nas = PROTECT(find_nas_with_no_overlap(x, y));
+
+  SEXP x_val = extract_val(x);
+  SEXP x_pos = extract_pos(x);
+  SEXP y_val = extract_val(y);
+  SEXP y_pos = extract_pos(y);
+
+  int x_na_count = 0;
+  int y_na_count = 0;
+  SEXP x_na_pos = R_NilValue;
+  SEXP y_na_pos = R_NilValue;
+  if (nas != R_NilValue) {
+    x_na_pos = VECTOR_ELT(nas, 0);
+    y_na_pos = VECTOR_ELT(nas, 1);
+    x_na_count = Rf_length(x_na_pos);
+    y_na_count = Rf_length(y_na_pos);
+  }
+
+  if (overlap == R_NilValue && x_na_count == 0 && y_na_count == 0) {
+    UNPROTECT(2);
+    return empty_sparse_double(extract_len(x));
+  }
+
+  R_xlen_t n_overlap = 0;
+  if (overlap != R_NilValue) {
+    n_overlap = Rf_length(VECTOR_ELT(overlap, 0));
+  }
+
+  R_xlen_t out_len = n_overlap + x_na_count + y_na_count;
+
+  SEXP out_pos = PROTECT(Rf_allocVector(INTSXP, out_len));
+  SEXP out_val = PROTECT(Rf_allocVector(REALSXP, out_len));
+  R_xlen_t out_idx = 0;
+
+  if (overlap != R_NilValue) {
+    SEXP x_pos_idx = VECTOR_ELT(overlap, 0);
+    SEXP y_pos_idx = VECTOR_ELT(overlap, 1);
+
+    for (R_xlen_t i = 0; i < n_overlap; i++) {
+      SET_INTEGER_ELT(
+          out_pos, out_idx, INTEGER_ELT(x_pos, INTEGER_ELT(x_pos_idx, i))
+      );
+
+      SET_REAL_ELT(
+          out_val,
+          out_idx,
+          REAL_ELT(x_val, INTEGER_ELT(x_pos_idx, i)) *
+              REAL_ELT(y_val, INTEGER_ELT(y_pos_idx, i))
+      );
+      out_idx++;
+    }
+  }
+
+  // set x NA values
+  for (R_xlen_t i = 0; i < x_na_count; i++) {
+    SET_INTEGER_ELT(
+        out_pos, out_idx, INTEGER_ELT(x_pos, INTEGER_ELT(x_na_pos, i))
+    );
+    SET_REAL_ELT(out_val, out_idx, NA_REAL);
+    out_idx++;
+  }
+
+  // set y NA values
+  for (R_xlen_t i = 0; i < y_na_count; i++) {
+    SET_INTEGER_ELT(
+        out_pos, out_idx, INTEGER_ELT(y_pos, INTEGER_ELT(y_na_pos, i))
+    );
+    SET_REAL_ELT(out_val, out_idx, NA_REAL);
+    out_idx++;
+  }
+
+  if (out_len > 1) {
+    sort_pos_and_val(out_pos, out_val);
+  }
+
+  SEXP out = PROTECT(Rf_allocVector(VECSXP, 4));
+
+  SET_VECTOR_ELT(out, 0, out_val);
+
+  SET_VECTOR_ELT(out, 1, out_pos);
+
+  SEXP out_length = Rf_ScalarInteger((int) extract_len(x));
+  SET_VECTOR_ELT(out, 2, out_length);
+
+  SEXP out_default = Rf_ScalarReal(0);
+  SET_VECTOR_ELT(out, 3, out_default);
+
+  SEXP altrep = ffi_altrep_new_sparse_double(out);
+
+  UNPROTECT(5);
+  return altrep;
+}
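+
+// sparse * dense only needs to walk x's stored positions: everywhere else x
+// is its default 0, so the product is 0 unless the dense side holds an NA
+// there (0 * NA -> NA). A stored position is dropped again when the dense
+// side is exactly 0 at that position, unless the sparse side stores an NA.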
+
+SEXP multiplication_doubles_sparse_dense(SEXP x, SEXP y) {
+  SEXP x_pos = extract_pos(x);
+  SEXP x_val = extract_val(x);
+  R_xlen_t x_len = extract_len(x);
+  R_xlen_t n_values = Rf_length(x_pos);
+
+  // Count stored positions of x that are wiped out by a 0 in y
+  R_xlen_t n_zero = 0;
+  for (R_xlen_t i = 0; i < n_values; i++) {
+    if (REAL_ELT(y, INTEGER_ELT(x_pos, i) - 1) == 0) {
+      n_zero++;
+    }
+  }
+
+  // Locate NA values for y - dense
+  R_xlen_t n_y_nas = 0;
+  R_xlen_t y_len = Rf_length(y);
+  for (R_xlen_t i = 0; i < y_len; i++) {
+    if (R_IsNA(REAL_ELT(y, i))) {
+      // i + 1 because of R-indexing
+      if (!int_match((int) i + 1, x_pos)) {
+        n_y_nas++;
+      }
+    }
+  }
+
+  SEXP y_na_pos = PROTECT(Rf_allocVector(INTSXP, n_y_nas));
+  R_xlen_t idx = 0;
+
+  for (R_xlen_t i = 0; i < y_len; i++) {
+    if (R_IsNA(REAL_ELT(y, i))) {
+      // i + 1 because of R-indexing
+      if (!int_match((int) i + 1, x_pos)) {
+        SET_INTEGER_ELT(y_na_pos, idx, i);
+        idx++;
+      }
+    }
+  }
+
+  // Locate NA values for x - sparse
+  R_xlen_t n_x_nas = 0;
+  for (R_xlen_t i = 0; i < n_values; i++) {
+    if (R_IsNA(REAL_ELT(x_val, i))) {
+      int cur_pos = INTEGER_ELT(x_pos, i);
+      // cur_pos - 1 because of R-indexing
+      double cur_y_val = REAL_ELT(y, cur_pos - 1);
+
+      if (cur_y_val == 0) {
+        n_x_nas++;
+      }
+    }
+  }
+
+  SEXP x_na_pos = PROTECT(Rf_allocVector(INTSXP, n_x_nas));
+  idx = 0;
+
+  for (R_xlen_t i = 0; i < n_values; i++) {
+    if (R_IsNA(REAL_ELT(x_val, i))) {
+      int cur_pos = INTEGER_ELT(x_pos, i);
+      // cur_pos - 1 because of R-indexing
+      double cur_y_val = REAL_ELT(y, cur_pos - 1);
+
+      if (cur_y_val == 0) {
+        SET_INTEGER_ELT(x_na_pos, idx, cur_pos);
+        idx++;
+      }
+    }
+  }
+
+  R_xlen_t out_len = n_values - n_zero + n_x_nas + n_y_nas;
+  SEXP out_pos = PROTECT(Rf_allocVector(INTSXP, out_len));
+  SEXP out_val = PROTECT(Rf_allocVector(REALSXP, out_len));
+
+  idx = 0;
+
+  for (R_xlen_t i = 0; i < n_values; i++) {
+    int cur_pos = INTEGER_ELT(x_pos, i);
+    double y_val = REAL_ELT(y, cur_pos - 1);
+
+    if (y_val != 0) {
+      SET_INTEGER_ELT(out_pos, idx, cur_pos);
+      double cur_val = REAL_ELT(x_val, i);
+      SET_REAL_ELT(out_val, idx, y_val * cur_val);
+      idx++;
+    }
+  }
+
+  // set x na values
+  for (R_xlen_t i = 0; i < n_x_nas; i++) {
+    int cur_pos = INTEGER_ELT(x_na_pos, i);
+
+    SET_INTEGER_ELT(out_pos, idx, cur_pos);
+    SET_REAL_ELT(out_val, idx, NA_REAL);
+    idx++;
+  }
+
+  // set y na values
+  for (R_xlen_t i = 0; i < n_y_nas; i++) {
+    int cur_pos = INTEGER_ELT(y_na_pos, i);
+
+    // cur_pos + 1 because it is R-indexed
+    SET_INTEGER_ELT(out_pos, idx, cur_pos + 1);
+    SET_REAL_ELT(out_val, idx, NA_REAL);
+    idx++;
+  }
+
+  if (out_len > 1) {
+    sort_pos_and_val(out_pos, out_val);
+  }
+
+  SEXP out = PROTECT(Rf_allocVector(VECSXP, 4));
+
+  SET_VECTOR_ELT(out, 0, out_val);
+
+  SET_VECTOR_ELT(out, 1, out_pos);
+
+  SEXP out_length = Rf_ScalarInteger((int) x_len);
+  SET_VECTOR_ELT(out, 2, out_length);
+
+  SEXP out_default = Rf_ScalarReal(0);
+  SET_VECTOR_ELT(out, 3, out_default);
+
+  SEXP altrep = ffi_altrep_new_sparse_double(out);
+
+  UNPROTECT(5);
+  return altrep;
+}
+
+SEXP multiplication_doubles_dense_dense(SEXP x, SEXP y) {
+  R_xlen_t len = Rf_length(x);
+
+  SEXP out = Rf_allocVector(REALSXP, len);
+
+  // NA_REAL propagates through `*` on its own, so no NA check is needed
+  for (R_xlen_t i = 0; i < len; i++) {
+    double x_val = REAL_ELT(x, i);
+    double y_val = REAL_ELT(y, i);
+    SET_REAL_ELT(out, i, x_val * y_val);
+  }
+
+  return out;
+}
+
+SEXP multiplication_doubles(SEXP x, SEXP y) {
+  if (is_altrep(x)) {
+    if (is_altrep(y)) {
+      return multiplication_doubles_sparse_sparse(x, y);
+    } else {
+      return multiplication_doubles_sparse_dense(x, y);
+    }
+  } else {
+    if (is_altrep(y)) {
+      // Multiplication commutes, so reuse the sparse * dense kernel
+      return multiplication_doubles_sparse_dense(y, x);
+    } else {
+      return multiplication_doubles_dense_dense(x, y);
+    }
+  }
+}
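+
+// ---------------------------------------------------------------------------
+// Integer kernels
+//
+// Same structure as the double kernels, with one extra wrinkle: NA_INTEGER
+// is INT_MIN at the C level, so NA does not propagate through `*` and has to
+// be checked for explicitly before multiplying. Unlike base R's `*`, these
+// kernels do not detect integer overflow.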
+
+SEXP multiplication_integers_sparse_sparse(SEXP x, SEXP y) {
+  SEXP overlap = PROTECT(find_overlap(x, y));
+  SEXP nas = PROTECT(find_nas_with_no_overlap(x, y));
+
+  SEXP x_val = extract_val(x);
+  SEXP x_pos = extract_pos(x);
+  SEXP y_val = extract_val(y);
+  SEXP y_pos = extract_pos(y);
+
+  int x_na_count = 0;
+  int y_na_count = 0;
+  SEXP x_na_pos = R_NilValue;
+  SEXP y_na_pos = R_NilValue;
+  if (nas != R_NilValue) {
+    x_na_pos = VECTOR_ELT(nas, 0);
+    y_na_pos = VECTOR_ELT(nas, 1);
+    x_na_count = Rf_length(x_na_pos);
+    y_na_count = Rf_length(y_na_pos);
+  }
+
+  if (overlap == R_NilValue && x_na_count == 0 && y_na_count == 0) {
+    UNPROTECT(2);
+    return empty_sparse_integer(extract_len(x));
+  }
+
+  R_xlen_t n_overlap = 0;
+  if (overlap != R_NilValue) {
+    n_overlap = Rf_length(VECTOR_ELT(overlap, 0));
+  }
+
+  R_xlen_t out_len = n_overlap + x_na_count + y_na_count;
+
+  SEXP out_pos = PROTECT(Rf_allocVector(INTSXP, out_len));
+  SEXP out_val = PROTECT(Rf_allocVector(INTSXP, out_len));
+  R_xlen_t out_idx = 0;
+
+  if (overlap != R_NilValue) {
+    SEXP x_pos_idx = VECTOR_ELT(overlap, 0);
+    SEXP y_pos_idx = VECTOR_ELT(overlap, 1);
+
+    for (R_xlen_t i = 0; i < n_overlap; i++) {
+      SET_INTEGER_ELT(
+          out_pos, out_idx, INTEGER_ELT(x_pos, INTEGER_ELT(x_pos_idx, i))
+      );
+
+      int x_v = INTEGER_ELT(x_val, INTEGER_ELT(x_pos_idx, i));
+      int y_v = INTEGER_ELT(y_val, INTEGER_ELT(y_pos_idx, i));
+      if (x_v == NA_INTEGER || y_v == NA_INTEGER) {
+        SET_INTEGER_ELT(out_val, out_idx, NA_INTEGER);
+      } else {
+        SET_INTEGER_ELT(out_val, out_idx, x_v * y_v);
+      }
+      out_idx++;
+    }
+  }
+
+  // set x NA values
+  for (R_xlen_t i = 0; i < x_na_count; i++) {
+    SET_INTEGER_ELT(
+        out_pos, out_idx, INTEGER_ELT(x_pos, INTEGER_ELT(x_na_pos, i))
+    );
+    SET_INTEGER_ELT(out_val, out_idx, NA_INTEGER);
+    out_idx++;
+  }
+
+  // set y NA values
+  for (R_xlen_t i = 0; i < y_na_count; i++) {
+    SET_INTEGER_ELT(
+        out_pos, out_idx, INTEGER_ELT(y_pos, INTEGER_ELT(y_na_pos, i))
+    );
+    SET_INTEGER_ELT(out_val, out_idx, NA_INTEGER);
+    out_idx++;
+  }
+
+  if (out_len > 1) {
+    sort_pos_and_val(out_pos, out_val);
+  }
+
+  SEXP out = PROTECT(Rf_allocVector(VECSXP, 4));
+
+  SET_VECTOR_ELT(out, 0, out_val);
+
+  SET_VECTOR_ELT(out, 1, out_pos);
+
+  SEXP out_length = Rf_ScalarInteger((int) extract_len(x));
+  SET_VECTOR_ELT(out, 2, out_length);
+
+  SEXP out_default = Rf_ScalarInteger(0);
+  SET_VECTOR_ELT(out, 3, out_default);
+
+  SEXP altrep = ffi_altrep_new_sparse_integer(out);
+
+  UNPROTECT(5);
+  return altrep;
+}
+
+SEXP multiplication_integers_sparse_dense(SEXP x, SEXP y) {
+  SEXP x_pos = extract_pos(x);
+  SEXP x_val = extract_val(x);
+  R_xlen_t x_len = extract_len(x);
+  R_xlen_t n_values = Rf_length(x_pos);
+
+  R_xlen_t n_zero = 0;
+  for (R_xlen_t i = 0; i < n_values; i++) {
+    if (INTEGER_ELT(y, INTEGER_ELT(x_pos, i) - 1) == 0) {
+      n_zero++;
+    }
+  }
+
+  // Locate NA values for y - dense
+  R_xlen_t n_y_nas = 0;
+  R_xlen_t y_len = Rf_length(y);
+  for (R_xlen_t i = 0; i < y_len; i++) {
+    if (INTEGER_ELT(y, i) == NA_INTEGER) {
+      // i + 1 because of R-indexing
+      if (!int_match((int) i + 1, x_pos)) {
+        n_y_nas++;
+      }
+    }
+  }
+
+  SEXP y_na_pos = PROTECT(Rf_allocVector(INTSXP, n_y_nas));
+  R_xlen_t idx = 0;
+
+  for (R_xlen_t i = 0; i < y_len; i++) {
+    if (INTEGER_ELT(y, i) == NA_INTEGER) {
+      // i + 1 because of R-indexing
+      if (!int_match((int) i + 1, x_pos)) {
+        SET_INTEGER_ELT(y_na_pos, idx, i);
+        idx++;
+      }
+    }
+  }
+
+  // Locate NA values for x - sparse
+  R_xlen_t n_x_nas = 0;
+  for (R_xlen_t i = 0; i < n_values; i++) {
+    if (INTEGER_ELT(x_val, i) == NA_INTEGER) {
+      int cur_pos = INTEGER_ELT(x_pos, i);
+      // cur_pos - 1 because of R-indexing
+      int cur_y_val = INTEGER_ELT(y, cur_pos - 1);
+
+      if (cur_y_val == 0) {
+        n_x_nas++;
+      }
+    }
+  }
+
+  SEXP x_na_pos = PROTECT(Rf_allocVector(INTSXP, n_x_nas));
+  idx = 0;
+
+  for (R_xlen_t i = 0; i < n_values; i++) {
+    if (INTEGER_ELT(x_val, i) == NA_INTEGER) {
+      int cur_pos = INTEGER_ELT(x_pos, i);
+      // cur_pos - 1 because of R-indexing
+      int cur_y_val = INTEGER_ELT(y, cur_pos - 1);
+
+      if (cur_y_val == 0) {
+        SET_INTEGER_ELT(x_na_pos, idx, cur_pos);
+        idx++;
+      }
+    }
+  }
+
+  R_xlen_t out_len = n_values - n_zero + n_x_nas + n_y_nas;
+  SEXP out_pos = PROTECT(Rf_allocVector(INTSXP, out_len));
+  SEXP out_val = PROTECT(Rf_allocVector(INTSXP, out_len));
+
+  idx = 0;
+
+  for (R_xlen_t i = 0; i < n_values; i++) {
+    int cur_pos = INTEGER_ELT(x_pos, i);
+    int y_val = INTEGER_ELT(y, cur_pos - 1);
+
+    if (y_val != 0) {
+      SET_INTEGER_ELT(out_pos, idx, cur_pos);
+      int cur_val = INTEGER_ELT(x_val, i);
+      if (cur_val == NA_INTEGER || y_val == NA_INTEGER) {
+        SET_INTEGER_ELT(out_val, idx, NA_INTEGER);
+      } else {
+        SET_INTEGER_ELT(out_val, idx, y_val * cur_val);
+      }
+      idx++;
+    }
+  }
+
+  // set x na values
+  for (R_xlen_t i = 0; i < n_x_nas; i++) {
+    int cur_pos = INTEGER_ELT(x_na_pos, i);
+
+    SET_INTEGER_ELT(out_pos, idx, cur_pos);
+    SET_INTEGER_ELT(out_val, idx, NA_INTEGER);
+    idx++;
+  }
+
+  // set y na values
+  for (R_xlen_t i = 0; i < n_y_nas; i++) {
+    int cur_pos = INTEGER_ELT(y_na_pos, i);
+
+    // cur_pos + 1 because it is R-indexed
+    SET_INTEGER_ELT(out_pos, idx, cur_pos + 1);
+    SET_INTEGER_ELT(out_val, idx, NA_INTEGER);
+    idx++;
+  }
+
+  if (out_len > 1) {
+    sort_pos_and_val(out_pos, out_val);
+  }
+
+  SEXP out = PROTECT(Rf_allocVector(VECSXP, 4));
+
+  SET_VECTOR_ELT(out, 0, out_val);
+
+  SET_VECTOR_ELT(out, 1, out_pos);
+
+  SEXP out_length = Rf_ScalarInteger((int) x_len);
+  SET_VECTOR_ELT(out, 2, out_length);
+
+  SEXP out_default = Rf_ScalarInteger(0);
+  SET_VECTOR_ELT(out, 3, out_default);
+
+  SEXP altrep = ffi_altrep_new_sparse_integer(out);
+
+  UNPROTECT(5);
+  return altrep;
+}
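+
+// dense * dense falls back to a plain element-wise loop, again with the
+// explicit NA_INTEGER check that base R's `*` performs internally.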
+
+SEXP multiplication_integers_dense_dense(SEXP x, SEXP y) {
+  R_xlen_t len = Rf_length(x);
+
+  SEXP out = Rf_allocVector(INTSXP, len);
+
+  for (R_xlen_t i = 0; i < len; i++) {
+    int x_val = INTEGER_ELT(x, i);
+    int y_val = INTEGER_ELT(y, i);
+    if (x_val == NA_INTEGER || y_val == NA_INTEGER) {
+      SET_INTEGER_ELT(out, i, NA_INTEGER);
+    } else {
+      SET_INTEGER_ELT(out, i, x_val * y_val);
+    }
+  }
+
+  return out;
+}
+
+SEXP multiplication_integers(SEXP x, SEXP y) {
+  if (is_altrep(x)) {
+    if (is_altrep(y)) {
+      return multiplication_integers_sparse_sparse(x, y);
+    } else {
+      return multiplication_integers_sparse_dense(x, y);
+    }
+  } else {
+    if (is_altrep(y)) {
+      // Multiplication commutes, so reuse the sparse * dense kernel
+      return multiplication_integers_sparse_dense(y, x);
+    } else {
+      return multiplication_integers_dense_dense(x, y);
+    }
+  }
+}
+
+SEXP ffi_sparse_multiplication(SEXP x, SEXP y) {
+  SEXP out;
+
+  // The R wrapper has already coerced x and y to a common type, so checking
+  // x alone is enough
+  if (Rf_isInteger(x)) {
+    out = multiplication_integers(x, y);
+  } else {
+    out = multiplication_doubles(x, y);
+  }
+
+  return out;
+}
diff --git a/src/sparse-arithmatic.h b/src/sparse-arithmatic.h
new file mode 100644
index 0000000..8e5db4b
--- /dev/null
+++ b/src/sparse-arithmatic.h
@@ -0,0 +1,10 @@
+#ifndef SPARSEVCTRS_SPARSE_ARITHMATIC_H
+#define SPARSEVCTRS_SPARSE_ARITHMATIC_H
+
+#define R_NO_REMAP
+#include <Rinternals.h>
+#include "sparse-utils.h"
+
+SEXP ffi_sparse_multiplication(SEXP x, SEXP y);
+
+#endif
diff --git a/src/sparse-utils.c b/src/sparse-utils.c
index 3976097..9663d8f 100644
--- a/src/sparse-utils.c
+++ b/src/sparse-utils.c
@@ -162,3 +162,63 @@ void verbose_materialize(void) {
     }
   }
 }
+
+// Sort `pos` ascending in place, applying the same permutation to `val`
+void sort_pos_and_val(SEXP pos, SEXP val) {
+  R_xlen_t len = Rf_length(pos);
+
+  SEXP index = PROTECT(Rf_allocVector(INTSXP, len));
+  SEXP sorted_pos = PROTECT(Rf_allocVector(INTSXP, len));
+
+  // Copy the positions, pairing each with its original index
+  for (R_xlen_t i = 0; i < len; i++) {
+    SET_INTEGER_ELT(sorted_pos, i, INTEGER_ELT(pos, i));
+    SET_INTEGER_ELT(index, i, i);
+  }
+
+  // Bubble sort the positions, carrying the index permutation along.
+  // O(n^2), but the position vectors involved are short.
+  for (R_xlen_t i = 0; i < len - 1; i++) {
+    for (R_xlen_t j = 0; j < len - i - 1; j++) {
+      if (INTEGER_ELT(sorted_pos, j) > INTEGER_ELT(sorted_pos, j + 1)) {
+        // Swap pairs
+        int temp_pos = INTEGER_ELT(sorted_pos, j);
+        int temp_index = INTEGER_ELT(index, j);
+
+        SET_INTEGER_ELT(sorted_pos, j, INTEGER_ELT(sorted_pos, j + 1));
+        SET_INTEGER_ELT(sorted_pos, j + 1, temp_pos);
+
+        SET_INTEGER_ELT(index, j, INTEGER_ELT(index, j + 1));
+        SET_INTEGER_ELT(index, j + 1, temp_index);
+      }
+    }
+  }
+
+  for (R_xlen_t i = 0; i < len; i++) {
+    SET_INTEGER_ELT(pos, i, INTEGER_ELT(sorted_pos, i));
+  }
+
+  // Apply the same permutation to the values
+  if (Rf_isInteger(val)) {
+    SEXP sorted_val = PROTECT(Rf_allocVector(INTSXP, len));
+
+    for (R_xlen_t i = 0; i < len; i++) {
+      int cur_index = INTEGER_ELT(index, i);
+
+      SET_INTEGER_ELT(sorted_val, i, INTEGER_ELT(val, cur_index));
+    }
+
+    for (R_xlen_t i = 0; i < len; i++) {
+      SET_INTEGER_ELT(val, i, INTEGER_ELT(sorted_val, i));
+    }
+
+    UNPROTECT(1);
+  } else {
+    SEXP sorted_val = PROTECT(Rf_allocVector(REALSXP, len));
+
+    for (R_xlen_t i = 0; i < len; i++) {
+      int cur_index = INTEGER_ELT(index, i);
+
+      SET_REAL_ELT(sorted_val, i, REAL_ELT(val, cur_index));
+    }
+
+    for (R_xlen_t i = 0; i < len; i++) {
+      SET_REAL_ELT(val, i, REAL_ELT(sorted_val, i));
+    }
+
+    UNPROTECT(1);
+  }
+
+  UNPROTECT(2);
+}
diff --git a/src/sparse-utils.h b/src/sparse-utils.h
index 4d1f149..47c010d 100644
--- a/src/sparse-utils.h
+++ b/src/sparse-utils.h
@@ -33,4 +33,6 @@ bool is_index_handleable(SEXP x);
 
 void verbose_materialize(void);
 
+void sort_pos_and_val(SEXP pos, SEXP val);
+
 #endif
diff --git a/tests/testthat/test-arithmatic.R b/tests/testthat/test-arithmatic.R
index 2f8ff3b..8055a8c 100644
--- a/tests/testthat/test-arithmatic.R
+++ b/tests/testthat/test-arithmatic.R
@@ -222,3 +222,391 @@ test_that("scalar subtraction works", {
     is_sparse_double(sparse_subtraction_scalar(vec_double, 5))
   )
 })
+
+test_that("vector multiplication works - integers", {
+  sparse_int_1 <- sparse_integer(c(4, 78), c(1, 10), 10)
+  sparse_int_2 <- sparse_integer(c(4, 78), c(1, 9), 10)
+  sparse_int_3 <- sparse_integer(c(4, 3, 4, 5), c(2, 3, 5, 9), 10)
+  sparse_int_NA <- sparse_integer(c(4, NA, 4, 5), c(2, 3, 5, 9), 10)
+  sparse_int_NA_2 <- sparse_integer(c(4, NA, 4, 5), c(2, 4, 5, 9), 10)
+
+  dense_int_1 <- sparse_int_1[]
+  dense_int_2 <- sparse_int_2[]
+  dense_int_3 <- sparse_int_3[]
+  dense_int_NA <- sparse_int_NA[]
+  dense_int_NA_2 <- sparse_int_NA_2[]
+
+  # integer, sparse x sparse
+  expect_identical(
+    sparse_multiplication(sparse_int_1, sparse_int_1),
+    sparse_int_1 * sparse_int_1
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_1, sparse_int_2),
+    sparse_int_1 * sparse_int_2
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_1, sparse_int_3),
+    sparse_int_1 * sparse_int_3
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_1, sparse_int_NA),
+    sparse_int_1 * sparse_int_NA
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_3, sparse_int_NA),
+    sparse_int_3 * sparse_int_NA
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_NA, sparse_int_NA_2),
+    sparse_int_NA * sparse_int_NA_2
+  )
+  # integer, dense x dense
+  expect_identical(
+    sparse_multiplication(dense_int_1, dense_int_1),
+    dense_int_1 * dense_int_1
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_1, dense_int_2),
+    dense_int_1 * dense_int_2
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_1, dense_int_3),
+    dense_int_1 * dense_int_3
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_1, dense_int_NA),
+    dense_int_1 * dense_int_NA
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_3, dense_int_NA),
+    dense_int_3 * dense_int_NA
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_NA, dense_int_NA_2),
+    dense_int_NA * dense_int_NA_2
+  )
+  # integer, sparse x dense
+  expect_identical(
+    sparse_multiplication(sparse_int_1, dense_int_1),
+    sparse_int_1 * dense_int_1
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_1, dense_int_2),
+    sparse_int_1 * dense_int_2
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_1, dense_int_3),
+    sparse_int_1 * dense_int_3
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_1, dense_int_NA),
+    sparse_int_1 * dense_int_NA
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_3, dense_int_NA),
+    sparse_int_3 * dense_int_NA
+  )
+  expect_identical(
+    sparse_multiplication(sparse_int_NA, dense_int_NA_2),
+    sparse_int_NA * dense_int_NA_2
+  )
+  # integer, dense x sparse
+  expect_identical(
+    sparse_multiplication(dense_int_1, sparse_int_1),
+    dense_int_1 * sparse_int_1
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_1, sparse_int_2),
+    dense_int_1 * sparse_int_2
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_1, sparse_int_3),
+    dense_int_1 * sparse_int_3
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_1, sparse_int_NA),
+    dense_int_1 * sparse_int_NA
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_3, sparse_int_NA),
+    dense_int_3 * sparse_int_NA
+  )
+  expect_identical(
+    sparse_multiplication(dense_int_NA, sparse_int_NA_2),
+    dense_int_NA * sparse_int_NA_2
+  )
+})
+
+test_that("vector multiplication works - doubles", {
+  sparse_dbl_1 <- sparse_double(c(4.3, 7.8), c(1, 10), 10)
+  sparse_dbl_2 <- sparse_double(c(4.3, 7.8), c(1, 9), 10)
+  sparse_dbl_3 <- sparse_double(c(4.2, 3.1, 4.4, 5.5), c(2, 3, 5, 9), 10)
+  sparse_dbl_NA <- sparse_double(c(4.2, NA, 4.4, 5.5), c(2, 3, 5, 9), 10)
+  sparse_dbl_NA_2 <- sparse_double(c(4.2, NA, 4.4, 5.5), c(2, 4, 5, 9), 10)
+
+  dense_dbl_1 <- sparse_dbl_1[]
+  dense_dbl_2 <- sparse_dbl_2[]
+  dense_dbl_3 <- sparse_dbl_3[]
+  dense_dbl_NA <- sparse_dbl_NA[]
+  dense_dbl_NA_2 <- sparse_dbl_NA_2[]
+
+  # double, sparse x sparse
+  expect_identical(
+    sparse_multiplication(sparse_dbl_1, sparse_dbl_1),
+    sparse_dbl_1 * sparse_dbl_1
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_1, sparse_dbl_2),
+    sparse_dbl_1 * sparse_dbl_2
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_1, sparse_dbl_3),
+    sparse_dbl_1 * sparse_dbl_3
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_1, sparse_dbl_NA),
+    sparse_dbl_1 * sparse_dbl_NA
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_3, sparse_dbl_NA),
+    sparse_dbl_3 * sparse_dbl_NA
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_NA, sparse_dbl_NA_2),
+    sparse_dbl_NA * sparse_dbl_NA_2
+  )
+  # double, dense x dense
+  expect_identical(
+    sparse_multiplication(dense_dbl_1, dense_dbl_1),
+    dense_dbl_1 * dense_dbl_1
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_1, dense_dbl_2),
+    dense_dbl_1 * dense_dbl_2
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_1, dense_dbl_3),
+    dense_dbl_1 * dense_dbl_3
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_1, dense_dbl_NA),
+    dense_dbl_1 * dense_dbl_NA
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_3, dense_dbl_NA),
+    dense_dbl_3 * dense_dbl_NA
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_NA, dense_dbl_NA_2),
+    dense_dbl_NA * dense_dbl_NA_2
+  )
+  # double, sparse x dense
+  expect_identical(
+    sparse_multiplication(sparse_dbl_1, dense_dbl_1),
+    sparse_dbl_1 * dense_dbl_1
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_1, dense_dbl_2),
+    sparse_dbl_1 * dense_dbl_2
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_1, dense_dbl_3),
+    sparse_dbl_1 * dense_dbl_3
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_1, dense_dbl_NA),
+    sparse_dbl_1 * dense_dbl_NA
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_3, dense_dbl_NA),
+    sparse_dbl_3 * dense_dbl_NA
+  )
+  expect_identical(
+    sparse_multiplication(sparse_dbl_NA, dense_dbl_NA_2),
+    sparse_dbl_NA * dense_dbl_NA_2
+  )
+  # double, dense x sparse
+  expect_identical(
+    sparse_multiplication(dense_dbl_1, sparse_dbl_1),
+    dense_dbl_1 * sparse_dbl_1
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_1, sparse_dbl_2),
+    dense_dbl_1 * sparse_dbl_2
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_1, sparse_dbl_3),
+    dense_dbl_1 * sparse_dbl_3
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_1, sparse_dbl_NA),
+    dense_dbl_1 * sparse_dbl_NA
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_3, sparse_dbl_NA),
+    dense_dbl_3 * sparse_dbl_NA
+  )
+  expect_identical(
+    sparse_multiplication(dense_dbl_NA, sparse_dbl_NA_2),
+    dense_dbl_NA * sparse_dbl_NA_2
+  )
+})
+
+test_that("vector multiplication works - types", {
+  sparse_int <- sparse_integer(c(4, 78), c(1, 10), 10)
+  dense_int <- sparse_int[]
+  sparse_dbl <- sparse_double(c(4.3, 7.8), c(1, 10), 10)
+  dense_dbl <- sparse_dbl[]
+
+  res <- sparse_multiplication(sparse_int, sparse_int)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.integer(res))
+
+  res <- sparse_multiplication(sparse_int, dense_int)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.integer(res))
+
+  res <- sparse_multiplication(sparse_int, sparse_dbl)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_int, dense_dbl)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(dense_int, sparse_int)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.integer(res))
+
+  res <- sparse_multiplication(dense_int, dense_int)
+  expect_false(is_sparse_vector(res))
+  expect_true(is.integer(res))
+
+  res <- sparse_multiplication(dense_int, sparse_dbl)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(dense_int, dense_dbl)
+  expect_false(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl, sparse_int)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl, dense_int)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl, sparse_dbl)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl, dense_dbl)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(dense_dbl, sparse_int)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(dense_dbl, dense_int)
+  expect_false(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(dense_dbl, sparse_dbl)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(dense_dbl, dense_dbl)
+  expect_false(is_sparse_vector(res))
+  expect_true(is.double(res))
+})
+
+test_that("vector multiplication works - non-zero defaults", {
+  sparse_int_0 <- sparse_integer(c(4, 78), c(1, 10), 10, default = 0)
+  sparse_int_1 <- sparse_integer(c(4, 78), c(1, 10), 10, default = 1)
+  sparse_dbl_0 <- sparse_double(c(4.3, 7.8), c(1, 10), 10, default = 0)
+  sparse_dbl_1 <- sparse_double(c(4.3, 7.8), c(1, 10), 10, default = 1)
+
+  res <- sparse_multiplication(sparse_int_0, sparse_int_0)
+  expect_identical(res, sparse_int_0 * sparse_int_0)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.integer(res))
+
+  res <- sparse_multiplication(sparse_int_0, sparse_int_1)
+  expect_identical(res, sparse_int_0 * sparse_int_1)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.integer(res))
+
+  res <- sparse_multiplication(sparse_int_0, sparse_dbl_0)
+  expect_identical(res, sparse_int_0 * sparse_dbl_0)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_int_0, sparse_dbl_1)
+  expect_identical(res, sparse_int_0 * sparse_dbl_1)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_int_1, sparse_int_0)
+  expect_identical(res, sparse_int_1 * sparse_int_0)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.integer(res))
+
+  res <- sparse_multiplication(sparse_int_1, sparse_int_1)
+  expect_identical(res, sparse_int_1 * sparse_int_1)
+  expect_false(is_sparse_vector(res))
+  expect_true(is.integer(res))
+
+  res <- sparse_multiplication(sparse_int_1, sparse_dbl_0)
+  expect_identical(res, sparse_int_1 * sparse_dbl_0)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_int_1, sparse_dbl_1)
+  expect_identical(res, sparse_int_1 * sparse_dbl_1)
+  expect_false(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl_0, sparse_int_0)
+  expect_identical(res, sparse_dbl_0 * sparse_int_0)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl_0, sparse_int_1)
+  expect_identical(res, sparse_dbl_0 * sparse_int_1)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl_0, sparse_dbl_0)
+  expect_identical(res, sparse_dbl_0 * sparse_dbl_0)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl_0, sparse_dbl_1)
+  expect_identical(res, sparse_dbl_0 * sparse_dbl_1)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl_1, sparse_int_0)
+  expect_identical(res, sparse_dbl_1 * sparse_int_0)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl_1, sparse_int_1)
+  expect_identical(res, sparse_dbl_1 * sparse_int_1)
+  expect_false(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl_1, sparse_dbl_0)
+  expect_identical(res, sparse_dbl_1 * sparse_dbl_0)
+  expect_true(is_sparse_vector(res))
+  expect_true(is.double(res))
+
+  res <- sparse_multiplication(sparse_dbl_1, sparse_dbl_1)
+  expect_identical(res, sparse_dbl_1 * sparse_dbl_1)
+  expect_false(is_sparse_vector(res))
+  expect_true(is.double(res))
+})
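The non-zero-`default` tests above all funnel through the materialization step in `sparse_multiplication()`: a sparse input whose `default` is not 0 is expanded with `x[]` before the C code runs, which is why those combinations come back dense exactly when both sides have non-zero defaults. A minimal sketch, assuming a build with this patch installed:

```r
library(sparsevctrs)

x <- sparse_double(c(4.3, 7.8), c(1, 10), 10, default = 1)
y <- sparse_double(c(2, 5), c(1, 2), 10)

is_sparse_vector(sparse_multiplication(x, y))  # TRUE:  y is still sparse
is_sparse_vector(sparse_multiplication(x, x))  # FALSE: both sides materialize
```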