diff --git a/lib/Kernel/EvalVisitor.cpp b/lib/Kernel/EvalVisitor.cpp
index 0985b11c38..9e0bc6f594 100644
--- a/lib/Kernel/EvalVisitor.cpp
+++ b/lib/Kernel/EvalVisitor.cpp
@@ -120,6 +120,8 @@ EvalResults EvalVisitor::operator()(const MultiplyNode& node) {
   const auto* lVec = std::get_if<std::vector<double>>(&lVal);
   const auto* rVec = std::get_if<std::vector<double>>(&rVal);
   if (lVec && rVec) {
+    LDBG() << "lVec shape: " << lVec->size();
+    LDBG() << "rVec shape: " << rVec->size();
     assert(left.getShape() == right.getShape() && "disagreeing shapes");
     std::vector<double> result(dim);
     for (size_t i = 0; i < dim; ++i) {
diff --git a/lib/Kernel/Kernel.cpp b/lib/Kernel/Kernel.cpp
index 6df3fe1033..00e3547993 100644
--- a/lib/Kernel/Kernel.cpp
+++ b/lib/Kernel/Kernel.cpp
@@ -25,10 +25,10 @@
 static std::unordered_map<KernelName, std::vector<std::string>> correspondingOp = {
     {KernelName::MatvecNaive, {"linalg.matvec"}},
     {KernelName::MatvecDiagonal,
-     {"linalg.matvec", "linalg.conv_2d_nchw_fchw"}},
+     {"linalg.matvec", "linalg.conv_2d_nchw_fchw", "linalg.conv_1d"}},
     {KernelName::VecmatDiagonal, {"linalg.vecmat"}},
     {KernelName::MatmulDiagonal, {"linalg.matmul"}},
-    {KernelName::MatmulDiagonal, {"linalg.conv2d"}},
+    {KernelName::MatmulDiagonal, {"linalg.conv_2d"}},
     {KernelName::MatmulBicyclic, {"linalg.matmul"}},
     {KernelName::Dot, {"linalg.dot"}},
 };
diff --git a/lib/Kernel/KernelImplementationTest.cpp b/lib/Kernel/KernelImplementationTest.cpp
index 7b2470ec26..c41a3ca177 100644
--- a/lib/Kernel/KernelImplementationTest.cpp
+++ b/lib/Kernel/KernelImplementationTest.cpp
@@ -255,6 +255,46 @@ TEST_P(KernelImplementationTest, Test2DConvWithLayout) {
   EXPECT_EQ(extractedResult, expected);
 }
 
+TEST_P(KernelImplementationTest, Test1DConvWithLayout) {
+  MLIRContext context;
+  RankedTensorType dataType =
+      RankedTensorType::get({3}, mlir::IndexType::get(&context));
+  RankedTensorType filterType =
+      RankedTensorType::get({2}, mlir::IndexType::get(&context));
+
+  int numSlots = 8;
+  // length 3 input data, length 2 filter
+  std::vector<int> data = {1, -1, 0};
+  std::vector<int> filter = {1, -1};
+
+  auto dataLayout = getRowMajorLayoutRelation(dataType, numSlots);
+  std::vector<std::vector<int>> packedData =
+      evaluateLayoutOnVector(dataLayout, data);
+
+  auto filterLayout = getConvFilterDiagonalizedRelation(filterType, dataType,
+                                                        /*padding=*/0, numSlots)
+                          .value();
+  std::vector<std::vector<int>> packedFilter =
+      evaluateLayoutOnVector(filterLayout, filter);
+  RankedTensorType expandedFilterType = get1dConvFilterExpandedType(
+      filterType, dataType, /*stride=*/1, /*padding=*/0);
+
+  std::vector<int> expected = {2, -1};
+  LiteralValue filterInput = packedFilter;
+  LiteralValue vectorInput = packedData[0];
+
+  auto dag = implementHaleviShoup(
+      vectorInput, filterInput, expandedFilterType.getShape(),
+      DagType::intTensor(32, {numSlots}),
+      /*zeroDiagonals=*/{}, /*unroll=*/std::get<0>(GetParam()));
+  LiteralValue actual = evalKernel(dag)[0];
+  // Result is a length 2 vector repeated in a tensor of size 8.
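+  // Concretely, this conv is the matvec [[1, -1, 0], [0, 1, -1]] * data, so
+  // slots 0 and 1 hold {2, -1} and the later slots repeat those values; only
+  // the first two slots are compared below.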
+  std::vector<int> actualVector = std::get<std::vector<int>>(actual.get());
+  std::vector<int> extractedResult(actualVector.begin(),
+                                   actualVector.begin() + 2);
+  EXPECT_EQ(extractedResult, expected);
+}
+
 TEST(KernelImplementationTest, BicyclicMatmul) {
   MLIRContext context;
   std::vector<std::vector<int>> matrixA = {
diff --git a/lib/Transforms/ConvertToCiphertextSemantics/ConvertToCiphertextSemantics.cpp b/lib/Transforms/ConvertToCiphertextSemantics/ConvertToCiphertextSemantics.cpp
index fb17e87550..ad226cfe41 100644
--- a/lib/Transforms/ConvertToCiphertextSemantics/ConvertToCiphertextSemantics.cpp
+++ b/lib/Transforms/ConvertToCiphertextSemantics/ConvertToCiphertextSemantics.cpp
@@ -878,6 +878,136 @@ struct ConvertLinalgMatvecLayout
   bool unrollKernels;
 };
 
+struct ConvertLinalgConv1D
+    : public ContextAwareOpConversionPattern<linalg::Conv1DOp> {
+ public:
+  using ContextAwareOpConversionPattern<
+      linalg::Conv1DOp>::ContextAwareOpConversionPattern;
+
+  ConvertLinalgConv1D(
+      const ContextAwareTypeConverter& contextAwareTypeConverter,
+      MLIRContext* context, bool unrollKernels = true)
+      : ContextAwareOpConversionPattern(contextAwareTypeConverter, context,
+                                        /*benefit=*/10),
+        unrollKernels(unrollKernels) {}
+
+  LayoutAttr getLayoutAttr(Value value) const {
+    auto layoutLookup = getTypeConverter()->getContextualAttr(value);
+    if (failed(layoutLookup)) {
+      return nullptr;
+    }
+    return dyn_cast<LayoutAttr>(layoutLookup.value());
+  }
+
+  bool supportsExpandedHaleviShoup(linalg::Conv1DOp op,
+                                   OpAdaptor adaptor) const {
+    Value filter = adaptor.getInputs().back();
+    auto materializedFilterType = cast<RankedTensorType>(filter.getType());
+
+    // If one of these dimensions is not a power of two, then we can't do
+    // the Halevi-Shoup or Squat Packing Matrix Multiplication conversion.
+    auto dimensions = materializedFilterType.getShape();
+    int64_t numRows = dimensions[0];
+    int64_t numCols = dimensions[1];
+    bool isPowerOfTwoDims = isPowerOfTwo(numRows) && isPowerOfTwo(numCols);
+
+    auto kernelAttr = op->getAttrOfType<secret::KernelAttr>(
+        secret::SecretDialect::kKernelAttrName);
+    bool isConv1dAsMatvec =
+        kernelAttr && kernelAttr.getName() == KernelName::MatvecDiagonal;
+
+    LLVM_DEBUG(llvm::dbgs()
+               << "supports expanded conv1d as matvec with halevi-shoup: "
+               << "isPowerOfTwoDims=" << isPowerOfTwoDims
+               << " isConv1dAsMatvec=" << isConv1dAsMatvec << "\n");
+
+    return isPowerOfTwoDims && isConv1dAsMatvec;
+  }
+
+  void haleviShoupKernel(
+      linalg::Conv1DOp op, OpAdaptor adaptor,
+      ContextAwareConversionPatternRewriter& rewriter) const {
+    LLVM_DEBUG(llvm::dbgs()
+               << "Converting linalg.conv1d op with halevi shoup kernel: "
+               << op << "\n");
+
+    TypedValue<RankedTensorType> data =
+        cast<TypedValue<RankedTensorType>>(adaptor.getInputs()[0]);
+    SSAValue vectorLeaf(data);
+    TypedValue<RankedTensorType> filter =
+        cast<TypedValue<RankedTensorType>>(adaptor.getInputs()[1]);
+    SSAValue matrixLeaf(filter);
+
+    // The original matrix shape is the shape of the expanded filter before
+    // diagonalization.
+    RankedTensorType expandedMatrixType = get1dConvFilterExpandedType(
+        cast<RankedTensorType>(op.getInputs()[1].getType()),
+        cast<RankedTensorType>(op.getInputs()[0].getType()), /*stride=*/1,
+        /*padding=*/0);
+
+    // Collect any zero diagonals of the filter matrix.
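+    // All-zero diagonals contribute nothing to the matvec, so they are
+    // handed to implementHaleviShoup below, which can then skip the
+    // corresponding rotate-and-multiply steps.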
+    LayoutAttr filterLayout = getLayoutAttr(adaptor.getInputs()[1]);
+    auto filterRelation = filterLayout.getIntegerRelation();
+
+    PointCollector collector;
+    std::map<int64_t, bool> zeroDiagonals;
+    getCtComplementPoints(filterRelation, collector, filter.getType());
+    for (const auto& point : collector.points) {
+      zeroDiagonals[point[0]] = true;
+    }
+
+    auto dagType = kernel::mlirTypeToDagType(data.getType());
+    std::shared_ptr<ArithmeticDagNode<SSAValue>> implementedKernel =
+        implementHaleviShoup(vectorLeaf, matrixLeaf,
+                             expandedMatrixType.getShape().vec(), dagType,
+                             zeroDiagonals,
+                             /*unroll=*/unrollKernels);
+
+    rewriter.setInsertionPointAfter(op);
+    ImplicitLocOpBuilder b(op.getLoc(), rewriter);
+    IRMaterializingVisitor visitor(data.getType(), [&](Operation* createdOp) {
+      setMaterializedAttr(createdOp);
+    });
+    Value finalOutput = visitor.process(implementedKernel, b)[0];
+
+    auto layoutAttr = cast<LayoutAttr>(op->getAttr(kLayoutAttrName));
+    auto finalOutputOp = finalOutput.getDefiningOp();
+    finalOutputOp->setAttr(kLayoutAttrName, layoutAttr);
+    setMaterializedAttr(finalOutputOp);
+
+    // Add the initial accumulator value.
+    Value result = adaptor.getOutputs()[0];
+    Operation* addBias =
+        makeAppropriatelyTypedAddOp(b, op->getLoc(), finalOutput, result);
+    addBias->setAttr(kLayoutAttrName, layoutAttr);
+    setMaterializedAttr(addBias);
+    rewriter.replaceOp(op, addBias);
+  }
+
+  LogicalResult matchAndRewrite(
+      linalg::Conv1DOp op, OpAdaptor adaptor,
+      ContextAwareConversionPatternRewriter& rewriter) const final {
+    Value data = adaptor.getInputs().front();
+    Value filter = adaptor.getInputs().back();
+    LayoutAttr dataLayout = getLayoutAttr(data);
+    LayoutAttr filterLayout = getLayoutAttr(filter);
+
+    if (!dataLayout || !filterLayout)
+      return rewriter.notifyMatchFailure(
+          op, "missing new layout attribute for data and filter");
+
+    if (supportsExpandedHaleviShoup(op, adaptor)) {
+      haleviShoupKernel(op, adaptor, rewriter);
+      return success();
+    }
+
+    return op.emitError() << "unsupported layout for 1d conv";
+  }
+
+ private:
+  bool unrollKernels;
+};
+
 struct ConvertLinalgConv2D
     : public ContextAwareOpConversionPattern<linalg::Conv2DOp> {
  public:
@@ -2324,9 +2454,9 @@ struct ConvertToCiphertextSemantics
                ConvertTensorExtractLayout, ConvertTensorExtractSlice,
                ConvertTensorInsertLayout, ConvertTensorInsertSlice>(
         typeConverter, context);
-    patterns.add<ConvertLinalgConv2D>(typeConverter, context,
-                                      unrollKernels);
+    patterns.add<ConvertLinalgConv1D, ConvertLinalgConv2D>(
+        typeConverter, context, unrollKernels);
     patterns.add(typeConverter, context, ciphertextSize);
 
     ConversionConfig config;
diff --git a/lib/Transforms/LayoutOptimization/BUILD b/lib/Transforms/LayoutOptimization/BUILD
index eb2d6d88be..b2d19524b8 100644
--- a/lib/Transforms/LayoutOptimization/BUILD
+++ b/lib/Transforms/LayoutOptimization/BUILD
@@ -68,6 +68,7 @@ cc_library(
         "@heir//lib/Interface:HoistingInterfaces",
         "@heir//lib/Kernel",
        "@heir//lib/Utils:AttributeUtils",
+        "@heir//lib/Utils/Layout:Convolution",
         "@heir//lib/Utils/Layout:Hoisting",
         "@llvm-project//llvm:Support",
         "@llvm-project//mlir:Analysis",
diff --git a/lib/Transforms/LayoutOptimization/Hoisting.h b/lib/Transforms/LayoutOptimization/Hoisting.h
index 446680c336..660e215d09 100644
--- a/lib/Transforms/LayoutOptimization/Hoisting.h
+++ b/lib/Transforms/LayoutOptimization/Hoisting.h
@@ -16,7 +16,7 @@ struct HoistResult {
   // A new result layout
   ::mlir::Attribute newOutputLayout;
 
-  // A new result layout
+  // A new result kernel
   ::mlir::heir::KernelName newKernel;
 
   // The convert_layout op hoisted.
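[Reviewer note] Throughout this patch, the "expanded filter" is the Toeplitz
matrix of the 1d filter, which turns the convolution into an ordinary matvec
that Halevi-Shoup diagonal packing can handle. A minimal standalone sketch of
that construction (hypothetical helper for illustration only, not the HEIR
API; stride 1 and zero padding assumed):

    #include <cstdint>
    #include <vector>

    // Build the (n - k + 1) x n Toeplitz matrix M with M[i][i + j] =
    // filter[j], so that linalg.conv_1d(data, filter) == M * data.
    std::vector<std::vector<int64_t>> expandFilter(
        const std::vector<int64_t>& filter, int64_t n) {
      int64_t k = static_cast<int64_t>(filter.size());
      std::vector<std::vector<int64_t>> m(n - k + 1,
                                          std::vector<int64_t>(n, 0));
      for (int64_t i = 0; i < n - k + 1; ++i)
        for (int64_t j = 0; j < k; ++j) m[i][i + j] = filter[j];
      return m;
    }

    // For filter {1, -1} and n = 3 this yields {{1, -1, 0}, {0, 1, -1}},
    // which maps data {1, -1, 0} to {2, -1}, the expected value in
    // Test1DConvWithLayout above.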
diff --git a/lib/Transforms/LayoutOptimization/InterfaceImpl.cpp b/lib/Transforms/LayoutOptimization/InterfaceImpl.cpp
index 05df259069..3667ba7381 100644
--- a/lib/Transforms/LayoutOptimization/InterfaceImpl.cpp
+++ b/lib/Transforms/LayoutOptimization/InterfaceImpl.cpp
@@ -10,10 +10,10 @@
 #include "lib/Dialect/TensorExt/IR/TensorExtDialect.h"
 #include "lib/Dialect/TensorExt/IR/TensorExtOps.h"
 #include "lib/Interface/HoistingInterfaces.h"
-#include "lib/Kernel/Kernel.h"
 #include "lib/Kernel/KernelName.h"
 #include "lib/Transforms/LayoutOptimization/Hoisting.h"
 #include "lib/Utils/AttributeUtils.h"
+#include "lib/Utils/Layout/Convolution.h"
 #include "lib/Utils/Layout/Hoisting.h"
 #include "llvm/include/llvm/ADT/STLExtras.h"  // from @llvm-project
 #include "llvm/include/llvm/Support/Debug.h"  // from @llvm-project
@@ -37,6 +37,7 @@ namespace heir {
 using tensor_ext::ConvertLayoutOp;
 using tensor_ext::LayoutAttr;
 static auto& kLayoutAttrName = tensor_ext::TensorExtDialect::kLayoutAttrName;
+using ::mlir::linalg::Conv1DOp;
 using ::mlir::linalg::MatvecOp;
 using presburger::IntegerRelation;
 using presburger::PresburgerSpace;
@@ -113,6 +114,42 @@ struct MatvecHoistingImpl
   }
 };
 
+struct Conv1dHoistingImpl
+    : public LayoutConversionHoistableOpInterface::ExternalModel<
+          Conv1dHoistingImpl, Conv1DOp> {
+  std::vector<Hoister> getHoisters(
+      Operation* op, tensor_ext::ConvertLayoutOp convertLayoutOp) const {
+    std::vector<Hoister> hoisters;
+    linalg::Conv1DOp conv1dOp = cast<linalg::Conv1DOp>(op);
+
+    auto kernel = op->getAttrOfType<secret::KernelAttr>(
+        secret::SecretDialect::kKernelAttrName);
+    if (!kernel) {
+      LLVM_DEBUG(llvm::dbgs()
+                 << "Kernel attribute not found on op " << *op << "\n");
+      return hoisters;
+    }
+
+    if (!op->hasAttr(tensor_ext::TensorExtDialect::kLayoutAttrName)) {
+      LLVM_DEBUG(llvm::dbgs()
+                 << "Layout attribute not found on op " << *op << "\n");
+      return hoisters;
+    }
+
+    switch (kernel.getName()) {
+      case heir::KernelName::MatvecNaive:
+      case heir::KernelName::MatvecDiagonal:
+        hoisters.push_back(createPrecomposingConv1dHoister(conv1dOp));
+        break;
+      default:
+        assert(false && "unsupported kernel for layout hoisting");
+        break;
+    }
+
+    return hoisters;
+  }
+};
+
 struct MatmulHoistingImpl
     : public LayoutConversionHoistableOpInterface::ExternalModel<
           MatmulHoistingImpl, linalg::MatmulOp> {
@@ -267,6 +304,48 @@ Hoister createPrecomposingMatvecHoister(linalg::MatvecOp op) {
   };
 }
 
+Hoister createPrecomposingConv1dHoister(linalg::Conv1DOp op) {
+  return [op](ConvertLayoutOp convertLayoutOp) -> llvm::FailureOr<HoistResult> {
+    HoistResult result;
+    auto fromLayout = dyn_cast<LayoutAttr>(convertLayoutOp.getFromLayout());
+    auto toLayout = dyn_cast<LayoutAttr>(convertLayoutOp.getToLayout());
+
+    if (!fromLayout || !toLayout) return failure();
+
+    // Operand order for Conv_1d op is:
+    //
+    //   0: data vector
+    //   1: filter vector
+    //   2: output vector
+    result.convertLayoutOp = convertLayoutOp;
+    // All the matvec kernels we have today should maintain the layout of the
+    // vector before and after the op.
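+    // Hence the hoisted op's output can keep toLayout unchanged; only the
+    // filter layout needs to be recomputed below.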
+    result.newOutputLayout = toLayout;
+
+    auto filterType = cast<RankedTensorType>(op->getOperand(1).getType());
+    auto dataType = cast<RankedTensorType>(op->getOperand(0).getType());
+
+    auto maybeFilterRelation =
+        getConvFilterDiagonalizedRelation(filterType, dataType, 1, 0);
+    assert(succeeded(maybeFilterRelation) &&
+           "Could not get diagonalized filter relation");
+    auto filterRelation = maybeFilterRelation.value();
+
+    // Replace the kernel with a matrix-vector product derived from
+    // filterRelation.
+    result.newKernel = KernelName::MatvecDiagonal;
+
+    presburger::IntegerRelation newMatrixLayoutRelation =
+        hoistConversionThroughMatvec(filterRelation,
+                                     fromLayout.getIntegerRelation(),
+                                     toLayout.getIntegerRelation());
+    Attribute newMatrixLayout = LayoutAttr::getFromIntegerRelation(
+        op->getContext(), newMatrixLayoutRelation);
+    result.newInputLayouts =
+        SmallVector<Attribute>{newMatrixLayout, toLayout, toLayout};
+    return result;
+  };
+}
+
 void registerLayoutConversionHoistableInterface(DialectRegistry& registry) {
   registry.addExtension(+[](MLIRContext* ctx, arith::ArithDialect* dialect) {
     arith::AddFOp::attachInterface>(*ctx);
@@ -282,6 +361,7 @@ void registerLayoutConversionHoistableInterface(DialectRegistry& registry) {
   registry.addExtension(+[](MLIRContext* ctx, linalg::LinalgDialect* dialect) {
     linalg::MatvecOp::attachInterface<MatvecHoistingImpl>(*ctx);
     linalg::MatmulOp::attachInterface<MatmulHoistingImpl>(*ctx);
+    linalg::Conv1DOp::attachInterface<Conv1dHoistingImpl>(*ctx);
   });
 }
diff --git a/lib/Transforms/LayoutOptimization/InterfaceImpl.h b/lib/Transforms/LayoutOptimization/InterfaceImpl.h
index 545329a897..729b58c6de 100644
--- a/lib/Transforms/LayoutOptimization/InterfaceImpl.h
+++ b/lib/Transforms/LayoutOptimization/InterfaceImpl.h
@@ -17,6 +17,7 @@ Hoister createTrivialHoister(Operation* op);
 /// to vecToLayout, while keeping the kernel the same.
 Hoister createPrecomposingMatvecHoister(linalg::MatvecOp op);
+Hoister createPrecomposingConv1dHoister(linalg::Conv1DOp op);
 
 /// Construct a hoister that hoists a layout conversion through a trivial
 /// rank-reducing collapse_shape operation.
 Hoister createCollapseShapeHoister(tensor::CollapseShapeOp op);
diff --git a/lib/Transforms/LayoutPropagation/LayoutPropagation.cpp b/lib/Transforms/LayoutPropagation/LayoutPropagation.cpp
index 0e1eb67137..705f98fcc0 100644
--- a/lib/Transforms/LayoutPropagation/LayoutPropagation.cpp
+++ b/lib/Transforms/LayoutPropagation/LayoutPropagation.cpp
@@ -60,6 +60,7 @@ namespace mlir {
 namespace heir {
 
+using linalg::Conv1DOp;
 using linalg::Conv2DNchwFchwOp;
 using linalg::Conv2DOp;
 using linalg::DotOp;
@@ -142,6 +143,7 @@ struct LayoutPropagation : impl::LayoutPropagationBase<LayoutPropagation> {
   LogicalResult visitOperation(ExpandShapeOp op);
   LogicalResult visitOperation(GenericOp op);
   LogicalResult visitOperation(ReduceOp op);
+  LogicalResult visitOperation(Conv1DOp op);
   LogicalResult visitOperation(Conv2DOp op);
   LogicalResult visitOperation(Conv2DNchwFchwOp op);
   LogicalResult visitOperation(VecmatOp op);
@@ -165,6 +167,7 @@ struct LayoutPropagation : impl::LayoutPropagationBase<LayoutPropagation> {
   // Op-specific compatibility functions
   CompatibilityResult hasCompatibleArgumentLayouts(DotOp op);
+  CompatibilityResult hasCompatibleArgumentLayouts(Conv1DOp op);
   CompatibilityResult hasCompatibleArgumentLayouts(Conv2DOp op);
   CompatibilityResult hasCompatibleArgumentLayouts(Conv2DNchwFchwOp op);
   CompatibilityResult hasCompatibleArgumentLayouts(ReduceOp op);
@@ -294,7 +297,7 @@ LogicalResult LayoutPropagation::visitOperation(Operation* op) {
       // secret ops
       .Case([&](auto op) { return visitOperation(op); })
       // linalg ops
-      .Case<Conv2DOp, Conv2DNchwFchwOp, DotOp, MatmulOp, MatvecOp, VecmatOp>([&](auto op) { return visitOperation(op); })
+      .Case<Conv1DOp, Conv2DOp, Conv2DNchwFchwOp, DotOp, MatmulOp, MatvecOp, VecmatOp>([&](auto op) { return visitOperation(op); })
       // affine ops
       .Case([&](auto op) { return visitOperation(op); })
@@ -611,6 +614,79 @@ LogicalResult LayoutPropagation::visitOperation(DotOp op) {
   return success();
 }
 
+LogicalResult LayoutPropagation::visitOperation(Conv1DOp op) {
+  LLVM_DEBUG(llvm::dbgs() << "Specializing visitor on Conv1DOp\n");
+  Value data = op.getInputs().front();
+  Value filter = op.getInputs().back();
+  auto dataType = cast<RankedTensorType>(data.getType());
+  auto filterType = cast<RankedTensorType>(filter.getType());
+
+  // Flattened data must fit into the ciphertext size.
+  if (dataType.getNumElements() > ciphertextSize) {
+    return op->emitOpError()
+           << "Flattened data must fit into a single ciphertext, but got "
+           << dataType.getNumElements() << " elements and ciphertext size is "
+           << ciphertextSize;
+  }
+
+  MLIRContext* ctx = &getContext();
+  mlir::IRRewriter builder(ctx);
+
+  // TODO(#1597): a layout optimizer should really be selecting the
+  // layout instead of this pass.
+  LayoutAttr dataLayout = assignedLayouts.at(data);
+  if (!isRelationRowMajor(dataType, ciphertextSize,
+                          dataLayout.getIntegerRelation())) {
+    LLVM_DEBUG(llvm::dbgs() << "conv_1d data input is not row major, "
+                               "inserting layout conversion.\n");
+    auto [toReplace, newDataLayoutAttr] =
+        convertToLayout(ctx, builder, op, data, dataLayout,
+                        getRowMajorLayoutRelation(dataType, ciphertextSize));
+    debugAssignLayout(toReplace, newDataLayoutAttr);
+    assignedLayouts.insert({toReplace, newDataLayoutAttr});
+  }
+
+  // The kernel for this operation requires expanding the conv filter matrix
+  // into a larger matrix and then diagonalizing.
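+  // Concretely, the expanded matrix is the Toeplitz form of the filter, and
+  // its generalized diagonals become the plaintext multiplicands of the
+  // Halevi-Shoup matvec kernel.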
+  LayoutAttr filterLayout = assignedLayouts.at(filter);
+  if (!isRelationConvFilterDiagonalized(filterType, dataType, /*padding=*/0,
+                                        ciphertextSize,
+                                        filterLayout.getIntegerRelation())) {
+    LLVM_DEBUG(llvm::dbgs() << "conv_1d filter input is not diagonalized, "
+                               "inserting layout conversion.\n");
+    // Insert a layout conversion op to make the matrix layout expanded and
+    // squat diagonal.
+    auto convRelation = getConvFilterDiagonalizedRelation(
+        filterType, dataType, /*padding=*/0, ciphertextSize);
+    if (failed(convRelation)) {
+      return failure();
+    }
+    auto [toReplace, newFilterLayoutAttr] = convertToLayout(
+        ctx, builder, op, filter, filterLayout, convRelation.value());
+    debugAssignLayout(toReplace, newFilterLayoutAttr);
+    assignedLayouts.insert({toReplace, newFilterLayoutAttr});
+  }
+
+  // There is always exactly one result, and for the kernels we have right now
+  // it is always a row-major replicated vector. Since the output has a
+  // different shape than the input, assign it a fresh default layout.
+  auto result = op->getResult(0);
+  RankedTensorType outputType = cast<RankedTensorType>(result.getType());
+  FailureOr<LayoutAttr> outputLayoutResult = defaultLayoutForType(outputType);
+  if (failed(outputLayoutResult)) {
+    return failure();
+  }
+  LayoutAttr resultLayout = outputLayoutResult.value();
+
+  assignedLayouts.insert({result, resultLayout});
+  setResultLayoutAttr(op);
+  auto kernelAttr = secret::KernelAttr::get(ctx, KernelName::MatvecDiagonal,
+                                            /*force=*/false);
+  op->setAttr(secret::SecretDialect::kKernelAttrName, kernelAttr);
+
+  return success();
+}
+
 LogicalResult LayoutPropagation::visitOperation(Conv2DOp op) {
   LLVM_DEBUG(llvm::dbgs() << "Specializing visitor on Conv2DOp\n");
   Value data = op.getInputs().front();
@@ -1039,8 +1115,8 @@ CompatibilityResult LayoutPropagation::hasCompatibleArgumentLayouts(
            affine::AffineYieldOp>(
           [&](auto op) { return CompatibilityResult{true, std::nullopt}; })
       // Ops with special rules
-      .Case<DotOp, Conv2DOp, Conv2DNchwFchwOp, ReduceOp>(
+      .Case<DotOp, Conv1DOp, Conv2DOp, Conv2DNchwFchwOp, ReduceOp>(
          [&](auto op) { return hasCompatibleArgumentLayouts(op); })
       // By default, assume operands must all have the same layout.
       .Default([&](Operation* op) {
@@ -1156,6 +1232,22 @@ CompatibilityResult LayoutPropagation::hasCompatibleArgumentLayouts(
   return {true, std::nullopt};
 }
 
+CompatibilityResult LayoutPropagation::hasCompatibleArgumentLayouts(
+    Conv1DOp op) {
+  // Currently only support secret data and plaintext filters.
+  Value data = op.getInputs().front();
+  Value filter = op.getInputs().back();
+  if (isSecret(filter, solver) || !isSecret(data, solver)) {
+    return {false, op->emitError("Only secret data and plaintext filters are "
+                                 "supported for linalg.conv1d")};
+  }
+
+  if (!assignedLayouts.contains(data)) {
+    return {false, op->emitError("data operand has no assigned layout")};
+  }
+  return {true, std::nullopt};
+}
+
 CompatibilityResult LayoutPropagation::hasCompatibleArgumentLayouts(
     Conv2DOp op) {
   // Currently only support secret data and plaintext filters.
diff --git a/tests/Examples/openfhe/ckks/conv_1d/BUILD b/tests/Examples/openfhe/ckks/conv_1d/BUILD
new file mode 100644
index 0000000000..52eeb3f8b8
--- /dev/null
+++ b/tests/Examples/openfhe/ckks/conv_1d/BUILD
@@ -0,0 +1,19 @@
+# See README.md for setup required to run these tests
+
+load("@heir//tests/Examples/openfhe:test.bzl", "openfhe_end_to_end_test")
+
+package(default_applicable_licenses = ["@heir//:license"])
+
+openfhe_end_to_end_test(
+    name = "conv_1d_test",
+    generated_lib_header = "conv_1d_lib.h",
+    heir_opt_flags = [
+        "--annotate-module=backend=openfhe scheme=ckks",
+        "--mlir-to-ckks=ciphertext-degree=32",
+        "--scheme-to-openfhe",
+    ],
+    heir_translate_flags = [],
+    mlir_src = "@heir//tests/Examples/openfhe/ckks/conv_1d:conv_1d.mlir",
+    tags = ["notap"],
+    test_src = "conv_1d_test.cpp",
+)
diff --git a/tests/Examples/openfhe/ckks/conv_1d/conv_1d.mlir b/tests/Examples/openfhe/ckks/conv_1d/conv_1d.mlir
new file mode 100644
index 0000000000..17e61f22c4
--- /dev/null
+++ b/tests/Examples/openfhe/ckks/conv_1d/conv_1d.mlir
@@ -0,0 +1,11 @@
+module {
+  func.func @conv_1d(%arg0: !secret.secret<tensor<8xf32>>, %arg1: tensor<4xf32>) -> !secret.secret<tensor<5xf32>> {
+    %1 = tensor.empty() : tensor<5xf32>
+    %3 = secret.generic(%arg0: !secret.secret<tensor<8xf32>>) {
+    ^body(%input0: tensor<8xf32>):
+      %4 = linalg.conv_1d ins(%input0, %arg1 : tensor<8xf32>, tensor<4xf32>) outs(%1 : tensor<5xf32>) -> tensor<5xf32>
+      secret.yield %4 : tensor<5xf32>
+    } -> !secret.secret<tensor<5xf32>>
+    return %3 : !secret.secret<tensor<5xf32>>
+  }
+}
diff --git a/tests/Examples/openfhe/ckks/conv_1d/conv_1d_test.cpp b/tests/Examples/openfhe/ckks/conv_1d/conv_1d_test.cpp
new file mode 100644
index 0000000000..a440b81991
--- /dev/null
+++ b/tests/Examples/openfhe/ckks/conv_1d/conv_1d_test.cpp
@@ -0,0 +1,45 @@
+#include <vector>
+
+#include "gtest/gtest.h"  // from @googletest
+
+// Generated headers (block clang-format from messing up order)
+#include "tests/Examples/openfhe/ckks/conv_1d/conv_1d_lib.h"
+
+namespace mlir {
+namespace heir {
+namespace openfhe {
+
+TEST(Conv1DTest, RunTest) {
+  auto cryptoContext = conv_1d__generate_crypto_context();
+  auto keyPair = cryptoContext->KeyGen();
+  auto publicKey = keyPair.publicKey;
+  auto secretKey = keyPair.secretKey;
+  cryptoContext = conv_1d__configure_crypto_context(cryptoContext, secretKey);
+
+  // ct is a length 8 input vector
+  std::vector<float> m = {0.0f, 0.1f, 0.2f, 0.3f, 0.4f, 0.5f, 0.6f, 0.7f};
+
+  // pt is a length 4 input filter
+  std::vector<float> filter = {1.0f, -1.0f, 0.0f, 1.0f};
+
+  // expected is the length 5 result of the 1d conv:
+  // out[i] = m[i] - m[i+1] + m[i+3] = m[i] + 0.2
+  std::vector<float> expected = {0.2f, 0.3f, 0.4f, 0.5f, 0.6f};
+
+  auto ctEncrypted =
+      conv_1d__encrypt__arg0(cryptoContext, m, keyPair.publicKey);
+
+  auto result = conv_1d(cryptoContext, ctEncrypted, filter);
+
+  auto actual =
+      conv_1d__decrypt__result0(cryptoContext, result, keyPair.secretKey);
+
+  ASSERT_EQ(actual.size(), expected.size());
+
+  for (size_t i = 0; i < expected.size(); ++i) {
+    EXPECT_NEAR(expected[i], actual[i], 1e-3);
+  }
+}
+
+}  // namespace openfhe
+}  // namespace heir
+}  // namespace mlir
diff --git a/tests/Transforms/convert_to_ciphertext_semantics/conv1d.mlir b/tests/Transforms/convert_to_ciphertext_semantics/conv1d.mlir
new file mode 100644
index 0000000000..89b238ce65
--- /dev/null
+++ b/tests/Transforms/convert_to_ciphertext_semantics/conv1d.mlir
@@ -0,0 +1,26 @@
+// RUN: heir-opt %s --split-input-file --convert-to-ciphertext-semantics=ciphertext-size=32 | FileCheck %s
+
+#kernel = #secret.kernel<name = "MatvecDiagonal", force = false>
+#layout = #tensor_ext.layout<"{ [i0] -> [ct, slot] : ct = 0 and (-i0 + slot) mod 4 = 0 and 0 <= i0 <= 2 and 0 <= slot <= 1023 }">
+#layout1 = #tensor_ext.layout<"{ [i0] -> [ct, slot] : ct = 0 and (-i0 + slot) mod 8 = 0 and 0 <= i0 <= 4 and 0 <= slot <= 1023 }">
+#layout2 = #tensor_ext.layout<"{ [i0] -> [ct, slot] : (-i0 + ct + 4*floor((1 + slot)/4)) mod 8 = 0 and 0 <= i0 <= 2 and 0 <= ct <= 3 and 0 <= slot <= 1023 and 4*floor((1 + slot)/4) >= -4 + i0 + slot and 4*floor((1 + slot)/4) <= slot and 4*floor((1 + slot)/4) <= i0 + slot }">
+module {
+  // CHECK: func.func @conv1d
+  func.func @conv1d(%arg0: !secret.secret<tensor<5xf32>> {tensor_ext.layout = #layout1}) -> (!secret.secret<tensor<3xf32>> {tensor_ext.layout = #layout}) {
+    // CHECK-DAG: %[[cst:.*]] = arith.constant dense<0{{.*}}> : tensor<1x32xf32>
+    // CHECK-DAG: %[[cst_0:.*]] = arith.constant dense<2{{.*}}> : tensor<3xf32>
+    %cst = arith.constant dense<0.000000e+00> : tensor<3xf32>
+    %cst_0 = arith.constant dense<2.000000e+00> : tensor<3xf32>
+    %0 = secret.generic(%arg0: !secret.secret<tensor<5xf32>> {tensor_ext.layout = #layout1}) {
+    ^body(%input0: tensor<5xf32>):
+      // CHECK: secret.generic
+      // CHECK: func.call @_assign_layout_{{[0-9]+}}(%[[cst_0]])
+      // CHECK-COUNT-7: tensor_ext.rotate
+      %1 = tensor_ext.assign_layout %cst_0 {layout = #layout2, tensor_ext.layout = #layout2} : tensor<3xf32>
+      %2 = tensor_ext.assign_layout %cst {layout = #layout1, tensor_ext.layout = #layout1} : tensor<3xf32>
+      %3 = linalg.conv_1d {secret.kernel = #kernel, tensor_ext.layout = #layout} ins(%input0, %1 : tensor<5xf32>, tensor<3xf32>) outs(%2 : tensor<3xf32>) -> tensor<3xf32>
+      secret.yield %3 : tensor<3xf32>
+    } -> (!secret.secret<tensor<3xf32>> {tensor_ext.layout = #layout})
+    return %0 : !secret.secret<tensor<3xf32>>
+  }
+}
diff --git a/tests/Transforms/layout_propagation/conv1d.mlir b/tests/Transforms/layout_propagation/conv1d.mlir
new file mode 100644
index 0000000000..213fafe98a
--- /dev/null
+++ b/tests/Transforms/layout_propagation/conv1d.mlir
@@ -0,0 +1,26 @@
+// RUN: heir-opt --layout-propagation --fold-convert-layout-into-assign-layout %s | FileCheck %s
+
+// CHECK: #kernel = #secret.kernel<name = "MatvecDiagonal", force = false>
+
+// CHECK: @conv1d
+// CHECK-SAME: %[[arg0:.*]]: !secret.secret<tensor<5xf32>> {tensor_ext.layout = [[rm_layout:.*]]}) ->
+func.func @conv1d(%arg0: !secret.secret<tensor<5xf32>>) -> !secret.secret<tensor<3xf32>> {
+  %cst = arith.constant dense<0.000000e+00> : tensor<3xf32>
+  // CHECK: %[[out:.*]] = arith.constant dense<0.00
+  // CHECK: %[[filter:.*]] = arith.constant
+  // CHECK-SAME: tensor<3xf32>
+
+  // Assign a layout to the filter and input
+  // CHECK-DAG: tensor_ext.assign_layout %[[filter]]
+  // CHECK-DAG: tensor_ext.assign_layout %[[out]]
+  %cst_0 = arith.constant dense<2.0> : tensor<3xf32>
+  %0 = secret.generic(%arg0 : !secret.secret<tensor<5xf32>>) {
+  ^body(%input0: tensor<5xf32>):
+    // CHECK: linalg.conv_1d
+    // CHECK-SAME: secret.kernel = #kernel
+    %1 = linalg.conv_1d ins(%input0, %cst_0 : tensor<5xf32>, tensor<3xf32>) outs(%cst : tensor<3xf32>) -> tensor<3xf32>
+    secret.yield %1 : tensor<3xf32>
+    // CHECK: secret.yield
+    // CHECK-NEXT: -> (!secret.secret<tensor<3xf32>> {tensor_ext.layout = [[rm_layout2:.*]]})
+  } -> !secret.secret<tensor<3xf32>>
+  return %0 : !secret.secret<tensor<3xf32>>
+}
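[Reviewer note] As a cleartext cross-check of the new OpenFHE example: the
`expected` vector in conv_1d_test.cpp matches a plain reference convolution.
A sketch (illustration only, independent of the generated conv_1d API):

    #include <cstddef>
    #include <vector>

    // Reference for linalg.conv_1d with stride 1 and no padding:
    //   out[i] = sum_j in[i + j] * filter[j]
    std::vector<float> conv1dRef(const std::vector<float>& in,
                                 const std::vector<float>& filter) {
      std::vector<float> out(in.size() - filter.size() + 1, 0.0f);
      for (size_t i = 0; i < out.size(); ++i)
        for (size_t j = 0; j < filter.size(); ++j)
          out[i] += in[i + j] * filter[j];
      return out;
    }

    // conv1dRef({0.0f, 0.1f, ..., 0.7f}, {1.0f, -1.0f, 0.0f, 1.0f}) yields
    // {0.2f, 0.3f, 0.4f, 0.5f, 0.6f}: each out[i] = m[i] - m[i+1] + m[i+3]
    // = m[i] + 0.2, matching `expected` above.

Assuming the standard Bazel workflow used by the other openfhe_end_to_end_test
targets (and the OpenFHE setup described in the directory's README), the new
example runs with `bazel test //tests/Examples/openfhe/ckks/conv_1d:conv_1d_test`.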