diff --git a/CompilerInterface/BaseCompilerInterface.py b/CompilerInterface/BaseCompilerInterface.py index a58e639..1d0c492 100644 --- a/CompilerInterface/BaseCompilerInterface.py +++ b/CompilerInterface/BaseCompilerInterface.py @@ -1,3 +1,18 @@ +# ------------------------------------------------------------------------------ +# +# Part of the MLCompilerBridge Project, under the Apache 2.0 License. +# See the LICENSE file under home directory for license and copyright +# information. +# +# ------------------------------------------------------------------------------ +# +# This file contains the abstract class for compiler interface. It specifies the +# methods for communication with compiler. It also initializes the correct +# SerDes object for serialization and deserialization of data. +# +# ------------------------------------------------------------------------------ + + from abc import ABC, abstractmethod from SerDes import SerDes import os diff --git a/CompilerInterface/GrpcCompilerInterface.py b/CompilerInterface/GrpcCompilerInterface.py index f2a6278..60522e0 100644 --- a/CompilerInterface/GrpcCompilerInterface.py +++ b/CompilerInterface/GrpcCompilerInterface.py @@ -1,3 +1,17 @@ +# ------------------------------------------------------------------------------ +# +# Part of the MLCompilerBridge Project, under the Apache 2.0 License. +# See the LICENSE file under home directory for license and copyright +# information. +# +# ------------------------------------------------------------------------------ +# +# Compiler interface for gRPC. This class implements methods for communication +# with compiler using gRPC. 
+# +# ------------------------------------------------------------------------------ + + from abc import ABC, abstractmethod from BaseCompilerInterface import BaseCompilerInterface import os diff --git a/CompilerInterface/PipeCompilerInterface.py b/CompilerInterface/PipeCompilerInterface.py index 778d361..1279218 100644 --- a/CompilerInterface/PipeCompilerInterface.py +++ b/CompilerInterface/PipeCompilerInterface.py @@ -1,3 +1,16 @@ +# ------------------------------------------------------------------------------ +# +# Part of the MLCompilerBridge Project, under the Apache 2.0 License. +# See the LICENSE file under home directory for license and copyright +# information. +# +# ------------------------------------------------------------------------------ +# +# Compiler interface for pipes. This class implements methods for communication +# with compiler using pipes. +# +# ------------------------------------------------------------------------------ + from abc import ABC, abstractmethod from BaseCompilerInterface import BaseCompilerInterface import os diff --git a/CompilerInterface/SerDes.py b/CompilerInterface/SerDes.py index 329144e..a43672b 100644 --- a/CompilerInterface/SerDes.py +++ b/CompilerInterface/SerDes.py @@ -1,9 +1,22 @@ +# ------------------------------------------------------------------------------ +# +# Part of the MLCompilerBridge Project, under the Apache 2.0 License. +# See the LICENSE file under home directory for license and copyright +# information. +# +# ------------------------------------------------------------------------------ +# +# SerDes for JSON and bitstream data. 
+# +# ------------------------------------------------------------------------------ + import os, io import json import log_reader import ctypes import struct + class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, ctypes.c_long): @@ -12,6 +25,7 @@ def default(self, obj): return obj.value return super(NpEncoder, self).default(obj) + ## Class for serialization and deserialization in various formats for communication. class SerDes: ## Contructor for SerDes object @@ -77,14 +91,14 @@ def _pack(data): elif isinstance(data, float): return struct.pack("f", data) elif isinstance(data, str) and len(data) == 1: - return struct.pack('c', data) + return struct.pack("c", data) elif isinstance(data, ctypes.c_double): - return struct.pack('d', data.value) + return struct.pack("d", data.value) elif isinstance(data, ctypes.c_long): - return struct.pack('l', data.value) + return struct.pack("l", data.value) elif isinstance(data, list): return b"".join([_pack(x) for x in data]) - + msg = _pack(data) hdr = len(msg).to_bytes(8, "little") self.buffer = hdr + msg diff --git a/CompilerInterface/log_reader.py b/CompilerInterface/log_reader.py index 078cae0..b12518a 100644 --- a/CompilerInterface/log_reader.py +++ b/CompilerInterface/log_reader.py @@ -1,7 +1,16 @@ -"""Reader for training log. +# ------------------------------------------------------------------------------ +# +# Part of the MLCompilerBridge Project, under the Apache 2.0 License. +# See the LICENSE file under home directory for license and copyright +# information. +# +# ------------------------------------------------------------------------------ +# +# Reader for training log. +# See lib/Analysis/TrainingLogger.cpp for a description of the format. +# +# ------------------------------------------------------------------------------ -See lib/Analysis/TrainingLogger.cpp for a description of the format. 
-""" import ctypes import dataclasses import io diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000..d9f7f90 --- /dev/null +++ b/LICENSE @@ -0,0 +1,191 @@ + Copyright 2024 The authors of "The Next 700 ML-Enabled Compiler Optimizations" + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. 
+ + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS diff --git a/LICENSE.txt b/LICENSE.txt deleted file mode 100644 index f756012..0000000 --- a/LICENSE.txt +++ /dev/null @@ -1,35 +0,0 @@ -BSD 4-Clause License - -Copyright (c) 2023, The authors of ml-llvm-tools - -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -1. Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -2. 
Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -3. All advertising materials mentioning features or use of this software must - display the following acknowledgement: - This product includes software developed by the authors of ml-llvm-tools - from the Department of Computer Science and Engineering, IIT Hyderabad, India. - -4. Neither the name of the copyright holder nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY COPYRIGHT HOLDER "AS IS" AND ANY EXPRESS OR -IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF -MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO -EVENT SHALL COPYRIGHT HOLDER BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; -OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, -WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR -OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF -ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/MLModelRunner/C/ONNXModelRunnerCWrapper.cpp b/MLModelRunner/C/ONNXModelRunnerCWrapper.cpp index 4caebf2..031d77c 100644 --- a/MLModelRunner/C/ONNXModelRunnerCWrapper.cpp +++ b/MLModelRunner/C/ONNXModelRunnerCWrapper.cpp @@ -1,3 +1,15 @@ +//=== MLModelRunner/C/ONNXModelRunner.cpp - C API for ONNXModelRunner -----===// +// +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. 
+// +//===----------------------------------------------------------------------===// +// +// This file defines the C APIs for ONNXModelRunner. +// +//===----------------------------------------------------------------------===// + #include "MLModelRunner/C/ONNXModelRunner.h" #include "MLModelRunner/ONNXModelRunner/agent.h" #include "MLModelRunner/ONNXModelRunner/utils.h" diff --git a/MLModelRunner/C/PipeModelRunnerCWrapper.cpp b/MLModelRunner/C/PipeModelRunnerCWrapper.cpp index d94b870..ded4ca3 100644 --- a/MLModelRunner/C/PipeModelRunnerCWrapper.cpp +++ b/MLModelRunner/C/PipeModelRunnerCWrapper.cpp @@ -1,3 +1,15 @@ +//=== MLModelRunner/C/PipeModelRunner.cpp - C API for PipeModelRunner -----===// +// +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. +// +//===----------------------------------------------------------------------===// +// +// This file defines the C APIs for PipeModelRunner. 
+// +//===----------------------------------------------------------------------===// + #include "MLModelRunner/C/PipeModelRunner.h" #include "MLModelRunner/MLModelRunner.h" @@ -15,7 +27,7 @@ PipeModelRunnerWrapper *createPipeModelRunner(const char *OutboundName, int SerDesType) { PipeModelRunnerWrapper *obj = new PipeModelRunnerWrapper(); obj->model = new PipeModelRunner(OutboundName, InboundName, - (BaseSerDes::Kind)SerDesType); + (BaseSerDes::Kind)SerDesType); return obj; } diff --git a/MLModelRunner/ONNXModelRunner/ONNXModelRunner.cpp b/MLModelRunner/ONNXModelRunner/ONNXModelRunner.cpp index a0714e8..a7209e4 100755 --- a/MLModelRunner/ONNXModelRunner/ONNXModelRunner.cpp +++ b/MLModelRunner/ONNXModelRunner/ONNXModelRunner.cpp @@ -1,10 +1,15 @@ -//===----------------------------------------------------------------------===// +//===- ONNXModelRunner.cpp - ONNX Runner ------------------------*- C++ -*-===// // -// Part of the ml-llvm-tools Project, under the BSD 4-Clause License. -// See the LICENSE.txt file under ml-llvm-tools directory for license +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright // information. // //===----------------------------------------------------------------------===// +// +// This file defines the ONNXModelRunner class to support ML model inference +// via ONNX. 
+// +//===----------------------------------------------------------------------===// #include "MLModelRunner/ONNXModelRunner/ONNXModelRunner.h" #include "SerDes/baseSerDes.h" @@ -48,4 +53,4 @@ void *ONNXModelRunner::evaluateUntyped() { return new int(0); } -} // namespace MLBridge \ No newline at end of file +} // namespace MLBridge diff --git a/MLModelRunner/ONNXModelRunner/README.md b/MLModelRunner/ONNXModelRunner/README.md index 1c9056b..7fff031 100644 --- a/MLModelRunner/ONNXModelRunner/README.md +++ b/MLModelRunner/ONNXModelRunner/README.md @@ -1,6 +1,6 @@ -# LLVM Inference Engine +# ONNX Model Runner (In-Process Model Runner) -This directory contains the source code for LLVM Inference Engine. A ONNX based framework to support integrtion of trained models with LLVM pass. +A ONNX based Model Runner to support integrtion of trained models with the compiler during inference. ## Trained model integration @@ -8,24 +8,7 @@ Integration of a trained model happnes in two steps. * Step 1: The model trained on some native environment need to be exported in ONNX format -* Step 2: The model can be quried anywhere in the LLVM environment by creating a instance of ONNXModel class provired by passing ONNX model file path - -## Directory structure - -The direcory and its subdirctories contain source code for OONX model integrations as well as base classes of vaious components used by and RL algorithm. - - . - ├── onnx.cpp # Defination of ONNXModel class - ├── agent.cpp # RL Agent class which will compute action by quring model - ├── Include - │ ├── agent.h # Declare RL agent class - │ ├── driver.h # A interface class to query RL baaed model - │ ├── environment.h # Declare a base Environment class which can be extended further - │ ├── onnx.h # Declaration of ONNXModel class - │ └── utils.h # Utility defination used my Agent class - └── CMakeLists.txt # Build file for LLVM Inference Engine - -The classes can be further inherited to implement the pass related logic. 
+* Step 2: The model can be queried anywhere in the compiler environment by creating a instance of ONNXModel class provided by setting the ONNX model's path. ## Example Usage: diff --git a/MLModelRunner/ONNXModelRunner/agent.cpp b/MLModelRunner/ONNXModelRunner/agent.cpp index d8bb676..a8ce12a 100755 --- a/MLModelRunner/ONNXModelRunner/agent.cpp +++ b/MLModelRunner/ONNXModelRunner/agent.cpp @@ -1,10 +1,15 @@ -//===----------------------------------------------------------------------===// +//===- agent.cpp - RL Agent/Model for ONNX Runner --------------*- C++ -*-===// // -// Part of the ml-llvm-tools Project, under the BSD 4-Clause License. -// See the LICENSE.txt file under ml-llvm-tools directory for license +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright // information. // //===----------------------------------------------------------------------===// +// +// This file defines the Agent class, which is a wrapper around the ONNXModel +// class. +// +//===----------------------------------------------------------------------===// #include "MLModelRunner/ONNXModelRunner/agent.h" #include "MLModelRunner/Utils/Debug.h" @@ -40,4 +45,4 @@ unsigned Agent::computeAction(Observation &input) { return argmaxVal; } -} // namespace MLBridge \ No newline at end of file +} // namespace MLBridge diff --git a/MLModelRunner/ONNXModelRunner/onnx.cpp b/MLModelRunner/ONNXModelRunner/onnx.cpp index bf64e72..2fffc35 100644 --- a/MLModelRunner/ONNXModelRunner/onnx.cpp +++ b/MLModelRunner/ONNXModelRunner/onnx.cpp @@ -1,10 +1,15 @@ -//===----------------------------------------------------------------------===// +//===- onnx.cpp - ONNX Interface with CPP Runtime --------------*- C++ -*-===// // -// Part of the ml-llvm-tools Project, under the BSD 4-Clause License. 
-// See the LICENSE.txt file under ml-llvm-tools directory for license +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright // information. // //===----------------------------------------------------------------------===// +// +// This file defines the ONNXModel class, which is a wrapper around the ONNX +// C++ interface. +// +//===----------------------------------------------------------------------===// #include "MLModelRunner/ONNXModelRunner/onnx.h" #include "onnxruntime_cxx_api.h" diff --git a/MLModelRunner/PipeModelRunner.cpp b/MLModelRunner/PipeModelRunner.cpp index f8c5c09..77b4df3 100755 --- a/MLModelRunner/PipeModelRunner.cpp +++ b/MLModelRunner/PipeModelRunner.cpp @@ -1,13 +1,20 @@ -//===- PipeModelRunner.cpp - noop ML model runner ----------------===// +//===- PipeModelRunner.cpp - Pipe based Model Runner ------------*- C++ -*-===// // -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. +// +// (Preliminary version adopted from InteractiveModelRunner.cpp of LLVM 17.X) // //===----------------------------------------------------------------------===// // -// A runner that communicates with an external agent via 2 file descriptors. +// This file defines the PipeModelRunner class to interface with an external ML +// model during training and inference. The model is assumed to be running as an +// external process and the communication is done via 2 file descriptors using +// pipes. 
+// //===----------------------------------------------------------------------===// + #include "MLModelRunner/PipeModelRunner.h" #include "MLModelRunner/MLModelRunner.h" #include "MLModelRunner/Utils/Debug.h" @@ -115,4 +122,4 @@ void *PipeModelRunner::evaluateUntyped() { return SerDes->deserializeUntyped(reply); } -} // namespace MLBridge \ No newline at end of file +} // namespace MLBridge diff --git a/MLModelRunner/Utils/MLConfig.cpp b/MLModelRunner/Utils/MLConfig.cpp index 4e345b7..abd2a6a 100644 --- a/MLModelRunner/Utils/MLConfig.cpp +++ b/MLModelRunner/Utils/MLConfig.cpp @@ -1,3 +1,11 @@ +//===- MLConfig.cpp - Set ML Config Paths -----------------------*- C++ -*-===// +// +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. +// +//===----------------------------------------------------------------------===// + #include "MLModelRunner/Utils/MLConfig.h" llvm::cl::opt MLBridge::MLConfig::mlconfig( diff --git a/MLModelRunner/gRPCModelRunner/CMakeLists.txt b/MLModelRunner/gRPCModelRunner/CMakeLists.txt index 5fb005f..c7f3c67 100755 --- a/MLModelRunner/gRPCModelRunner/CMakeLists.txt +++ b/MLModelRunner/gRPCModelRunner/CMakeLists.txt @@ -24,12 +24,6 @@ else() set(_GRPC_PYTHON_PLUGIN_EXECUTABLE $) endif() - -set(CMAKE_CXX_STANDARD 11) -# add_custom_target(protobuf_grpc_version ALL -# COMMAND ${CMAKE_COMMAND} -E echo "protoc path = $ Using Protobuf ${Protobuf_VERSION} Using gRPC ${gRPC_VERSION} have AOT ${LLVM_HAVE_TF_AOT}") - - file(GLOB proto_list ${PROTOS_DIRECTORY}/*.proto) set(proto_dir ${PROTOS_DIRECTORY}) file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/include/grpc) diff --git a/SerDes/JSON.cpp b/SerDes/JSON.cpp index e0a9016..1b04087 100644 --- a/SerDes/JSON.cpp +++ b/SerDes/JSON.cpp @@ -1,10 +1,16 @@ //=== JSON.cpp - JSON value, parsing and serialization - C++ -----------*-===// // +// From LLVM 10.X Support Library +// // Part of the LLVM Project, under the 
Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception // //===---------------------------------------------------------------------===// +// +// This file implements a JSON parser and serializer to support JSONSerDes. +// +//===---------------------------------------------------------------------===// #include "MLModelRunner/Utils/JSON.h" #include "llvm/ADT/STLExtras.h" diff --git a/SerDes/TensorSpec.cpp b/SerDes/TensorSpec.cpp index 2eb7071..e61ff8a 100755 --- a/SerDes/TensorSpec.cpp +++ b/SerDes/TensorSpec.cpp @@ -1,8 +1,10 @@ //===- TensorSpec.cpp - tensor type abstraction ---------------------------===// // -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. +// +// (Preliminary version adopted from TensorSpec.cpp of LLVM 12.X) // //===----------------------------------------------------------------------===// // @@ -10,6 +12,7 @@ // utils. // //===----------------------------------------------------------------------===// + #include "SerDes/TensorSpec.h" #include "MLModelRunner/Utils/JSON.h" #include "llvm/Support/Debug.h" diff --git a/SerDes/bitstreamSerDes.cpp b/SerDes/bitstreamSerDes.cpp index 889d3af..1a38333 100644 --- a/SerDes/bitstreamSerDes.cpp +++ b/SerDes/bitstreamSerDes.cpp @@ -1,9 +1,15 @@ -//=== MLCompilerBridge/SerDes/bitstreamSerDes.cpp - Bitstream SerDes -// Implementation ---*- C++ -*-===// +//===- bitstreamSerDes.cpp - Serializer for Bitstream -----------*- C++ -*-===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. 
+// See the LICENSE file under home directory for license and copyright +// information. // -//===------------------===// +//===----------------------------------------------------------------------===// +// +// This file defines the BitstreamSerDes class, which is a serializer for +// Bitstream format. +// +//===----------------------------------------------------------------------===// #include "SerDes/bitstreamSerDes.h" #include "MLModelRunner/Utils/Debug.h" @@ -140,4 +146,4 @@ void *BitstreamSerDes::deserializeUntyped(void *data) { this->MessageLength = res->size(); return res->data(); } -} // namespace MLBridge \ No newline at end of file +} // namespace MLBridge diff --git a/SerDes/jsonSerDes.cpp b/SerDes/jsonSerDes.cpp index 9814a3c..105e9b4 100755 --- a/SerDes/jsonSerDes.cpp +++ b/SerDes/jsonSerDes.cpp @@ -1,13 +1,20 @@ -//=== MLCBridge/SerDes/jsonSerDes.cpp - JsonSerDes class definition ===// +//===- jsonstreamSerDes.cpp - Serializer for JSON ---------------*- C++ -*-===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. // -//===------------------===// +//===----------------------------------------------------------------------===// +// +// This file defines the JsonSerDes class, which is a wrapper around the JSON +// C++ interface to serialize and deserialize data to and from JSON. 
+// +//===----------------------------------------------------------------------===// #include "SerDes/jsonSerDes.h" +#include "MLModelRunner/Utils/DataTypes.h" #include "MLModelRunner/Utils/Debug.h" #include "MLModelRunner/Utils/JSON.h" -#include "MLModelRunner/Utils/DataTypes.h" #include "SerDes/baseSerDes.h" #include "llvm/Support/Debug.h" #include diff --git a/SerDes/protobufSerDes.cpp b/SerDes/protobufSerDes.cpp index 5b82315..b96d2cb 100644 --- a/SerDes/protobufSerDes.cpp +++ b/SerDes/protobufSerDes.cpp @@ -1,9 +1,17 @@ -//=== MLCompilerBridge/SerDes/protobufSerDes.cpp - Protobuf SerDes -// Implementation ---*- C++ -*-===// +//===- protobufSerDes.cpp - Protobuf Serializer for gRPC -------*- C++ -*-===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. // -//===------------------===// +//===----------------------------------------------------------------------===// +// +// This file defines the ProtobufSerDes class, which is a wrapper around the +// protobuf C++ interface to support gRPC communication between the client and +// server. The protobuf C++ interface is used to serialize and deserialize +// messages. 
+// +//===----------------------------------------------------------------------===// #include "SerDes/protobufSerDes.h" #include "google/protobuf/descriptor.h" @@ -223,4 +231,4 @@ void ProtobufSerDes::cleanDataStructures() { Request->Clear(); Response->Clear(); } -} // namespace MLBridge \ No newline at end of file +} // namespace MLBridge diff --git a/SerDes/tensorflowSerDes.cpp b/SerDes/tensorflowSerDes.cpp index ffde191..d8ab21c 100644 --- a/SerDes/tensorflowSerDes.cpp +++ b/SerDes/tensorflowSerDes.cpp @@ -1,8 +1,15 @@ -//=== MLCompilerBridge/SerDes/tensorflowSerDes.cpp - Tensorflow SerDes Implementation ---*- C++ -*-===// +//===- tensorflowSerDes.cpp - Serializer support for TF ---------*- C++ -*-===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. // -//===------------------===// +//===----------------------------------------------------------------------===// +// +// This file defines the TensorflowSerDes class, to support interfacing with +// Tensorflow AOT models via TFModelRunner. +// +//===----------------------------------------------------------------------===// #include "SerDes/tensorflowSerDes.h" #include "SerDes/baseSerDes.h" diff --git a/include/MLModelRunner/C/ONNXModelRunner.h b/include/MLModelRunner/C/ONNXModelRunner.h index a383841..d402ba6 100644 --- a/include/MLModelRunner/C/ONNXModelRunner.h +++ b/include/MLModelRunner/C/ONNXModelRunner.h @@ -1,8 +1,46 @@ //=== MLModelRunner/C/ONNXModelRunner.h - C API for ONNXModelRunner - C++ -===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. 
// -//===------------------===// +//===---------------------------------------------------------------------===// +// +// This file defines the C APIs for ONNXModelRunner. +// This is a wrapper around the ONNXModelRunner class that provides an interface +// for the MLCompilerBridge to interact with the ONNX models during inference. +// +// Usage for single agent: +// 1. Create an ONNXModelRunner object using createSingleAgentOMR +// 2. Evaluate the features using singleAgentEvaluate +// 3. Destroy the instance of ONNXModelRunner using destroyONNXModelRunner +// +// Usage for multiple agents: +// 1. Create an Environment object using createEnvironment +// 2. Set the number of features using env_setNumFeatures +// 3. Set the step function using env_setStepFunc +// 4. Set the reset function using env_setResetFunc +// 5. Set the next agent using env_setNextAgent +// 6. Create an ONNXModelRunner object using createONNXModelRunner +// 7. Evaluate the features using evaluate +// 8. Destroy the instance of ONNXModelRunner using destroyONNXModelRunner +// 9. Destroy the instance of Environment using destroyEnvironment +// +// Using Environment: +// 1. Create an Environment object using createEnvironment +// 2. Set the number of features using env_setNumFeatures +// 3. Set the step function using env_setStepFunc +// 4. Set the reset function using env_setResetFunc +// 5. Set the next agent using env_setNextAgent +// 6. Destroy the instance of Environment using destroyEnvironment after calling +// destroyONNXModelRunner. +// +// Internally, the ONNXModelRunner will call the step function to get the next +// action and the reset function to reset the environment. The step function +// should return a pointer to an array of floats. The reset function should +// return a pointer to an array of floats. 
+// +//===---------------------------------------------------------------------===// #ifndef ONNX_MODEL_RUNNER_WRAPPER_H #define ONNX_MODEL_RUNNER_WRAPPER_H @@ -15,8 +53,6 @@ typedef signed Action; extern "C" { #endif -// Define an opaque pointer type for ONNXModelRunner - Environment *createEnvironment(); void env_setDone(Environment *env); void env_resetDone(Environment *env); diff --git a/include/MLModelRunner/C/PipeModelRunner.h b/include/MLModelRunner/C/PipeModelRunner.h index bd454b8..f306864 100644 --- a/include/MLModelRunner/C/PipeModelRunner.h +++ b/include/MLModelRunner/C/PipeModelRunner.h @@ -1,8 +1,24 @@ -//=== MLCompilerBridge/include/MLModelRunner/C/PipeModelRunner.h - C API for PipeModelRunner ===// +//===--- MLModelRunner/C/PipeModelRunner.h - C API for PipeModelRunner ---===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. // -//===------------------===// +//===---------------------------------------------------------------------===// +// +// This file defines the C API for PipeModelRunner. +// PipeModelRunner is a wrapper around the MLModelRunner class that provides +// an interface for the MLCompilerBridge to interact with the PipeModelRunner +// class. +// +// Usage: +// 1. Create an instance of PipeModelRunnerWrapper using createPipeModelRunner +// 2. Populate the features using populateXXXFeatures functions +// 3. Evaluate the features using evaluateXXXFeatures functions +// 4. 
Destroy the instance of PipeModelRunnerWrapper using +// destroyPipeModelRunner +// +//===---------------------------------------------------------------------===// #ifndef PIPE_MODEL_RUNNER_WRAPPER_H #define PIPE_MODEL_RUNNER_WRAPPER_H @@ -14,24 +30,24 @@ extern "C" { // Define an opaque pointer type for PipeModelRunnerWrapper typedef struct PipeModelRunnerWrapper PipeModelRunnerWrapper; -// Function to create an instance of PipeModelRunnerWrapper +/// Creates an instance of PipeModelRunnerWrapper PipeModelRunnerWrapper *createPipeModelRunner(const char *outBoundName, const char *inBoundName, int serDesType); -// Function to call a method on PipeModelRunnerWrapper +/// Populates the features of PipeModelRunnerWrapper void populateFloatFeatures(PipeModelRunnerWrapper *obj, const char *name, const float *data, const int size); void populateIntFeatures(PipeModelRunnerWrapper *obj, const char *name, const int *data, const int size); + +/// Evaluates the features of PipeModelRunnerWrapper int evaluateIntFeatures(PipeModelRunnerWrapper *obj); float evaluateFloatFeatures(PipeModelRunnerWrapper *obj); -// Function to destroy an instance of PipeModelRunnerWrapper +/// Destroys an instance of PipeModelRunnerWrapper void destroyPipeModelRunner(PipeModelRunnerWrapper *obj); -// void test(); - #ifdef __cplusplus } #endif diff --git a/include/MLModelRunner/MLModelRunner.h b/include/MLModelRunner/MLModelRunner.h index 73e433e..bcd4f96 100644 --- a/include/MLModelRunner/MLModelRunner.h +++ b/include/MLModelRunner/MLModelRunner.h @@ -1,17 +1,36 @@ //===- MLModelRunner.h ---- ML model runner interface -----------*- C++ -*-===// // -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. 
+// See the LICENSE file under home directory for license and copyright
+// information.
 //
-// Base MLModelRunner class which exposes APIs to set the features
-// to be send to the ML model and get the result back from the model.
+// (Preliminary version adopted from MLModelRunner.h of LLVM 17.X)
 //
-// How to support a new ModelRunner:
-// 1. Create a new class which inherits from MLModelRunner.
-// 2. Implement the evaluateUntyped() method.
 //===----------------------------------------------------------------------===//
 //
+// The MLModelRunner class is the main interface for interacting with the
+// ML models. The MLCompilerBridge uses the MLModelRunner class to set the
+// features to be sent to the model and get the result back from the model.
+//
+// This class internally uses the SerDes class to serialize and deserialize the
+// features and result.
+//
+// The MLModelRunner class is an abstract class and cannot be instantiated.
+//
+// The concrete Model Runner chooses the appropriate SerDes kind for this
+// communication at construction time.
+//
+// Supporting new Model Runners:
+// 1. Create a new class inheriting the MLModelRunner class.
+// 2. Override evaluateUntyped() method to call the model and get the result.
+//
+// Using any of the existing Model Runners:
+// 1. Instantiate the model runner object with the appropriate arguments.
+// 2. Call populateFeatures() to set the features to be sent to the model.
+// 3. Call evaluate() to send the features and receive the result from the model.
+// Similar flows apply for both training and inference.
+ +//===----------------------------------------------------------------------===// #ifndef ML_MODEL_RUNNER_H #define ML_MODEL_RUNNER_H diff --git a/include/MLModelRunner/ONNXModelRunner/ONNXModelRunner.h b/include/MLModelRunner/ONNXModelRunner/ONNXModelRunner.h index 05a4af0..ab852c4 100755 --- a/include/MLModelRunner/ONNXModelRunner/ONNXModelRunner.h +++ b/include/MLModelRunner/ONNXModelRunner/ONNXModelRunner.h @@ -1,17 +1,28 @@ -//===----------------------------------------------------------------------===// +//=== MLModelRunner/ONNXModelRunner/ONNXModelRunner.h - C++ --------------===// // -// Part of the ml-llvm-tools Project, under the BSD 4-Clause License. -// See the LICENSE.txt file under ml-llvm-tools directory for license +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright // information. // -// ONNXModelRunner class supporting communication via ONNX Runtime +//===----------------------------------------------------------------------===// +// +// ONNXModelRunner class supporting communication via ONNX C++ Runtime. +// Only inference is supported. +// +// This class interfaces with Environment and Agent classes to support +// ML/RL model inference via ONNXModel. // -// How to use? -// 1. Create agent objects with the path to the ONNX model -// 2. Create an environment object inheriting from MLBridge::Environment -// 3. Create an ONNXModelRunner object with the environment and the agents -// 4. Populate the features to be sent to the model -// 5. Call evaluate() to get the result back from the model +// Usage: +// 1. Construct an ONNXModelRunner object with the environment and the agents. +// Environment and agents are created by the user by inheriting from the +// Environment class and using the Agent class. +// 2. Multiple agents can be added to the ONNXModelRunner object using the +// addAgent() method. The agents are identified by a unique name. +// 3. 
Call evaluate() to get the result from the model. +// +// Internally the ONNXModelRunner object will call the step() method of the +// environment to get the next observation and the computeAction() method of the +// agent to get the action corresponding to the observation. // //===----------------------------------------------------------------------===// @@ -28,8 +39,10 @@ class ONNXModelRunner : public MLModelRunner { ONNXModelRunner(MLBridge::Environment *env, std::map agents, llvm::LLVMContext *Ctx = nullptr); + void setEnvironment(MLBridge::Environment *_env) { env = _env; } MLBridge::Environment *getEnvironment() { return env; } + void addAgent(Agent *agent, std::string name); void computeAction(Observation &obs); diff --git a/include/MLModelRunner/ONNXModelRunner/agent.h b/include/MLModelRunner/ONNXModelRunner/agent.h index 6b9ce0c..09a7129 100644 --- a/include/MLModelRunner/ONNXModelRunner/agent.h +++ b/include/MLModelRunner/ONNXModelRunner/agent.h @@ -1,12 +1,14 @@ -//===----------------------------------------------------------------------===// +//=== MLModelRunner/ONNXModelRunner/agent.h - Agent Model Helper - C++ -===// // -// Part of the ml-llvm-tools Project, under the BSD 4-Clause License. -// See the LICENSE.txt file under ml-llvm-tools directory for license +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright // information. // -// Agent class to support ML model inference +//===---------------------------------------------------------------------===// +// +// Agent class to support ML/RL model inference via ONNX // -// How to use? +// Usage: // 1. Construct an agent object with the path to the ONNX model // 2. 
Call computeAction() to get the action from the model // @@ -21,8 +23,6 @@ #include #include -#define DEBUG_TYPE "rl-inference-engine" - namespace MLBridge { class Agent { public: diff --git a/include/MLModelRunner/ONNXModelRunner/environment.h b/include/MLModelRunner/ONNXModelRunner/environment.h index 1dd9fab..c8072a3 100644 --- a/include/MLModelRunner/ONNXModelRunner/environment.h +++ b/include/MLModelRunner/ONNXModelRunner/environment.h @@ -1,15 +1,44 @@ -//===----------------------------------------------------------------------===// +//=== MLModelRunner/ONNXModelRunner/environment.h - ONNX Environment C++ -===// // -// Part of the ml-llvm-tools Project, under the BSD 4-Clause License. -// See the LICENSE.txt file under ml-llvm-tools directory for license +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright // information. // -// Base Environment class to support ML model inference +//===---------------------------------------------------------------------===// +// +// Base Environment class to support ONNX based inference of RL models. This +// class is used to define the environment for the agents to interact with. +// +// The Environment should be defined by the compiler pass that is using the +// MLCompilerBridge. The environment should be defined by inheriting from this +// class and implementing the step() and reset() methods. +// +// step() and reset() are typical methods used in RL environments. // -// How to use? +// The step() method takes an action as input and returns the observation +// corresponding to the next state. The reset() method returns the initial +// observation. +// +// Usage: // 1. Create an environment class inheriting from MLBridge::Environment -// 2. Implement the step() and reset() methods -// 3. Create an ONNXModelRunner object with the environment and the agents +// 2. 
Implement step() and reset() methods +// +// Example: +// class MyEnvironment : public MLBridge::Environment { +// public: +// Observation &step(Action action) override { +// // Implement the step function here +// } +// Observation &reset() override { +// // Implement the reset function here +// } +// }; +// +// This environment can then be used by the ONNXModelRunner to interact with +// the agents. getNextAgent() and setNextAgent() methods can be used to set the +// next agent to interact with. These methods are used in step() and reset() to +// get the next agent to interact with in case of multi-agent environment. +// //===----------------------------------------------------------------------===// #ifndef ONNX_MODELRUNNER_ENVIRONMENT_H @@ -29,12 +58,30 @@ class Environment { std::map obsMap; public: + /// CheckDone returns true if the termination condition is met. bool checkDone() { return done == true; }; + + /// SetDone sets the termination condition to true. void setDone() { done = true; } void resetDone() { done = false; } + + /// GetNextAgent returns the name of the next agent to interact with. std::string getNextAgent() { return nextAgent; }; + + /// SetNextAgent sets the name of the next agent to interact with. void setNextAgent(std::string name) { nextAgent = name; } + + /// Step function takes an action as input and returns the observation + /// corresponding to the next state. This method should be implemented by the + /// user. Typically this method should call setDone() if the termination + /// condition is met. setNextAgent() can be called in this method to set the + /// next agent to interact with. virtual Observation &step(Action action) = 0; + + /// Reset function returns the initial observation. This method should be + /// implemented by the user. This method can internally call setNextAgent() to + /// set the next agent to interact with and setDone()/resetDone() to set/reset + /// the termination condition. 
virtual Observation &reset() = 0; }; } // namespace MLBridge diff --git a/include/MLModelRunner/ONNXModelRunner/onnx.h b/include/MLModelRunner/ONNXModelRunner/onnx.h index 622cade..45bd6ad 100644 --- a/include/MLModelRunner/ONNXModelRunner/onnx.h +++ b/include/MLModelRunner/ONNXModelRunner/onnx.h @@ -1,10 +1,15 @@ -//===----------------------------------------------------------------------===// +//=== MLModelRunner/ONNXModelRunner/onnx.h --- ONNX C++ Interface -----===// // -// Part of the ml-llvm-tools Project, under the BSD 4-Clause License. -// See the LICENSE.txt file under ml-llvm-tools directory for license +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright // information. // -//===----------------------------------------------------------------------===// +//===---------------------------------------------------------------------===// +// +// This file defines the ONNXModel class, which is a wrapper around the ONNX +// C++ interface. +// +//===---------------------------------------------------------------------===// #ifndef ONNX_MODELRUNNER_ONNX_H #define ONNX_MODELRUNNER_ONNX_H @@ -26,6 +31,8 @@ class ONNXModel { public: ONNXModel(const char *model_path); + + /// Runs the ONNX model on the input and returns the output void run(llvm::SmallVector &input, llvm::SmallVector &output); }; diff --git a/include/MLModelRunner/ONNXModelRunner/utils.h b/include/MLModelRunner/ONNXModelRunner/utils.h index cd77f4d..3f7cf1b 100755 --- a/include/MLModelRunner/ONNXModelRunner/utils.h +++ b/include/MLModelRunner/ONNXModelRunner/utils.h @@ -1,7 +1,7 @@ -//===----------------------------------------------------------------------===// +//=== MLModelRunner/ONNXModelRunner/utils.h - C++ ------------------------===// // -// Part of the ml-llvm-tools Project, under the BSD 4-Clause License. 
-// See the LICENSE.txt file under ml-llvm-tools directory for license +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright // information. // //===----------------------------------------------------------------------===// diff --git a/include/MLModelRunner/PipeModelRunner.h b/include/MLModelRunner/PipeModelRunner.h index b39cb47..b3b6a36 100755 --- a/include/MLModelRunner/PipeModelRunner.h +++ b/include/MLModelRunner/PipeModelRunner.h @@ -1,16 +1,24 @@ -//===- PipeModelRunner.h ---- PipeModelRunner -----*- C++ -*-===// +//===- PipeModelRunner.h ---- PipeModelRunner ------*- C++ -*-------------===// // -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. // -// PipeModelRunner class supporting communication via OS pipes -// -// How to use? +// (Preliminary version adopted from InteractiveModelRunner.h of LLVM 17.X) +// +//===----------------------------------------------------------------------===// +// +// PipeModelRunner class supporting communication via OS pipes between the +// compiler and an external ML agent. +// +// Usage: // 1. Create a PipeModelRunner object with the names of the pipes, and the -// serialization technique -// 2. Populate the features to be sent to the model -// 3. Call evaluate() to get the result back from the model +// serialization technique. +// 2. Populate the features to be sent to the model. +// 3. Call evaluate() to get the result back from the model. +// +// This supports both training and inference. Supports interleaved +// communication. 
// //===----------------------------------------------------------------------===// @@ -30,10 +38,10 @@ namespace MLBridge { /// A MLModelRunner that asks for advice from an external agent, or host. It /// uses 2 files - ideally named pipes - one to send data to that agent, and /// one to receive advice. -/// The compiler will send observations; the host is expected to reply with a tensor value after -/// each observation as a binary buffer that's conforming to the shape of the -/// advice. Interleaved, the data closely resembles the training log for a -/// log where we don't capture the reward signal. +/// The compiler will send observations; the host is expected to reply with a +/// tensor value after each observation as a binary buffer that's conforming to +/// the shape of the advice. Interleaved, the data closely resembles the +/// training log for a log where we don't capture the reward signal. /// /// Note that the correctness of the received data is the responsibility of the /// host. In particular, if insufficient data were sent, the compiler will block diff --git a/include/MLModelRunner/TFModelRunner.h b/include/MLModelRunner/TFModelRunner.h index c577854..c469f5f 100644 --- a/include/MLModelRunner/TFModelRunner.h +++ b/include/MLModelRunner/TFModelRunner.h @@ -1,8 +1,10 @@ -//===- ReleaseModeModelRunner.h - Fast, precompiled model runner ---------===// +//===- TFModelRunner.h ---- TF precompiled model runner ------*- C++-*----===// // -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. 
+// +// (Preliminary version adopted from ReleaseModeModelRunner.h of LLVM 17.X) // //===----------------------------------------------------------------------===// // @@ -43,7 +45,8 @@ template class TFModelRunner final : public MLModelRunner { DecisionName.str()); assert(ResultIndex >= 0 && "Cannot find DecisionName in inlining model"); } - TFModelRunner(llvm::StringRef DecisionName, llvm::StringRef FeedPrefix = "feed_", + TFModelRunner(llvm::StringRef DecisionName, + llvm::StringRef FeedPrefix = "feed_", llvm::StringRef FetchPrefix = "fetch_") : MLModelRunner(MLModelRunner::Kind::TFAOT, BaseSerDes::Kind::Tensorflow), CompiledModel(std::make_unique()) { diff --git a/include/MLModelRunner/Utils/DataTypes.h b/include/MLModelRunner/Utils/DataTypes.h index e4d38a8..e9659c4 100644 --- a/include/MLModelRunner/Utils/DataTypes.h +++ b/include/MLModelRunner/Utils/DataTypes.h @@ -1,16 +1,33 @@ +//=== MLModelRunner/Utils/DataTypes.h - Supported Data Types - C++ -------===// +// +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. +// +//===----------------------------------------------------------------------===// +// +// This file defines the bit widths of integral and floating point types +// supported by the MLCompilerBridge. +// +// The bit widths of floating and integral types supported by the +// MLCompilerBridge can be configured by defining the MLBRIDGE_EXTENDED_TYPES +// macro in the CMakeLists.txt file. 
+// +//===----------------------------------------------------------------------===// + #ifndef MLBRIDGE_DATATYPES_H #define MLBRIDGE_DATATYPES_H namespace MLBridge { #ifdef MLBRIDGE_EXTENDED_TYPES - using RealType = double; - using IntegerType = long; +using RealType = double; +using IntegerType = long; #else - using RealType = float; - using IntegerType = int; +using RealType = float; +using IntegerType = int; #endif -} +} // namespace MLBridge -#endif \ No newline at end of file +#endif diff --git a/include/MLModelRunner/Utils/Debug.h b/include/MLModelRunner/Utils/Debug.h index 821719c..04eb1b8 100644 --- a/include/MLModelRunner/Utils/Debug.h +++ b/include/MLModelRunner/Utils/Debug.h @@ -1,3 +1,15 @@ +//=== MLModelRunner/Utils/Debug.h - Debug definitions with support - C++ --===// +// +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. +// +//===----------------------------------------------------------------------===// +// +// This file defines the debug macros for the MLCompilerBridge. +// +//===----------------------------------------------------------------------===// + #ifndef MLBRIDGE_DEBUG_H #define MLBRIDGE_DEBUG_H diff --git a/include/MLModelRunner/Utils/JSON.h b/include/MLModelRunner/Utils/JSON.h index f11d67c..6fb6a48 100644 --- a/include/MLModelRunner/Utils/JSON.h +++ b/include/MLModelRunner/Utils/JSON.h @@ -1,5 +1,7 @@ //===--- JSON.h - JSON values, parsing and serialization -------*- C++ -*-===// // +// From LLVM 10.X Support library. +// // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. // See https://llvm.org/LICENSE.txt for license information. 
 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
diff --git a/include/MLModelRunner/Utils/MLConfig.h b/include/MLModelRunner/Utils/MLConfig.h
index d069a75..a701efa 100644
--- a/include/MLModelRunner/Utils/MLConfig.h
+++ b/include/MLModelRunner/Utils/MLConfig.h
@@ -1,8 +1,16 @@
-//=== MLModelRunner/Utils/MLConfig.h - MLConfig class definition ---*- C++ -*-===//
+//=== MLModelRunner/Utils/MLConfig.h -MLConfig class definition -*- C++ -*-===//
 //
-// Part of the MLCompilerBridge Project
+// Part of the MLCompilerBridge Project, under the Apache 2.0 License.
+// See the LICENSE file under home directory for license and copyright
+// information.
 //
-//===------------------===//
+//===----------------------------------------------------------------------===//
+//
+// This file defines the MLConfig class, which is a wrapper around the MLConfig
+// command line option for passing information like path of the models and other
+// configuration to the compiler passes.
+//
+//===----------------------------------------------------------------------===//
 
 #ifndef MLBRIDGE_CONFIG_H
 #define MLBRIDGE_CONFIG_H
diff --git a/include/MLModelRunner/gRPCModelRunner.h b/include/MLModelRunner/gRPCModelRunner.h
index 627f869..5f546e9 100644
--- a/include/MLModelRunner/gRPCModelRunner.h
+++ b/include/MLModelRunner/gRPCModelRunner.h
@@ -1,18 +1,64 @@
-//===----------------------------------------------------------------------===//
+//=== MLModelRunner/gRPCModelRunner.h - gRPCModelRunner class -- C++ -*----===//
 //
-// Part of the ml-llvm-tools Project, under the BSD 4-Clause License.
-// See the LICENSE.txt file under ml-llvm-tools directory for license
+// Part of the MLCompilerBridge Project, under the Apache 2.0 License.
+// See the LICENSE file under home directory for license and copyright
 // information.
 //
-// gRPCModelRunner class supporting communication via gRPC
+//===---------------------------------------------------------------------===//
 //
-// How to use?
+// gRPCModelRunner class supporting communication via gRPC. This class is used
+// to communicate with the gRPC server and send/receive data to/from the model.
+// Supports interleaved communication with the model.
+//
+// There are two ways to use this class:
+// 1. Training mode - gRPC Server: In this mode, the gRPCModelRunner object is
+// created with the server address and the service object. The service object is
+// used to create the server and the server waits for the client to connect.
+// Once the client connects, the server waits for the client to send the request
+// and then responds with the result.
+// In Training mode, GrpcCompilerInterface class from GrpcCompilerInterface.py
+// acts as the client and sends the request to the server.
+//
+// 2. Inference mode - gRPC Client: In this mode, the gRPCModelRunner object is
+// created with the server address, request and response objects. The request
+// object is used to send the features to the model and the response object is
+// used to receive the result from the model.
+//
+// In Inference mode, the compiler pass using this class acts as the client and
+// sends the request to the server implemented by using
+// GrpcCompilerInterface.py.
+//
+//===---------------------------------------------------------------------===//
+//
+// Interfacing with the model using protobuf (.proto) files:
+// Users should define the service, stub, request and response by writing a
+// .proto file. The service and stub objects are generated using the protoc
+// compiler. The request and response objects are generated using the protoc
+// compiler or created by the user. The build process will automatically compile
+// .proto files and generate the relevant stubs and request/response objects in
+// both C++ and Python which will be used by gRPCModelRunner and
+// GrpcCompilerInterface respectively.
+// +// In Inference mode, users should override `getAdvice()` RPC method in the +// Python model code to process the request and send the response back to the +// client. This method is called by the gRPC server in the evaluate_untyped() +// method of gRPCModelRunner class to get the result from the model after +// populating the features in the request object. +// +// In Training mode, users should override the RPC function/service that they +// declare in the .proto file in the Compiler pass which is using the +// gRPCModelRunner. This function is called by the gRPC client (Python model) +// that is using the GrpcCompilerInterface class to send the request to the +// server. +// +// Usage: // 1. Create a .proto file with the service and message definitions // 2. Generate the stubs using protoc // 3. Create a gRPCModelRunner object with the server address, stub, request and // response // 4. Populate the features to be sent to the model // 5. Call evaluate() to get the result back from the model +// // ===----------------------------------------------------------------------===// #ifndef GRPC_MODELRUNNER_H @@ -26,13 +72,14 @@ #include #include -// grpc model runner requires service, stub, request and response namespace MLBridge { +/// grpc model runner requires service, stub, request and response template class gRPCModelRunner : public MLModelRunner { public: + /// For server mode gRPCModelRunner(std::string server_address, grpc::Service *s, - llvm::LLVMContext *Ctx = nullptr) // For server mode + llvm::LLVMContext *Ctx = nullptr) : MLModelRunner(MLModelRunner::Kind::gRPC, BaseSerDes::Kind::Protobuf, Ctx), server_address(server_address), request(nullptr), response(nullptr), @@ -40,9 +87,9 @@ class gRPCModelRunner : public MLModelRunner { RunService(s); } + /// For client mode gRPCModelRunner(std::string server_address, Request *request, - Response *response, - llvm::LLVMContext *Ctx = nullptr) // For client mode + Response *response, llvm::LLVMContext *Ctx = 
nullptr) : MLModelRunner(MLModelRunner::Kind::gRPC, BaseSerDes::Kind::Protobuf, Ctx), server_address(server_address), request(request), response(response), diff --git a/include/SerDes/TensorSpec.h b/include/SerDes/TensorSpec.h index 61b4748..18c7639 100644 --- a/include/SerDes/TensorSpec.h +++ b/include/SerDes/TensorSpec.h @@ -1,11 +1,13 @@ //===- TensorSpec.h - type descriptor for a tensor --------------*- C++ -*-===// // -// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. -// See https://llvm.org/LICENSE.txt for license information. -// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. // -//===----------------------------------------------------------------------===// +// (Preliminary version adopted from TensorSpec.h of LLVM 17.X) // +//===----------------------------------------------------------------------===// + #ifndef MLBRIDGE_TENSORSPEC_H #define MLBRIDGE_TENSORSPEC_H diff --git a/include/SerDes/baseSerDes.h b/include/SerDes/baseSerDes.h index 8c18dc2..d6dfb69 100644 --- a/include/SerDes/baseSerDes.h +++ b/include/SerDes/baseSerDes.h @@ -1,16 +1,22 @@ -//===- SerDes/baseSerDes.h - Base class for serialization and deserialization -//---*- C++ -*-===// +//=== SerDes/baseSerDes.h - Base for serialization and deserialization C++ ===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. // -// Base class for serialization and deserialization +//===----------------------------------------------------------------------===// // -// How to support a new serializtion technique: +// This is the base class for SerDes. 
It defines the interface for the +// serialization and deserialization of the data structures used for the +// communication by the MLModelRunner. +// +// Supporting new SerDes: // 1. Create a new class which inherits from BaseSerDes. -// 2. Implement the setFeature(), getSerializedData() -// cleanDataStructures() and deserializeUntyped() -// methods. -//===------------------===// +// 2. Implement the setFeature(), getSerializedData(), cleanDataStructures() and +// deserializeUntyped() methods. +// 3. Add the new SerDes to the enum class Kind in this class. +// +//===----------------------------------------------------------------------===// #ifndef BASE_SERDES_H #define BASE_SERDES_H diff --git a/include/SerDes/bitstreamSerDes.h b/include/SerDes/bitstreamSerDes.h index 4302ca7..febd23c 100644 --- a/include/SerDes/bitstreamSerDes.h +++ b/include/SerDes/bitstreamSerDes.h @@ -1,12 +1,15 @@ -//=== SerDes/bitstreamSerDes.h - Bitstream Serialization/Deserialization ---*- -// C++ -*-===// +//=== SerDes/bitstreamSerDes.h -Bitstream Serialization/Deserialization-C++===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. +// +//===----------------------------------------------------------------------===// // // Bitstream Serialization/Deserialization which sends header information // followed by the raw data. 
// -//===------------------===// +//===----------------------------------------------------------------------===// #ifndef BITSTREAM_SERIALIZER_H #define BITSTREAM_SERIALIZER_H @@ -19,7 +22,6 @@ #include #include - namespace MLBridge { class BitstreamSerDes : public BaseSerDes { public: diff --git a/include/SerDes/jsonSerDes.h b/include/SerDes/jsonSerDes.h index a2d0ed0..a2dc886 100644 --- a/include/SerDes/jsonSerDes.h +++ b/include/SerDes/jsonSerDes.h @@ -1,9 +1,14 @@ -//=== SerDes/jsonSerDes.h - Json Serialization/Deserialization ---*- C++ +//=== SerDes/jsonSerDes.h -Json Serialization/Deserialization ---*- C++ ---===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. // -// Json Serialization/Deserialization using LLVM's json library -//===------------------===// +//===----------------------------------------------------------------------===// +// +// Json Serialization/Deserialization using LLVM's json library. +// +//===----------------------------------------------------------------------===// #ifndef JSON_SERIALIZER_H #define JSON_SERIALIZER_H @@ -28,7 +33,7 @@ class JsonSerDes : public BaseSerDes { } \ void setFeature(const std::string &name, const std::vector &value) \ override { \ - J[name] = llvm::json::Array(value); \ + J[name] = llvm::json::Array(value); \ } SUPPORTED_TYPES(SET_FEATURE) #undef SET_FEATURE diff --git a/include/SerDes/protobufSerDes.h b/include/SerDes/protobufSerDes.h index 74bad0c..1d86507 100644 --- a/include/SerDes/protobufSerDes.h +++ b/include/SerDes/protobufSerDes.h @@ -1,9 +1,14 @@ -//=== SerDes/protobufSerDes.h - Protobuf Serialization/Deserialization ---*- C++ +//=== SerDes/protobufSerDes.h - Protobuf Serialization/Deserialization C++-===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. 
+// See the LICENSE file under home directory for license and copyright +// information. // -// Protobuf Serialization/Deserialization to support gRPC communication -//===------------------===// +//===----------------------------------------------------------------------===// +// +// Protobuf Serialization/Deserialization to support gRPC communication. +// +//===----------------------------------------------------------------------===// #ifndef PROTOBUF_SERIALIZER_H #define PROTOBUF_SERIALIZER_H diff --git a/include/SerDes/tensorflowSerDes.h b/include/SerDes/tensorflowSerDes.h index 29efaff..c322709 100644 --- a/include/SerDes/tensorflowSerDes.h +++ b/include/SerDes/tensorflowSerDes.h @@ -1,9 +1,14 @@ -//=== SerDes/tensorflowSerDes.h - Tensorflow Serialization/Deserialization ---*- -//C++ +//=== SerDes/tensorflowSerDes.h - SerDes for TF support ---*- C++ ---------===// // -// Part of the MLCompilerBridge Project +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. // -//===------------------===// +//===----------------------------------------------------------------------===// +// +// Serialization/Deserialization to support TF AOT models. +// +//===----------------------------------------------------------------------===// #ifndef TENSORFLOW_SERIALIZER_H #define TENSORFLOW_SERIALIZER_H diff --git a/test/MLBridgeTest.cpp b/test/MLBridgeTest.cpp index 7d6199e..f6d8a89 100644 --- a/test/MLBridgeTest.cpp +++ b/test/MLBridgeTest.cpp @@ -1,3 +1,11 @@ +//===----------------------------------------------------------------------===// +// +// Part of the MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. 
+// +//===----------------------------------------------------------------------===// + #include "MLModelRunner/MLModelRunner.h" #include "MLModelRunner/ONNXModelRunner/ONNXModelRunner.h" #include "MLModelRunner/PipeModelRunner.h" @@ -19,18 +27,19 @@ #include #include -static cl::opt - cl_server_address("test-server-address", cl::Hidden, - cl::desc("Server address, format :"), - cl::init("localhost:5050")); +static llvm::cl::opt + cl_server_address("test-server-address", llvm::cl::Hidden, + llvm::cl::desc("Server address, format :"), + llvm::cl::init("localhost:5050")); -static cl::opt cl_pipe_name("test-pipe-name", cl::Hidden, - cl::init("hellopipe"), - cl::desc("Name for pipe file")); +static llvm::cl::opt + cl_pipe_name("test-pipe-name", llvm::cl::Hidden, + llvm::cl::init("hellopipe"), + llvm::cl::desc("Name for pipe file")); -static cl::opt - cl_test_config("test-config", cl::Hidden, cl::init("pipe-bytes"), - cl::desc("Method for communication with python model")); +static llvm::cl::opt cl_test_config( + "test-config", llvm::cl::Hidden, llvm::cl::init("pipe-bytes"), + llvm::cl::desc("Method for communication with python model")); namespace { std::unique_ptr MLRunner; @@ -135,7 +144,7 @@ int testONNX() { return 0; } } // namespace int main(int argc, char **argv) { - cl::ParseCommandLineOptions(argc, argv); + llvm::cl::ParseCommandLineOptions(argc, argv); test_config = cl_test_config.getValue(); pipe_name = cl_pipe_name.getValue(); server_address = cl_server_address.getValue(); @@ -154,4 +163,4 @@ int main(int argc, char **argv) { std::cout << "test-config must belong to [pipe-bytes, pipe-json, grpc, onnx]\n"; return 0; -} \ No newline at end of file +} diff --git a/test/inference/HelloMLBridge_Env.h b/test/inference/HelloMLBridge_Env.h index 8ea88ad..574f4e3 100644 --- a/test/inference/HelloMLBridge_Env.h +++ b/test/inference/HelloMLBridge_Env.h @@ -1,3 +1,11 @@ +//===----------------------------------------------------------------------===// +// +// Part of the 
MLCompilerBridge Project, under the Apache 2.0 License. +// See the LICENSE file under home directory for license and copyright +// information. +// +//===----------------------------------------------------------------------===// + #include "MLModelRunner/ONNXModelRunner/environment.h" #include "MLModelRunner/ONNXModelRunner/utils.h" #include "llvm/IR/Module.h" @@ -6,18 +14,17 @@ using namespace MLBridge; class HelloMLBridgeEnv : public Environment { Observation CurrObs; + public: - HelloMLBridgeEnv() { - setNextAgent("agent"); - }; - Observation& reset() override; - Observation& step(Action) override; + HelloMLBridgeEnv() { setNextAgent("agent"); }; + Observation &reset() override; + Observation &step(Action) override; protected: std::vector FeatureVector; }; -Observation& HelloMLBridgeEnv::step(Action Action) { +Observation &HelloMLBridgeEnv::step(Action Action) { CurrObs.clear(); std::copy(FeatureVector.begin(), FeatureVector.end(), std::back_inserter(CurrObs)); @@ -26,7 +33,7 @@ Observation& HelloMLBridgeEnv::step(Action Action) { return CurrObs; } -Observation& HelloMLBridgeEnv::reset() { +Observation &HelloMLBridgeEnv::reset() { std::copy(FeatureVector.begin(), FeatureVector.end(), std::back_inserter(CurrObs)); return CurrObs;