diff --git a/llvm/include/llvm/Analysis/InlineAdvisor.h b/llvm/include/llvm/Analysis/InlineAdvisor.h
index 53c018d15cd71a..2740106bc7db80 100644
--- a/llvm/include/llvm/Analysis/InlineAdvisor.h
+++ b/llvm/include/llvm/Analysis/InlineAdvisor.h
@@ -36,9 +36,8 @@ struct ReplayInlinerSettings;
 ///
 /// - Development mode, for training new models.
 /// In this mode, we trade off runtime performance for flexibility. This mode
-/// requires the full C Tensorflow API library, and evaluates models
-/// dynamically. This mode also permits generating training logs, for offline
-/// training.
+/// requires the TFLite library, and evaluates models dynamically. This mode
+/// also permits generating training logs, for offline training.
 ///
 /// - Dynamically load an advisor via a plugin (PluginInlineAdvisorAnalysis)
 enum class InliningAdvisorMode : int { Default, Release, Development };
diff --git a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
index 77ae60059ce9eb..06925e620bdd61 100644
--- a/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
+++ b/llvm/include/llvm/Analysis/InlineModelFeatureMaps.h
@@ -92,8 +92,8 @@ constexpr bool isHeuristicInlineCostFeature(InlineCostFeatureIndex Feature) {
 
 // List of features. Each feature is defined through a triple:
 // - the name of an enum member, which will be the feature index
-// - a textual name, used for Tensorflow model binding (so it needs to match the
-// names used by the Tensorflow model)
+// - a textual name, used for ML model binding (so it needs to match the
+// names used by the ML model).
 // - a documentation description. Currently, that is not used anywhere
 // programmatically, and serves as workaround to inability of inserting comments
 // in macros.
diff --git a/llvm/include/llvm/Analysis/MLModelRunner.h b/llvm/include/llvm/Analysis/MLModelRunner.h
index 903411fbdf7ecf..21f155de85aecb 100644
--- a/llvm/include/llvm/Analysis/MLModelRunner.h
+++ b/llvm/include/llvm/Analysis/MLModelRunner.h
@@ -17,7 +17,9 @@ namespace llvm {
 class LLVMContext;
 
 /// MLModelRunner interface: abstraction of a mechanism for evaluating a
-/// tensorflow "saved model".
+/// ML model. More abstractly, evaluating a function that has tensors as
+/// arguments, described via TensorSpecs, and returns a tensor. Currently, the
+/// latter is assumed to be a scalar, in the absence of more elaborate scenarios.
 /// NOTE: feature indices are expected to be consistent all accross
 /// MLModelRunners (pertaining to the same model), and also Loggers (see
 /// TFUtils.h)
diff --git a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
index 7e29bbd2be0961..1a79f2c36c3f7d 100644
--- a/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
+++ b/llvm/include/llvm/Analysis/ModelUnderTrainingRunner.h
@@ -23,9 +23,10 @@ namespace llvm {
 
-/// ModelUnderTrainingRunner - training mode implementation. It uses TF C APIs
+/// ModelUnderTrainingRunner - training mode implementation. It uses TFLite
 /// to dynamically load and evaluate a TF SavedModel
-/// (https://www.tensorflow.org/guide/saved_model). Runtime performance is
+/// (https://www.tensorflow.org/guide/saved_model) converted to TFLite. See
+/// lib/Analysis/models/saved-model-to-tflite.py. Runtime performance is
 /// sacrificed for ease of use while training.
 class ModelUnderTrainingRunner final : public MLModelRunner {
 public:
diff --git a/llvm/include/llvm/Analysis/TensorSpec.h b/llvm/include/llvm/Analysis/TensorSpec.h
index c50507b7a6b114..d6b23cf56f6a5f 100644
--- a/llvm/include/llvm/Analysis/TensorSpec.h
+++ b/llvm/include/llvm/Analysis/TensorSpec.h
@@ -26,11 +26,15 @@ namespace llvm {
 /// Machine Learning on Heterogeneous Distributed Systems", section 4.2, para 2:
 /// https://static.googleusercontent.com/media/research.google.com/en//pubs/archive/45166.pdf)
 ///
-/// Known tensor types. The left part is the C type, the right is a name we
-/// can use to identify the type (to implement TensorSpec equality checks), and
-/// to use, if needed, when mapping to an underlying evaluator's type system.
-/// The main requirement is that the C type we use has the same size and
-/// encoding (e.g. endian-ness) as the one used by the evaluator.
+/// Note that the design is motivated by Tensorflow, but it is not intended to
+/// be Tensorflow-specific.
+///
+/// Known tensor types. The left part is the C type, the
+/// right is a name we can use to identify the type (to implement TensorSpec
+/// equality checks), and to use, if needed, when mapping to an underlying
+/// evaluator's type system. The main requirement is that the C type we use has
+/// the same size and encoding (e.g. endian-ness) as the one used by the
+/// evaluator.
 #define SUPPORTED_TENSOR_TYPES(M)                                              \
   M(float, Float)                                                              \
   M(double, Double)                                                            \
diff --git a/llvm/include/llvm/Analysis/Utils/TFUtils.h b/llvm/include/llvm/Analysis/Utils/TFUtils.h
index 04bb8af3a515a8..817702b869e932 100644
--- a/llvm/include/llvm/Analysis/Utils/TFUtils.h
+++ b/llvm/include/llvm/Analysis/Utils/TFUtils.h
@@ -1,4 +1,4 @@
-//===- TFUtils.h - utilities for tensorflow C API ---------------*- C++ -*-===//
+//===- TFUtils.h - utilities for TFLite -------------------------*- C++ -*-===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -93,7 +93,7 @@ class TFModelEvaluator final {
     return static_cast<T *>(getUntypedInput(Index));
   }
 
-  /// Returns true if the tensorflow model was loaded successfully, false
+  /// Returns true if the model was loaded successfully, false
   /// otherwise.
   bool isValid() const { return !!Impl; }
 
diff --git a/llvm/include/llvm/Config/llvm-config.h.cmake b/llvm/include/llvm/Config/llvm-config.h.cmake
index 8d0a1bc5dc5658..17b2d47fb6c43a 100644
--- a/llvm/include/llvm/Config/llvm-config.h.cmake
+++ b/llvm/include/llvm/Config/llvm-config.h.cmake
@@ -98,7 +98,7 @@
 /* Define if zstd compression is available */
 #cmakedefine01 LLVM_ENABLE_ZSTD
 
-/* Define if LLVM is using tflite instead of libtensorflow */
+/* Define if LLVM is using tflite */
 #cmakedefine LLVM_HAVE_TFLITE
 
 /* Define to 1 if you have the <sysexits.h> header file. */
diff --git a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
index 456d58660680d7..7d51302bcc1adb 100644
--- a/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
+++ b/llvm/lib/Analysis/DevelopmentModeInlineAdvisor.cpp
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements a model runner using Tensorflow C APIs, allowing the
+// This file implements a model runner using TFLite, allowing the
 // loading of a model from a command line option.
 //
 //===----------------------------------------------------------------------===//
diff --git a/llvm/lib/Analysis/TFLiteUtils.cpp b/llvm/lib/Analysis/TFLiteUtils.cpp
index b2862033e9cfbf..2762e22f28cef3 100644
--- a/llvm/lib/Analysis/TFLiteUtils.cpp
+++ b/llvm/lib/Analysis/TFLiteUtils.cpp
@@ -1,4 +1,4 @@
-//===- TFUtils.cpp - tensorflow evaluation utilities ----------------------===//
+//===- TFUtils.cpp - TFLite-based evaluation utilities --------------------===//
 //
 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
 // See https://llvm.org/LICENSE.txt for license information.
@@ -6,7 +6,7 @@
 //
 //===----------------------------------------------------------------------===//
 //
-// This file implements utilities for interfacing with tensorflow C APIs.
+// This file implements utilities for interfacing with TFLite.
 //
 //===----------------------------------------------------------------------===//
 #include "llvm/Config/config.h"
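A note on the header comments updated above (MLModelRunner.h and TensorSpec.h): the abstraction they describe is an evaluator whose inputs are fixed-size tensors, each identified by a spec (a name plus element type and shape), and whose output is currently a single scalar. The sketch below is a minimal, self-contained C++ illustration of that shape of interface, including the typed-view-over-untyped-storage pattern visible in the TFModelEvaluator::getInput<T>() context line. Every name in it (ToySpec, ToyRunner, the feature names) is invented for illustration; it is not the LLVM API.

```cpp
// Minimal, self-contained illustration (not the LLVM API) of the idea in
// MLModelRunner.h / TensorSpec.h: each input tensor is described by a spec,
// callers fill an untyped buffer through a typed view, and evaluation
// returns a single scalar. All names here are hypothetical.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <string>
#include <utility>
#include <vector>

struct ToySpec {
  std::string Name;    // name used to bind the buffer to a model input
  size_t ElementCount; // product of the tensor shape's dimensions
  size_t ElementSize;  // sizeof the element's C type
};

class ToyRunner {
  std::vector<ToySpec> Specs;
  std::vector<std::vector<char>> Buffers; // one untyped buffer per input

public:
  explicit ToyRunner(std::vector<ToySpec> S) : Specs(std::move(S)) {
    for (const ToySpec &Spec : Specs)
      Buffers.emplace_back(Spec.ElementCount * Spec.ElementSize, 0);
  }

  void *getUntypedTensor(size_t I) { return Buffers[I].data(); }

  // Typed view over untyped storage, the same pattern as getInput<T> above.
  template <typename T> T *getTensor(size_t I) {
    return static_cast<T *>(getUntypedTensor(I));
  }

  // Stand-in "model": the result is a scalar computed from all inputs.
  int64_t evaluate() {
    int64_t Result = 0;
    for (size_t I = 0; I < Specs.size(); ++I) {
      const int64_t *Data = getTensor<int64_t>(I);
      for (size_t E = 0; E < Specs[I].ElementCount; ++E)
        Result += Data[E];
    }
    return Result;
  }
};

int main() {
  ToyRunner Runner({{"callee_size", 1, sizeof(int64_t)},
                    {"callsite_args", 4, sizeof(int64_t)}});
  *Runner.getTensor<int64_t>(0) = 42;
  int64_t *Args = Runner.getTensor<int64_t>(1);
  for (int64_t I = 0; I < 4; ++I)
    Args[I] = I;
  std::cout << Runner.evaluate() << "\n"; // 42 + (0 + 1 + 2 + 3) = 48
  return 0;
}
```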
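Similarly, the llvm-config.h.cmake hunk only rewords a comment, but the mechanism behind it may be worth recalling: LLVM_HAVE_TFLITE is a cmakedefine emitted into the generated llvm-config.h, and the TFLite-backed pieces (e.g. the body of TFLiteUtils.cpp) are only built and used when it is defined. The snippet below is a hedged, standalone sketch of that conditional-compilation mechanism, with the macro supplied on the compiler command line instead of by CMake; advisorModes() is a hypothetical helper, not LLVM code.

```cpp
// Standalone sketch of how a "#cmakedefine LLVM_HAVE_TFLITE" is consumed.
// In LLVM the macro comes from the generated llvm-config.h and guards the
// TFLite-backed (development mode) code paths; here it is expected on the
// compiler command line. advisorModes() is invented for illustration.
#include <iostream>

#if defined(LLVM_HAVE_TFLITE)
static const char *advisorModes() { return "default, release, development"; }
#else
static const char *advisorModes() { return "default, release"; }
#endif

int main() {
  std::cout << "inlining advisor modes available: " << advisorModes() << "\n";
  return 0;
}
```

Compiling this once with `-DLLVM_HAVE_TFLITE` and once without shows the two configurations, mirroring how a build of LLVM without TFLite simply compiles out the TFLite-dependent parts.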