Skip to content

Commit

Permalink
[mlgo] drop the prefix _ in _model_selector
Browse files Browse the repository at this point in the history
`_` upsets the saved model freezer (assumptions about python naming).
  • Loading branch information
mtrofin committed Jun 26, 2024
1 parent 1abe22c commit 3a462d8
Show file tree
Hide file tree
Showing 2 changed files with 8 additions and 8 deletions.
8 changes: 4 additions & 4 deletions llvm/include/llvm/Analysis/ReleaseModeModelRunner.h
Original file line number Diff line number Diff line change
Expand Up @@ -71,7 +71,7 @@ class ReleaseModeModelRunner final : public MLModelRunner {
// of the model selector to {high, low}
bool InputIsPresent = true;
populateTensor(InputSpec.size(),
TensorSpec::createSpec<uint64_t>("_model_selector", {2}),
TensorSpec::createSpec<uint64_t>("model_selector", {2}),
Options.FeedPrefix, InputIsPresent);

// If we hit the "report an error" cases outlined above, continue with the
Expand All @@ -80,21 +80,21 @@ class ReleaseModeModelRunner final : public MLModelRunner {
if (Options.ModelSelector.empty() && InputIsPresent)
Ctx.emitError(
"A model selector was not specified but the underlying model "
"requires selecting one because it exposes a _model_selector input");
"requires selecting one because it exposes a model_selector input");
uint64_t High = 0;
uint64_t Low = 0;
if (!Options.ModelSelector.empty()) {
if (!InputIsPresent)
Ctx.emitError("A model selector was specified but the underlying model "
"does not expose a _model_selector input");
"does not expose a model_selector input");
const auto Hash = MD5::hash(arrayRefFromStringRef(Options.ModelSelector));
High = Hash.high();
Low = Hash.low();
}
getTensor<uint64_t>(InputSpec.size())[0] = High;
getTensor<uint64_t>(InputSpec.size())[1] = Low;
// At this point, the model selector is set up. If the user didn't provide
// one, but the model has a _model_selector, it'll be set to (0, 0) which
// one, but the model has a model_selector, it'll be set to (0, 0) which
// the composite model should treat as error as part of its implementation
// (but that should only matter if there is a custom handler that doesn't
// exit on error)
Expand Down
8 changes: 4 additions & 4 deletions llvm/unittests/Analysis/MLModelRunnerTest.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -102,7 +102,7 @@ class ComposedAOTModel final {
public:
ComposedAOTModel() = default;
int LookupArgIndex(const std::string &Name) {
if (Name == "prefix__model_selector")
if (Name == "prefix_model_selector")
return 2;
return getModel()->LookupArgIndex(Name);
}
Expand Down Expand Up @@ -201,7 +201,7 @@ TEST(ReleaseModelRunner, ModelSelectorNoInputFeaturePresent) {
EXPECT_DEATH(std::make_unique<ReleaseModeModelRunner<AdditionAOTModel>>(
Ctx, Inputs, "", makeOptions().setModelSelector(M2Selector)),
"A model selector was specified but the underlying model does "
"not expose a _model_selector input");
"not expose a model_selector input");
}

TEST(ReleaseModelRunner, ModelSelectorNoSelectorGiven) {
Expand All @@ -212,10 +212,10 @@ TEST(ReleaseModelRunner, ModelSelectorNoSelectorGiven) {
std::make_unique<ReleaseModeModelRunner<ComposedAOTModel>>(
Ctx, Inputs, "", makeOptions()),
"A model selector was not specified but the underlying model requires "
"selecting one because it exposes a _model_selector input");
"selecting one because it exposes a model_selector input");
}

// Test that we correctly set up the _model_selector tensor value. We are only
// Test that we correctly set up the model_selector tensor value. We are only
// responsible for what happens if the user doesn't specify a value (but the
// model supports the feature), or if the user specifies one, and we correctly
// populate the tensor, and do so upfront (in case the model implementation
Expand Down

0 comments on commit 3a462d8

Please sign in to comment.