
Commit 7abed54

mtrofin authored and AlexisPerry committed
[mlgo] drop the prefix _ in _model_selector
`_` upsets the saved model freezer (assumptions about python naming).
1 parent 06b4382 commit 7abed54

File tree

2 files changed: +8 -8 lines changed


llvm/include/llvm/Analysis/ReleaseModeModelRunner.h

Lines changed: 4 additions & 4 deletions

@@ -71,7 +71,7 @@ class ReleaseModeModelRunner final : public MLModelRunner {
     // of the model selector to {high, low}
     bool InputIsPresent = true;
     populateTensor(InputSpec.size(),
-                   TensorSpec::createSpec<uint64_t>("_model_selector", {2}),
+                   TensorSpec::createSpec<uint64_t>("model_selector", {2}),
                    Options.FeedPrefix, InputIsPresent);
 
     // If we hit the "report an error" cases outlined above, continue with the
@@ -80,21 +80,21 @@ class ReleaseModeModelRunner final : public MLModelRunner {
     if (Options.ModelSelector.empty() && InputIsPresent)
       Ctx.emitError(
           "A model selector was not specified but the underlying model "
-          "requires selecting one because it exposes a _model_selector input");
+          "requires selecting one because it exposes a model_selector input");
     uint64_t High = 0;
     uint64_t Low = 0;
     if (!Options.ModelSelector.empty()) {
       if (!InputIsPresent)
         Ctx.emitError("A model selector was specified but the underlying model "
-                      "does not expose a _model_selector input");
+                      "does not expose a model_selector input");
       const auto Hash = MD5::hash(arrayRefFromStringRef(Options.ModelSelector));
       High = Hash.high();
       Low = Hash.low();
     }
     getTensor<uint64_t>(InputSpec.size())[0] = High;
     getTensor<uint64_t>(InputSpec.size())[1] = Low;
     // At this point, the model selector is set up. If the user didn't provide
-    // one, but the model has a _model_selector, it'll be set to (0, 0) which
+    // one, but the model has a model_selector, it'll be set to (0, 0) which
     // the composite model should treat as error as part of its implementation
     // (but that should only matter if there is a custom handler that doesn't
     // exit on error)
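
For context on the values written above: the runner hashes the selector string with MD5 and stores the digest's high and low 64-bit halves in the 2-element model_selector tensor. Below is a minimal standalone sketch of that mapping, using LLVM's MD5 and StringExtras utilities; the helper name selectorToTensorValue is invented here for illustration and is not part of this commit.

#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Support/MD5.h"

#include <cstdint>
#include <utility>

// Sketch: maps a model selector string to the {high, low} pair that the
// runner writes into the 2-element "model_selector" tensor shown above.
static std::pair<uint64_t, uint64_t>
selectorToTensorValue(llvm::StringRef Selector) {
  const llvm::MD5::MD5Result Hash =
      llvm::MD5::hash(llvm::arrayRefFromStringRef(Selector));
  return {Hash.high(), Hash.low()};
}

Per the comment in the diff, if the user supplies no selector the tensor stays at (0, 0), which the composite model is expected to treat as an error.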

llvm/unittests/Analysis/MLModelRunnerTest.cpp

Lines changed: 4 additions & 4 deletions

@@ -102,7 +102,7 @@ class ComposedAOTModel final {
 public:
   ComposedAOTModel() = default;
   int LookupArgIndex(const std::string &Name) {
-    if (Name == "prefix__model_selector")
+    if (Name == "prefix_model_selector")
       return 2;
     return getModel()->LookupArgIndex(Name);
   }
@@ -201,7 +201,7 @@ TEST(ReleaseModelRunner, ModelSelectorNoInputFeaturePresent) {
   EXPECT_DEATH(std::make_unique<ReleaseModeModelRunner<AdditionAOTModel>>(
                    Ctx, Inputs, "", makeOptions().setModelSelector(M2Selector)),
                "A model selector was specified but the underlying model does "
-               "not expose a _model_selector input");
+               "not expose a model_selector input");
 }
 
 TEST(ReleaseModelRunner, ModelSelectorNoSelectorGiven) {
@@ -212,10 +212,10 @@ TEST(ReleaseModelRunner, ModelSelectorNoSelectorGiven) {
       std::make_unique<ReleaseModeModelRunner<ComposedAOTModel>>(
           Ctx, Inputs, "", makeOptions()),
       "A model selector was not specified but the underlying model requires "
-      "selecting one because it exposes a _model_selector input");
+      "selecting one because it exposes a model_selector input");
 }
 
-// Test that we correctly set up the _model_selector tensor value. We are only
+// Test that we correctly set up the model_selector tensor value. We are only
 // responsbile for what happens if the user doesn't specify a value (but the
 // model supports the feature), or if the user specifies one, and we correctly
 // populate the tensor, and do so upfront (in case the model implementation
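
For context on why the expected argument name in the fake model changes: the runner looks up the compiled model's argument by the feed prefix concatenated with the tensor spec name, so dropping the leading underscore from the spec turns the composed name from prefix__model_selector into prefix_model_selector. A tiny illustrative check follows; the literal "prefix_" feed prefix is an assumption inferred from the test string, not quoted from makeOptions().

#include <cassert>
#include <string>

int main() {
  // Illustration only: the "prefix_" feed prefix is assumed from the test
  // string; the spec name is the one renamed by this commit.
  const std::string FeedPrefix = "prefix_";
  const std::string SpecName = "model_selector";
  // Composed argument name that the fake model's LookupArgIndex now matches.
  assert(FeedPrefix + SpecName == "prefix_model_selector");
  return 0;
}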
