 
 using namespace llvm;
 
-#define _IMR_CL_VALS(T, N) clEnumValN(TensorType::N, #T, #T),
-
-static cl::opt<TensorType> DebugReply(
-    "interactive-model-runner-echo-type", cl::init(TensorType::Invalid),
-    cl::Hidden,
+static cl::opt<bool> DebugReply(
+    "interactive-model-runner-echo-reply", cl::init(false), cl::Hidden,
     cl::desc("The InteractiveModelRunner will echo back to stderr "
-             "the data received "
-             "from the host as the specified type (for debugging purposes)."),
-    cl::values(SUPPORTED_TENSOR_TYPES(_IMR_CL_VALS)
-                   clEnumValN(TensorType::Invalid, "disable", "Don't echo")));
-
-#undef _IMR_CL_VALS
+             "the data received from the host (for debugging purposes)."));
 
 InteractiveModelRunner::InteractiveModelRunner(
     LLVMContext &Ctx, const std::vector<TensorSpec> &Inputs,
@@ -83,7 +75,8 @@ void *InteractiveModelRunner::evaluateUntyped() {
     }
     InsPoint += *ReadOrErr;
   }
-  if (DebugReply != TensorType::Invalid)
-    dbgs() << tensorValueToString(OutputBuffer.data(), OutputSpec);
+  if (DebugReply)
+    dbgs() << OutputSpec.name() << ": "
+           << tensorValueToString(OutputBuffer.data(), OutputSpec) << "\n";
   return OutputBuffer.data();
 }
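
For context, a minimal standalone sketch of the pattern the new code adopts: a hidden boolean cl::opt that, when enabled, echoes each output tensor to dbgs() prefixed by its spec name. TensorSpec and tensorValueToString are the LLVM names used in the diff above; the echoOutput() helper, the example-echo-reply flag name, and the chosen includes are illustrative assumptions, not part of the actual change.

// Sketch only (assumptions noted above): flag-guarded debug echo of a tensor.
#include "llvm/Analysis/TensorSpec.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

// Hidden boolean flag, mirroring -interactive-model-runner-echo-reply.
static cl::opt<bool> EchoReply(
    "example-echo-reply", cl::init(false), cl::Hidden,
    cl::desc("Echo received tensor data to stderr (debugging aid)."));

// Hypothetical helper: print one output tensor when echoing is enabled.
static void echoOutput(const char *Data, const TensorSpec &Spec) {
  if (EchoReply)
    dbgs() << Spec.name() << ": " << tensorValueToString(Data, Spec) << "\n";
}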