
Commit e38a367

chore: [collection] delete comments
Signed-off-by: inocsin <[email protected]>
1 parent 7b66f0b commit e38a367


7 files changed: +19, -185 lines


core/compiler.cpp

Lines changed: 8 additions & 8 deletions
@@ -295,14 +295,14 @@ void MapInputsAndDetermineDTypes(
     ir::StaticParams& static_params,
     ir::CollectionTypeMap& first_use_type_map) {
     // ir::TypeMap& first_use_type_map) {
-  // Associate input specs with inputs
-  // cfg.convert_info.inputs = std::move(ir::associate_specs_with_inputs(g, cfg.inputs, static_params));
-  cfg.convert_info.collection_inputs = std::move(ir::associate_specs_with_collection_inputs(g, cfg.graph_inputs, static_params));
-
-  auto collection_inputs = ir::get_collection_inputs(g, static_params);
-  LOG_DEBUG("In MapInputsAndDetermineDTypes " << "g->inputs() size " << g->inputs().size() << ", collection_inputs size " << collection_inputs.size());
-  // for (auto& in : g->inputs()) {
-  //   if (static_params.find(in) == static_params.end()) {
+  // Associate input specs with inputs
+  // cfg.convert_info.inputs = std::move(ir::associate_specs_with_inputs(g, cfg.inputs, static_params));
+  cfg.convert_info.collection_inputs = std::move(ir::associate_specs_with_collection_inputs(g, cfg.graph_inputs, static_params));
+
+  auto collection_inputs = ir::get_collection_inputs(g, static_params);
+  LOG_DEBUG("In MapInputsAndDetermineDTypes " << "g->inputs() size " << g->inputs().size() << ", collection_inputs size " << collection_inputs.size());
+  // for (auto& in : g->inputs()) {
+  //   if (static_params.find(in) == static_params.end()) {
   for (auto in : collection_inputs) {
     std::vector<ir::Input>& spec = cfg.convert_info.collection_inputs.find(in)->second;
     // ir::Input& spec = cfg.convert_info.inputs.find(in)->second;
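
For context: the surviving code associates each collection-typed graph input with a vector of ir::Input specs and then looks that vector up per input. A minimal, self-contained sketch of the same map-and-lookup pattern, with an int key standing in for const torch::jit::Value* and a hypothetical Spec struct standing in for ir::Input:

#include <iostream>
#include <string>
#include <unordered_map>
#include <vector>

// Hypothetical stand-in for ir::Input.
struct Spec {
  std::string name;
};

int main() {
  // One graph input may carry several specs, e.g. one per tuple element.
  std::unordered_map<int, std::vector<Spec>> collection_inputs = {
      {0, {{"tuple elem 0"}, {"tuple elem 1"}}},
      {1, {{"plain tensor"}}},
  };

  for (const auto& entry : collection_inputs) {
    // Mirrors: std::vector<ir::Input>& spec =
    //     cfg.convert_info.collection_inputs.find(in)->second;
    const std::vector<Spec>& spec = collection_inputs.find(entry.first)->second;
    std::cout << "input " << entry.first << " has " << spec.size() << " spec(s)\n";
  }
}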

core/ir/ir.cpp

Lines changed: 2 additions & 33 deletions
@@ -67,28 +67,6 @@ std::vector<const torch::jit::Value*> get_tensor_inputs(
     if (in->type()->isSubtypeOf(c10::TensorType::get()) && static_params.find(in) == static_params.end()) {
       input_tensors.push_back(in);
     }
-    // else if (in->type()->cast<c10::TupleType>() && static_params.find(in) == static_params.end()) {
-    // // } else if (in->type()->isSubtypeOf(c10::TupleType::create()) && static_params.find(in) == static_params.end()) {
-    //   at::ArrayRef<torch::jit::Value*> unpack_tuple = torch::jit::createTupleUnpack(in);
-    //   LOG_DEBUG("Tuple size " << unpack_tuple.size());
-    //   for (auto item: unpack_tuple) {
-    //     input_tensors.push_back(in);
-    //   }
-    // } else if (in->type()->isSubtypeOf(c10::ListType::ofTensors()) && static_params.find(in) == static_params.end()) {
-
-    //   LOG_DEBUG("List use size " << in->uses().size());
-    //   // for (auto use : in->uses()) {
-    //   //   LOG_DEBUG(use.user->outputs()[0]->debugName());
-    //   // }
-    //   // TODO: set the correct list number according to the Input IValue
-    //   int n = 2;
-    //   auto unpack_node = g->createListUnpack(in, n);
-    //   g->block()->appendNode(unpack_node);
-    //   for (auto item: unpack_node->outputs()) {
-    //     input_tensors.push_back(item);
-    //   }
-    //   LOG_DEBUG("Unpack List of size " << n);
-    // }
   }
   return input_tensors;
 }
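
The comments deleted above sketched graph-level unpacking of tuple and list inputs. For reference, a hedged sketch of those torch::jit calls, assuming a libtorch build; the graph and its input names are invented for illustration:

#include <torch/csrc/jit/ir/ir.h>

#include <iostream>
#include <memory>

int main() {
  auto g = std::make_shared<torch::jit::Graph>();

  // Tuple-typed input: unpacking yields one Value per element.
  auto tuple_ty =
      c10::TupleType::create({c10::TensorType::get(), c10::TensorType::get()});
  torch::jit::Value* tup = g->addInput("tup")->setType(tuple_ty);
  at::ArrayRef<torch::jit::Value*> elems = torch::jit::createTupleUnpack(tup);
  std::cout << "tuple unpacked into " << elems.size() << " values\n";

  // List-typed input: a ListType does not carry its length, which is why the
  // deleted draft hard-coded n = 2; the same assumption is kept here.
  torch::jit::Value* lst = g->addInput("lst")->setType(c10::ListType::ofTensors());
  const size_t n = 2;
  torch::jit::Node* unpack = g->createListUnpack(lst, n);
  g->block()->appendNode(unpack);
  std::cout << "list unpacked into " << unpack->outputs().size() << " values\n";
}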
@@ -101,11 +79,6 @@ std::vector<const torch::jit::Value*> get_collection_inputs(
   LOG_DEBUG("get_collection_inputs, inputs size " << inputs.size());
   for (auto in : inputs) {
     LOG_DEBUG("input debug name: " << in->debugName());
-    // Disregarding inputs that are not tensors or are static
-    //
-    // Ex.
-    // self.1:__torch__.alexnet -> ignored
-    // input.1:Tensor -> used
     if (in->type()->isSubtypeOf(c10::TensorType::get()) && static_params.find(in) == static_params.end()) {
       input_tensors.push_back(in);
     } else if (in->type()->kind() == torch::jit::TypeKind::TupleType && static_params.find(in) == static_params.end()) {
@@ -242,21 +215,18 @@ CollectionTypeMap get_block_first_calc_dtypes_opt_collection(torch::jit::Block*
     if (i->type() == c10::TensorType::get()) {
       torch::jit::Value* in = i;
       types.insert({in, {get_value_first_calc_dtype_opt(b, i)}});
+
     } else if(i->type()->kind() == torch::jit::TypeKind::TupleType) {
       LOG_DEBUG("get_block_first_calc_dtypes_opt_collection TupleType");
       // TODO: to evaluate the data type of tuple element
       // make sure very time get the same ptr
       c10::optional<at::ScalarType> tp = get_value_first_calc_dtype_opt(b, i);
       at::ArrayRef<torch::jit::Value*> unpack_tuple = torch::jit::createTupleUnpack(i);
       LOG_DEBUG("get_block_first_calc_dtypes_opt_collection: tuple size " << unpack_tuple.size());
-      // Assume all tuple has the same datatype
+      // TODO: calculate the tuple element type
       // std::vector<c10::optional<at::ScalarType>> dytpes(unpack_tuple.size(), tp);
       std::vector<c10::optional<at::ScalarType>> dytpes(unpack_tuple.size());
       types.insert({i, dytpes}); // insert an empty
-      // for (auto item: unpack_tuple) {
-      //   torch::jit::Value* in = item;
-      //   types.insert({in, get_value_first_calc_dtype_opt(b, i)});
-      // }
 
     } else if(i->type()->kind() == torch::jit::TypeKind::ListType) {
       // TODO: to decide the size of list and type of list element
@@ -265,7 +235,6 @@ CollectionTypeMap get_block_first_calc_dtypes_opt_collection(torch::jit::Block*
       // std::vector<c10::optional<at::ScalarType>> dytpes(i->uses().size());
       std::vector<c10::optional<at::ScalarType>> dytpes(i->uses().size(), tp);
       types.insert({i, dytpes}); // insert an empty
-
     }
   }
   return types;
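
As the replaced comment says, tuple element dtypes are left unset (one empty slot per element) until per-element calculation lands, while list inputs reuse one dtype across i->uses().size() slots. A toy sketch of that bookkeeping shape, with std::optional standing in for c10::optional and int keys standing in for torch::jit::Value* (all names hypothetical):

#include <iostream>
#include <optional>
#include <unordered_map>
#include <vector>

enum class ScalarType { Float, Half, Int };  // stand-in for at::ScalarType

using CollectionTypeMap =
    std::unordered_map<int, std::vector<std::optional<ScalarType>>>;

int main() {
  CollectionTypeMap types;

  // Tensor input: a single known slot.
  types.insert({0, {ScalarType::Float}});

  // Tuple input of arity 3: one *unset* slot per element, mirroring
  // "std::vector<c10::optional<at::ScalarType>> dytpes(unpack_tuple.size());"
  size_t tuple_size = 3;
  types.insert({1, std::vector<std::optional<ScalarType>>(tuple_size)});

  // List input: one shared dtype copied across the use-count slots.
  types.insert({2, std::vector<std::optional<ScalarType>>(4, ScalarType::Half)});

  for (const auto& [value, dtypes] : types) {
    std::cout << "value " << value << ":";
    for (const auto& dt : dtypes)
      std::cout << (dt ? " set" : " unset");
    std::cout << "\n";
  }
}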

core/ir/ir.h

Lines changed: 0 additions & 11 deletions
@@ -12,8 +12,6 @@ namespace core {
 namespace ir {
 
 struct Input : torch::CustomClassHolder {
-  // Input(std::vector<int64_t> shape);
-  // Input(std::vector<int64_t> min_shape, std::vector<int64_t> opt_shape, std::vector<int64_t> max_shape);
   Input() {};
   Input(
       std::vector<int64_t> shape,
@@ -42,15 +40,6 @@ struct Input : torch::CustomClassHolder {
 
 // Add to spec
 struct GraphInputs {
-  // GraphInputs() {}
-  // GraphInputs(torch::jit::IValue inputs) {
-  //   input_signature = inputs;
-  //   // TODO flatten IValue
-  // }
-  // GraphInputs(std::vector<Input> inputs) {
-  //   flattened_inputs = inputs;
-  //   // TODO construct the IValue
-  // }
   torch::jit::IValue input_signature; // nested Input, full input spec
   std::vector<Input> flattened_inputs; // flattend Input
   std::vector<std::vector<Input>> collection_inputs; // only support two layer nesting, e.g. ((a, b), [c, d], e)
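
GraphInputs keeps three views of one spec: the nested IValue signature, a flat list of leaf inputs, and a two-layer collection layout. A schematic sketch of how a signature like ((a, b), [c, d], e) would populate the two vector views, using a toy Input struct rather than the library's:

#include <iostream>
#include <string>
#include <vector>

struct Input { std::string name; };  // stand-in for ir::Input

int main() {
  // Nested spec ((a, b), [c, d], e):
  // flattened view, one entry per leaf tensor in traversal order
  std::vector<Input> flattened_inputs = {{"a"}, {"b"}, {"c"}, {"d"}, {"e"}};

  // collection view, one inner vector per top-level argument
  // (two-layer nesting only, per the comment in ir.h)
  std::vector<std::vector<Input>> collection_inputs = {
      {{"a"}, {"b"}},  // tuple (a, b)
      {{"c"}, {"d"}},  // list [c, d]
      {{"e"}},         // bare tensor e
  };

  std::cout << flattened_inputs.size() << " leaves across "
            << collection_inputs.size() << " top-level arguments\n";
}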

core/partitioning/shape_analysis.cpp

Lines changed: 1 addition & 24 deletions
@@ -55,44 +55,26 @@ std::unordered_map<const torch::jit::Value*, torch::jit::IValue> generateRandomI
 
     if (input.first->type()->kind() == torch::jit::TypeKind::ListType) {
       // create list
-      // auto list = c10::impl::GenericList(c10::TensorType::get());
-      // list.append(ivalues_maps[input]);
       LOG_DEBUG("generateRandomInputs, generate random input of list type");
-      // jit_inputs_ivalues.push_back(ivalues_maps[input].toList());
       std::vector<torch::jit::IValue> list;
       c10::TypePtr elementType = c10::TensorType::get();
       auto generic_list = c10::impl::GenericList(elementType);
-      LOG_DEBUG("generateRandomInputs, 0");
       for (int i = 0; i < input.second.size(); i++) {
-        // types for list is {}
-        // auto in = generateSingleInput(input.second[i], types[input.first][i]);
-        // TODO: need to decide the input type of list elements in ir.cpp
-        // c10::optional<at::ScalarType> type_opt = {};
-        // auto in = generateSingleInput(input.second[i], type_opt);
         auto in = generateSingleInput(input.second[i], types[input.first][i]);
-        // list.push_back(in.clone());
         generic_list.push_back(in.clone());
-        LOG_DEBUG("generateRandomInputs, 1");
       }
-      // c10::TypePtr elementType = list[0].type();
-      LOG_DEBUG("generateRandomInputs, 2");
-      // generic_list.append(list);
       ivalue_map[input.first] = c10::IValue(generic_list);
-      // jit_inputs_ivalues.push_back(list);
       LOG_DEBUG("generateRandomInputs, finish generate random input of list type");
     } else if (input.first->type()->kind() == torch::jit::TypeKind::TupleType) {
       // create tuple
-      // auto tuple = torch::jit::Tuple::create(ivalues_maps[input]);
       LOG_DEBUG("generateRandomInputs, generate random input of tuple type");
       std::vector<torch::jit::IValue> list;
       for (int i = 0; i < input.second.size(); i++) {
         auto in = generateSingleInput(input.second[i], types[input.first][i]);
         list.push_back(in.clone());
       }
       auto tuple = c10::ivalue::Tuple::create(list); // create tuple ptr
-
       ivalue_map[input.first] = c10::IValue(tuple);
-      // jit_inputs_ivalues.push_back(tuple);
     } else {
       LOG_DEBUG("generateRandomInputs, generate random input of tensor type");
       auto in = generateSingleInput(input.second[0], types[input.first][0]);
@@ -151,17 +133,12 @@ void getSegmentsOutputByRunning(
       jit_inputs_ivalues.push_back(ivalues_maps[input].toBool());
     } else if (input->type()->kind() == torch::jit::TypeKind::ListType) {
       // create list
-      // auto list = c10::impl::GenericList(c10::TensorType::get());
-      // list.append(ivalues_maps[input]);
       LOG_DEBUG("getSegmentsOutputByRunning, handle list type");
-      jit_inputs_ivalues.push_back(ivalues_maps[input].toList());
-      // jit_inputs_ivalues.push_back(list);
+      jit_inputs_ivalues.push_back(ivalues_maps[input].toList());;
     } else if (input->type()->kind() == torch::jit::TypeKind::TupleType) {
       // create tuple
-      // auto tuple = torch::jit::Tuple::create(ivalues_maps[input]);
       LOG_DEBUG("getSegmentsOutputByRunning, handle tuple type");
       jit_inputs_ivalues.push_back(ivalues_maps[input].toTuple());
-      // jit_inputs_ivalues.push_back(tuple);
     } else {
       TORCHTRT_THROW_ERROR("Unable to find type for value: " << input->debugName() << " to get the ivalues.\n");
     }
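
generateRandomInputs packages random tensors into collection IValues (a typed GenericList for list inputs, c10::ivalue::Tuple::create for tuple inputs), and getSegmentsOutputByRunning later pulls them back out with toList()/toTuple(). A minimal round-trip sketch, assuming a libtorch build; the shapes are arbitrary:

#include <ATen/ATen.h>
#include <ATen/core/ivalue.h>

#include <iostream>
#include <vector>

int main() {
  // List input: a GenericList typed to hold tensors, as in generateRandomInputs.
  c10::impl::GenericList generic_list(c10::TensorType::get());
  for (int i = 0; i < 2; i++) {
    at::Tensor in = at::randn({1, 3, 8, 8});  // random stand-in input
    generic_list.push_back(in.clone());
  }
  c10::IValue list_ivalue(generic_list);

  // Tuple input: collect IValues, then create the tuple pointer.
  std::vector<c10::IValue> elems;
  elems.push_back(at::randn({1, 3, 8, 8}));
  elems.push_back(at::randn({1, 4}));
  auto tuple = c10::ivalue::Tuple::create(elems);
  c10::IValue tuple_ivalue(tuple);

  // Consumers unpack with toList()/toTuple(), as getSegmentsOutputByRunning does.
  std::cout << "list size: " << list_ivalue.toList().size() << ", tuple size: "
            << tuple_ivalue.toTuple()->elements().size() << "\n";
}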

cpp/include/torch_tensorrt/torch_tensorrt.h

Lines changed: 0 additions & 1 deletion
@@ -521,7 +521,6 @@ struct TORCHTRT_API Input : torch::CustomClassHolder{
  */
 struct TORCHTRT_API GraphInputs {
   torch::jit::IValue input_signature; // nested Input, full input spec
-  // std::vector<Input> flattened_inputs; // flattend Input
 };
 
 /**
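
With the flattened vector gone, the public GraphInputs carries only the nested signature, mirroring the internal struct. A hedged sketch of populating such a signature; plain tensors stand in for torch_tensorrt::Input specs here, since only the nesting matters:

#include <ATen/core/ivalue.h>
#include <torch/torch.h>

#include <iostream>
#include <vector>

// Stand-in for the public struct after this commit: only the nested
// signature remains.
struct GraphInputs {
  c10::IValue input_signature;  // nested spec, e.g. ((a, b), c)
};

int main() {
  // Tensors stand in for torch_tensorrt::Input specs in this sketch.
  std::vector<c10::IValue> inner_elems{torch::randn({1, 3}), torch::randn({1, 3})};
  auto inner = c10::ivalue::Tuple::create(inner_elems);

  std::vector<c10::IValue> outer_elems{inner, torch::randn({2, 2})};
  GraphInputs gi;
  gi.input_signature = c10::ivalue::Tuple::create(outer_elems);

  std::cout << "signature is tuple: " << gi.input_signature.isTuple() << "\n";
}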
