Skip to content

Revert "Save some size in dtype_util when dtype selective build is not in use" #10410

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 1 commit into from
Apr 23, 2025
Merged
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
39 changes: 2 additions & 37 deletions kernels/portable/cpu/util/dtype_util.h
Original file line number Diff line number Diff line change
Expand Up @@ -228,7 +228,7 @@ enum class SupportedTensorDtypes {
namespace internal {

template <typename CTYPE_COMPUTE, const char* op_name>
load_to_compute_fn<CTYPE_COMPUTE> get_load_to_compute_fn_impl(
load_to_compute_fn<CTYPE_COMPUTE> get_load_to_compute_fn(
const Tensor& t,
SupportedTensorDtypes dtypes) {
switch (dtypes) {
Expand All @@ -252,7 +252,7 @@ load_to_compute_fn<CTYPE_COMPUTE> get_load_to_compute_fn_impl(
}

template <typename CTYPE_COMPUTE, const char* op_name>
store_compute_to_tensor_fn<CTYPE_COMPUTE> get_store_compute_to_tensor_fn_impl(
store_compute_to_tensor_fn<CTYPE_COMPUTE> get_store_compute_to_tensor_fn(
const Tensor& t,
SupportedTensorDtypes dtypes) {
switch (dtypes) {
Expand Down Expand Up @@ -285,41 +285,6 @@ store_compute_to_tensor_fn<CTYPE_COMPUTE> get_store_compute_to_tensor_fn_impl(
return nullptr;
}

#ifndef EXECUTORCH_SELECTIVE_BUILD_DTYPE
// Shared fallback operator name used when dtype selective build is disabled:
// every elementwise op can then reuse one template instantiation of the
// *_impl dispatch helpers instead of stamping out one per operator, which
// reduces binary size.
constexpr const char kGenericElementwiseOpName[] = "generic_elementwise_op";
#endif // EXECUTORCH_SELECTIVE_BUILD_DTYPE

// Thin wrapper over get_load_to_compute_fn_impl that selects which operator
// name to instantiate the impl with, based on whether dtype selective build
// is enabled. Returns the impl's result: a load_to_compute_fn<CTYPE_COMPUTE>
// chosen from `t` and `dtypes` (presumably a loader converting elements of
// `t` to CTYPE_COMPUTE — impl body not fully visible here; confirm in
// dtype_util.h).
template <typename CTYPE_COMPUTE, const char* op_name>
load_to_compute_fn<CTYPE_COMPUTE> get_load_to_compute_fn(
const Tensor& t,
SupportedTensorDtypes dtypes) {
// NOTE: Selective build relies on the operator name being passed
// here. When it's *not* active, using the same operator name
// everywhere saves on size because we don't require a new template
// instantiation for every operator.
return get_load_to_compute_fn_impl<
CTYPE_COMPUTE,
#ifdef EXECUTORCH_SELECTIVE_BUILD_DTYPE
op_name
#else // EXECUTORCH_SELECTIVE_BUILD_DTYPE
kGenericElementwiseOpName
#endif // EXECUTORCH_SELECTIVE_BUILD_DTYPE
>(t, dtypes);
}

// Counterpart of get_load_to_compute_fn: forwards to
// get_store_compute_to_tensor_fn_impl, substituting the shared
// kGenericElementwiseOpName when dtype selective build is off so that a
// single template instantiation serves all operators (size optimization).
// Returns whatever the impl selects for (`t`, `dtypes`) — a
// store_compute_to_tensor_fn<CTYPE_COMPUTE>, which may be nullptr per the
// impl's visible fallthrough; confirm against the full impl.
template <typename CTYPE_COMPUTE, const char* op_name>
store_compute_to_tensor_fn<CTYPE_COMPUTE> get_store_compute_to_tensor_fn(
const Tensor& t,
SupportedTensorDtypes dtypes) {
// Same selective-build trick as get_load_to_compute_fn above: only pass
// the real op_name through when EXECUTORCH_SELECTIVE_BUILD_DTYPE is set.
return get_store_compute_to_tensor_fn_impl<
CTYPE_COMPUTE,
#ifdef EXECUTORCH_SELECTIVE_BUILD_DTYPE
op_name
#else // EXECUTORCH_SELECTIVE_BUILD_DTYPE
kGenericElementwiseOpName
#endif // EXECUTORCH_SELECTIVE_BUILD_DTYPE
>(t, dtypes);
}
bool check_tensor_dtype(
const Tensor t,
SupportedTensorDtypes dtypes,
Expand Down
Loading