Bump LLVM #428

Open · wants to merge 1 commit into base: main
2 changes: 1 addition & 1 deletion cmake/llvm-version-imex.txt
@@ -1 +1 @@
- add6b2f35f2bcf1f59a2ab2d5b3dab124fe0895a
+ 7842374103b26933d71a8fe354cd4d8715d55b1c
2 changes: 1 addition & 1 deletion cmake/llvm-version.txt
@@ -1 +1 @@
- add6b2f35f2bcf1f59a2ab2d5b3dab124fe0895a
+ 7842374103b26933d71a8fe354cd4d8715d55b1c
6 changes: 3 additions & 3 deletions include/gc/Dialect/LLVMIR/XeVMOps.td
@@ -70,7 +70,7 @@ def XeVM_L1StoreCacheControl : XeVM_StoreCacheControl<"L1">;
def XeVM_L3StoreCacheControl : XeVM_StoreCacheControl<"L3">;

def XeVM_BlockLoad2dOp : XeVM_Op<"blockload2d">,
- Results<(outs FixedVectorOf<[XeVM_ElemType]>:$res)>,
+ Results<(outs FixedVectorOfRankAndType<[1,2,3], [XeVM_ElemType]>:$res)>,
Arguments<(ins
Arg<LLVM_AnyPointer, "", [MemRead]>:$ptr,
I32:$base_width,
@@ -137,7 +137,7 @@ def XeVM_BlockStore2dOp : XeVM_Op<"blockstore2d">,
I32Attr:$tile_width,
I32Attr:$tile_height,
I32Attr:$v_blocks,
- FixedVectorOf<[XeVM_ElemType]>:$stored_val,
+ FixedVectorOfRankAndType<[1, 2, 3], [XeVM_ElemType]>:$stored_val,
DefaultValuedAttr<XeVM_L1StoreCacheControl, "::mlir::xevm::L1StoreCacheControl::DEFAULT">:$l1_cache_control,
DefaultValuedAttr<XeVM_L3StoreCacheControl, "::mlir::xevm::L3StoreCacheControl::DEFAULT">:$l3_cache_control
)> {
@@ -243,7 +243,7 @@ def XeVM_PrecisionTypeAttr : I32EnumAttr<"PrecisionType",
}

def XeVM_DPASOp : XeVM_Op<"dpas">,
- Results<(outs FixedVectorOf<[XeVM_MatrixElemType]>:$d)>,
+ Results<(outs FixedVectorOfRankAndType<[1], [XeVM_MatrixElemType]>:$d)>,
Arguments<(ins
FixedVectorOfRankAndType<[1], [XeVM_MatrixElemType]>:$c,
FixedVectorOfRankAndType<[1], [XeVM_MatrixElemType]>:$a,
8 changes: 4 additions & 4 deletions include/gc/Transforms/Microkernel/BrgemmRuntimeUtils.h
@@ -27,13 +27,13 @@ static inline int64_t getDnnlDataTypeVal(RewriterBase &rewriter,
auto context = rewriter.getContext();
auto tattr = dyn_cast_or_null<TypeAttr>(attr);
assert(tattr);
- if (tattr == TypeAttr::get(FloatType::getF32(context))) {
+ if (tattr == TypeAttr::get(Float32Type::get(context))) {
return static_cast<int64_t>(dnnl_f32);
- } else if (tattr == TypeAttr::get(FloatType::getF64(context))) {
+ } else if (tattr == TypeAttr::get(Float64Type::get(context))) {
return static_cast<int64_t>(dnnl_f64);
- } else if (tattr == TypeAttr::get(FloatType::getBF16(context))) {
+ } else if (tattr == TypeAttr::get(BFloat16Type::get(context))) {
return static_cast<int64_t>(dnnl_bf16);
- } else if (tattr == TypeAttr::get(FloatType::getF16(context))) {
+ } else if (tattr == TypeAttr::get(Float16Type::get(context))) {
return static_cast<int64_t>(dnnl_f16);
return static_cast<int64_t>(dnnl_f16);
} else if (tattr == TypeAttr::get(
IntegerType::get(context, 32, IntegerType::Signed))) {
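Note: this file tracks the upstream MLIR removal of the static `FloatType::getF32/getF64/getBF16/getF16` helpers; each builtin float type is now built through its own class. A minimal sketch of the new spelling (the helper function is illustrative, not part of the patch):

```cpp
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/MLIRContext.h"

// Illustrative only: per-class constructors replace the FloatType::getXX helpers.
static void buildFloatTypes(mlir::MLIRContext *ctx) {
  mlir::FloatType f32 = mlir::Float32Type::get(ctx);   // was FloatType::getF32(ctx)
  mlir::FloatType bf16 = mlir::BFloat16Type::get(ctx); // was FloatType::getBF16(ctx)
  (void)f32;
  (void)bf16;
}
```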
5 changes: 3 additions & 2 deletions include/gc/Transforms/Utils/StructuredOpMatcher.h
@@ -163,7 +163,7 @@ struct HasStaticStrides {
SmallVector<int64_t> strides;
if (auto memRefType = dyn_cast_or_null<MemRefType>(operandType)) {
int64_t offset;
- if (failed(getStridesAndOffset(memRefType, strides, offset)))
+ if (failed(memRefType.getStridesAndOffset(strides, offset)))
return false;
if (llvm::any_of(strides, [](int64_t stride) {
return stride == ShapedType::kDynamic;
@@ -244,7 +244,8 @@ struct NumDpsInits {
// Callable object to validate number of input operands for `op`.
struct NumDpsInputs {
NumDpsInputs() = delete;
- explicit NumDpsInputs(std::function<bool(size_t)> fun) : fun(std::move(fun)){};
+ explicit NumDpsInputs(std::function<bool(size_t)> fun)
+     : fun(std::move(fun)){};

bool operator()(Operation *op) {
if (auto linalgOp = dyn_cast_or_null<linalg::LinalgOp>(op))
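Note: `getStridesAndOffset` moved from a free function to a member of `MemRefType` upstream; the same mechanical change appears in GpuOclRuntime.cpp, MemRefToCPURuntime.cpp, and ValueUtils.cpp below. A short sketch of the new call shape (assuming current MLIR headers; the helper name is ours):

```cpp
#include "mlir/IR/BuiltinTypes.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"

// Illustrative helper: true if `type` has a fully static strided layout.
static bool hasStaticStridedLayout(mlir::MemRefType type) {
  llvm::SmallVector<int64_t> strides;
  int64_t offset = 0;
  // Was the free function: getStridesAndOffset(type, strides, offset).
  if (mlir::failed(type.getStridesAndOffset(strides, offset)))
    return false;
  return offset != mlir::ShapedType::kDynamic &&
         !llvm::is_contained(strides, mlir::ShapedType::kDynamic);
}
```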
8 changes: 5 additions & 3 deletions lib/gc/Dialect/Linalgx/Utils.cpp
@@ -385,7 +385,7 @@ bool isGenericAttrEquivalent(linalg::GenericOp op, ShapedType shapeA,
DenseMap<AffineExpr, AffineExpr> replaceMap;
std::map<unsigned, utils::IteratorType> iterMap;
// get shape-to-loop map
- AffineMap inverse = inversePermutation(concatAffineMaps(inMaps));
+ AffineMap inverse = inversePermutation(concatAffineMaps(inMaps, context));
assert(inverse && "shape-to-loops map to be non-null");
assert(dimSize == inverse.getResults().size());
// renumber the dim id based on shape-to-loop map
@@ -492,8 +492,10 @@ bool isGenericPackedMatmulOpImpl(linalg::GenericOp genericOp,
return false;
}
// Check for packing
- ValueRange inputs = genericOp.getDpsInputs();
- ValueRange outputs = genericOp.getDpsInits();
+ auto inputsVec = genericOp.getDpsInputs();
+ ValueRange inputs = inputsVec;
+ auto outputsVec = genericOp.getDpsInits();
+ ValueRange outputs = outputsVec;
auto shapeA = cast<ShapedType>(inputs.front().getType());
auto shapeB = cast<ShapedType>(inputs.back().getType());
auto shapeC = cast<ShapedType>(outputs.back().getType());
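Note: two upstream changes meet in this file. `concatAffineMaps` now takes the `MLIRContext` explicitly, and `getDpsInputs()`/`getDpsInits()` appear to return an owning `SmallVector` by value, so binding the result directly to a `ValueRange` would leave the range pointing at a destroyed temporary. A sketch of the lifetime fix (the helper name is ours):

```cpp
#include "mlir/Dialect/Linalg/IR/Linalg.h"

// Illustrative: why the patch names the vectors before taking ValueRange views.
static mlir::Value firstDpsInput(mlir::linalg::GenericOp genericOp) {
  // Pre-patch hazard: the SmallVector returned by getDpsInputs() dies at the
  // end of the full expression, leaving the ValueRange dangling:
  //   mlir::ValueRange inputs = genericOp.getDpsInputs(); // dangling!
  auto inputsVec = genericOp.getDpsInputs(); // owning vector, kept alive
  mlir::ValueRange inputs = inputsVec;       // cheap non-owning view
  return inputs.front();
}
```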
6 changes: 3 additions & 3 deletions lib/gc/Dialect/Microkernel/MicrokernelOps.cpp
@@ -551,11 +551,11 @@ static LogicalResult verifyBrgemmDataTypes(ArrayAttr dtypes,

auto context = op.getContext();

- #define FTAttr(t) TypeAttr::get(FloatType::get##t(context))
+ #define FTAttr(t) TypeAttr::get(t::get(context))
#define ITAttr(s, w) TypeAttr::get(IntegerType::get(context, w, IntegerType::s))
SmallVector<std::pair<TypeAttr, TypeAttr>> validDataTypes = {
- {FTAttr(F32), FTAttr(F32)},
- {FTAttr(BF16), FTAttr(BF16)},
+ {FTAttr(Float32Type), FTAttr(Float32Type)},
+ {FTAttr(BFloat16Type), FTAttr(BFloat16Type)},
{ITAttr(Unsigned, 8), ITAttr(Signed, 8)},
{ITAttr(Signed, 8), ITAttr(Unsigned, 8)},
{ITAttr(Unsigned, 8), ITAttr(Unsigned, 8)},
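Note: the `FTAttr` rewrite is the same type-API migration; instead of token-pasting a suffix onto `FloatType::get`, the macro now receives the concrete type class. A hedged sketch of the new expansion:

```cpp
#include "mlir/IR/BuiltinAttributes.h"
#include "mlir/IR/BuiltinTypes.h"

using namespace mlir;

// New-style macro: the argument is the full type class name.
#define FTAttr(t) TypeAttr::get(t::get(context))

static TypeAttr makeF32Attr(MLIRContext *context) {
  return FTAttr(Float32Type); // TypeAttr::get(Float32Type::get(context))
}
```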
7 changes: 4 additions & 3 deletions lib/gc/ExecutionEngine/GPURuntime/ocl/GpuOclRuntime.cpp
@@ -718,7 +718,7 @@ StringRef createStaticMain(OpBuilder &builder, ModuleOp &module,
auto offsetPtr = constArgs.end();
constArgs.emplace_back(0);
constArgs.append(shape.begin(), shape.end());
- if (failed(getStridesAndOffset(type, constArgs, *offsetPtr))) {
+ if (failed(type.getStridesAndOffset(constArgs, *offsetPtr))) {
gcLogD("Failed to get strides and offset of arg", i,
" of the function ", funcName.begin());
return {};
@@ -929,8 +929,9 @@ OclModuleBuilder::build(const OclRuntime::Ext &ext) {
builder.getI64IntegerAttr(static_cast<int64_t>(wgSize)));
TargetDeviceSpecInterface devSpec =
TargetDeviceSpecAttr::get(ctx, dltiAttrs);
- auto sysSpec =
-     TargetSystemSpecAttr::get(ctx, ArrayRef(std::pair(devStr, devSpec)));
+ DataLayoutEntryInterface dl =
+     DataLayoutEntryAttr::get(ctx, devStr, devSpec);
+ auto sysSpec = TargetSystemSpecAttr::get(ctx, ArrayRef(dl));
mod = mlirModule.clone();
mod.getOperation()->setAttr("#dlti.sys_spec", sysSpec);
PassManager pm{ctx};
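Note: `TargetSystemSpecAttr::get` now consumes `DataLayoutEntryInterface` entries instead of `(name, spec)` pairs, so the device spec is wrapped in a `DataLayoutEntryAttr` first. A sketch mirroring the patch (the parameter types are our assumption from the call sites):

```cpp
#include "mlir/Dialect/DLTI/DLTI.h"
#include "mlir/Interfaces/DataLayoutInterfaces.h"

// Hedged sketch of the new DLTI wiring.
static mlir::TargetSystemSpecAttr
makeSystemSpec(mlir::MLIRContext *ctx, mlir::StringAttr devStr,
               mlir::TargetDeviceSpecInterface devSpec) {
  mlir::DataLayoutEntryInterface dl =
      mlir::DataLayoutEntryAttr::get(ctx, devStr, devSpec);
  return mlir::TargetSystemSpecAttr::get(ctx, llvm::ArrayRef(dl));
}
```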
2 changes: 1 addition & 1 deletion lib/gc/Transforms/DecomposeAggregatedOps.cpp
@@ -42,7 +42,7 @@ struct DecomposeAggregatedOps
void runOnOperation() override {
RewritePatternSet patterns(getOperation().getContext());
patterns.add<DecomposeAggregateOpsImpl>(patterns.getContext());
- (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
+ (void)applyPatternsGreedily(getOperation(), std::move(patterns));
}
};

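Note: this is the mechanical rename that recurs through the rest of the PR. Upstream folded the `AndFold` suffix into the driver itself: `applyPatternsAndFoldGreedily` became `applyPatternsGreedily`, with the same optional `GreedyRewriteConfig`. Sketch of the call shape:

```cpp
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"

// Illustrative wrapper for the renamed greedy driver entry point.
static void runGreedily(mlir::Operation *root,
                        mlir::RewritePatternSet &&patterns) {
  // Was: (void)applyPatternsAndFoldGreedily(root, std::move(patterns));
  (void)mlir::applyPatternsGreedily(root, std::move(patterns));
}
```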
3 changes: 1 addition & 2 deletions lib/gc/Transforms/DecomposeTensorOperation.cpp
@@ -170,8 +170,7 @@ struct DecomposeTensorOperationPass
patterns.add<DecomposeGatherOp>(patterns.getContext());
tensor::populateDecomposeTensorConcatPatterns(patterns);

- if (failed(applyPatternsAndFoldGreedily(getOperation(),
- std::move(patterns)))) {
+ if (failed(applyPatternsGreedily(getOperation(), std::move(patterns)))) {
return signalPassFailure();
}
}
7 changes: 3 additions & 4 deletions lib/gc/Transforms/DeepTileContractionOp.cpp
@@ -405,7 +405,7 @@ generateOuterLoop(RewriterBase &b, linalg::LinalgOp linalgOp,
// the extra copy generated by bufferization. So remove the dummy loop
// at this early stage.
if (!isDummyLoop(tilingResult->loops.back())) {
- b.replaceOp(currentOp, tilingResult->replacements);
+ b.replaceOp(currentOp, tilingResult->mergeResult.replacements);
currentOp = dyn_cast<linalg::LinalgOp>(tilingResult->tiledOps.back());
if (iteratorTypes[d] == mlir::utils::IteratorType::reduction)
result.reductionLoops.push_back(tilingResult->loops.back());
@@ -477,7 +477,7 @@ generateOuterLoop(RewriterBase &b, linalg::LinalgOp linalgOp,
b, cast<TilingInterface>(currentOp.getOperation()), tileOption);
if (failed(tilingResult))
return failure();
- b.replaceOp(currentOp, tilingResult->replacements);
+ b.replaceOp(currentOp, tilingResult->mergeResult.replacements);
currentOp = dyn_cast<linalg::LinalgOp>(tilingResult->tiledOps.back());
}
}
@@ -1029,8 +1029,7 @@ struct DeepTileContractionOp
dialect->getCanonicalizationPatterns(patterns);
for (RegisteredOperationName op : ctx.getRegisteredOperations())
op.getCanonicalizationPatterns(patterns, &ctx);
- if (failed(
- applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
+ if (failed(applyPatternsGreedily(getOperation(), std::move(patterns))))
return signalPassFailure();
}
};
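Note: `scf::SCFTilingResult` was restructured upstream; the replacement values now live in a nested `mergeResult` member, hence `tilingResult->replacements` becomes `tilingResult->mergeResult.replacements` here and in IterativeTilingAndFusion.cpp. A sketch of the updated tile-and-replace sequence (assuming the current `tileUsingSCF` entry point):

```cpp
#include "mlir/Dialect/SCF/Transforms/TileUsingInterface.h"

// Hedged sketch: tile `op` and wire the replacements back in.
static mlir::LogicalResult
tileAndReplace(mlir::RewriterBase &rewriter, mlir::TilingInterface op,
               const mlir::scf::SCFTilingOptions &options) {
  mlir::FailureOr<mlir::scf::SCFTilingResult> tilingResult =
      mlir::scf::tileUsingSCF(rewriter, op, options);
  if (mlir::failed(tilingResult))
    return mlir::failure();
  // Replacements moved into the nested mergeResult member.
  rewriter.replaceOp(op, tilingResult->mergeResult.replacements);
  return mlir::success();
}
```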
3 changes: 1 addition & 2 deletions lib/gc/Transforms/FoldTensorOperation.cpp
@@ -44,8 +44,7 @@ struct FoldTensorOperationPass
// Use to remove useless tensor operation like extract or
// insert slice.
config.strictMode = GreedyRewriteStrictness::ExistingOps;
- (void)applyPatternsAndFoldGreedily(getOperation(), std::move(pattern),
- config);
+ (void)applyPatternsGreedily(getOperation(), std::move(pattern), config);
}
};
} // namespace
2 changes: 1 addition & 1 deletion lib/gc/Transforms/GPU/AllocsToSLM.cpp
@@ -152,7 +152,7 @@ struct AllocsToSLM : public gc::impl::AllocsToSLMBase<AllocsToSLM> {

RewritePatternSet patterns(ctx);
patterns.add<ConvertAlloc>(patterns.getContext());
- (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
+ (void)applyPatternsGreedily(getOperation(), std::move(patterns));
}
};

6 changes: 3 additions & 3 deletions lib/gc/Transforms/GPU/IMEX/LinalgToXeGPU.cpp
@@ -2124,17 +2124,17 @@ struct LinalgToXeGPU : public gc::impl::LinalgToXeGPUBase<LinalgToXeGPU> {
// Run GEMM pattern first to allow fusion with its consumers.
RewritePatternSet gemmPatterns(&getContext());
populateLinalgGemmToXeGPUPatterns(gemmPatterns, options);
- (void)applyPatternsAndFoldGreedily(getOperation(), std::move(gemmPatterns));
+ (void)applyPatternsGreedily(getOperation(), std::move(gemmPatterns));

// Convert memory fill ops.
RewritePatternSet fillPatterns(&getContext());
populateLinalgMemoryFillToXeGPUPatterns(fillPatterns, options);
- (void)applyPatternsAndFoldGreedily(getOperation(), std::move(fillPatterns));
+ (void)applyPatternsGreedily(getOperation(), std::move(fillPatterns));

// Convert other remaining ops.
RewritePatternSet patterns(&getContext());
populateLinalgEltwiseToXeGPUPatterns(patterns, options);
- (void)applyPatternsAndFoldGreedily(getOperation(), std::move(patterns));
+ (void)applyPatternsGreedily(getOperation(), std::move(patterns));
}
};

3 changes: 2 additions & 1 deletion lib/gc/Transforms/GPU/Pipeline.cpp
@@ -154,7 +154,8 @@ void populateGPUPipeline(OpPassManager &pm,
pm.addPass(createGpuKernelOutliningPass());
pm.addPass(createConvertXeVMToLLVMPass());
pm.addPass(createGpuXeVMAttachTarget());
- pm.addNestedPass<gpu::GPUModuleOp>(createConvertGpuOpsToLLVMSPVOps());
+ pm.addNestedPass<gpu::GPUModuleOp>(
+     createConvertGpuOpsToLLVMSPVOps({.use64bitIndex = true}));
pm.addNestedPass<gpu::GPUModuleOp>(createConvertIndexToLLVMPass());
pm.addNestedPass<gpu::GPUModuleOp>(createArithToLLVMConversionPass());
pm.addPass(createReconcileUnrealizedCastsPass());
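Note: the GPU-to-LLVM-SPV conversion now takes an options struct, and the pipeline opts into 64-bit index computations with a C++20 designated initializer. Sketch (the include path follows upstream's layout and is our assumption):

```cpp
#include "mlir/Conversion/GPUToLLVMSPV/GPUToLLVMSPVPass.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Pass/PassManager.h"

// Illustrative: enable 64-bit index lowering in the conversion pass.
static void addGpuToLLVMSPV(mlir::OpPassManager &pm) {
  pm.addNestedPass<mlir::gpu::GPUModuleOp>(
      mlir::createConvertGpuOpsToLLVMSPVOps({.use64bitIndex = true}));
}
```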
2 changes: 1 addition & 1 deletion lib/gc/Transforms/IterativeTilingAndFusion.cpp
@@ -813,7 +813,7 @@ void iterativeTilingAndFusionUntilExhaustion(
defaultTilingOfType(rewriter, op, isaOpTy, cfg);
if (succeeded(tilingResult)) {
tiledOps.insert(tilingResult->tiledOps[0]);
- rewriter.replaceOp(op, tilingResult->replacements);
+ rewriter.replaceOp(op, tilingResult->mergeResult.replacements);
break;
}
}
11 changes: 5 additions & 6 deletions lib/gc/Transforms/LowerToTileVector.cpp
@@ -614,17 +614,16 @@ struct LowerToTileVectorPass
// Init patterns use to remove useless tensor operation like extract or
// insert slice.
configInit.strictMode = GreedyRewriteStrictness::ExistingOps;
- (void)applyPatternsAndFoldGreedily(funcOp, std::move(patternsInit),
- configInit);
+ (void)applyPatternsGreedily(funcOp, std::move(patternsInit), configInit);

RewritePatternSet firstPatterns(ctx);
// All the dynamic shape will reject to lower.
populateLowerToTileVectorPatterns(firstPatterns);
GreedyRewriteConfig configFirstPn;
// We only apply the lowering pattern on existing operations
configFirstPn.strictMode = GreedyRewriteStrictness::ExistingOps;
- (void)applyPatternsAndFoldGreedily(funcOp, std::move(firstPatterns),
- configFirstPn);
+ (void)applyPatternsGreedily(funcOp, std::move(firstPatterns),
+     configFirstPn);
// Error case:
// ```
// linalg.copy : <1x32xf32>
@@ -649,10 +648,10 @@ struct LowerToTileVectorPass
vector::populateVectorTransferPermutationMapLoweringPatterns(secondPattern);
// Remove unnessary broadcast operation
vector::populateSinkVectorOpsPatterns(secondPattern);
- // Second fold (with the help of the `applyPatternsAndFoldGreedily`
+ // Second fold (with the help of the `applyPatternsGreedily`
// function) can help us to eliminate redundant operation like consecutive
// read and write.
- (void)applyPatternsAndFoldGreedily(funcOp, std::move(secondPattern));
+ (void)applyPatternsGreedily(funcOp, std::move(secondPattern));
// may need other patterns to reduce redundant operations
}
};
2 changes: 1 addition & 1 deletion lib/gc/Transforms/MemRefToCPURuntime.cpp
@@ -51,7 +51,7 @@ uint64_t getMemRefSizeInBytes(MemRefType memrefType) {
if (!layout.isIdentity()) {
int64_t offset;
SmallVector<int64_t, 4> strides;
- if (failed(getStridesAndOffset(memrefType, strides, offset))) {
+ if (failed(memrefType.getStridesAndOffset(strides, offset))) {
return UINT64_MAX;
}

3 changes: 1 addition & 2 deletions lib/gc/Transforms/MergeNestedForall.cpp
@@ -82,8 +82,7 @@ struct MergeNestedForall

patterns.add<MergeNestedForallLoops>(patterns.getContext());

- if (failed(
- applyPatternsAndFoldGreedily(getOperation(), std::move(patterns))))
+ if (failed(applyPatternsGreedily(getOperation(), std::move(patterns))))
return signalPassFailure();
}
};
@@ -391,7 +391,7 @@ class ConvertLinalgToMicrokernel
patterns.add<ConvertContractionOpToBrgemmRewriter<linalg::GenericOp>>(
&getContext());
FrozenRewritePatternSet patternSet(std::move(patterns));
- if (failed(applyPatternsAndFoldGreedily(getOperation(), patternSet)))
+ if (failed(applyPatternsGreedily(getOperation(), patternSet)))
signalPassFailure();
}
};
@@ -63,7 +63,7 @@ class ConvertBrgemmDispatchOpRewriter
SmallVector<Value, 10> operands;
SmallVector<Type, 10> operandTypes;
IntegerType integer64 = IntegerType::get(rewriter.getContext(), 64);
- FloatType float32 = FloatType::getF32(rewriter.getContext());
+ FloatType float32 = Float32Type::get(rewriter.getContext());

// M, N, K, LDA, LDB, LDC, stride_a, stride_b
// they are in the same order with BrgemmDispatchOp inputs
@@ -215,7 +215,7 @@ class ConvertMicrokernelToDnnlFunc
&getContext());

FrozenRewritePatternSet patternSet(std::move(patterns));
- if (failed(applyPatternsAndFoldGreedily(getOperation(), patternSet)))
+ if (failed(applyPatternsGreedily(getOperation(), patternSet)))
signalPassFailure();
}
};
3 changes: 1 addition & 2 deletions lib/gc/Transforms/Microkernel/EarlyDispatchMicrokernel.cpp
@@ -205,8 +205,7 @@ class EarlyDispatchMicrokernel
// Ignore newly created Ops
GreedyRewriteConfig config;
config.strictMode = GreedyRewriteStrictness::ExistingOps;
- if (failed(
- applyPatternsAndFoldGreedily(getOperation(), patternSet, config)))
+ if (failed(applyPatternsGreedily(getOperation(), patternSet, config)))
signalPassFailure();
}
};
2 changes: 1 addition & 1 deletion lib/gc/Transforms/Microkernel/ExpandMicrokernel.cpp
@@ -275,7 +275,7 @@ class ExpandMicrokernel
patterns.add<ExpandMicrokernelBrgemmRewriter>(&getContext());

FrozenRewritePatternSet patternSet(std::move(patterns));
- if (failed(applyPatternsAndFoldGreedily(getOperation(), patternSet)))
+ if (failed(applyPatternsGreedily(getOperation(), patternSet)))
signalPassFailure();
}
};
@@ -296,7 +296,7 @@ class MergeBranchMicrokernelContext
patterns.add<ScfIndexSwitchRewriter>(&getContext(), dispatchAnalysis);
FrozenRewritePatternSet patternSet(std::move(patterns));

- if (failed(applyPatternsAndFoldGreedily(getOperation(), patternSet))) {
+ if (failed(applyPatternsGreedily(getOperation(), patternSet))) {
signalPassFailure();
}
}
@@ -421,8 +421,7 @@ class MicrokernelInvariantCodeMotion
// Ignore newly created Ops
GreedyRewriteConfig config;
config.strictMode = GreedyRewriteStrictness::ExistingOps;
- if (failed(
- applyPatternsAndFoldGreedily(getOperation(), patternSet, config))) {
+ if (failed(applyPatternsGreedily(getOperation(), patternSet, config))) {
signalPassFailure();
}
}
3 changes: 1 addition & 2 deletions lib/gc/Transforms/OneDNNGraphToLinalg.cpp
@@ -515,8 +515,7 @@ struct ConvertOneDNNGraphToLinalg
MatMulOpBatchFlatten
// clang-format on
>(ctx);
- if (failed(applyPatternsAndFoldGreedily(getOperation(),
- std::move(patternsPre)))) {
+ if (failed(applyPatternsGreedily(getOperation(), std::move(patternsPre)))) {
signalPassFailure();
}
// ==========================================
2 changes: 1 addition & 1 deletion lib/gc/Transforms/Utils/ValueUtils.cpp
@@ -110,7 +110,7 @@ FailureOr<SmallVector<int64_t>> getStrides(Value value) {
auto memrefType = cast<MemRefType>(valueType);
SmallVector<int64_t> strides;
int64_t offset;
- if (failed(getStridesAndOffset(memrefType, strides, offset)))
+ if (failed(memrefType.getStridesAndOffset(strides, offset)))
return failure();
return strides;
}
7 changes: 3 additions & 4 deletions src/dnnl/JsonParser.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2024 Intel Corporation
+ * Copyright (C) 2025 Intel Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
@@ -12,7 +12,6 @@
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions
* and limitations under the License.
- *
* SPDX-License-Identifier: Apache-2.0
*/

@@ -179,8 +178,8 @@ class JsonParser {
GC_DTYPE("u8", b.getIntegerType(8, true)),
GC_DTYPE("f64", b.getF64Type()),
GC_DTYPE("boolean", b.getI1Type()),
GC_DTYPE("f8_e5m2", b.getFloat8E5M2Type()),
GC_DTYPE("f8_e4m3", b.getFloat8E4M3FNType()),
GC_DTYPE("f8_e5m2", mlir::Float8E5M2Type::get(b.getContext())),
GC_DTYPE("f8_e4m3", mlir::Float8E4M3Type::get(b.getContext())),
GC_DTYPE("s4", b.getIntegerType(4, false)),
GC_DTYPE("u4", b.getIntegerType(4, true)),
};
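Note: the `Builder` convenience getters for FP8 types were removed upstream, so the table constructs the types directly. The mapping also changes target: `f8_e4m3` now resolves to `Float8E4M3Type` where it previously used the `E4M3FN` variant. Sketch of the new spelling:

```cpp
#include "mlir/IR/Builders.h"
#include "mlir/IR/BuiltinTypes.h"

// Illustrative: direct construction replaces the removed Builder getters.
static void makeFp8Types(mlir::OpBuilder &b) {
  // Was: b.getFloat8E5M2Type() and b.getFloat8E4M3FNType()
  mlir::Type f8e5m2 = mlir::Float8E5M2Type::get(b.getContext());
  mlir::Type f8e4m3 = mlir::Float8E4M3Type::get(b.getContext());
  (void)f8e5m2;
  (void)f8e4m3;
}
```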