#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"

void XeGPUDialect::initialize() {
  addTypes<
#define GET_TYPEDEF_LIST
#include <mlir/Dialect/XeGPU/IR/XeGPUTypes.cpp.inc>
      >();
  addOperations<
#define GET_OP_LIST
#include <mlir/Dialect/XeGPU/IR/XeGPU.cpp.inc>
      >();
  addAttributes<
#define GET_ATTRDEF_LIST
#include <mlir/Dialect/XeGPU/IR/XeGPUAttrs.cpp.inc>
      >();
}
// Generates the per-id coordinates of all sub-shapes assigned to one
// subgroup/lane when distributing `srcShape` over `subShapesLayout` units
// of `subShape`.
static SmallVector<SmallVector<Value>>
genCoordinates(OpBuilder &builder, Location loc,
               SmallVector<Value> delinearizedId,
               ArrayRef<int64_t> subShapesLayout, ArrayRef<int64_t> subShape,
               ArrayRef<int64_t> srcShape) {
  SmallVector<SmallVector<Value>> coordinates;

  // Shape of one distribution unit: layout * subShape, clamped to srcShape.
  SmallVector<int64_t> distUnitShape = llvm::map_to_vector(
      llvm::zip_equal(srcShape,
                      computeElementwiseMul(subShapesLayout, subShape)),
      [](const auto &t) { return std::min(std::get<0>(t), std::get<1>(t)); });

  // Offset of this id's sub-shape within a distribution unit.
  SmallVector<Value> distUnitLocalOffset = llvm::map_to_vector(
      llvm::zip(delinearizedId, subShape), [&](const auto &t) -> Value {
        Value subShapeVal =
            builder.createOrFold<arith::ConstantIndexOp>(loc, std::get<1>(t));
        return builder.createOrFold<arith::MulIOp>(loc, std::get<0>(t),
                                                   subShapeVal);
      });

  // For every distribution unit tiling srcShape, the coordinate of this
  // id's sub-shape is (unitBase + localOffset) mod srcShape.
  for (SmallVector<int64_t> unitBase :
       StaticTileOffsetRange(srcShape, distUnitShape)) {
    SmallVector<Value> base =
        llvm::map_to_vector(unitBase, [&](int64_t d) -> Value {
          return builder.createOrFold<arith::ConstantIndexOp>(loc, d);
        });
    SmallVector<Value> adds =
        llvm::map_to_vector(llvm::zip_equal(base, distUnitLocalOffset),
                            [&](const auto &t) -> Value {
                              return builder.createOrFold<arith::AddIOp>(
                                  loc, std::get<0>(t), std::get<1>(t));
                            });
    SmallVector<Value> mods = llvm::map_to_vector(
        llvm::zip_equal(adds, srcShape), [&](const auto &t) -> Value {
          Value shapeVal =
              builder.createOrFold<arith::ConstantIndexOp>(loc, std::get<1>(t));
          return builder.createOrFold<arith::RemUIOp>(loc, std::get<0>(t),
                                                      shapeVal);
        });
    coordinates.push_back(mods);
  }
  return coordinates;
}
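// Worked example for genCoordinates (illustrative, not from the source):
// with srcShape = [8], subShapesLayout = [2], and subShape = [2], one
// distribution unit spans min(8, 2*2) = 4 elements, so srcShape is covered
// by two units with bases 0 and 4. Id 0 (localOffset 0) gets coordinates
// {0, 4} and id 1 (localOffset 2) gets {2, 6}: each id owns one 2-element
// sub-shape per unit, round-robin across the units.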
// Checks whether `shape` can be evenly distributed under the given layout
// attribute, level by level (subgroup, instruction, lane).
bool XeGPUDialect::isEvenlyDistributable(llvm::ArrayRef<int64_t> shape,
                                         xegpu::DistributeLayoutAttr attr) {
  assert(attr && "Layout attribute is missing.");

  // Attempts to distribute `shape` over `layout` and then `data`, returning
  // the resulting sub-shape on success and std::nullopt on failure. When
  // `rr` (round-robin) is set, data tiles larger than the distributed shape
  // are also accepted.
  auto tryDistribute = [&](llvm::ArrayRef<int64_t> shape,
                           SmallVector<int64_t> layout,
                           SmallVector<int64_t> data, bool rr = true)
      -> std::optional<SmallVector<int64_t>> {
    llvm::SmallVector<int64_t> newShape(shape);
    if (layout.size()) {
      if (layout.size() != shape.size())
        return std::nullopt;
      auto ratio = computeShapeRatio(shape, layout);
      if (ratio.has_value()) {
        newShape = ratio.value();
      } else {
        return std::nullopt;
      }
    }
    if (data.size()) {
      if (data.size() != shape.size())
        return std::nullopt;
      auto ratio = computeShapeRatio(newShape, data);
      // In the round-robin case the data tile may be larger than the
      // already-distributed shape; accept the inverse ratio as well.
      if (!ratio.has_value() && rr)
        ratio = computeShapeRatio(data, newShape);
      if (!ratio.has_value())
        return std::nullopt;
    }
    return newShape;
  };
  // Check the sg_layout and sg_data.
  auto maybeSgShape = tryDistribute(shape, attr.getEffectiveSgLayoutAsInt(),
                                    attr.getEffectiveSgDataAsInt());
  if (!maybeSgShape)
    return false;
  auto sgShape = maybeSgShape.value();

  // Check the inst_data; round-robin is not allowed at this level.
  auto maybeInstShape =
      tryDistribute(sgShape, {}, attr.getEffectiveInstDataAsInt(), false);
  if (!maybeInstShape)
    return false;
  auto instShape = maybeInstShape.value();

  // Check the lane_layout and lane_data.
  auto maybeLaneShape =
      tryDistribute(instShape, attr.getEffectiveLaneLayoutAsInt(),
                    attr.getEffectiveLaneDataAsInt(), false);
  return maybeLaneShape.has_value();
}
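// Worked example for isEvenlyDistributable (illustrative): shape = [128, 64]
// with sg_layout = [4, 2] and sg_data = [32, 32] distributes evenly, since
// 128 / (4 * 32) = 1 and 64 / (2 * 32) = 1 distribution round per dimension.
// A shape of [100, 64] fails because 100 is not divisible by 4 * 32.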
BlockTensorDescAttr BlockTensorDescAttr::get(mlir::MLIRContext *context,
                                             xegpu::MemorySpace memory_space,
                                             int array_length,
                                             bool boundary_check) {
  auto scopeAttr = MemorySpaceAttr::get(context, memory_space);
  auto lengthAttr =
      IntegerAttr::get(IntegerType::get(context, 64), array_length);
  auto boundaryAttr = BoolAttr::get(context, boundary_check);
  return Base::get(context, scopeAttr, lengthAttr, boundaryAttr);
}
bool BlockTensorDescAttr::hasDefaultsOnly() {
  return getMemorySpace().getValue() == xegpu::MemorySpace::Global &&
         getArrayLength().getInt() == 1 && getBoundaryCheck().getValue();
}
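// Example (illustrative): the default encoding is global memory,
// array_length = 1, boundary_check = true. hasDefaultsOnly() returning true
// lets TensorDescType::print elide the encoding entirely, so a default
// descriptor prints as just the shape and element type.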
ScatterTensorDescAttr
ScatterTensorDescAttr::get(mlir::MLIRContext *context,
                           xegpu::MemorySpace memory_space, int chunk_size) {
  auto scopeAttr = MemorySpaceAttr::get(context, memory_space);
  auto chunkSizeAttr =
      IntegerAttr::get(IntegerType::get(context, 64), chunk_size);
  return Base::get(context, scopeAttr, chunkSizeAttr);
}
LogicalResult ScatterTensorDescAttr::verify(
    llvm::function_ref<mlir::InFlightDiagnostic()> emitError,
    MemorySpaceAttr memory_space, IntegerAttr chunk_size) {
  int64_t chunkSize = chunk_size.getInt();
  if (chunkSize <= 0)
    return emitError() << "invalid chunk size";
  return success();
}
LogicalResult
LayoutAttr::verify(llvm::function_ref<mlir::InFlightDiagnostic()> emitError,
                   DenseI32ArrayAttr sg_layout, DenseI32ArrayAttr sg_data,
                   DenseI32ArrayAttr inst_data, DenseI32ArrayAttr lane_layout,
                   DenseI32ArrayAttr lane_data, DenseI32ArrayAttr order) {
  // A layout must specify at least one level of distribution.
  if (!sg_layout && !inst_data && !lane_layout) {
    return emitError()
           << "expected at least one of sg_layout, inst_data or lane_layout";
  }

  // All specified fields must agree on the rank.
  if (sg_layout && inst_data && sg_layout.size() != inst_data.size()) {
    return emitError()
           << "expected sg_layout and inst_data to have the same rank";
  }

  if (sg_layout && lane_layout && sg_layout.size() != lane_layout.size()) {
    return emitError()
           << "expected sg_layout and lane_layout to have the same rank";
  }

  if (inst_data && lane_layout && inst_data.size() != lane_layout.size()) {
    return emitError() << "expected inst_data and lane_layout to have the same "
                          "rank, got inst_data "
                       << inst_data.size() << ", lane_layout "
                       << lane_layout.size();
  }

  if (sg_data) {
    if (!sg_layout)
      return emitError() << "expected sg_layout being used with sg_data";
    if (sg_data.size() != sg_layout.size())
      return emitError()
             << "expected sg_data and sg_layout to have the same rank";
  }

  if (lane_data) {
    if (!lane_layout)
      return emitError() << "expected lane_layout being used with lane_data";
    if (lane_data.size() != lane_layout.size())
      return emitError()
             << "expected lane_data and lane_layout to have the same rank";
  }

  if (order) {
    if (!sg_layout && !lane_layout)
      return emitError()
             << "expected sg_layout/lane_layout being used with order";
    if (sg_layout && order.size() != sg_layout.size())
      return emitError()
             << "expected order and sg_layout to have the same rank";
    if (lane_layout && order.size() != lane_layout.size())
      return emitError()
             << "expected order and lane_layout to have the same rank";
  }

  return success();
}
FailureOr<SmallVector<Value>>
LayoutAttr::delinearizeId(OpBuilder &builder, Location loc, Value linearId) {
  // Pick the layout that is being distributed over: sg_layout at workgroup
  // level, lane_layout at subgroup level.
  SmallVector<int64_t> sgLayoutInt;
  if (isForWorkgroup()) {
    sgLayoutInt = getEffectiveSgLayoutAsInt();
  } else if (isForSubgroup()) {
    sgLayoutInt = getEffectiveLaneLayoutAsInt();
  } else {
    return failure();
  }

  DenseI32ArrayAttr orderAttr = getOrder();

  // Get the delinearization order; order[0] is the fastest-varying dim.
  SmallVector<int64_t> order;
  if (orderAttr && !orderAttr.empty()) {
    order = llvm::to_vector(llvm::map_range(
        orderAttr.asArrayRef(),
        [](int32_t idx) { return static_cast<int64_t>(idx); }));
  } else {
    // Default order: reversed dimension sequence (row-major
    // delinearization).
    order = llvm::to_vector(
        llvm::reverse(llvm::seq<int64_t>(0, sgLayoutInt.size())));
  }

  if (order.size() != sgLayoutInt.size()) {
    return failure();
  }

  SmallVector<Value> result(sgLayoutInt.size());
  Value remaining = linearId;

  // Peel off one dimension per step: result[dim] = remaining % size,
  // remaining = remaining / size.
  for (size_t i = 0; i < order.size(); ++i) {
    int64_t dimIdx = order[i];
    int64_t dimSize = sgLayoutInt[dimIdx];
    Value dimSizeVal =
        builder.createOrFold<arith::ConstantIndexOp>(loc, dimSize);
    result[dimIdx] =
        builder.createOrFold<arith::RemUIOp>(loc, remaining, dimSizeVal);
    // The division is not needed after the last dimension.
    if (i < order.size() - 1) {
      remaining =
          builder.createOrFold<arith::DivUIOp>(loc, remaining, dimSizeVal);
    }
  }
  return result;
}
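// Worked example for delinearizeId (illustrative): with sg_layout = [4, 8]
// and the default order [1, 0], linearId = 10 delinearizes to
// [10 / 8 % 4, 10 % 8] = [1, 2]; dimension 1 varies fastest.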
FailureOr<SmallVector<SmallVector<Value>>>
LayoutAttr::computeDistributedCoords(OpBuilder &builder, Location loc,
                                     Value linearId, ArrayRef<int64_t> shape) {
  SmallVector<int64_t> layout;
  SmallVector<int64_t> subShape;
  if (isForWorkgroup()) {
    layout = getEffectiveSgLayoutAsInt();
    subShape = getEffectiveSgDataAsInt();
  } else if (isForSubgroup()) {
    layout = getEffectiveLaneLayoutAsInt();
    subShape = getEffectiveLaneDataAsInt();
  } else {
    return failure();
  }

  // If data is not specified, derive the sub-shape from shape and layout.
  if (subShape.empty()) {
    auto derivedShape = computeShapeRatio(shape, layout);
    if (!derivedShape)
      return failure();
    subShape = derivedShape.value();
  }

  auto maybeIds = delinearizeId(builder, loc, linearId);
  if (failed(maybeIds))
    return failure();
  SmallVector<Value> ids = *maybeIds;

  return genCoordinates(builder, loc, ids, layout, subShape, shape);
}
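// Example (illustrative): for shape = [32, 32] with sg_layout = [2, 2] and
// sg_data = [16, 16], the distribution unit covers the whole shape, so each
// subgroup id (i, j) receives exactly one coordinate, (i * 16, j * 16).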
bool LayoutAttr::isEqualTo(const xegpu::DistributeLayoutAttr &other) {
  // A LayoutAttr never equals a SliceAttr.
  if (dyn_cast<xegpu::SliceAttr>(other))
    return false;
  return *this == dyn_cast<xegpu::LayoutAttr>(other);
}
// Returns a copy of this layout in which the data fields (sg_data,
// inst_data, lane_data) of every dimension listed in `unitDims` are set
// to 1.
DistributeLayoutAttr LayoutAttr::setUnitDimData(SetVector<int64_t> unitDims) {
  auto sgDataOpt = getSgData();
  auto instDataOpt = getInstData();
  auto laneDataOpt = getLaneData();

  SmallVector<int32_t> sgData;
  SmallVector<int32_t> instData;
  SmallVector<int32_t> laneData;

  if (sgDataOpt)
    sgData = llvm::to_vector(sgDataOpt.asArrayRef());
  if (instDataOpt)
    instData = llvm::to_vector(instDataOpt.asArrayRef());
  if (laneDataOpt)
    laneData = llvm::to_vector(laneDataOpt.asArrayRef());

  for (auto dim : unitDims) {
    if (dim < static_cast<int64_t>(sgData.size()))
      sgData[dim] = 1;
    if (dim < static_cast<int64_t>(instData.size()))
      instData[dim] = 1;
    if (dim < static_cast<int64_t>(laneData.size()))
      laneData[dim] = 1;
  }

  return LayoutAttr::get(
      getContext(), getSgLayout(),
      sgData.empty() ? DenseI32ArrayAttr()
                     : DenseI32ArrayAttr::get(getContext(), sgData),
      instData.empty() ? DenseI32ArrayAttr()
                       : DenseI32ArrayAttr::get(getContext(), instData),
      getLaneLayout(),
      laneData.empty() ? DenseI32ArrayAttr()
                       : DenseI32ArrayAttr::get(getContext(), laneData),
      getOrder());
}
// Returns a copy of this layout in which the layout fields (sg_layout,
// lane_layout) of every dimension listed in `unitDims` are set to 1.
DistributeLayoutAttr LayoutAttr::setUnitDimLayout(SetVector<int64_t> unitDims) {
  auto sgLayoutOpt = getSgLayout();
  auto laneLayoutOpt = getLaneLayout();

  SmallVector<int32_t> sgLayout;
  SmallVector<int32_t> laneLayout;

  if (sgLayoutOpt)
    sgLayout = llvm::to_vector(sgLayoutOpt.asArrayRef());
  if (laneLayoutOpt)
    laneLayout = llvm::to_vector(laneLayoutOpt.asArrayRef());

  for (auto dim : unitDims) {
    if (dim < static_cast<int64_t>(sgLayout.size()))
      sgLayout[dim] = 1;
    if (dim < static_cast<int64_t>(laneLayout.size()))
      laneLayout[dim] = 1;
  }

  return LayoutAttr::get(
      getContext(),
      sgLayout.empty() ? DenseI32ArrayAttr()
                       : DenseI32ArrayAttr::get(getContext(), sgLayout),
      getSgData(), getInstData(),
      laneLayout.empty() ? DenseI32ArrayAttr()
                         : DenseI32ArrayAttr::get(getContext(), laneLayout),
      getLaneData(), getOrder());
}
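// Example (illustrative): for #xegpu.layout<sg_layout = [4, 2],
// sg_data = [16, 16]>, setUnitDimLayout({1}) yields sg_layout = [4, 1],
// while setUnitDimData({1}) yields sg_data = [16, 1].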
LogicalResult
SliceAttr::verify(llvm::function_ref<InFlightDiagnostic()> emitError,
                  xegpu::DistributeLayoutAttr parent, DenseI64ArrayAttr dims) {
  if (!parent || !dims)
    return emitError() << "expected parent layout and dims attribute";

  int64_t rank = parent.getRank();

  // Verify that each dim is in range [0, rank) and is not repeated.
  llvm::SmallDenseSet<int64_t> seen;
  for (int64_t dim : dims.asArrayRef()) {
    if (dim < 0 || dim >= rank)
      return emitError() << "invalid dim (" << dim << ") in slice attribute.";
    if (!seen.insert(dim).second)
      return emitError() << "repeated dim (" << dim << ") in slice attribute.";
  }
  return success();
}
// Folds a chain of nested SliceAttrs into a single SliceAttr over the
// innermost LayoutAttr, composing all sliced dimensions.
SliceAttr SliceAttr::flatten() const {
  xegpu::DistributeLayoutAttr parent = getParent();
  SmallVector<DenseI64ArrayAttr> slicedDims({getDims()});

  while (auto sliceAttr = dyn_cast<xegpu::SliceAttr>(parent)) {
    parent = sliceAttr.getParent();
    slicedDims.push_back(sliceAttr.getDims());
  }

  auto layoutAttr = dyn_cast<xegpu::LayoutAttr>(parent);
  SmallVector<int64_t> indices =
      llvm::to_vector(llvm::seq<int64_t>(0, layoutAttr.getRank()));

  // Apply the slices innermost-first to find the dims of the original
  // layout that survive all of them.
  SmallVector<int64_t> remainingDims(indices);
  for (auto dims : llvm::reverse(slicedDims))
    remainingDims = XeGPUDialect::slice(llvm::ArrayRef<int64_t>(remainingDims),
                                        dims.asArrayRef());

  // The flattened sliced dims are the complement of the surviving dims.
  SmallVector<int64_t> flattendDims = XeGPUDialect::slice(
      llvm::ArrayRef<int64_t>(indices), llvm::ArrayRef<int64_t>(remainingDims));

  return xegpu::SliceAttr::get(
      getContext(), layoutAttr,
      DenseI64ArrayAttr::get(getContext(), flattendDims));
}
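// Worked example for flatten (illustrative): for a rank-3 LayoutAttr L,
// slice(slice(L, dims = [1]), dims = [0]) flattens to slice(L, dims =
// [0, 1]): slicing dim 1 leaves original dims {0, 2}, and slicing dim 0 of
// that reduced space removes original dim 0.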
FailureOr<SmallVector<Value>>
SliceAttr::delinearizeId(OpBuilder &builder, Location loc, Value linearId) {
  // Delegate to the flattened parent layout, which carries the full rank.
  SliceAttr attr = flatten();
  auto parent = dyn_cast<LayoutAttr>(attr.getParent());
  return parent.delinearizeId(builder, loc, linearId);
}
FailureOr<SmallVector<SmallVector<Value>>>
SliceAttr::computeDistributedCoords(OpBuilder &builder, Location loc,
                                    Value linearId, ArrayRef<int64_t> shape) {
  assert(getRank() == static_cast<int64_t>(shape.size()) && "invalid shape.");
  if (!isForWorkgroup())
    return failure();

  SmallVector<int64_t> layout;
  SmallVector<int64_t> subShape;
  if (isForWorkgroup()) {
    layout = getEffectiveSgLayoutAsInt();
    subShape = getEffectiveSgDataAsInt();
  } else if (isForSubgroup()) {
    layout = getEffectiveLaneLayoutAsInt();
    subShape = getEffectiveLaneDataAsInt();
  } else {
    return failure();
  }

  // If data is not specified, derive the sub-shape from shape and layout.
  if (subShape.empty()) {
    auto derivedShape = computeShapeRatio(shape, layout);
    if (!derivedShape)
      return failure();
    subShape = derivedShape.value();
  }

  auto maybeIds = delinearizeId(builder, loc, linearId);
  if (failed(maybeIds))
    return failure();

  // The delinearized ids are computed against the parent layout; keep only
  // the ids of the dimensions that survive the slice.
  ArrayRef<int64_t> dims = flatten().getDims().asArrayRef();
  SmallVector<Value> sgIds =
      XeGPUDialect::slice(ArrayRef<Value>(*maybeIds), dims);

  return genCoordinates(builder, loc, sgIds, layout, subShape, shape);
}
// Returns true if this SliceAttr is a slice of `other`: both must derive
// from the same flattened parent layout, and every dim sliced by `other`
// must also be sliced by this attribute.
bool SliceAttr::isSliceOf(const xegpu::DistributeLayoutAttr &other) {
  auto flattenedThis = flatten();

  // If other is a LayoutAttr, this is a slice of it iff it is this slice's
  // flattened parent.
  if (auto otherLayout = dyn_cast<xegpu::LayoutAttr>(other))
    return flattenedThis.getParent() == otherLayout;

  auto flattenedOther = dyn_cast<xegpu::SliceAttr>(other).flatten();
  if (flattenedThis.getParent() != flattenedOther.getParent())
    return false;

  llvm::SmallDenseSet<int64_t> thisDims(
      flattenedThis.getDims().asArrayRef().begin(),
      flattenedThis.getDims().asArrayRef().end());
  return llvm::all_of(flattenedOther.getDims().asArrayRef(),
                      [&](int64_t dim) { return thisDims.contains(dim); });
}
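// Example (illustrative): slice(L, dims = [0, 1]) is a slice of
// slice(L, dims = [0]), since every dim sliced by the latter is also sliced
// by the former; the converse does not hold.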
bool SliceAttr::isEqualTo(const xegpu::DistributeLayoutAttr &other) {
  // A SliceAttr never equals a LayoutAttr.
  if (dyn_cast<xegpu::LayoutAttr>(other))
    return false;
  // Compare in flattened form so that nested slices compare structurally.
  auto flattenedThis = flatten();
  auto flattenedOther = dyn_cast<xegpu::SliceAttr>(other).flatten();
  return ((flattenedThis.getParent() == flattenedOther.getParent()) &&
          (flattenedThis.getDims() == flattenedOther.getDims()));
}
// Translates unit-dim indices expressed in the sliced (reduced) space back
// to the corresponding dimension indices of the parent layout.
static SetVector<int64_t>
adjustUnitDimsWithSliceDims(const SetVector<int64_t> &unitDims,
                            ArrayRef<int64_t> sliceDims) {
  int64_t parentRank = sliceDims.size() + unitDims.size();
  llvm::SmallDenseSet<int64_t> slicedDimsSet(sliceDims.begin(),
                                             sliceDims.end());

  // Parent dims that survive the slice, in increasing order.
  SmallVector<int64_t> nonSlicedDims;
  for (int64_t i = 0; i < parentRank; ++i) {
    if (!slicedDimsSet.contains(i))
      nonSlicedDims.push_back(i);
  }

  // Map each unit dim through the surviving-dim table.
  SetVector<int64_t> adjustUnitDims;
  for (auto dim : unitDims) {
    if (dim < static_cast<int64_t>(nonSlicedDims.size())) {
      adjustUnitDims.insert(nonSlicedDims[dim]);
    }
  }
  return adjustUnitDims;
}
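// Worked example (illustrative): with sliceDims = [1] and unitDims = {0, 1}
// in the reduced space, the surviving parent dims are [0, 2], so the
// adjusted unit dims are {0, 2}.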
DistributeLayoutAttr SliceAttr::setUnitDimData(SetVector<int64_t> unitDims) {
  SliceAttr attr = flatten();
  ArrayRef<int64_t> sliceDims = attr.getDims().asArrayRef();
  auto parent = dyn_cast<LayoutAttr>(attr.getParent());
  SetVector<int64_t> adjustUnitDims =
      adjustUnitDimsWithSliceDims(unitDims, sliceDims);
  return SliceAttr::get(getContext(), parent.setUnitDimData(adjustUnitDims),
                        attr.getDims());
}
DistributeLayoutAttr SliceAttr::setUnitDimLayout(SetVector<int64_t> unitDims) {
  SliceAttr attr = flatten();
  ArrayRef<int64_t> sliceDims = attr.getDims().asArrayRef();
  auto parent = dyn_cast<LayoutAttr>(attr.getParent());
  SetVector<int64_t> adjustUnitDims =
      adjustUnitDimsWithSliceDims(unitDims, sliceDims);
  return SliceAttr::get(getContext(), parent.setUnitDimLayout(adjustUnitDims),
                        attr.getDims());
}
LogicalResult
RangeAttr::verify(llvm::function_ref<mlir::InFlightDiagnostic()> emitError,
                  IntegerAttr startOfRange, IntegerAttr endOfRange) {
  if (startOfRange.getInt() >= endOfRange.getInt())
    return emitError() << "'end' : " << endOfRange.getInt()
                       << " must be greater than 'start' : "
                       << startOfRange.getInt();
  return success();
}
mlir::Type TensorDescType::parse(AsmParser &parser) {
  llvm::SmallVector<int64_t> shape;
  mlir::Type elementType;
  mlir::FailureOr<mlir::Attribute> encoding;
  mlir::FailureOr<mlir::Attribute> layout;

  // Parse literal '<'
  if (parser.parseLess())
    return {};

  auto shapeLoc = parser.getCurrentLocation();
  if (mlir::failed(parser.parseDimensionList(shape))) {
    parser.emitError(shapeLoc, "failed to parse parameter 'shape'");
    return {};
  }

  auto elemTypeLoc = parser.getCurrentLocation();
  if (mlir::failed(parser.parseType(elementType))) {
    parser.emitError(elemTypeLoc, "failed to parse parameter 'elementType'");
    return {};
  }

  // Parse the optional encoding and layout attributes.
  while (mlir::succeeded(parser.parseOptionalComma())) {
    mlir::Attribute attr;
    ParseResult res = parser.parseAttribute(attr);
    if (mlir::succeeded(res)) {
      if (mlir::isa<LayoutAttr>(attr)) {
        layout = attr;
        continue;
      }
      if (mlir::isa<BlockTensorDescAttr, ScatterTensorDescAttr>(attr)) {
        encoding = attr;
        continue;
      }
    }
    return {};
  }

  // Parse literal '>'
  if (parser.parseGreater())
    return {};

  MLIRContext *ctxt = parser.getContext();
  return TensorDescType::getChecked(
      [&]() { return parser.emitError(parser.getNameLoc()); }, ctxt, shape,
      elementType, encoding.value_or(BlockTensorDescAttr::get(ctxt)),
      layout.value_or(mlir::Attribute()));
}
void TensorDescType::print(AsmPrinter &printer) const {
  printer << "<";

  auto shape = getShape();
  for (int64_t dim : shape) {
    if (mlir::ShapedType::isDynamic(dim))
      printer << '?';
    else
      printer << dim;
    printer << 'x';
  }

  printer << getElementType();

  // Elide the encoding when it only carries default values.
  auto encoding = getEncoding();
  auto blockAttr = llvm::dyn_cast_if_present<BlockTensorDescAttr>(encoding);
  if (encoding && (!blockAttr || !blockAttr.hasDefaultsOnly()))
    printer << ", " << encoding;

  if (auto layout = getLayout())
    printer << ", " << layout;

  printer << ">";
}
TensorDescType TensorDescType::get(llvm::ArrayRef<int64_t> shape,
                                   mlir::Type elementType, int array_length,
                                   bool boundary_check,
                                   MemorySpace memory_space,
                                   mlir::Attribute layout) {
  auto context = elementType.getContext();
  auto attr = BlockTensorDescAttr::get(context, memory_space, array_length,
                                       boundary_check);
  return Base::get(context, shape, elementType, attr, layout);
}
TensorDescType TensorDescType::get(llvm::ArrayRef<int64_t> shape,
                                   mlir::Type elementType, int chunk_size,
                                   MemorySpace memory_space,
                                   mlir::Attribute layout) {
  auto context = elementType.getContext();
  auto attr = ScatterTensorDescAttr::get(context, memory_space, chunk_size);
  return Base::get(context, shape, elementType, attr, layout);
}
LogicalResult
TensorDescType::verify(llvm::function_ref<InFlightDiagnostic()> emitError,
                       llvm::ArrayRef<int64_t> shape, mlir::Type elementType,
                       mlir::Attribute encoding, mlir::Attribute layout) {
  size_t rank = shape.size();

  if (rank == 0)
    return emitError() << "expected non-zero rank tensor";

  auto blockAttr = mlir::dyn_cast_if_present<BlockTensorDescAttr>(encoding);
  if (blockAttr) {
    MemorySpaceAttr memorySpaceAttr = blockAttr.getMemorySpace();
    if (rank > 1 && memorySpaceAttr &&
        memorySpaceAttr.getValue() == MemorySpace::SLM)
      return emitError() << "SLM is only supported for 1D block tensor";
  }

  // Scattered accesses pack low-precision elements into
  // generalPackedFormatBitSize-bit units; the chunk granularity must
  // respect that packing.
  unsigned bitWidth = elementType.getIntOrFloatBitWidth();
  int chunkAlignmentFactor =
      bitWidth < generalPackedFormatBitSize
          ? generalPackedFormatBitSize / bitWidth
          : 1;
  auto scatterAttr = mlir::dyn_cast_if_present<ScatterTensorDescAttr>(encoding);
  if (scatterAttr) {
    int64_t chunkSize = scatterAttr.getChunkSizeAsInt();
    if (rank == 1 && chunkSize != 1)
      return emitError() << "expected non-contiguous elements for 1D tensor";

    // When chunk size > 1, the last dimension holds the chunk, so it must
    // match the chunk size and be aligned to the packing factor.
    if (chunkSize > 1) {
      if (shape.back() != chunkSize)
        return emitError() << "expected last dim of tensor to match chunk size";
      if (shape.back() % chunkAlignmentFactor != 0)
        return emitError() << "expected last dim of tensor to be a multiple of "
                           << chunkAlignmentFactor;
    }
  }

  auto layoutAttr = llvm::dyn_cast_if_present<LayoutAttr>(layout);
  if (layoutAttr) {
    if (rank != (size_t)layoutAttr.getRank())
      return emitError() << "expected layout rank to match tensor rank";

    auto laneData = layoutAttr.getLaneData();
    if (scatterAttr && laneData) {
      // When chunk size > 1, the last dimension must be distributed to
      // lanes in units divisible by chunkAlignmentFactor.
      int64_t chunkSize = scatterAttr.getChunkSizeAsInt();
      if (chunkSize > 1 && laneData[rank - 1] % chunkAlignmentFactor)
        return emitError()
               << "expected last dim of lane_data to be a multiple of: "
               << chunkAlignmentFactor;
    }

    if (!XeGPUDialect::isEvenlyDistributable(shape, layoutAttr)) {
      std::string shapeStr;
      llvm::raw_string_ostream stream(shapeStr);
      llvm::interleaveComma(shape, stream);
      return emitError() << "cannot distribute [" << shapeStr << "] using "
                         << layoutAttr;
    }
  }
  return success();
}
mlir::Type MemDescType::parse(AsmParser &parser) {
  llvm::SmallVector<int64_t> shape;
  mlir::Type elementType;
  mlir::FailureOr<MemLayoutAttr> layout;

  // Parse literal '<'
  if (parser.parseLess())
    return {};

  auto shapeLoc = parser.getCurrentLocation();
  if (mlir::failed(parser.parseDimensionList(shape))) {
    parser.emitError(shapeLoc, "failed to parse parameter 'shape'");
    return {};
  }

  auto elemTypeLoc = parser.getCurrentLocation();
  if (mlir::failed(parser.parseType(elementType))) {
    parser.emitError(elemTypeLoc, "failed to parse parameter 'elementType'");
    return {};
  }

  // Parse the optional mem layout attribute.
  if (mlir::succeeded(parser.parseOptionalComma())) {
    MemLayoutAttr attr;
    ParseResult res = parser.parseAttribute(attr);
    if (mlir::failed(res))
      return {};
    layout = attr;
  }

  // Parse literal '>'
  if (parser.parseGreater())
    return {};

  MLIRContext *ctxt = parser.getContext();
  return MemDescType::getChecked(
      [&]() { return parser.emitError(parser.getNameLoc()); }, ctxt, shape,
      elementType, layout.value_or(MemLayoutAttr()));
}
void MemDescType::print(AsmPrinter &printer) const {
  printer << "<";
  printer.printDimensionList(getShape());
  printer << 'x' << getElementType();
  if (auto layout = getMemLayout())
    printer << ", " << layout;
  printer << ">";
}
Attribute MemLayoutAttr::parse(AsmParser &parser, Type type) {
  MLIRContext *context = parser.getContext();
  llvm::SMLoc loc = parser.getCurrentLocation();

  llvm::SmallDenseSet<StringRef> seenKeys;
  SmallVector<NamedAttribute> attributes;

  auto parseElt = [&]() -> ParseResult {
    StringRef nameId;
    if (parser.parseKeyword(&nameId))
      return parser.emitError(loc, "expected valid attribute name");
    if (!seenKeys.insert(nameId).second)
      return parser.emitError(loc, "duplicate key '")
             << nameId << "' in mem layout attribute";
    if (parser.parseEqual())
      return failure();
    Attribute attr;
    if (parser.parseAttribute(attr))
      return failure();
    attributes.emplace_back(nameId, attr);
    return success();
  };

  // Parse literal '<'
  if (parser.parseLess())
    return {};

  if (parser.parseCommaSeparatedList(parseElt))
    return {};

  // Parse literal '>'
  if (parser.parseGreater())
    return {};

  return parser.getChecked<MemLayoutAttr>(
      loc, context, DictionaryAttr::get(context, attributes));
}
void MemLayoutAttr::print(AsmPrinter &printer) const {
  printer << "<";
  ArrayRef<NamedAttribute> attrs = getAttrs().getValue();
  for (size_t i = 0; i < attrs.size(); i++) {
    printer << attrs[i].getName().str() << " = " << attrs[i].getValue();
    if (i < attrs.size() - 1)
      printer << ", ";
  }
  printer << ">";
}
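// Example (illustrative; the key names are hypothetical here): a mem layout
// with two keys prints as
//   #xegpu.mem_layout<block = [16, 16], stride = [1, 32]>
// and parse() rejects a repeated key such as two 'stride' entries.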
template <typename ArithOp>
OpFoldResult genBinOp(OpFoldResult a, OpFoldResult b, Location loc,
                      OpBuilder &builder) {
  Value aVal = getValueOrCreateConstantIndexOp(builder, loc, a);
  Value bVal = getValueOrCreateConstantIndexOp(builder, loc, b);
  return ArithOp::create(builder, loc, aVal, bVal).getResult();
}

#define div(a, b)                                                              \
  genBinOp<arith::DivSIOp>(a, builder.getIndexAttr(b), loc, builder)
#define rem(a, b)                                                              \
  genBinOp<arith::RemSIOp>(a, builder.getIndexAttr(b), loc, builder)
#define mul(a, b)                                                              \
  genBinOp<arith::MulIOp>(a, builder.getIndexAttr(b), loc, builder)
#define add(a, b) genBinOp<arith::AddIOp>(a, b, loc, builder)
// Splits each offset into a block index (offset / block) and an intra-block
// offset (offset % block), returning all block indices followed by all
// intra-block offsets.
SmallVector<OpFoldResult> getBlockedOffsets(OpBuilder &builder, Location loc,
                                            ArrayRef<OpFoldResult> offsets,
                                            ArrayRef<int64_t> blockShape) {
  assert(offsets.size() == blockShape.size() &&
         "offsets and blockShape must have the same size");
  SmallVector<OpFoldResult> blockedOffsets;
  SmallVector<OpFoldResult> divs, rems;

  for (auto [offset, block] : llvm::zip(offsets, blockShape)) {
    divs.push_back(div(offset, block));
    rems.push_back(rem(offset, block));
  }
  blockedOffsets.append(divs.begin(), divs.end());
  blockedOffsets.append(rems.begin(), rems.end());

  return blockedOffsets;
}
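// Worked example (illustrative): offsets = (9, 18) with blockShape =
// (8, 16) yields divs = (1, 1) and rems = (1, 2), i.e. the blocked offsets
// [1, 1, 1, 2].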
// Returns the strides of the blocked layout as a 2*rank vector: the strides
// between blocks (outer) followed by the strides within a block (inner).
SmallVector<int64_t> MemDescType::getStrideShape() {
  SmallVector<int64_t> matrixShape(getShape().begin(), getShape().end());

  ArrayAttr strideAttr = getStrideAttr();
  SmallVector<int64_t> strides;
  for (Attribute attr : strideAttr.getValue()) {
    strides.push_back(cast<IntegerAttr>(attr).getInt());
  }

  SmallVector<int64_t> innerBlkShape = getBlockShape();

  // Sort dimensions by stride: perm[0] is the innermost (fastest-varying)
  // dimension.
  SmallVector<int> perm = llvm::to_vector<4>(llvm::seq<int>(0, strides.size()));
  llvm::sort(perm, [&](int a, int b) { return strides[a] < strides[b]; });

  assert(strides[perm[0]] == 1 && "inner most dim must have stride 1");

  // Strides within an inner block: contiguous in permuted order.
  SmallVector<int64_t> innerBlkStride(innerBlkShape.size());
  innerBlkStride[perm[0]] = 1;
  for (size_t i = 1; i < perm.size(); ++i)
    innerBlkStride[perm[i]] =
        innerBlkStride[perm[i - 1]] * innerBlkShape[perm[i - 1]];

  // Reconstruct the unblocked matrix shape from the strides and derive the
  // number of blocks along each non-outermost dimension.
  SmallVector<int64_t> matrixShapeOrig(matrixShape.size());
  SmallVector<int64_t> BlkShapeOrig(matrixShape.size());
  for (size_t i = 0; i < perm.size() - 1; ++i) {
    matrixShapeOrig[perm[i]] = strides[perm[i + 1]] / strides[perm[i]];
    BlkShapeOrig[perm[i]] = matrixShapeOrig[perm[i]] / innerBlkShape[perm[i]];
  }

  // Total number of elements in one inner block.
  int64_t innerBlkSize = 1;
  for (auto s : innerBlkShape)
    innerBlkSize *= s;

  // Strides between blocks: the innermost dimension advances by one whole
  // block; outer dimensions advance by the number of blocks beneath them.
  SmallVector<int64_t> outerBlkStride(matrixShape.size());
  outerBlkStride[perm[0]] = innerBlkSize;
  for (size_t i = 0; i < perm.size() - 1; ++i) {
    outerBlkStride[perm[i + 1]] =
        outerBlkStride[perm[i]] * BlkShapeOrig[perm[i]];
  }

  // Combine: outer strides first, then inner strides.
  SmallVector<int64_t> blockedStrides;
  blockedStrides.append(outerBlkStride.begin(), outerBlkStride.end());
  blockedStrides.append(innerBlkStride.begin(), innerBlkStride.end());

  return blockedStrides;
}
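// Worked example for getStrideShape (illustrative): a 32x32 matrix with
// stride = [1, 32] and block = [16, 16]: innerBlkStride = [1, 16],
// innerBlkSize = 256, two blocks along dim 0, so outerBlkStride =
// [256, 512] and the result is [256, 512, 1, 16].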
Value MemDescType::getLinearOffsets(OpBuilder &builder, Location loc,
                                    ArrayRef<OpFoldResult> offsets) {
  SmallVector<int64_t> matrixShape(getShape().begin(), getShape().end());
  SmallVector<int64_t> blockShape = getBlockShape();
  SmallVector<int64_t> strides = getStrideShape();
  SmallVector<OpFoldResult> blockedOffsets;

  // If the whole matrix is a single block, drop the outer-block strides and
  // use the offsets as-is.
  if (llvm::equal(blockShape, matrixShape)) {
    strides.erase(strides.begin(), strides.begin() + matrixShape.size());
  } else {
    assert(offsets.size() == blockShape.size() &&
           "offsets and blockShape must have the same size");
    // Split each offset into a block index and an intra-block offset:
    // [divs..., rems...].
    SmallVector<OpFoldResult> divs, rems;
    for (auto [offset, block] : llvm::zip(offsets, blockShape)) {
      divs.push_back(div(offset, block));
      rems.push_back(rem(offset, block));
    }
    blockedOffsets.append(divs.begin(), divs.end());
    blockedOffsets.append(rems.begin(), rems.end());
    offsets = blockedOffsets;
  }

  // linearOffset = sum_i offsets[i] * strides[i]
  Value linearOffset = arith::ConstantIndexOp::create(builder, loc, 0);
  for (size_t i = 0; i < offsets.size(); ++i) {
    OpFoldResult mulResult = mul(offsets[i], strides[i]);
    Value mulVal = getValueOrCreateConstantIndexOp(builder, loc, mulResult);
    linearOffset = arith::AddIOp::create(builder, loc, mulVal, linearOffset);
  }

  return linearOffset;
}
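// Worked example (illustrative): continuing the 32x32 example above, the
// element at (9, 18) has blocked offsets [0, 1, 9, 2], so the linear offset
// is 0*256 + 1*512 + 9*1 + 2*16 = 553.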
#include <mlir/Dialect/XeGPU/IR/XeGPUDialect.cpp.inc>
#define GET_ATTRDEF_CLASSES
#include <mlir/Dialect/XeGPU/IR/XeGPUAttrs.cpp.inc>
#define GET_TYPEDEF_CLASSES
#include <mlir/Dialect/XeGPU/IR/XeGPUTypes.cpp.inc>