#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/Debug.h"
void XeGPUDialect::initialize() {
  addTypes<
#define GET_TYPEDEF_LIST
#include <mlir/Dialect/XeGPU/IR/XeGPUTypes.cpp.inc>
      >();
  addOperations<
#define GET_OP_LIST
#include <mlir/Dialect/XeGPU/IR/XeGPU.cpp.inc>
      >();
  addAttributes<
#define GET_ATTRDEF_LIST
#include <mlir/Dialect/XeGPU/IR/XeGPUAttrs.cpp.inc>
      >();
}

#define GET_OP_INTERFACE_CLASSES
#include "mlir/Dialect/XeGPU/IR/XeGPUOpInterface.cpp.inc"
static SmallVector<SmallVector<Value>>
genCoordinates(OpBuilder &builder, Location loc,
               SmallVector<Value> delinearizedId,
               ArrayRef<int64_t> subShapesLayout, ArrayRef<int64_t> subShape,
               ArrayRef<int64_t> srcShape) {
  // Shape of one distribution unit in each dimension.
  SmallVector<int64_t> distUnitShape = llvm::map_to_vector(
      llvm::zip_equal(srcShape,
                      computeElementwiseMul(subShapesLayout, subShape)),
      [](const auto &t) { return std::min(std::get<0>(t), std::get<1>(t)); });
  // Offset of this id's subshape within one distribution unit.
  SmallVector<Value> distUnitLocalOffset = llvm::map_to_vector(
      llvm::zip(delinearizedId, subShape), [&](const auto &t) -> Value {
        Value size =
            builder.createOrFold<arith::ConstantIndexOp>(loc, std::get<1>(t));
        return builder.createOrFold<arith::MulIOp>(loc, std::get<0>(t), size);
      });
  // Walk all distribution units and compute the wrapped global coordinates.
  SmallVector<SmallVector<Value>> coordinates;
  for (SmallVector<int64_t> unitOffs :
       StaticTileOffsetRange(srcShape, distUnitShape)) {
    SmallVector<Value> base =
        llvm::map_to_vector(unitOffs, [&](int64_t d) -> Value {
          return builder.createOrFold<arith::ConstantIndexOp>(loc, d);
        });
    SmallVector<Value> adds =
        llvm::map_to_vector(llvm::zip_equal(base, distUnitLocalOffset),
                            [&](const auto &t) -> Value {
                              return builder.createOrFold<arith::AddIOp>(
                                  loc, std::get<0>(t), std::get<1>(t));
                            });
    SmallVector<Value> mods = llvm::map_to_vector(
        llvm::zip_equal(adds, srcShape), [&](const auto &t) -> Value {
          Value size =
              builder.createOrFold<arith::ConstantIndexOp>(loc, std::get<1>(t));
          return builder.createOrFold<arith::RemUIOp>(loc, std::get<0>(t),
                                                      size);
        });
    coordinates.push_back(mods);
  }
  return coordinates;
}
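// Illustrative example (not from the upstream source): for srcShape =
// [128, 128], subShapesLayout = [4, 4] and subShape = [32, 32], each
// distribution unit is min(128, 4 * 32) = 128 per dim, so one unit covers the
// whole shape and id [r, c] maps to the single coordinate [r * 32, c * 32].
// With srcShape = [256, 128] the row dimension holds two units, so each id
// yields two coordinates: [r * 32, c * 32] and [r * 32 + 128, c * 32].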
bool XeGPUDialect::isEvenlyDistributable(llvm::ArrayRef<int64_t> shape,
                                         xegpu::DistributeLayoutAttr attr) {
  assert(attr && "Layout attribute is missing.");

  // Checks whether `shape` can be evenly distributed over `layout`, with
  // `data` describing the chunk owned by each layout element. Returns the
  // resulting per-element shape, or std::nullopt on failure. `rr` allows
  // round-robin distribution when data spans multiple distribution rounds.
  auto tryDistribute =
      [](llvm::ArrayRef<int64_t> shape, SmallVector<int64_t> layout,
         SmallVector<int64_t> data,
         bool rr = true) -> std::optional<SmallVector<int64_t>> {
    llvm::SmallVector<int64_t> newShape(shape);
    if (!layout.empty()) {
      if (layout.size() != shape.size())
        return std::nullopt;
      auto ratio = computeShapeRatio(shape, layout);
      if (ratio.has_value()) {
        newShape = ratio.value();
      } else {
        return std::nullopt;
      }
    }
    if (!data.empty()) {
      if (data.size() != shape.size())
        return std::nullopt;
      auto ratio = computeShapeRatio(newShape, data);
      if (!ratio.has_value() && rr)
        ratio = computeShapeRatio(data, newShape);
      if (!ratio.has_value())
        return std::nullopt;
      newShape = data;
    }
    return newShape;
  };

  // Check the subgroup level distribution (sg_layout and sg_data).
  auto maybeSgShape = tryDistribute(shape, attr.getEffectiveSgLayoutAsInt(),
                                    attr.getEffectiveSgDataAsInt());
  if (!maybeSgShape)
    return false;
  auto sgShape = maybeSgShape.value();

  // Check the instruction level distribution (inst_data only).
  auto maybeInstShape =
      tryDistribute(sgShape, {}, attr.getEffectiveInstDataAsInt(), false);
  if (!maybeInstShape)
    return false;
  auto instShape = maybeInstShape.value();

  // Check the lane level distribution (lane_layout and lane_data).
  auto maybeLaneShape =
      tryDistribute(instShape, attr.getEffectiveLaneLayoutAsInt(),
                    attr.getEffectiveLaneDataAsInt(), false);
  return maybeLaneShape.has_value();
}
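// Worked example (illustrative, not from the upstream source): for
// shape = [128, 128] with sg_layout = [4, 4], sg_data = [32, 32],
// inst_data = [8, 16], lane_layout = [2, 8], lane_data = [1, 1]:
//   [128, 128] / [4, 4]            -> sgShape   = [32, 32] (sg_data matches)
//   [32, 32] per inst_data [8, 16] -> instShape = [8, 16]
//   [8, 16] / [2, 8] = [4, 2], divisible by lane_data [1, 1]
// so the layout distributes evenly and the function returns true.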
BlockTensorDescAttr BlockTensorDescAttr::get(mlir::MLIRContext *context,
                                             xegpu::MemorySpace memory_space,
                                             int array_length,
                                             bool boundary_check) {
  auto scopeAttr = MemorySpaceAttr::get(context, memory_space);
  auto lengthAttr =
      IntegerAttr::get(IntegerType::get(context, 64), array_length);
  auto boundaryAttr = BoolAttr::get(context, boundary_check);
  return Base::get(context, scopeAttr, lengthAttr, boundaryAttr);
}
bool BlockTensorDescAttr::hasDefaultsOnly() {
  return getMemorySpace().getValue() == xegpu::MemorySpace::Global &&
         getArrayLength().getInt() == 1 && getBoundaryCheck().getValue();
}
ScatterTensorDescAttr
ScatterTensorDescAttr::get(mlir::MLIRContext *context,
                           xegpu::MemorySpace memory_space, int chunk_size) {
  auto scopeAttr = MemorySpaceAttr::get(context, memory_space);
  auto chunkSizeAttr =
      IntegerAttr::get(IntegerType::get(context, 64), chunk_size);
  return Base::get(context, scopeAttr, chunkSizeAttr);
}
LogicalResult ScatterTensorDescAttr::verify(
    llvm::function_ref<mlir::InFlightDiagnostic()> emitError,
    MemorySpaceAttr memory_space, IntegerAttr chunk_size) {
  int64_t chunkSize = chunk_size.getInt();
  if (chunkSize <= 0)
    return emitError() << "invalid chunk size";
  return success();
}
LogicalResult
LayoutAttr::verify(llvm::function_ref<mlir::InFlightDiagnostic()> emitError,
                   DenseI32ArrayAttr sg_layout, DenseI32ArrayAttr sg_data,
                   DenseI32ArrayAttr inst_data, DenseI32ArrayAttr lane_layout,
                   DenseI32ArrayAttr lane_data, DenseI32ArrayAttr order) {
  // A valid layout must include at least one of sg_layout, inst_data and
  // lane_layout.
  if (!sg_layout && !inst_data && !lane_layout) {
    return emitError()
           << "expected at least one of sg_layout, inst_data or lane_layout";
  }

  // sg_layout, inst_data and lane_layout must have the same rank when present.
  if (sg_layout && inst_data && sg_layout.size() != inst_data.size()) {
    return emitError()
           << "expected sg_layout and inst_data to have the same rank";
  }

  if (sg_layout && lane_layout && sg_layout.size() != lane_layout.size()) {
    return emitError()
           << "expected sg_layout and lane_layout to have the same rank";
  }

  if (inst_data && lane_layout && inst_data.size() != lane_layout.size()) {
    return emitError() << "expected inst_data and lane_layout to have the same "
                          "rank, got inst_data "
                       << inst_data.size() << ", lane_layout "
                       << lane_layout.size();
  }

  // sg_data is only valid alongside sg_layout, and must match its rank.
  if (sg_data) {
    if (!sg_layout)
      return emitError() << "expected sg_layout being used with sg_data";
    if (sg_data.size() != sg_layout.size())
      return emitError()
             << "expected sg_data and sg_layout to have the same rank";
  }

  // lane_data is only valid alongside lane_layout, and must match its rank.
  if (lane_data) {
    if (!lane_layout)
      return emitError() << "expected lane_layout being used with lane_data";
    if (lane_data.size() != lane_layout.size())
      return emitError()
             << "expected lane_data and lane_layout to have the same rank";
  }

  // order is only valid alongside sg_layout or lane_layout, matching rank.
  if (order) {
    if (!sg_layout && !lane_layout)
      return emitError()
             << "expected sg_layout/lane_layout being used with order";
    if (sg_layout && order.size() != sg_layout.size())
      return emitError()
             << "expected order and sg_layout to have the same rank";
    if (lane_layout && order.size() != lane_layout.size())
      return emitError()
             << "expected order and lane_layout to have the same rank";
  }

  return success();
}
FailureOr<SmallVector<Value>>
LayoutAttr::delinearizeId(OpBuilder &builder, Location loc, Value linearId) {
  SmallVector<int64_t> sgLayoutInt;
  if (isForWorkgroup()) {
    sgLayoutInt = getEffectiveSgLayoutAsInt();
  } else if (isForSubgroup()) {
    sgLayoutInt = getEffectiveLaneLayoutAsInt();
  } else {
    return failure();
  }

  DenseI32ArrayAttr orderAttr = getOrder();

  SmallVector<int64_t> order;
  if (orderAttr && !orderAttr.empty()) {
    order = llvm::to_vector(llvm::map_range(
        orderAttr.asArrayRef(),
        [](int32_t idx) { return static_cast<int64_t>(idx); }));
  } else {
    // Default order: [rank-1, ..., 1, 0], i.e. the last dim varies fastest.
    order = llvm::to_vector(
        llvm::reverse(llvm::seq<int64_t>(0, sgLayoutInt.size())));
  }

  if (order.size() != sgLayoutInt.size()) {
    return failure();
  }

  SmallVector<Value> result(sgLayoutInt.size());
  Value remaining = linearId;

  // Peel off one dimension per step, in order of increasing stride: the
  // coordinate is remaining % dimSize, the rest is remaining / dimSize.
  for (size_t i = 0; i < order.size(); ++i) {
    int64_t dimIdx = order[i];
    int64_t dimSize = sgLayoutInt[dimIdx];
    Value dimSizeVal =
        builder.createOrFold<arith::ConstantIndexOp>(loc, dimSize);
    result[dimIdx] =
        builder.createOrFold<arith::RemUIOp>(loc, remaining, dimSizeVal);
    if (i < order.size() - 1) {
      remaining =
          builder.createOrFold<arith::DivUIOp>(loc, remaining, dimSizeVal);
    }
  }
  return result;
}
FailureOr<SmallVector<SmallVector<Value>>>
LayoutAttr::computeDistributedCoords(OpBuilder &builder, Location loc,
                                     Value linearId, ArrayRef<int64_t> shape) {
  SmallVector<int64_t> layout;
  SmallVector<int64_t> subShape;
  if (isForWorkgroup()) {
    layout = getEffectiveSgLayoutAsInt();
    subShape = getEffectiveSgDataAsInt();
  } else if (isForSubgroup()) {
    layout = getEffectiveLaneLayoutAsInt();
    subShape = getEffectiveLaneDataAsInt();
  } else {
    return failure();
  }
  if (subShape.empty()) {
    // Derive the subshape from the shape and the layout.
    auto derivedShape = computeShapeRatio(shape, layout);
    if (!derivedShape)
      return failure();
    subShape = derivedShape.value();
  }
  auto maybeIds = delinearizeId(builder, loc, linearId);
  if (failed(maybeIds))
    return failure();
  SmallVector<Value> ids = *maybeIds;

  return genCoordinates(builder, loc, ids, layout, subShape, shape);
}
bool LayoutAttr::isEqualTo(const xegpu::DistributeLayoutAttr &other) {
  if (dyn_cast<xegpu::SliceAttr>(other))
    return false;
  return *this == dyn_cast<xegpu::LayoutAttr>(other);
}
LayoutAttr LayoutAttr::setUnitDimData(SetVector<int64_t> unitDims) {
  auto sgDataOpt = getSgData();
  auto instDataOpt = getInstData();
  auto laneDataOpt = getLaneData();

  SmallVector<int32_t> sgData;
  SmallVector<int32_t> instData;
  SmallVector<int32_t> laneData;

  if (sgDataOpt)
    sgData = llvm::to_vector(sgDataOpt.asArrayRef());
  if (instDataOpt)
    instData = llvm::to_vector(instDataOpt.asArrayRef());
  if (laneDataOpt)
    laneData = llvm::to_vector(laneDataOpt.asArrayRef());

  // Force the data size of every unit dim to 1.
  for (auto dim : unitDims) {
    if (dim < static_cast<int64_t>(sgData.size()))
      sgData[dim] = 1;
    if (dim < static_cast<int64_t>(instData.size()))
      instData[dim] = 1;
    if (dim < static_cast<int64_t>(laneData.size()))
      laneData[dim] = 1;
  }

  return LayoutAttr::get(
      getContext(), getSgLayout(),
      sgData.empty() ? DenseI32ArrayAttr()
                     : DenseI32ArrayAttr::get(getContext(), sgData),
      instData.empty() ? DenseI32ArrayAttr()
                       : DenseI32ArrayAttr::get(getContext(), instData),
      getLaneLayout(),
      laneData.empty() ? DenseI32ArrayAttr()
                       : DenseI32ArrayAttr::get(getContext(), laneData),
      getOrder());
}
LayoutAttr LayoutAttr::setUnitDimLayout(SetVector<int64_t> unitDims) {
  auto sgLayoutOpt = getSgLayout();
  auto laneLayoutOpt = getLaneLayout();

  SmallVector<int32_t> sgLayout;
  SmallVector<int32_t> laneLayout;

  if (sgLayoutOpt)
    sgLayout = llvm::to_vector(sgLayoutOpt.asArrayRef());
  if (laneLayoutOpt)
    laneLayout = llvm::to_vector(laneLayoutOpt.asArrayRef());

  // Force the layout size of every unit dim to 1.
  for (auto dim : unitDims) {
    if (dim < static_cast<int64_t>(sgLayout.size()))
      sgLayout[dim] = 1;
    if (dim < static_cast<int64_t>(laneLayout.size()))
      laneLayout[dim] = 1;
  }

  return LayoutAttr::get(
      getContext(),
      sgLayout.empty() ? DenseI32ArrayAttr()
                       : DenseI32ArrayAttr::get(getContext(), sgLayout),
      getSgData(), getInstData(),
      laneLayout.empty() ? DenseI32ArrayAttr()
                         : DenseI32ArrayAttr::get(getContext(), laneLayout),
      getLaneData(), getOrder());
}
LogicalResult
SliceAttr::verify(llvm::function_ref<InFlightDiagnostic()> emitError,
                  xegpu::DistributeLayoutAttr parent, DenseI64ArrayAttr dims) {
  if (!parent || !dims)
    return emitError() << "expected parent layout and dims attribute";

  int64_t rank = parent.getRank();

  // Every element in dims must be unique and smaller than the parent rank.
  llvm::SmallDenseSet<int64_t> seen;
  for (int64_t dim : dims.asArrayRef()) {
    if (dim < 0 || dim >= rank)
      return emitError() << "invalid dim (" << dim << ") in slice attribute.";
    if (!seen.insert(dim).second)
      return emitError() << "repeated dim (" << dim << ") in slice attribute.";
  }
  return success();
}
SliceAttr SliceAttr::flatten() const {
  xegpu::DistributeLayoutAttr parent = getParent();
  SmallVector<DenseI64ArrayAttr> slicedDims({getDims()});

  while (auto sliceAttr = dyn_cast<xegpu::SliceAttr>(parent)) {
    parent = sliceAttr.getParent();
    slicedDims.push_back(sliceAttr.getDims());
  }

  auto layoutAttr = dyn_cast<xegpu::LayoutAttr>(parent);
  SmallVector<int64_t> indices =
      llvm::to_vector(llvm::seq<int64_t>(0, layoutAttr.getRank()));

  // Get the remaining (non-sliced) dims by applying the slices innermost
  // first.
  SmallVector<int64_t> remainingDims(indices);
  for (auto dim : llvm::reverse(slicedDims))
    remainingDims = XeGPUDialect::slice(llvm::ArrayRef<int64_t>(remainingDims),
                                        dim.asArrayRef());

  // The flattened sliced dims are the complement of the remaining dims.
  SmallVector<int64_t> flattendDims = XeGPUDialect::slice(
      llvm::ArrayRef<int64_t>(indices), llvm::ArrayRef<int64_t>(remainingDims));

  return xegpu::SliceAttr::get(
      getContext(), layoutAttr,
      DenseI64ArrayAttr::get(getContext(), flattendDims));
}
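// Worked example (illustrative): slicing dim 1 of a 3-D layout and then dim 0
// of the result,
//   #xegpu.slice<#xegpu.slice<#xegpu.layout<sg_layout = [2, 4, 8]>,
//                dims = [1]>, dims = [0]>
// flattens to a single slice of the 3-D parent with dims = [0, 1]: only
// parent dim 2 survives both slices.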
FailureOr<SmallVector<Value>>
SliceAttr::delinearizeId(OpBuilder &builder, Location loc, Value linearId) {
  SliceAttr attr = flatten();
  auto parent = dyn_cast<LayoutAttr>(attr.getParent());
  return parent.delinearizeId(builder, loc, linearId);
}
FailureOr<SmallVector<SmallVector<Value>>>
SliceAttr::computeDistributedCoords(OpBuilder &builder, Location loc,
                                    Value linearId, ArrayRef<int64_t> shape) {
  assert(getRank() == static_cast<int64_t>(shape.size()) && "invalid shape.");
  if (!isForWorkgroup())
    return failure();

  SmallVector<int64_t> layout;
  SmallVector<int64_t> subShape;
  if (isForWorkgroup()) {
    layout = getEffectiveSgLayoutAsInt();
    subShape = getEffectiveSgDataAsInt();
  } else if (isForSubgroup()) {
    layout = getEffectiveLaneLayoutAsInt();
    subShape = getEffectiveLaneDataAsInt();
  } else {
    return failure();
  }

  if (subShape.empty()) {
    // Derive the subshape from the shape and the layout.
    auto derivedShape = computeShapeRatio(shape, layout);
    if (!derivedShape)
      return failure();
    subShape = derivedShape.value();
  }

  auto maybeIds = delinearizeId(builder, loc, linearId);
  if (failed(maybeIds))
    return failure();

  // The delinearized ids cover the parent layout; drop the sliced dims.
  ArrayRef<int64_t> dims = flatten().getDims().asArrayRef();
  SmallVector<Value> sgIds =
      XeGPUDialect::slice(ArrayRef<Value>(*maybeIds), dims);

  return genCoordinates(builder, loc, sgIds, layout, subShape, shape);
}
bool SliceAttr::isSliceOf(const xegpu::DistributeLayoutAttr &other) {
  auto flattenedThis = flatten();

  // If other is a LayoutAttr, compare it with this attribute's parent.
  if (auto otherLayout = dyn_cast<xegpu::LayoutAttr>(other))
    return flattenedThis.getParent() == otherLayout;

  // Otherwise both are SliceAttrs: the parents must match, and the sliced
  // dims of this must contain all sliced dims of other.
  auto flattenedOther = dyn_cast<xegpu::SliceAttr>(other).flatten();
  if (flattenedThis.getParent() != flattenedOther.getParent())
    return false;

  llvm::SmallDenseSet<int64_t> thisDims(
      flattenedThis.getDims().asArrayRef().begin(),
      flattenedThis.getDims().asArrayRef().end());
  return llvm::all_of(flattenedOther.getDims().asArrayRef(),
                      [&](int64_t dim) { return thisDims.contains(dim); });
}
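// Illustrative: with parent #xegpu.layout<sg_layout = [2, 4, 8]>, a slice
// with dims = [0, 2] isSliceOf a slice with dims = [0], since {0, 2} contains
// {0}; the reverse query returns false.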
bool SliceAttr::isEqualTo(const xegpu::DistributeLayoutAttr &other) {
  if (dyn_cast<xegpu::LayoutAttr>(other))
    return false;

  auto flattenedThis = flatten();
  auto flattenedOther = dyn_cast<xegpu::SliceAttr>(other).flatten();
  return ((flattenedThis.getParent() == flattenedOther.getParent()) &&
          (flattenedThis.getDims() == flattenedOther.getDims()));
}
// Map unit dims given relative to the sliced view back to the corresponding
// dims of the parent layout.
static SetVector<int64_t>
adjustUnitDimsWithSliceDims(const SetVector<int64_t> &unitDims,
                            ArrayRef<int64_t> sliceDims) {
  int64_t parentRank = sliceDims.size() + unitDims.size();
  llvm::SmallDenseSet<int64_t> slicedDimsSet(sliceDims.begin(),
                                             sliceDims.end());
  // Collect the parent dims that survive the slice, in order.
  SmallVector<int64_t> nonSlicedDims;
  for (int64_t i = 0; i < parentRank; ++i) {
    if (!slicedDimsSet.contains(i))
      nonSlicedDims.push_back(i);
  }

  SetVector<int64_t> adjustUnitDims;
  for (auto dim : unitDims) {
    if (dim < static_cast<int64_t>(nonSlicedDims.size())) {
      adjustUnitDims.insert(nonSlicedDims[dim]);
    }
  }
  return adjustUnitDims;
}
SliceAttr SliceAttr::setUnitDimData(SetVector<int64_t> unitDims) {
  SliceAttr attr = flatten();
  ArrayRef<int64_t> sliceDims = attr.getDims().asArrayRef();
  auto parent = dyn_cast<LayoutAttr>(attr.getParent());
  SetVector<int64_t> adjustUnitDims =
      adjustUnitDimsWithSliceDims(unitDims, sliceDims);
  return SliceAttr::get(getContext(), parent.setUnitDimData(adjustUnitDims),
                        attr.getDims());
}

SliceAttr SliceAttr::setUnitDimLayout(SetVector<int64_t> unitDims) {
  SliceAttr attr = flatten();
  ArrayRef<int64_t> sliceDims = attr.getDims().asArrayRef();
  auto parent = dyn_cast<LayoutAttr>(attr.getParent());
  SetVector<int64_t> adjustUnitDims =
      adjustUnitDimsWithSliceDims(unitDims, sliceDims);
  return SliceAttr::get(getContext(), parent.setUnitDimLayout(adjustUnitDims),
                        attr.getDims());
}
LogicalResult
RangeAttr::verify(llvm::function_ref<mlir::InFlightDiagnostic()> emitError,
                  IntegerAttr startOfRange, IntegerAttr endOfRange) {
  if (startOfRange.getInt() >= endOfRange.getInt())
    return emitError() << "'end' : " << endOfRange.getInt()
                       << " must be greater than 'start' : "
                       << startOfRange.getInt();
  return success();
}
mlir::Type TensorDescType::parse(AsmParser &parser) {
  llvm::SmallVector<int64_t> shape;
  mlir::Type elementType;
  mlir::FailureOr<mlir::Attribute> encoding;
  mlir::FailureOr<mlir::Attribute> layout;

  // Parse literal '<'.
  if (parser.parseLess())
    return {};

  auto shapeLoc = parser.getCurrentLocation();
  if (mlir::failed(parser.parseDimensionList(shape))) {
    parser.emitError(shapeLoc, "failed to parse parameter 'shape'");
    return {};
  }

  auto elemTypeLoc = parser.getCurrentLocation();
  if (mlir::failed(parser.parseType(elementType))) {
    parser.emitError(elemTypeLoc, "failed to parse parameter 'elementType'");
    return {};
  }

  // Parse the optional encoding and layout attributes.
  while (mlir::succeeded(parser.parseOptionalComma())) {
    mlir::Attribute attr;
    ParseResult res = parser.parseAttribute(attr);
    if (mlir::succeeded(res)) {
      if (mlir::isa<LayoutAttr>(attr)) {
        layout = attr;
        continue;
      }
      if (mlir::isa<BlockTensorDescAttr, ScatterTensorDescAttr>(attr)) {
        encoding = attr;
        continue;
      }
    }
    return {};
  }

  // Parse literal '>'.
  if (parser.parseGreater())
    return {};

  MLIRContext *ctxt = parser.getContext();
  return TensorDescType::getChecked(
      [&]() { return parser.emitError(parser.getNameLoc()); }, ctxt, shape,
      elementType, encoding.value_or(BlockTensorDescAttr::get(ctxt)),
      layout.value_or(mlir::Attribute()));
}
void TensorDescType::print(AsmPrinter &printer) const {
  printer << "<";

  auto shape = getShape();
  for (int64_t dim : shape) {
    if (mlir::ShapedType::isDynamic(dim))
      printer << '?';
    else
      printer << dim;
    printer << 'x';
  }
  printer << getElementType();

  // Skip printing the encoding if it only holds default values.
  auto encoding = getEncoding();
  auto blockAttr = llvm::dyn_cast_if_present<BlockTensorDescAttr>(encoding);
  if (encoding && (!blockAttr || !blockAttr.hasDefaultsOnly()))
    printer << ", " << encoding;

  if (auto layout = getLayout())
    printer << ", " << layout;

  printer << ">";
}
TensorDescType TensorDescType::get(llvm::ArrayRef<int64_t> shape,
                                   mlir::Type elementType, int array_length,
                                   bool boundary_check,
                                   MemorySpace memory_space,
                                   mlir::Attribute layout) {
  auto context = elementType.getContext();
  auto attr = BlockTensorDescAttr::get(context, memory_space, array_length,
                                       boundary_check);
  return Base::get(context, shape, elementType, attr, layout);
}

TensorDescType TensorDescType::get(llvm::ArrayRef<int64_t> shape,
                                   mlir::Type elementType, int chunk_size,
                                   MemorySpace memory_space,
                                   mlir::Attribute layout) {
  auto context = elementType.getContext();
  auto attr = ScatterTensorDescAttr::get(context, memory_space, chunk_size);
  return Base::get(context, shape, elementType, attr, layout);
}
LogicalResult
TensorDescType::verify(llvm::function_ref<InFlightDiagnostic()> emitError,
                       llvm::ArrayRef<int64_t> shape, mlir::Type elementType,
                       mlir::Attribute encoding, mlir::Attribute layout) {
  size_t rank = shape.size();

  if (rank == 0)
    return emitError() << "expected non-zero rank tensor";

  auto blockAttr = mlir::dyn_cast_if_present<BlockTensorDescAttr>(encoding);
  if (blockAttr) {
    MemorySpaceAttr memorySpaceAttr = blockAttr.getMemorySpace();
    if (rank > 1 && memorySpaceAttr &&
        memorySpaceAttr.getValue() == MemorySpace::SLM)
      return emitError() << "SLM is only supported for 1D block tensor";
  }

  // For gather/scatter, sub-32-bit elements are packed into 32-bit units, so
  // the chunk must be aligned to the number of elements per packed unit.
  unsigned bitWidth = elementType.getIntOrFloatBitWidth();
  int chunkAlignmentFactor =
      bitWidth < generalPackedFormatBitSize
          ? generalPackedFormatBitSize / bitWidth
          : 1;
  auto scatterAttr = mlir::dyn_cast_if_present<ScatterTensorDescAttr>(encoding);
  if (scatterAttr) {
    int64_t chunkSize = scatterAttr.getChunkSizeAsInt();
    if (rank == 1 && chunkSize != 1)
      return emitError() << "expected non-contiguous elements for 1D tensor";

    // If chunk size > 1, the last dimension of the tensor shape must equal
    // the chunk size, and it must be a multiple of chunkAlignmentFactor.
    if (chunkSize > 1) {
      if (shape.back() != chunkSize)
        return emitError() << "expected last dim of tensor to match chunk size";
      if (shape.back() % chunkAlignmentFactor != 0)
        return emitError() << "expected last dim of tensor to be a multiple of "
                           << chunkAlignmentFactor;
    }
  }

  auto layoutAttr = llvm::dyn_cast_if_present<LayoutAttr>(layout);
  if (layoutAttr) {
    if (rank != (size_t)layoutAttr.getRank())
      return emitError() << "expected layout rank to match tensor rank";

    auto laneData = layoutAttr.getLaneData();
    if (scatterAttr && laneData) {
      // Lane data on a scattered tensor must respect the packing alignment.
      int64_t chunkSize = scatterAttr.getChunkSizeAsInt();
      if (chunkSize > 1 && laneData[rank - 1] % chunkAlignmentFactor)
        return emitError()
               << "expected last dim of lane_data to be a multiple of: "
               << chunkAlignmentFactor;
    }

    if (!XeGPUDialect::isEvenlyDistributable(shape, layoutAttr)) {
      std::string shapeStr;
      llvm::raw_string_ostream stream(shapeStr);
      llvm::interleaveComma(shape, stream);
      return emitError() << "cannot distribute [" << shapeStr << "] using "
                         << layoutAttr;
    }
  }
  return success();
}
mlir::Type MemDescType::parse(AsmParser &parser) {
  llvm::SmallVector<int64_t> shape;
  mlir::Type elementType;
  mlir::FailureOr<MemLayoutAttr> layout;

  // Parse literal '<'.
  if (parser.parseLess())
    return {};

  auto shapeLoc = parser.getCurrentLocation();
  if (mlir::failed(parser.parseDimensionList(shape))) {
    parser.emitError(shapeLoc, "failed to parse parameter 'shape'");
    return {};
  }

  auto elemTypeLoc = parser.getCurrentLocation();
  if (mlir::failed(parser.parseType(elementType))) {
    parser.emitError(elemTypeLoc, "failed to parse parameter 'elementType'");
    return {};
  }

  // Parse the optional mem layout attribute.
  if (mlir::succeeded(parser.parseOptionalComma())) {
    MemLayoutAttr attr;
    ParseResult res = parser.parseAttribute(attr);
    if (mlir::failed(res))
      return {};
    layout = attr;
  }

  // Parse literal '>'.
  if (parser.parseGreater())
    return {};

  MLIRContext *ctxt = parser.getContext();
  return MemDescType::getChecked(
      [&]() { return parser.emitError(parser.getNameLoc()); }, ctxt, shape,
      elementType, layout.value_or(MemLayoutAttr()));
}
void MemDescType::print(AsmPrinter &printer) const {
  printer << "<";
  printer.printDimensionList(getShape());
  printer << 'x' << getElementType();
  if (auto layout = getMemLayout())
    printer << ", " << layout;
  printer << ">";
}
Attribute MemLayoutAttr::parse(AsmParser &parser, Type type) {
  MLIRContext *context = parser.getContext();
  llvm::SMLoc loc = parser.getCurrentLocation();

  llvm::SmallDenseSet<StringRef> seenKeys;
  SmallVector<NamedAttribute> attributes;

  auto parseElt = [&]() -> ParseResult {
    StringRef nameId;
    if (failed(parser.parseKeyword(&nameId)))
      return parser.emitError(loc, "expected valid attribute name");

    if (!seenKeys.insert(nameId).second)
      return parser.emitError(loc, "duplicate key '")
             << nameId << "' in mem layout attribute";

    mlir::Attribute attr;
    if (failed(parser.parseEqual()) || failed(parser.parseAttribute(attr)))
      return failure();
    attributes.emplace_back(nameId, attr);
    return success();
  };

  // Parse literal '<'.
  if (parser.parseLess())
    return {};

  if (failed(parser.parseCommaSeparatedList(parseElt)))
    return {};

  // Parse literal '>'.
  if (parser.parseGreater())
    return {};

  return parser.getChecked<MemLayoutAttr>(
      loc, context, DictionaryAttr::get(context, attributes));
}
void MemLayoutAttr::print(AsmPrinter &printer) const {
  printer << "<";
  ArrayRef<NamedAttribute> attrs = getAttrs().getValue();
  for (size_t i = 0; i < attrs.size(); i++) {
    printer << attrs[i].getName().str() << " = " << attrs[i].getValue();
    if (i < attrs.size() - 1)
      printer << ", ";
  }
  printer << ">";
}
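// Illustrative (key names are examples, not an exhaustive list): a mem layout
// holding a block shape and strides would print as
//   #xegpu.mem_layout<block = [16, 16], stride = [1, 16]>
// with entries emitted in the order stored in the dictionary.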
template <typename ArithOp>
static OpFoldResult genBinOp(OpFoldResult a, OpFoldResult b, Location loc,
                             OpBuilder &builder) {
  Value aVal = getValueOrCreateConstantIndexOp(builder, loc, a);
  Value bVal = getValueOrCreateConstantIndexOp(builder, loc, b);
  return ArithOp::create(builder, loc, aVal, bVal).getResult();
}

// Shorthands used by the offset computations below; b is a static int64_t
// for div/rem/mul, and an OpFoldResult for add.
#define div(a, b)                                                              \
  genBinOp<arith::DivSIOp>(a, builder.getIndexAttr(b), loc, builder)
#define rem(a, b)                                                              \
  genBinOp<arith::RemSIOp>(a, builder.getIndexAttr(b), loc, builder)
#define mul(a, b)                                                              \
  genBinOp<arith::MulIOp>(a, builder.getIndexAttr(b), loc, builder)
#define add(a, b) genBinOp<arith::AddIOp>(a, b, loc, builder)
// Split each offset into a block index (offset / block) and an intra-block
// offset (offset % block); the result lists all block indices first.
SmallVector<OpFoldResult> getBlockedOffsets(OpBuilder &builder, Location loc,
                                            ArrayRef<OpFoldResult> offsets,
                                            ArrayRef<int64_t> blockShape) {
  assert(offsets.size() == blockShape.size() &&
         "offsets and blockShape must have the same size");
  SmallVector<OpFoldResult> blockedOffsets;
  SmallVector<OpFoldResult> divs, rems;

  for (auto [offset, block] : llvm::zip(offsets, blockShape)) {
    divs.push_back(div(offset, block));
    rems.push_back(rem(offset, block));
  }
  blockedOffsets.append(divs.begin(), divs.end());
  blockedOffsets.append(rems.begin(), rems.end());

  return blockedOffsets;
}
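// Worked example (illustrative): offsets = [10, 20] with blockShape = [8, 16]
// yields divs = [1, 1] and rems = [2, 4], i.e. blocked offsets [1, 1, 2, 4]:
// block (1, 1), element (2, 4) within that block.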
// Compute the strides of the blocked layout from the matrix shape, the
// declared strides, and the inner block shape.
SmallVector<int64_t> MemDescType::getStrideShape() {
  SmallVector<int64_t> matrixShape(getShape().begin(), getShape().end());

  ArrayAttr strideAttr = getStrideAttr();
  SmallVector<int64_t> strides;
  for (Attribute attr : strideAttr.getValue()) {
    strides.push_back(cast<IntegerAttr>(attr).getInt());
  }

  SmallVector<int64_t> innerBlkShape = getBlockShape();

  // perm[i] is the dim with the i-th smallest stride (fastest varying first).
  SmallVector<int> perm =
      llvm::to_vector<4>(llvm::seq<int>(0, strides.size()));
  llvm::sort(perm, [&](int a, int b) { return strides[a] < strides[b]; });

  assert(strides[perm[0]] == 1 && "inner most dim must have stride 1");

  // Strides inside one block are contiguous along the permuted order.
  SmallVector<int64_t> innerBlkStride(innerBlkShape.size());
  innerBlkStride[perm[0]] = 1;
  for (size_t i = 1; i < perm.size(); ++i)
    innerBlkStride[perm[i]] =
        innerBlkStride[perm[i - 1]] * innerBlkShape[perm[i - 1]];

  // Recover the original matrix shape from the stride info and derive the
  // number of blocks per dimension. The outermost dim cannot be derived from
  // the strides, but it does not affect the stride computation below.
  SmallVector<int64_t> matrixShapeOrig(matrixShape.size());
  SmallVector<int64_t> BlkShapeOrig(matrixShape.size());
  for (size_t i = 0; i < perm.size() - 1; ++i) {
    matrixShapeOrig[perm[i]] = strides[perm[i + 1]] / strides[perm[i]];
    BlkShapeOrig[perm[i]] = matrixShapeOrig[perm[i]] / innerBlkShape[perm[i]];
  }

  int64_t innerBlkSize = 1;
  for (auto s : innerBlkShape)
    innerBlkSize *= s;

  SmallVector<int64_t> outerBlkStride(matrixShape.size());
  outerBlkStride[perm[0]] = innerBlkSize;
  for (size_t i = 0; i < perm.size() - 1; ++i) {
    outerBlkStride[perm[i + 1]] =
        outerBlkStride[perm[i]] * BlkShapeOrig[perm[i]];
  }

  // Combine the outer (block-level) and inner (element-level) strides.
  SmallVector<int64_t> blockedStrides;
  blockedStrides.append(outerBlkStride.begin(), outerBlkStride.end());
  blockedStrides.append(innerBlkStride.begin(), innerBlkStride.end());

  return blockedStrides;
}
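// Worked example (illustrative): a 64x32 row-major matrix (strides [32, 1])
// blocked into 8x16 tiles. perm = [1, 0]; innerBlkStride = [16, 1];
// matrixShapeOrig[1] = 32 / 1 = 32, so BlkShapeOrig[1] = 32 / 16 = 2;
// innerBlkSize = 8 * 16 = 128; outerBlkStride = [256, 128].
// The blocked strides are therefore [256, 128, 16, 1].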
Value MemDescType::getLinearOffsets(OpBuilder &builder, Location loc,
                                    ArrayRef<OpFoldResult> offsets) {
  SmallVector<int64_t> matrixShape(getShape().begin(), getShape().end());
  SmallVector<int64_t> blockShape = getBlockShape();
  SmallVector<int64_t> strides = getStrideShape();
  SmallVector<OpFoldResult> blockedOffsets;

  // If the block shape covers the whole matrix, no blocking of the offsets
  // is needed; drop the block-level strides instead.
  if (llvm::equal(blockShape, matrixShape)) {
    strides.erase(strides.begin(), strides.begin() + matrixShape.size());
  } else {
    assert(offsets.size() == blockShape.size() &&
           "offsets and blockShape must have the same size");
    // Split the offsets into block indices and intra-block offsets.
    SmallVector<OpFoldResult> divs, rems;
    for (auto [offset, block] : llvm::zip(offsets, blockShape)) {
      divs.push_back(div(offset, block));
      rems.push_back(rem(offset, block));
    }
    blockedOffsets.append(divs.begin(), divs.end());
    blockedOffsets.append(rems.begin(), rems.end());
    offsets = blockedOffsets;
  }

  // Linearize via dot product of the (blocked) offsets and strides.
  Value linearOffset = arith::ConstantIndexOp::create(builder, loc, 0);
  for (size_t i = 0; i < offsets.size(); ++i) {
    OpFoldResult mulResult = mul(offsets[i], strides[i]);
    Value mulVal = getValueOrCreateConstantIndexOp(builder, loc, mulResult);
    linearOffset = arith::AddIOp::create(builder, loc, mulVal, linearOffset);
  }

  return linearOffset;
}
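// Continuing the example above (illustrative): element (10, 20) of the 64x32
// matrix with 8x16 blocks becomes blocked offsets [1, 1, 2, 4], and the
// linear offset is 1*256 + 1*128 + 2*16 + 4*1 = 420.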
#include <mlir/Dialect/XeGPU/IR/XeGPUDialect.cpp.inc>
#define GET_ATTRDEF_CLASSES
#include <mlir/Dialect/XeGPU/IR/XeGPUAttrs.cpp.inc>
#define GET_TYPEDEF_CLASSES
#include <mlir/Dialect/XeGPU/IR/XeGPUTypes.cpp.inc>