17#include "llvm/Support/Debug.h"
19#define DEBUG_TYPE "xegpu"
25static std::string
makeString(T array,
bool breakline =
false) {
28 llvm::raw_string_ostream os(buf);
30 for (
size_t i = 1; i < array.size(); i++) {
31 os << array[i - 1] <<
", ";
35 os << array.back() <<
"]";
41 if (
auto ty = llvm::dyn_cast<ShapedType>(type))
51 auto kind = attr.getValue();
52 return kind == CachePolicy::CACHED || kind == CachePolicy::UNCACHED ||
53 kind == CachePolicy::STREAMING || kind == CachePolicy::READ_INVALIDATE;
59 auto kind = attr.getValue();
60 return kind == CachePolicy::CACHED || kind == CachePolicy::UNCACHED ||
61 kind == CachePolicy::WRITE_BACK || kind == CachePolicy::WRITE_THROUGH;
66 VectorType valueTy,
int64_t chunkSize,
69 auto maskVecTy = dyn_cast<VectorType>(maskTy);
70 auto offsetsVecTy = dyn_cast<VectorType>(offsetsTy);
73 return emitError() <<
"Expecting chunk size == 1 for scalar result";
74 if (maskVecTy || offsetsVecTy)
75 return emitError() <<
"Expecting scalar mask and offsets.";
76 else if (maskVecTy && offsetsVecTy)
77 return emitError() <<
"Expecting a vector type result.";
81 auto valueSize = valueTy.getNumElements();
83 if (!maskVecTy && !offsetsVecTy) {
84 if (valueSize != chunkSize)
85 return emitError() <<
"value elements must match chunk size "
93 return emitError() <<
"Expecting a vector type mask.";
94 int64_t maskSize = maskVecTy.getNumElements();
97 if ((valueTy.getRank() == 1) && (valueSize != chunkSize))
98 return emitError() <<
"value elements must match chunk size "
101 if (valueSize != maskSize)
103 <<
"Mask should match value except the chunk size dim.";
109 expectedMaskShape.pop_back();
110 if (expectedMaskShape != maskShape)
111 return emitError() <<
"Mask should match value except the chunk size dim.";
118 UnitAttr subgroup_block_io, DistributeLayoutAttr layout,
122 if (subgroup_block_io)
123 return emitError() <<
"subgroup_block_io "
124 "are only allowed when result is a VectorType.";
129 if (mdescTy.getRank() < 2)
130 return emitError() <<
"mem_desc must be 2D or greater.";
136 ArrayAttr strideAttr = mdescTy.getStrideAttr();
138 for (
Attribute attr : strideAttr.getValue()) {
139 strides.push_back(cast<IntegerAttr>(attr).getInt());
141 if (subgroup_block_io && layout) {
142 auto laneData = layout.getEffectiveLaneDataAsInt();
143 auto laneLayout = layout.getEffectiveLaneLayoutAsInt();
144 if (!laneData.empty()) {
145 bool isLaneDataContiguous =
146 std::all_of(laneData.begin(), std::prev(laneData.end()),
147 [](
int x) { return x == 1; });
148 if (!isLaneDataContiguous)
149 return emitError() <<
"With subgroup_block_io, accessed data must be "
150 "contiguous and coalesced.";
151 for (
size_t i = 0; i < laneData.size(); ++i) {
152 if (laneLayout[i] != blockShape[i])
153 return emitError() <<
"With subgroup_block_io, the block shape must "
154 "match the lane layout.";
155 if (laneLayout[i] != 1 && strides[i] != 1)
156 return emitError() <<
"With subgroup_block_io, the distributed "
157 "dimensions must be contiguous.";
162 if (layout && !layout.isDistributable(
164 return emitError() <<
"Value shape is not distributable with the layout";
166 if (dataShape.size() == 2) {
167 if (llvm::any_of(llvm::zip_equal(dataShape, mdescShape),
168 [](
auto p) {
return std::get<0>(p) > std::get<1>(p); }))
169 return emitError() <<
"data shape must not exceed mem_desc shape.";
173 if (subgroup_block_io && !blockShape.size())
174 return emitError() <<
"mem_desc must have block attribute when "
175 "subgroup_block_io is set.";
178 if (subgroup_block_io && mdescTy.isColMajor())
179 return emitError() <<
"mem_desc should be row major when "
180 "subgroup_block_io is set.";
192 [[maybe_unused]]
auto ty = source.getType();
193 assert(ty.hasStaticShape() &&
"expecting a memref with static shape");
195 build(builder, state, tdesc, source,
ValueRange({}) ,
206 assert((isa<IntegerType, MemRefType>(srcTy)) &&
207 "Source has to be either int or memref.");
221 if (
auto memrefTy = dyn_cast<MemRefType>(srcTy)) {
222 auto memrefShape = memrefTy.getShape();
223 auto [memrefStrides, _] = memrefTy.getStridesAndOffset();
228 if (staticShape == memrefShape && staticStrides == memrefStrides &&
229 dynamicShape.empty() && dynamicStrides.empty()) {
235 build(builder, state, tdesc, source, dynamicShape, dynamicStrides,
236 staticShapeAttr, staticStridesAttr);
239LogicalResult CreateNdDescOp::verify() {
241 bool invalidRank = rank != getMixedStrides().size();
242 bool invalidElemTy =
false;
248 auto srcMemorySpace = getSourceMemorySpace();
249 auto tdescMemorySpace =
static_cast<unsigned>(
getType().getMemorySpace());
250 if (srcMemorySpace != tdescMemorySpace)
252 <<
" Source: " << srcMemorySpace
253 <<
", TensorDesc: " << tdescMemorySpace;
257 if (
auto memrefTy = dyn_cast<MemRefType>(getSourceType()))
260 if (llvm::isa<IntegerType>(getSourceType())) {
263 return emitOpError(
"expecting strides and shape to be present for "
269 "Expecting the rank of shape, strides, and source (if source "
270 "is a memref) should match with each other.");
274 return emitOpError(
"Expecting the TensorDesc rank is not greater than the "
275 "ranks of shape, strides or the memref source.");
278 return emitOpError(
"TensorDesc should have the same element "
279 "type with the source if it is a memref.\n");
290 xegpu::CachePolicyAttr l1_hint,
291 xegpu::CachePolicyAttr l2_hint,
292 xegpu::CachePolicyAttr l3_hint,
293 xegpu::DistributeLayoutAttr layout) {
300 build(builder, state, tensorDesc, dynamicOffsets, staticOffsetsAttr, l1_hint,
301 l2_hint, l3_hint, layout);
304LogicalResult PrefetchNdOp::verify() {
305 auto tdescTy = getTensorDescType();
308 return emitOpError(
"invalid l1_hint: ") << getL1HintAttr();
311 return emitOpError(
"invalid l2_hint: ") << getL2HintAttr();
314 return emitOpError(
"invalid l3_hint: ") << getL3HintAttr();
316 int64_t tDescRank = tdescTy.getRank();
317 int64_t offsetSize = getMixedOffsets().size();
318 if (offsetSize != tDescRank)
320 "Mismatched ranks between offsets and tensor descriptor");
322 if (
auto layout = getAnchorLayout()) {
323 if (!layout.isDistributable(
getShapeOf(tdescTy)))
325 "TensorDesc shape is not distributable with the layout");
338 xegpu::CachePolicyAttr l1_hint,
339 xegpu::CachePolicyAttr l2_hint,
340 xegpu::CachePolicyAttr l3_hint,
341 xegpu::DistributeLayoutAttr layout) {
348 build(builder, state, retType, tensorDesc, dynamicOffsets, staticOffsetsAttr,
349 packed, transpose, l1_hint, l2_hint, l3_hint,
353LogicalResult LoadNdOp::verify() {
354 auto tdescTy = getTensorDescType();
357 if (tdescTy.getRank() > 2)
358 return emitOpError(
"Expects a 1D or 2D TensorDesc.\n");
361 return emitOpError(
"Invalid result, it should be a VectorType.\n");
364 return emitOpError(
"invalid l1_hint: ") << getL1HintAttr();
367 return emitOpError(
"invalid l2_hint: ") << getL2HintAttr();
370 return emitOpError(
"invalid l3_hint: ") << getL3HintAttr();
372 int tdescElems = tdescTy.getNumElements() * tdescTy.getArrayLength();
373 int valueElems = valueTy.getNumElements();
378 if (valueElems < tdescElems && valueTy.getRank() == 1) {
380 if (tdescTy.getLayoutAttr())
382 <<
"TensorDesc doesn't need LayoutAttr for SIMT code";
387 if (tdescElems % valueElems)
390 <<
" is not a valid distribution for tensor descriptor "
400 if (getTranspose()) {
401 auto trans = getTranspose().value();
403 if (llvm::all_of(trans, [&](
size_t s) {
return s < tdescShape.size(); }))
410 if (tdescTy.getRank() == 2) {
412 auto vnni_factor = valueShape.back();
413 tdescShape[axis] /= vnni_factor;
414 tdescShape.push_back(vnni_factor);
417 <<
"Invalid Packed Attr. It is ignored (available for 2D "
427 auto array_len = tdescTy.getArrayLength();
430 if (array_len > 1 && !tdescShape.empty()) {
431 stacked2DShape[0] *= array_len;
432 threeDShape.insert(threeDShape.begin(), array_len);
435 if (valueShape != stacked2DShape && valueShape != threeDShape)
437 <<
" is not consistent with tensor descriptor "
440 int64_t tDescRank = tdescTy.getRank();
441 int64_t offsetSize = getMixedOffsets().size();
442 if (offsetSize != tDescRank)
444 "Mismatched ranks between offsets and tensor descriptor");
446 if (
auto layout = getAnchorLayout()) {
447 if (!layout.isDistributable(
getShapeOf(tdescTy)))
449 "TensorDesc shape is not distributable with the layout");
461 xegpu::CachePolicyAttr l1_hint,
462 xegpu::CachePolicyAttr l2_hint,
463 xegpu::CachePolicyAttr l3_hint,
464 xegpu::DistributeLayoutAttr layout) {
471 build(builder, state, value, tensorDesc, dynamicOffsets, staticOffsetsAttr,
472 l1_hint, l2_hint, l3_hint, layout);
475LogicalResult StoreNdOp::verify() {
476 auto dstTy = getTensorDescType();
479 if (dstTy.getRank() > 2)
480 return emitOpError(
"Expects a 1D or 2D TensorDesc.\n");
483 return emitOpError(
"Expecting a VectorType result.\n");
486 return emitOpError(
"invalid l1_hint: ") << getL1HintAttr();
489 return emitOpError(
"invalid l2_hint: ") << getL2HintAttr();
492 return emitOpError(
"invalid l3_hint: ") << getL3HintAttr();
494 auto array_len = dstTy.getArrayLength();
496 return emitOpError(
"array length is not supported by store_nd.\n");
498 auto tdescElems = dstTy.getNumElements();
499 auto valueElems = valTy.getNumElements();
504 if (valTy.getRank() == 1 && valueElems < tdescElems) {
506 if (dstTy.getLayoutAttr())
508 <<
"TensorDesc doesn't need LayoutAttr for SIMT code";
510 if (tdescElems % valueElems)
513 <<
" is not a valid distribution for tensor descriptor " << dstTy;
521 if (tdescShape != valueShape)
523 <<
" is not consistent with tensor descriptor "
526 int64_t tDescRank = dstTy.getRank();
527 int64_t offsetSize = getMixedOffsets().size();
528 if (offsetSize != tDescRank)
530 "Mismatched ranks between offsets and tensor descriptor");
532 if (
auto layout = getAnchorLayout()) {
533 if (!layout.isDistributable(tdescShape))
535 "TensorDesc shape is not distributable with the layout");
544LogicalResult PrefetchOp::verify() {
546 return emitOpError(
"invalid l1_hint: ") << getL1HintAttr();
549 return emitOpError(
"invalid l2_hint: ") << getL2HintAttr();
552 return emitOpError(
"invalid l3_hint: ") << getL3HintAttr();
554 auto srcTy = getSourceType();
555 if (srcTy.
isInteger() && !getOffsetAlignByteAttr())
556 return emitOpError(
"offset_align_byte is required with integer source.");
558 if (getOffsetAlignByteAttr() && !srcTy.
isInteger())
559 return emitOpError(
"offset_align_byte only allowed with integer source.");
561 if (
auto layout = getAnchorLayout()) {
563 auto offsetsTy = getOffsets().getType();
564 if (llvm::isa<VectorType>(offsetsTy) &&
565 !layout.isDistributable(
getShapeOf(offsetsTy)))
566 return emitOpError(
"offset shape is not distributable with the layout");
575LogicalResult LoadGatherOp::verify() {
576 auto maskTy = getMaskType();
580 return emitOpError(
"invalid l1_hint: ") << getL1HintAttr();
583 return emitOpError(
"invalid l2_hint: ") << getL2HintAttr();
586 return emitOpError(
"invalid l3_hint: ") << getL3HintAttr();
588 auto srcTy = getSourceType();
589 uint64_t chunkSize =
static_cast<int64_t>(getChunkSize().value_or(1));
590 auto memTy = dyn_cast<MemRefType>(srcTy);
593 return emitError() <<
"Value should have the same element type as MemRef.";
595 if (
auto layout = getAnchorLayout()) {
596 if (!layout.isDistributable(
getShapeOf(valueTy)))
597 return emitOpError(
"Value shape is not distributable with the layout");
600 auto offsetsTy = getOffsets().getType();
608 IntegerAttr chunk_size, xegpu::CachePolicyAttr l1_hint,
609 xegpu::CachePolicyAttr l2_hint,
610 xegpu::CachePolicyAttr l3_hint) {
611 auto loc = source.
getLoc();
613 auto type = VectorType::get(size, builder.
getIndexType());
615 auto offset = vector::FromElementsOp::create(builder, loc, type, values);
617 build(builder, state, valueType, source, offset, mask, chunk_size, l1_hint,
618 l2_hint, l3_hint,
nullptr);
624 IntegerAttr chunk_size, xegpu::CachePolicyAttr l1_hint,
625 xegpu::CachePolicyAttr l2_hint,
626 xegpu::CachePolicyAttr l3_hint,
627 DistributeLayoutAttr layout) {
628 auto loc = source.
getLoc();
630 auto type = VectorType::get(size, builder.
getIndexType());
632 auto offset = vector::FromElementsOp::create(builder, loc, type, values);
634 build(builder, state, valueType, source, offset, mask, chunk_size, l1_hint,
635 l2_hint, l3_hint, layout);
641LogicalResult StoreScatterOp::verify() {
642 auto maskTy = getMaskType();
646 return emitOpError(
"invalid l1_hint: ") << getL1HintAttr();
649 return emitOpError(
"invalid l2_hint: ") << getL2HintAttr();
652 return emitOpError(
"invalid l3_hint: ") << getL3HintAttr();
654 auto destTy = getDestType();
655 uint64_t chunkSize =
static_cast<int64_t>(getChunkSize().value_or(1));
656 auto memTy = dyn_cast<MemRefType>(destTy);
659 return emitError() <<
"Value should have the same element type as MemRef.";
661 if (
auto layout = getAnchorLayout()) {
662 if (!layout.isDistributable(
getShapeOf(valueTy)))
663 return emitOpError(
"Value shape is not distributable with the layout");
666 auto offsetsTy = getOffsets().getType();
674 IntegerAttr chunk_size,
675 xegpu::CachePolicyAttr l1_hint,
676 xegpu::CachePolicyAttr l2_hint,
677 xegpu::CachePolicyAttr l3_hint) {
680 auto type = VectorType::get(size, builder.
getIndexType());
682 auto offset = vector::FromElementsOp::create(builder, loc, type, values);
685 build(builder, state, value, dest, offset, mask, chunk_size, l1_hint, l2_hint,
689void StoreScatterOp::build(
692 xegpu::CachePolicyAttr l1_hint, xegpu::CachePolicyAttr l2_hint,
693 xegpu::CachePolicyAttr l3_hint, DistributeLayoutAttr layout) {
696 auto type = VectorType::get(size, builder.
getIndexType());
698 auto offset = vector::FromElementsOp::create(builder, loc, type, values);
701 build(builder, state, value, dest, offset, mask, chunk_size, l1_hint, l2_hint,
712 std::optional<DistributeLayoutAttr> layout,
714 if (layout && !layout->isDistributable(
717 <<
" shape is not distributable with the layout";
727 auto aRank = aShape.size();
728 auto bRank = bShape.size();
729 auto resRank = resShape.size();
730 if (aRank == 1 && bRank == 1 && resRank == 1)
735 return op->
emitOpError(
"A operand must be a 2D vector.");
736 if (bRank < 2 || bRank > 3)
737 return op->
emitOpError(
"B operand must be a 2D or 3D vector.");
739 return op->
emitOpError(
"Result must be a 2D vector.");
742 int64_t bK = bRank == 3 ? bShape[0] * bShape[2] : bShape[0];
746 return op->
emitOpError(
"K-dimension mismatch: A has K=")
747 << aShape[1] <<
" but B has K=" << bK <<
".";
750 if (aShape[0] != resShape[0])
751 return op->
emitOpError(
"M-dimension mismatch: A has M=")
752 << aShape[0] <<
" but result has M=" << resShape[0] <<
".";
755 if (bShape[1] != resShape[1])
756 return op->
emitOpError(
"N-dimension mismatch: B has N=")
757 << bShape[1] <<
" but result has N=" << resShape[1] <<
".";
765 if (accType != resultType)
766 return op->
emitOpError(
"Accumulator type must match result type.");
773LogicalResult DpasOp::verify() {
774 auto lhsShape = getLhsType().getShape();
775 auto rhsShape = getRhsType().getShape();
776 auto resShape = getResultType().getShape();
798LogicalResult ConvertLayoutOp::verify() {
799 auto srcLayout = getInputLayout();
800 auto resLayout = getTargetLayout();
808 if ((!srcLayout.isForWorkgroup() || !resLayout.isForWorkgroup()) &&
809 (!srcLayout.isForSubgroup() || !resLayout.isForSubgroup()))
810 return emitOpError(
"expected input layout and target layout be WgLayout or "
811 "SgLayout at the same time.");
813 Type srcType = getSource().getType();
814 if (llvm::isa<VectorType>(srcType)) {
816 if (!srcLayout.isDistributable(
shape))
818 "invalid input layout, data cannot be evenly distributed.");
820 if (!resLayout.isDistributable(
shape))
822 "invalid target layout, data cannot be evenly distributed.");
824 return mlir::success();
833 DistributeLayoutAttr layout) {
840 build(builder, state, res, memDesc, dynamicOffsets, staticOffsetsAttr,
844LogicalResult LoadMatrixOp::verify() {
846 auto resTy = dyn_cast<VectorType>(getRes().
getType());
847 UnitAttr subgroup_block_io = getSubgroupBlockIoAttr();
848 MemDescType mdescTy = getMemDesc().getType();
851 getLayoutAttr(), [&]() {
return emitError(); });
860 DistributeLayoutAttr layout) {
865 build(builder, state, data, memDesc, dynamicOffsets, staticOffsetsAttr,
869LogicalResult StoreMatrixOp::verify() {
871 auto dataTy = dyn_cast<VectorType>(getData().
getType());
872 UnitAttr subgroup_block_io = getSubgroupBlockIoAttr();
873 MemDescType mdescTy = getMemDesc().getType();
875 getLayoutAttr(), [&]() {
return emitError(); });
882LogicalResult TruncfOp::verify() {
883 auto sourceVecType = dyn_cast<VectorType>(getSource().
getType());
884 auto resultVecType = dyn_cast<VectorType>(getResult().
getType());
886 if (sourceVecType.getElementTypeBitWidth() <=
887 resultVecType.getElementTypeBitWidth())
888 return emitOpError(
"input type must be wider than result type.");
897LogicalResult DpasMxOp::verify() {
898 auto aShape = getAType().getShape();
899 auto bShape = getBType().getShape();
900 auto resShape = getResultType().getShape();
922 auto scaleAVecType = dyn_cast<VectorType>(getScaleAType());
925 auto scaleAShape = scaleAVecType.getShape();
927 if (scaleAVecType.getRank() != 2)
928 return emitOpError(
"Scale A must be a 2D vector when not a scalar.");
932 scaleAShape,
"ScaleA")))
936 if (scaleAShape[0] != aShape[0])
938 << scaleAShape[0] <<
"] must match A M dimension [" << aShape[0]
945 auto scaleBVecType = dyn_cast<VectorType>(getScaleBType());
948 auto scaleBShape = scaleBVecType.getShape();
950 if (scaleBVecType.getRank() != 2)
951 return emitOpError(
"Scale B must be a 2D vector when not a scalar.");
955 scaleBShape,
"ScaleB")))
959 if (scaleBShape[1] != bShape[1])
961 << scaleBShape[1] <<
"] must match B N dimension [" << bShape[1]
968 if (getScaleA() && getScaleB()) {
969 auto scaleAVecType = dyn_cast<VectorType>(getScaleAType());
970 auto scaleBVecType = dyn_cast<VectorType>(getScaleBType());
972 if (scaleAVecType && scaleBVecType) {
973 auto scaleAShape = scaleAVecType.getShape();
974 auto scaleBShape = scaleBVecType.getShape();
978 if (scaleAShape[1] != scaleBShape[0])
979 return emitOpError(
"Scale K dimension mismatch: scale_a has K=")
980 << scaleAShape[1] <<
" but scale_b has K=" << scaleBShape[0]
989#include <mlir/Dialect/XeGPU/IR/XeGPUAttrInterface.cpp.inc>
991#include <mlir/Dialect/XeGPU/IR/XeGPUEnums.cpp.inc>
992#define GET_OP_CLASSES
993#include <mlir/Dialect/XeGPU/IR/XeGPU.cpp.inc>
p<< " : "<< getMemRefType()<< ", "<< getType();}static LogicalResult verifyVectorMemoryOp(Operation *op, MemRefType memrefType, VectorType vectorType) { if(memrefType.getElementType() !=vectorType.getElementType()) return op-> emitOpError("requires memref and vector types of the same elemental type")
Given a list of lists of parsed operands, populates uniqueOperands with unique operands.
static Type getElementType(Type type)
Determine the element type of type.
static Type getValueType(Attribute attr)
static ArrayRef< int64_t > getShape(Type type)
Returns the shape of the given type.
static SmallVector< int64_t > getShapeOf(Type type)
static LogicalResult verifyDpasAccumulator(Operation *op, Type accType, Type resultType)
LogicalResult IsValidMatrixOpParams(VectorType dataTy, MemDescType mdescTy, UnitAttr subgroup_block_io, DistributeLayoutAttr layout, function_ref< InFlightDiagnostic()> emitError)
static std::string makeString(T array, bool breakline=false)
static bool isWriteHintOrNone(const CachePolicyAttr &attr)
static bool isReadHintOrNone(const CachePolicyAttr &attr)
static LogicalResult isValidGatherScatterBufferParams(Type offsetsTy, Type maskTy, VectorType valueTy, int64_t chunkSize, function_ref< InFlightDiagnostic()> emitError)
static LogicalResult verifyDpasDimensions(Operation *op, ArrayRef< int64_t > aShape, ArrayRef< int64_t > bShape, ArrayRef< int64_t > resShape)
static LogicalResult verifyLayoutDistributable(Operation *op, std::optional< DistributeLayoutAttr > layout, ArrayRef< int64_t > shape, StringRef operandName)
Attributes are known-constant values of operations.
DenseI64ArrayAttr getDenseI64ArrayAttr(ArrayRef< int64_t > values)
This class represents a diagnostic that is inflight and set to be reported.
This class helps build Operations.
Operation is the basic unit of execution within MLIR.
InFlightDiagnostic emitOpError(const Twine &message={})
Emit an error with the op name prefixed, like "'dim' op " which is convenient for verifiers.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable component.
bool isInteger() const
Return true if this is an integer type (with the specified width).
This class represents an instance of an SSA value in the MLIR system, representing a computable value.
Type getType() const
Return the type of this value.
Location getLoc() const
Return the location of this value.
SmallVector< OpFoldResult > getMixedSizes(OpBuilder &builder, Location loc, Value value)
Return the dimensions of the given memref value.
Include the generated interface declarations.
InFlightDiagnostic emitWarning(Location loc)
Utility method to emit a warning message using this location.
detail::DenseArrayAttrImpl< int64_t > DenseI64ArrayAttr
Type getType(OpFoldResult ofr)
Returns the int type of the integer in ofr.
SmallVector< T > applyPermutation(ArrayRef< T > input, ArrayRef< int64_t > permutation)
InFlightDiagnostic emitError(Location loc)
Utility method to emit an error message using this location.
std::conditional_t< std::is_same_v< Ty, mlir::Type >, mlir::Value, detail::TypedValue< Ty > > TypedValue
If Ty is mlir::Type this will select Value instead of having a wrapper around it.
void dispatchIndexOpFoldResults(ArrayRef< OpFoldResult > ofrs, SmallVectorImpl< Value > &dynamicVec, SmallVectorImpl< int64_t > &staticVec)
Helper function to dispatch multiple OpFoldResults according to the behavior of dispatchIndexOpFoldResult.
Value getValueOrCreateConstantIndexOp(OpBuilder &b, Location loc, OpFoldResult ofr)
Converts an OpFoldResult to a Value.
llvm::function_ref< Fn > function_ref
This represents an operation in an abstracted form, suitable for use with the builder APIs.