25#include "llvm/ADT/STLExtras.h"
26#include "llvm/ADT/SmallVector.h"
53struct RemoveEmptyKernelEnvironment
55 using OpRewritePattern<acc::KernelEnvironmentOp>::OpRewritePattern;
57 LogicalResult matchAndRewrite(acc::KernelEnvironmentOp op,
58 PatternRewriter &rewriter)
const override {
59 assert(op->getNumRegions() == 1 &&
"expected op to have one region");
61 Block &block = op.getRegion().front();
70 if (
auto deviceTypeAttr = op.getWaitOperandsDeviceTypeAttr()) {
71 for (
auto attr : deviceTypeAttr) {
72 if (
auto dtAttr = mlir::dyn_cast<acc::DeviceTypeAttr>(attr)) {
73 if (dtAttr.getValue() != mlir::acc::DeviceType::None)
80 if (
auto hasDevnumAttr = op.getHasWaitDevnumAttr()) {
81 for (
auto attr : hasDevnumAttr) {
82 if (
auto boolAttr = mlir::dyn_cast<mlir::BoolAttr>(attr)) {
83 if (boolAttr.getValue())
90 if (
auto segmentsAttr = op.getWaitOperandsSegmentsAttr()) {
91 if (segmentsAttr.size() > 1)
97 if (!op.getWaitOperands().empty() || op.getWaitOnlyAttr())
110template <
typename EffectTy>
115 for (
unsigned i = 0, e = operand.
size(); i < e; ++i)
116 effects.emplace_back(EffectTy::get(), &operand[i]);
119template <
typename EffectTy>
124 effects.emplace_back(EffectTy::get(), mlir::cast<mlir::OpResult>(
result));
127static int64_t gpuProcessorIndex(gpu::Processor p) {
129 case gpu::Processor::Sequential:
131 case gpu::Processor::ThreadX:
133 case gpu::Processor::ThreadY:
135 case gpu::Processor::ThreadZ:
137 case gpu::Processor::BlockX:
139 case gpu::Processor::BlockY:
141 case gpu::Processor::BlockZ:
144 llvm_unreachable(
"unhandled gpu::Processor");
147static gpu::Processor indexToGpuProcessor(
int64_t idx) {
150 return gpu::Processor::Sequential;
152 return gpu::Processor::ThreadX;
154 return gpu::Processor::ThreadY;
156 return gpu::Processor::ThreadZ;
158 return gpu::Processor::BlockX;
160 return gpu::Processor::BlockY;
162 return gpu::Processor::BlockZ;
164 return gpu::Processor::Sequential;
169 return GPUParallelDimAttr::get(
170 context, IntegerAttr::get(IndexType::get(context), dimInt));
173static GPUParallelDimAttr processorParDim(
MLIRContext *context,
174 gpu::Processor proc) {
175 return GPUParallelDimAttr::get(
177 IntegerAttr::get(IndexType::get(context), gpuProcessorIndex(proc)));
180static ParseResult parseProcessorValue(
AsmParser &parser,
181 GPUParallelDimAttr &dim) {
186 auto maybeProcessor = gpu::symbolizeProcessor(keyword);
189 <<
"expected one of ::mlir::gpu::Processor enum names";
190 dim = intToParDim(parser.
getContext(), gpuProcessorIndex(*maybeProcessor));
194static void printProcessorValue(
AsmPrinter &printer,
195 const GPUParallelDimAttr &attr) {
196 gpu::Processor processor = indexToGpuProcessor(attr.getValue().getInt());
197 printer << gpu::stringifyProcessor(processor);
206void KernelEnvironmentOp::getSuccessorRegions(
216void KernelEnvironmentOp::getCanonicalizationPatterns(
218 results.
add<RemoveEmptyKernelEnvironment>(context);
221template <
typename ComputeConstructT>
223KernelEnvironmentOp::createAndPopulate(ComputeConstructT computeConstruct,
225 auto kernelEnvironment = KernelEnvironmentOp::create(
226 builder, computeConstruct->getLoc(),
227 computeConstruct.getDataClauseOperands(),
228 computeConstruct.getAsyncOperands(),
229 computeConstruct.getAsyncOperandsDeviceTypeAttr(),
230 computeConstruct.getAsyncOnlyAttr(), computeConstruct.getWaitOperands(),
231 computeConstruct.getWaitOperandsSegmentsAttr(),
232 computeConstruct.getWaitOperandsDeviceTypeAttr(),
233 computeConstruct.getHasWaitDevnumAttr(),
234 computeConstruct.getWaitOnlyAttr());
235 Block &block = kernelEnvironment.getRegion().emplaceBlock();
237 return kernelEnvironment;
240template KernelEnvironmentOp
241KernelEnvironmentOp::createAndPopulate<ParallelOp>(ParallelOp,
OpBuilder &);
242template KernelEnvironmentOp
243KernelEnvironmentOp::createAndPopulate<KernelsOp>(KernelsOp,
OpBuilder &);
244template KernelEnvironmentOp
245KernelEnvironmentOp::createAndPopulate<SerialOp>(SerialOp,
OpBuilder &);
251LogicalResult FirstprivateMapInitialOp::verify() {
253 return emitError(
"data clause associated with firstprivate operation must "
256 return emitError(
"must have var operand");
257 if (!mlir::isa<mlir::acc::PointerLikeType>(
getVar().
getType()) &&
259 return emitError(
"var must be mappable or pointer-like");
260 if (mlir::isa<mlir::acc::PointerLikeType>(
getVar().
getType()) &&
262 return emitError(
"varType must capture the element type of var");
263 if (getModifiers() != acc::DataClauseModifier::none)
264 return emitError(
"no data clause modifiers are allowed");
268void FirstprivateMapInitialOp::getEffects(
281void ReductionInitOp::getSuccessorRegions(
287void ReductionInitOp::getRegionInvocationBounds(
290 invocationBounds.emplace_back(1, 1);
297LogicalResult ReductionInitOp::verify() {
299 if (
auto yieldOp = dyn_cast<acc::YieldOp>(block.
getTerminator())) {
300 if (yieldOp.getNumOperands() != 1)
302 "region must yield exactly one value (private storage)");
304 return emitOpError(
"yielded value type must match var type");
313void ReductionCombineRegionOp::getSuccessorRegions(
319void ReductionCombineRegionOp::getRegionInvocationBounds(
322 invocationBounds.emplace_back(1, 1);
326ReductionCombineRegionOp::getSuccessorInputs(
RegionSuccessor successor) {
330LogicalResult ReductionCombineRegionOp::verify() {
332 if (
auto yieldOp = dyn_cast<acc::YieldOp>(block.
getTerminator())) {
333 if (yieldOp.getNumOperands() != 0)
334 return emitOpError(
"region must be terminated by acc.yield with no "
344void ReductionCombineOp::getEffects(
360 GPUParallelDimAttr parDim) {
361 for (
auto launchArg : op.getLaunchArgs()) {
362 auto parOp = launchArg.getDefiningOp<ParWidthOp>();
365 auto launchArgDim = cast<GPUParallelDimAttr>(parOp.getParDim());
366 if (launchArgDim == parDim)
372std::optional<Value> ComputeRegionOp::getLaunchArg(GPUParallelDimAttr parDim) {
374 return parWidthOp.getResult();
379ComputeRegionOp::getKnownLaunchArg(GPUParallelDimAttr parDim) {
381 if (parWidthOp.getLaunchArg())
382 return parWidthOp.getLaunchArg();
386std::optional<uint64_t>
387ComputeRegionOp::getKnownConstantLaunchArg(GPUParallelDimAttr parDim) {
388 auto knownParWidth = getKnownLaunchArg(parDim);
389 if (knownParWidth.has_value())
395 getInputArgsMutable().append(value);
396 return getBody()->addArgument(value.
getType(), getLoc());
399bool ComputeRegionOp::isEffectivelySerial() {
402 if (getLaunchArg(GPUParallelDimAttr::seqDim(ctx)))
405 auto checkDim = [&](GPUParallelDimAttr dim) ->
bool {
406 auto val = getKnownConstantLaunchArg(dim);
407 return val && *val == 1;
410 return checkDim(GPUParallelDimAttr::threadXDim(ctx)) &&
411 checkDim(GPUParallelDimAttr::threadYDim(ctx)) &&
412 checkDim(GPUParallelDimAttr::threadZDim(ctx)) &&
413 checkDim(GPUParallelDimAttr::blockXDim(ctx)) &&
414 checkDim(GPUParallelDimAttr::blockYDim(ctx)) &&
415 checkDim(GPUParallelDimAttr::blockZDim(ctx));
418BlockArgument ComputeRegionOp::parDimToWidth(GPUParallelDimAttr parDim) {
419 for (
auto [pos, launchArg] : llvm::enumerate(getLaunchArgs())) {
420 auto parOp = launchArg.getDefiningOp<ParWidthOp>();
422 auto launchArgDim = cast<GPUParallelDimAttr>(parOp.getParDim());
423 if (launchArgDim == parDim) {
424 assert(pos < getRegion().front().getNumArguments() &&
425 "launch arg position out of range");
426 return getRegion().front().getArgument(pos);
429 llvm_unreachable(
"attempting to get unspecified parDim");
434 for (
auto launchArg : getLaunchArgs()) {
435 auto parOp = launchArg.getDefiningOp<ParWidthOp>();
436 auto launchArgDim = cast<GPUParallelDimAttr>(parOp.getParDim());
437 int64_t dimInt = launchArgDim.getValue().getInt();
438 parDims.push_back(intToParDim(
getContext(), dimInt));
445 unsigned numLaunchArgs = getLaunchArgs().size();
446 assert(argNumber < (numLaunchArgs + getInputArgs().size()) &&
447 "invalid block argument");
448 if (argNumber < numLaunchArgs)
449 return getLaunchArgs()[argNumber];
450 return getInputArgs()[argNumber - numLaunchArgs];
453BlockArgument ComputeRegionOp::gpuParWidth(gpu::Processor processor) {
454 return parDimToWidth(GPUParallelDimAttr::get(
getContext(), processor));
457LogicalResult ComputeRegionOp::verify() {
458 for (
auto op : getLaunchArgs())
459 if (!op.getDefiningOp<acc::ParWidthOp>())
461 "launch arguments must be results of acc.par_width operations");
463 unsigned expectedBlockArgs = getLaunchArgs().size() + getInputArgs().size();
464 unsigned actualBlockArgs = getRegion().front().getNumArguments();
465 if (expectedBlockArgs != actualBlockArgs)
467 << expectedBlockArgs <<
" block arguments (launch + input), got "
474 ValueRange regionArgs = getBody()->getArguments();
478 assert(regionArgs.size() == (launchArgs.size() + inputArgs.size()) &&
479 "region args mismatch");
482 p <<
" stream(" << getStream() <<
" : " << getStream().getType() <<
")";
485 if (!launchArgs.empty()) {
487 for (
size_t j = 0;
j < launchArgs.size(); ++
j, ++i) {
488 p << regionArgs[i] <<
" = " << launchArgs[
j];
489 if (
j < launchArgs.size() - 1)
494 if (!inputArgs.empty()) {
496 for (
size_t j = 0;
j < inputArgs.size(); ++
j, ++i) {
497 p << regionArgs[i] <<
" = " << inputArgs[
j];
498 if (
j < inputArgs.size() - 1)
502 for (
size_t j = 0;
j < inputArgs.size(); ++
j) {
503 p << inputArgs[
j].getType();
504 if (
j < inputArgs.size() - 1)
513 getOperandSegmentSizeAttr());
516ParseResult ComputeRegionOp::parse(
OpAsmParser &parser,
527 bool hasStream =
false;
540 for (
size_t i = 0; i < regionArgs.size(); ++i)
541 types.push_back(indexType);
554 for (
auto [iterArg, type] : llvm::zip_equal(regionArgs, types))
561 const size_t numLaunchOperands = launchOperands.size();
562 const size_t numInputOperands = inputOperands.size();
563 assert(numLaunchOperands + numInputOperands == regionArgs.size() &&
564 "compute region args mismatch");
567 ComputeRegionOp::getOperandSegmentSizeAttr(),
569 static_cast<int32_t>(numInputOperands),
570 hasStream ? 1 : 0}));
572 for (
size_t i = 0; i < numLaunchOperands; ++i) {
577 for (
size_t i = numLaunchOperands; i < regionArgs.size(); ++i) {
578 if (parser.
resolveOperand(inputOperands[i - numLaunchOperands], types[i],
598GPUParallelDimAttr GPUParallelDimAttr::get(
MLIRContext *context,
599 gpu::Processor proc) {
600 return processorParDim(context, proc);
603GPUParallelDimAttr GPUParallelDimAttr::seqDim(
MLIRContext *context) {
604 return processorParDim(context, gpu::Processor::Sequential);
607GPUParallelDimAttr GPUParallelDimAttr::threadXDim(
MLIRContext *context) {
608 return processorParDim(context, gpu::Processor::ThreadX);
611GPUParallelDimAttr GPUParallelDimAttr::threadYDim(
MLIRContext *context) {
612 return processorParDim(context, gpu::Processor::ThreadY);
615GPUParallelDimAttr GPUParallelDimAttr::threadZDim(
MLIRContext *context) {
616 return processorParDim(context, gpu::Processor::ThreadZ);
619GPUParallelDimAttr GPUParallelDimAttr::blockXDim(
MLIRContext *context) {
620 return processorParDim(context, gpu::Processor::BlockX);
623GPUParallelDimAttr GPUParallelDimAttr::blockYDim(
MLIRContext *context) {
624 return processorParDim(context, gpu::Processor::BlockY);
627GPUParallelDimAttr GPUParallelDimAttr::blockZDim(
MLIRContext *context) {
628 return processorParDim(context, gpu::Processor::BlockZ);
632 GPUParallelDimAttr dim;
633 if (parser.
parseLess() || parseProcessorValue(parser, dim) ||
636 "expected format `<` processor_name `>`");
642void GPUParallelDimAttr::print(
AsmPrinter &printer)
const {
644 printProcessorValue(printer, *
this);
648GPUParallelDimAttr GPUParallelDimAttr::threadDim(
MLIRContext *context,
650 assert(
index <= 2 &&
"thread dimension index must be 0, 1, or 2");
653 return threadXDim(context);
655 return threadYDim(context);
657 return threadZDim(context);
659 llvm_unreachable(
"validated thread dimension index");
662GPUParallelDimAttr GPUParallelDimAttr::blockDim(
MLIRContext *context,
664 assert(
index <= 2 &&
"block dimension index must be 0, 1, or 2");
667 return blockXDim(context);
669 return blockYDim(context);
671 return blockZDim(context);
673 llvm_unreachable(
"validated block dimension index");
676gpu::Processor GPUParallelDimAttr::getProcessor()
const {
677 return indexToGpuProcessor(getValue().getInt());
680int GPUParallelDimAttr::getOrder()
const {
681 return gpuProcessorIndex(getProcessor());
684GPUParallelDimAttr GPUParallelDimAttr::getOneHigher()
const {
685 int order = getOrder();
691GPUParallelDimAttr GPUParallelDimAttr::getOneLower()
const {
692 int order = getOrder();
698bool GPUParallelDimAttr::isSeq()
const {
699 return getProcessor() == gpu::Processor::Sequential;
701bool GPUParallelDimAttr::isThreadX()
const {
702 return getProcessor() == gpu::Processor::ThreadX;
704bool GPUParallelDimAttr::isThreadY()
const {
705 return getProcessor() == gpu::Processor::ThreadY;
707bool GPUParallelDimAttr::isThreadZ()
const {
708 return getProcessor() == gpu::Processor::ThreadZ;
710bool GPUParallelDimAttr::isBlockX()
const {
711 return getProcessor() == gpu::Processor::BlockX;
713bool GPUParallelDimAttr::isBlockY()
const {
714 return getProcessor() == gpu::Processor::BlockY;
716bool GPUParallelDimAttr::isBlockZ()
const {
717 return getProcessor() == gpu::Processor::BlockZ;
719bool GPUParallelDimAttr::isAnyThread()
const {
720 return isThreadX() || isThreadY() || isThreadZ();
722bool GPUParallelDimAttr::isAnyBlock()
const {
723 return isBlockX() || isBlockY() || isBlockZ();
730GPUParallelDimsAttr GPUParallelDimsAttr::seq(
MLIRContext *ctx) {
731 return GPUParallelDimsAttr::get(ctx, {GPUParallelDimAttr::seqDim(ctx)});
734bool GPUParallelDimsAttr::isSeq()
const {
735 assert(!getArray().empty() &&
"no par_dims found");
736 if (getArray().size() == 1) {
737 auto parDim = dyn_cast<GPUParallelDimAttr>(getArray()[0]);
738 assert(parDim &&
"expected GPUParallelDimAttr");
739 return parDim.isSeq();
744bool GPUParallelDimsAttr::isParallel()
const {
return !isSeq(); }
746bool GPUParallelDimsAttr::isMultiDim()
const {
return getArray().size() > 1; }
748bool GPUParallelDimsAttr::hasAnyBlockLevel()
const {
750 getArray(), [](
const GPUParallelDimAttr &p) {
return p.isAnyBlock(); });
753bool GPUParallelDimsAttr::hasOnlyBlockLevel()
const {
754 return !getArray().empty() &&
755 llvm::all_of(getArray(), [](
const GPUParallelDimAttr &p) {
756 return p.isAnyBlock();
760bool GPUParallelDimsAttr::hasOnlyThreadYLevel()
const {
761 return !getArray().empty() &&
762 llvm::all_of(getArray(), [](
const GPUParallelDimAttr &p) {
763 return p.isThreadY();
767bool GPUParallelDimsAttr::hasOnlyThreadXLevel()
const {
768 return !getArray().empty() &&
769 llvm::all_of(getArray(), [](
const GPUParallelDimAttr &p) {
770 return p.isThreadX();
777 auto parseParDim = [&]() -> ParseResult {
778 GPUParallelDimAttr dim;
779 if (parseProcessorValue(parser, dim))
781 parDims.push_back(dim);
785 "list of OpenACC GPU parallel dimensions"))
787 return GPUParallelDimsAttr::get(parser.
getContext(), parDims);
790void GPUParallelDimsAttr::print(
AsmPrinter &printer)
const {
792 llvm::interleaveComma(getArray(), printer,
793 [&printer](
const GPUParallelDimAttr &p) {
794 printProcessorValue(printer, p);
p<< " : "<< getMemRefType()<< ", "<< getType();}static LogicalResult verifyVectorMemoryOp(Operation *op, MemRefType memrefType, VectorType vectorType) { if(memrefType.getElementType() !=vectorType.getElementType()) return op-> emitOpError("requires memref and vector types of the same elemental type")
Given a list of lists of parsed operands, populates uniqueOperands with unique operands.
static void addOperandEffect(SmallVectorImpl< SideEffects::EffectInstance< MemoryEffects::Effect > > &effects, MutableOperandRange operand)
Helper to add an effect on an operand, referenced by its mutable range.
static void addResultEffect(SmallVectorImpl< SideEffects::EffectInstance< MemoryEffects::Effect > > &effects, Value result)
Helper to add an effect on a result value.
static void getSingleRegionOpSuccessorRegions(Operation *op, Region ®ion, RegionBranchPoint point, SmallVectorImpl< RegionSuccessor > ®ions)
Generic helper for single-region OpenACC ops that execute their body once and then return to the parent operation.
static ValueRange getSingleRegionSuccessorInputs(Operation *op, RegionSuccessor successor)
static ParWidthOp getParWidthOpForLaunchArg(ComputeRegionOp op, GPUParallelDimAttr parDim)
This base class exposes generic asm parser hooks, usable across the various derived parsers.
@ Square
Square brackets surrounding zero or more operands.
virtual Builder & getBuilder() const =0
Return a builder which provides useful access to MLIRContext, global objects like types and attribute...
virtual ParseResult parseCommaSeparatedList(Delimiter delimiter, function_ref< ParseResult()> parseElementFn, StringRef contextMessage=StringRef())=0
Parse a list of comma-separated items with an optional delimiter.
virtual ParseResult parseOptionalAttrDict(NamedAttrList &result)=0
Parse a named dictionary into 'result' if it is present.
virtual ParseResult parseOptionalKeyword(StringRef keyword)=0
Parse the given keyword if present.
MLIRContext * getContext() const
virtual ParseResult parseRParen()=0
Parse a ) token.
virtual InFlightDiagnostic emitError(SMLoc loc, const Twine &message={})=0
Emit a diagnostic at the specified location and return failure.
ParseResult parseKeywordOrString(std::string *result)
Parse a keyword or a quoted string.
virtual ParseResult parseLess()=0
Parse a '<' token.
virtual SMLoc getCurrentLocation()=0
Get the location of the next token and store it into the argument.
virtual ParseResult parseColon()=0
Parse a : token.
virtual ParseResult parseGreater()=0
Parse a '>' token.
virtual ParseResult parseLParen()=0
Parse a ( token.
virtual ParseResult parseType(Type &result)=0
Parse a type.
virtual ParseResult parseOptionalArrowTypeList(SmallVectorImpl< Type > &result)=0
Parse an optional arrow followed by a type list.
ParseResult parseTypeList(SmallVectorImpl< Type > &result)
Parse a type list.
This base class exposes generic asm printer hooks, usable across the various derived printers.
void printOptionalArrowTypeList(TypeRange &&types)
Print an optional arrow followed by a type list.
Attributes are known-constant values of operations.
This class represents an argument of a Block.
unsigned getArgNumber() const
Returns the number of this argument.
Block represents an ordered list of Operations.
Operation * getTerminator()
Get the terminator operation of this block.
DenseI32ArrayAttr getDenseI32ArrayAttr(ArrayRef< int32_t > values)
MLIRContext is the top-level object for a collection of MLIR operations.
This class provides a mutable adaptor for a range of operands.
unsigned size() const
Returns the current size of the range.
The OpAsmParser has methods for interacting with the asm parser: parsing things from it,...
virtual ParseResult parseRegion(Region ®ion, ArrayRef< Argument > arguments={}, bool enableNameShadowing=false)=0
Parses a region.
ParseResult parseAssignmentList(SmallVectorImpl< Argument > &lhs, SmallVectorImpl< UnresolvedOperand > &rhs)
Parse a list of assignments of the form (x1 = y1, x2 = y2, ...)
virtual ParseResult resolveOperand(const UnresolvedOperand &operand, Type type, SmallVectorImpl< Value > &result)=0
Resolve an operand to an SSA value, emitting an error on failure.
virtual ParseResult parseOperand(UnresolvedOperand &result, bool allowResultNumber=true)=0
Parse a single SSA value operand name along with a result number if allowResultNumber is true.
This is a pure-virtual base class that exposes the asmprinter hooks necessary to implement a custom printer.
virtual void printOptionalAttrDict(ArrayRef< NamedAttribute > attrs, ArrayRef< StringRef > elidedAttrs={})=0
If the specified operation has attributes, print out an attribute dictionary with their values.
virtual void printRegion(Region &blocks, bool printEntryBlockArgs=true, bool printBlockTerminators=true, bool printEmptyBlock=false)=0
Prints a region.
This class helps build Operations.
void setInsertionPointToStart(Block *block)
Sets the insertion point to the start of the specified block.
Operation is the basic unit of execution within MLIR.
result_range getResults()
This class represents a point being branched from in the methods of the RegionBranchOpInterface.
bool isParent() const
Returns true if branching from the parent op.
This class represents a successor of a region.
static RegionSuccessor parent()
Initialize a successor that branches after/out of the parent operation.
bool isParent() const
Return true if the successor is the parent operation.
This class contains a list of basic blocks and a link to the parent operation it is attached to.
RewritePatternSet & add(ConstructorArg &&arg, ConstructorArgs &&...args)
Add an instance of each of the pattern types 'Ts' to the pattern list with the given arguments.
virtual void eraseOp(Operation *op)
This method erases an operation that is known to have no uses.
OpTy replaceOpWithNewOp(Operation *op, Args &&...args)
Replace the results of the given (original) op with a new op that is created without verification (re...
This class represents a specific instance of an effect.
static DerivedEffect * get()
static CurrentDeviceIdResource * get()
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
This class provides an abstraction over the different types of ranges over Values.
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Type getType() const
Return the type of this value.
mlir::Value getAccVar(mlir::Operation *accDataClauseOp)
Used to obtain the accVar from a data clause operation.
mlir::Value getVar(mlir::Operation *accDataClauseOp)
Used to obtain the var from a data clause operation.
std::optional< mlir::acc::DataClause > getDataClause(mlir::Operation *accDataEntryOp)
Used to obtain the dataClause from a data entry operation.
mlir::Type getVarType(mlir::Operation *accDataClauseOp)
Used to obtain the varType from a data clause operation, which records the type of the variable.
Include the generated interface declarations.
std::optional< int64_t > getConstantIntValue(OpFoldResult ofr)
If ofr is a constant integer or an IntegerAttr, return the integer.
Type getType(OpFoldResult ofr)
Returns the int type of the integer in ofr.
InFlightDiagnostic emitError(Location loc)
Utility method to emit an error message using this location.
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects the context only if needed; this helps unify some of the attribute construction code.
This is the representation of an operand reference.
OpRewritePattern is a wrapper around RewritePattern that allows for matching and rewriting against an...
This represents an operation in an abstracted form, suitable for use with the builder APIs.
Eliminates variable at the specified position using Fourier-Motzkin variable elimination.