MLIR 22.0.0git

mlir::arith::ConstantIndexOp Class Reference

Specialization of arith.constant op that returns an integer of index type.
#include "mlir/Dialect/Arith/IR/Arith.h"
Public Member Functions

int64_t value ()

Static Public Member Functions

static ::mlir::TypeID resolveTypeID ()
static void build (OpBuilder &builder, OperationState &result, int64_t value)
    Build a constant int op that produces an index.
static ConstantIndexOp create (OpBuilder &builder, Location location, int64_t value)
static ConstantIndexOp create (ImplicitLocOpBuilder &builder, int64_t value)
static bool classof (Operation *op)
Detailed Description

Specialization of arith.constant op that returns an integer of index type.
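Typical usage is to materialize an index constant at the current insertion point. A minimal sketch, assuming builder and loc come from the surrounding pass or rewrite pattern (the helper name is illustrative):

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/IR/Builders.h"

    using namespace mlir;

    // Emits `%c42 = arith.constant 42 : index` and returns its result.
    Value makeIndexConstant(OpBuilder &builder, Location loc) {
      auto cst = arith::ConstantIndexOp::create(builder, loc, /*value=*/42);
      return cst.getResult();
    }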
Member Function Documentation

build()

static void mlir::arith::ConstantIndexOp::build (OpBuilder &builder, OperationState &result, int64_t value)
Build a constant int op that produces an index.
Definition at line 353 of file ArithOps.cpp.
References mlir::Builder::getIndexAttr(), and mlir::Builder::getIndexType().
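Consistent with the References line above, the body plausibly wraps the value in an index attribute and forwards to the generic arith.constant builder. A sketch only; the authoritative definition is at line 353 of ArithOps.cpp:

    void ConstantIndexOp::build(OpBuilder &builder, OperationState &result,
                                int64_t value) {
      // Assumed shape: an IndexAttr holding `value`, with the builder's
      // index type as the result type.
      ConstantOp::build(builder, result, builder.getIndexType(),
                        builder.getIndexAttr(value));
    }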
classof()

static bool mlir::arith::ConstantIndexOp::classof (Operation *op)

Definition at line 374 of file ArithOps.cpp.
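classof() is what lets LLVM-style casts recognize any arith.constant whose result is of index type as a ConstantIndexOp. A sketch of a typical call site (the helper name is illustrative):

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include <optional>

    using namespace mlir;

    // Returns the constant if `op` is an index-typed arith.constant.
    std::optional<int64_t> getConstantIndexValue(Operation *op) {
      if (auto idxOp = dyn_cast<arith::ConstantIndexOp>(op))
        return idxOp.value();
      return std::nullopt;
    }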
create() [1/2]

static ConstantIndexOp mlir::arith::ConstantIndexOp::create (ImplicitLocOpBuilder &builder, int64_t value)
Definition at line 370 of file ArithOps.cpp.
References mlir::ImplicitLocOpBuilder::getLoc().
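This overload spares callers from threading a Location through every call; per the References entry it takes the location from the builder itself. A sketch (the helper name is illustrative):

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/IR/ImplicitLocOpBuilder.h"

    using namespace mlir;

    Value makeZeroIndex(ImplicitLocOpBuilder &b) {
      // No Location argument: the overload uses b.getLoc() internally.
      return arith::ConstantIndexOp::create(b, /*value=*/0).getResult();
    }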
create() [2/2]

static ConstantIndexOp mlir::arith::ConstantIndexOp::create (OpBuilder &builder, Location location, int64_t value)
Definition at line 359 of file ArithOps.cpp.
Referenced by allocBuffer(), mlir::transform::gpu::alterGpuLaunch(), HopperBuilder::buildAndInitBarrierInSharedMemory(), buildArithValue(), HopperBuilder::buildBarrierArriveTx(), buildNumReadElements(), HopperBuilder::buildPredicateLoadsOnThread0(), buildPredicates(), HopperBuilder::buildTmaAsyncLoad(), HopperBuilder::buildTryWaitParity(), mlir::collapseParallelLoops(), mlir::sparse_tensor::constantIndex(), createAsyncDispatchFunction(), mlir::linalg::createDestinationPassingStyleInitOperand(), createFullPartialLinalgCopy(), createFullPartialVectorTransferRead(), mlir::transform::gpu::createGpuLaunch(), mlir::arm_sme::createLoopOverTileSlices(), createParallelComputeFunction(), mlir::shard::createProcessLinearIndex(), mlir::vector::createReadOrMaskedRead(), defaultAllocBufferCallBack(), mlir::gpu::WarpDistributionPattern::delinearizeLaneId(), deriveStaticUpperBound(), doAsyncDispatch(), doSequentialDispatch(), dynamicallyExtractSubVector(), dynamicallyInsertSubVector(), emitIsPositiveIndexAssertion(), mlir::sparse_tensor::foreachInSparseConstant(), generateCopy(), mlir::xegpu::genOffsetsComputingInsts(), mlir::vector::getAsValues(), getCollapsedIndices(), getLocationToWriteFullVec(), mlir::getOrCreateRanges(), getOrCreateStep(), getValueFromOpFoldResult(), mlir::getValueOrCreateConstantIndexOp(), getZero(), mlir::inferExpandShapeOutputShape(), insertCopyLoops(), mlir::sparse_tensor::insertYieldOp(), mlir::vector::makeVscaleConstantBuilder(), mlir::transform::gpu::mapForallToBlocksImpl(), mlir::transform::gpu::mapNestedForallToThreadsImpl(), mlir::ComposeCollapseOfExpandOp< CollapseOpTy, ExpandOpTy, CastOpTy, DimOpTy, TensorTy >::matchAndRewrite(), FoldLaunchArguments::matchAndRewrite(), mlir::linalg::DecomposePadOpPattern::matchAndRewrite(), mlir::affine::normalizeMemRef(), permuteVectorOffset(), mlir::affine::promoteIfSingleIteration(), reifyOrComputeDynamicSizes(), replaceConstantUsesOf(), replaceOpWithPredicatedOp(), mlir::linalg::rewriteInDestinationPassingStyle(), PadOpVectorizationWithInsertSlicePattern::rewriteUser(), mlir::linalg::LinalgTilingOptions::setTileSizes(), sliceLoadStoreIndices(), specializeParallelLoopForUnrolling(), vectorizeAsInsertSliceOp(), and mlir::linalg::vectorizeCopy().
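The breadth of this caller list reflects how routinely passes materialize index constants for loop bounds, offsets, and steps. A hypothetical helper showing the common pattern (names are illustrative):

    #include "mlir/Dialect/Arith/IR/Arith.h"
    #include "mlir/IR/Builders.h"

    using namespace mlir;

    // Materializes one arith.constant index op per requested value.
    SmallVector<Value> makeIndexConstants(OpBuilder &b, Location loc,
                                          ArrayRef<int64_t> values) {
      SmallVector<Value> out;
      for (int64_t v : values)
        out.push_back(arith::ConstantIndexOp::create(b, loc, v));
      return out;
    }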
resolveTypeID()

static ::mlir::TypeID mlir::arith::ConstantIndexOp::resolveTypeID ()   [inline]