#include "llvm/ADT/STLExtras.h"

#define DEBUG_TYPE "gpu-to-llvm"

#define GEN_PASS_DEF_GPUTOLLVMCONVERSIONPASS
#include "mlir/Conversion/Passes.h.inc"
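// The GEN_PASS_DEF_* include above pulls in the TableGen-generated pass base
// class, impl::GpuToLLVMConversionPassBase, which supplies the pass options
// (hostBarePtrCallConv, kernelBarePtrCallConv, kernelIntersperseSizeCallConv)
// consumed by runOnOperation() below.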
class GpuToLLVMConversionPass
    : public impl::GpuToLLVMConversionPassBase<GpuToLLVMConversionPass> {
public:
  using Base::Base;

  void getDependentDialects(DialectRegistry &registry) const final {
    Base::getDependentDialects(registry);
    registerConvertToLLVMDependentDialectLoading(registry);
  }

  // Run the dialect converter on the module.
  void runOnOperation() override;
};
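// For reference, a typical driver invocation of this pass (a sketch; the pass
// is registered as "gpu-to-llvm", and the exact option spellings should be
// checked against the pass definition in Passes.td):
//
//   mlir-opt app.mlir --gpu-to-llvm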
template <typename OpTy>
class ConvertOpToGpuRuntimeCallPattern : public ConvertOpToLLVMPattern<OpTy> {
public:
  explicit ConvertOpToGpuRuntimeCallPattern(
      const LLVMTypeConverter &typeConverter)
      : ConvertOpToLLVMPattern<OpTy>(typeConverter) {}

protected:
  Value getNumElements(ConversionPatternRewriter &rewriter, Location loc,
                       MemRefType type, MemRefDescriptor desc) const {
    Type indexType = ConvertToLLVMPattern::getIndexType();
    if (type.hasStaticShape())
      return ConvertToLLVMPattern::createIndexAttrConstant(
          rewriter, loc, indexType, type.getNumElements());
    // Compute the number of elements by multiplying all dimension sizes.
    uint64_t rank = type.getRank();
    Value numElements = desc.size(rewriter, loc, /*pos=*/0);
    for (unsigned i = 1; i < rank; i++)
      numElements = LLVM::MulOp::create(rewriter, loc, numElements,
                                        desc.size(rewriter, loc, /*pos=*/i));
    return numElements;
  }
  MLIRContext *context = &this->getTypeConverter()->getContext();

  Type llvmVoidType = LLVM::LLVMVoidType::get(context);
  LLVM::LLVMPointerType llvmPointerType = LLVM::LLVMPointerType::get(context);
  Type llvmInt8Type = IntegerType::get(context, 8);
  Type llvmInt16Type = IntegerType::get(context, 16);
  Type llvmInt32Type = IntegerType::get(context, 32);
  Type llvmInt64Type = IntegerType::get(context, 64);
  Type llvmFloat32Type = Float32Type::get(context);
  Type llvmIntPtrType = IntegerType::get(
      context, this->getTypeConverter()->getPointerBitwidth(0));

  FunctionCallBuilder streamCreateCallBuilder = {
      "mgpuStreamCreate", llvmPointerType /* void *stream */, {}};
  FunctionCallBuilder streamDestroyCallBuilder = {
      "mgpuStreamDestroy", llvmVoidType, {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamSynchronizeCallBuilder = {
      "mgpuStreamSynchronize", llvmVoidType,
      {llvmPointerType /* void *stream */}};
  FunctionCallBuilder streamWaitEventCallBuilder = {
      "mgpuStreamWaitEvent", llvmVoidType,
      {llvmPointerType /* void *stream */, llvmPointerType /* void *event */}};
  FunctionCallBuilder eventCreateCallBuilder = {
      "mgpuEventCreate", llvmPointerType /* void *event */, {}};
  FunctionCallBuilder eventDestroyCallBuilder = {
      "mgpuEventDestroy", llvmVoidType, {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventSynchronizeCallBuilder = {
      "mgpuEventSynchronize", llvmVoidType,
      {llvmPointerType /* void *event */}};
  FunctionCallBuilder eventRecordCallBuilder = {
      "mgpuEventRecord", llvmVoidType,
      {llvmPointerType /* void *event */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder hostRegisterCallBuilder = {
      "mgpuMemHostRegisterMemRef", llvmVoidType,
      {llvmIntPtrType /* intptr_t rank */,
       llvmPointerType /* void *memrefDesc */,
       llvmIntPtrType /* intptr_t elementSizeBytes */}};
  FunctionCallBuilder hostUnregisterCallBuilder = {
      "mgpuMemHostUnregisterMemRef", llvmVoidType,
      {llvmIntPtrType /* intptr_t rank */,
       llvmPointerType /* void *memrefDesc */,
       llvmIntPtrType /* intptr_t elementSizeBytes */}};
  FunctionCallBuilder allocCallBuilder = {
      "mgpuMemAlloc", llvmPointerType /* void * */,
      {llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */,
       llvmInt8Type /* bool isHostShared */}};
  FunctionCallBuilder deallocCallBuilder = {
      "mgpuMemFree", llvmVoidType,
      {llvmPointerType /* void *ptr */, llvmPointerType /* void *stream */}};
  FunctionCallBuilder memcpyCallBuilder = {
      "mgpuMemcpy", llvmVoidType,
      {llvmPointerType /* void *dst */, llvmPointerType /* void *src */,
       llvmIntPtrType /* intptr_t sizeBytes */,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder memset16CallBuilder = {
      "mgpuMemset16", llvmVoidType,
      {llvmPointerType /* void *dst */, llvmInt16Type /* value */,
       llvmIntPtrType /* intptr_t numElements */,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder memset32CallBuilder = {
      "mgpuMemset32", llvmVoidType,
      {llvmPointerType /* void *dst */, llvmInt32Type /* value */,
       llvmIntPtrType /* intptr_t numElements */,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder setDefaultDeviceCallBuilder = {
      "mgpuSetDefaultDevice", llvmVoidType,
      {llvmInt32Type /* devIndex */}};
  FunctionCallBuilder createDnVecCallBuilder = {
      "mgpuCreateDnVec", llvmPointerType,
      {llvmIntPtrType, llvmPointerType, llvmInt32Type,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder destroyDnVecCallBuilder = {
      "mgpuDestroyDnVec", llvmVoidType,
      {llvmPointerType, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createDnMatCallBuilder = {
      "mgpuCreateDnMat", llvmPointerType,
      {llvmIntPtrType, llvmIntPtrType, llvmPointerType, llvmInt32Type,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder destroyDnMatCallBuilder = {
      "mgpuDestroyDnMat", llvmVoidType,
      {llvmPointerType, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createCooCallBuilder = {
      "mgpuCreateCoo", llvmPointerType,
      {llvmIntPtrType, llvmIntPtrType, llvmIntPtrType, llvmPointerType,
       llvmPointerType, llvmPointerType, llvmInt32Type, llvmInt32Type,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder createCooAoSCallBuilder = {
      "mgpuCreateCooAoS", llvmPointerType,
      {llvmIntPtrType, llvmIntPtrType, llvmIntPtrType, llvmPointerType,
       llvmPointerType, llvmInt32Type, llvmInt32Type,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder createCsrCallBuilder = {
      "mgpuCreateCsr", llvmPointerType,
      {llvmIntPtrType, llvmIntPtrType, llvmIntPtrType, llvmPointerType,
       llvmPointerType, llvmPointerType, llvmInt32Type, llvmInt32Type,
       llvmInt32Type, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createCscCallBuilder = {
      "mgpuCreateCsc", llvmPointerType,
      {llvmIntPtrType, llvmIntPtrType, llvmIntPtrType, llvmPointerType,
       llvmPointerType, llvmPointerType, llvmInt32Type, llvmInt32Type,
       llvmInt32Type, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createBsrCallBuilder = {
      "mgpuCreateBsr", llvmPointerType,
      {llvmIntPtrType, llvmIntPtrType, llvmIntPtrType, llvmIntPtrType,
       llvmIntPtrType, llvmPointerType, llvmPointerType, llvmPointerType,
       llvmInt32Type, llvmInt32Type, llvmInt32Type,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder destroySpMatCallBuilder = {
      "mgpuDestroySpMat", llvmVoidType,
      {llvmPointerType, llvmPointerType /* void *stream */}};
  FunctionCallBuilder spMVBufferSizeCallBuilder = {
      "mgpuSpMVBufferSize", llvmIntPtrType,
      {llvmInt32Type, llvmPointerType, llvmPointerType, llvmPointerType,
       llvmInt32Type, llvmPointerType /* void *stream */}};
  FunctionCallBuilder spMVCallBuilder = {
      "mgpuSpMV", llvmVoidType,
      {llvmInt32Type, llvmPointerType, llvmPointerType, llvmPointerType,
       llvmInt32Type, llvmPointerType, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSpMMBufferSizeCallBuilder = {
      "mgpuSpMMBufferSize", llvmIntPtrType,
      {llvmInt32Type, llvmInt32Type, llvmPointerType, llvmPointerType,
       llvmPointerType, llvmInt32Type, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSpMMCallBuilder = {
      "mgpuSpMM", llvmVoidType,
      {llvmInt32Type, llvmInt32Type, llvmPointerType, llvmPointerType,
       llvmPointerType, llvmInt32Type, llvmPointerType,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSDDMMBufferSizeCallBuilder = {
      "mgpuSDDMMBufferSize", llvmIntPtrType,
      {llvmInt32Type, llvmInt32Type, llvmPointerType, llvmPointerType,
       llvmPointerType, llvmInt32Type, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSDDMMCallBuilder = {
      "mgpuSDDMM", llvmVoidType,
      {llvmInt32Type, llvmInt32Type, llvmPointerType, llvmPointerType,
       llvmPointerType, llvmInt32Type, llvmPointerType,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder createLtDnMatCallBuilder = {
      "mgpuCreateCuSparseLtDnMat", llvmVoidType,
      {llvmPointerType, llvmIntPtrType, llvmIntPtrType, llvmPointerType,
       llvmInt32Type, llvmPointerType /* void *stream */}};
  FunctionCallBuilder destroyCuSparseLtSpMatBuilder = {
      "mgpuDestroyCuSparseLtSpMat", llvmVoidType,
      {llvmPointerType, llvmPointerType /* void *stream */}};
  FunctionCallBuilder destroyCuSparseLtDnMatBuilder = {
      "mgpuDestroyCuSparseLtDnMat", llvmVoidType,
      {llvmPointerType, llvmPointerType /* void *stream */}};
  FunctionCallBuilder create2To4SpMatCallBuilder = {
      "mgpuCusparseLtCreate2To4SpMat", llvmVoidType,
      {llvmPointerType, llvmIntPtrType, llvmIntPtrType, llvmPointerType,
       llvmInt32Type, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createCuSparseLtSpMMBufferSizeBuilder = {
      "mgpuCuSparseLtSpMMBufferSize", llvmVoidType,
      {llvmPointerType, llvmInt32Type, llvmInt32Type, llvmPointerType,
       llvmPointerType, llvmPointerType, llvmInt32Type, llvmInt32Type,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder createCuSparseLtSpMMBuilder = {
      "mgpuCuSparseLtSpMM", llvmVoidType,
      {llvmPointerType, llvmPointerType, llvmPointerType, llvmPointerType,
       llvmPointerType, llvmPointerType, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSpGEMMCreateDescrBuilder = {
      "mgpuSpGEMMCreateDescr", llvmPointerType,
      {llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSpGEMMDestroyDescrBuilder = {
      "mgpuSpGEMMDestroyDescr", llvmVoidType,
      {llvmPointerType /* void *spgemmDescr */,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSpGEMMWorkEstimationBuilder = {
      "mgpuSpGEMMWorkEstimation", llvmIntPtrType,
      {llvmPointerType, llvmInt32Type, llvmInt32Type,
       llvmPointerType, llvmPointerType, llvmPointerType,
       llvmInt32Type, llvmIntPtrType, llvmPointerType,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSpGEMMComputeBuilder = {
      "mgpuSpGEMMCompute", llvmIntPtrType,
      {llvmPointerType, llvmInt32Type, llvmInt32Type,
       llvmPointerType, llvmPointerType, llvmPointerType,
       llvmInt32Type, llvmIntPtrType, llvmPointerType,
       llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSpGEMMCopyBuilder = {
      "mgpuSpGEMMCopy", llvmVoidType,
      {llvmPointerType, llvmInt32Type, llvmInt32Type,
       llvmPointerType, llvmPointerType, llvmPointerType,
       llvmInt32Type, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSpMatGetSizeBuilder = {
      "mgpuSpMatGetSize", llvmVoidType,
      {llvmPointerType, llvmPointerType, llvmPointerType,
       llvmPointerType, llvmPointerType /* void *stream */}};
  FunctionCallBuilder createSetCsrPointersBuilder = {
      "mgpuSetCsrPointers", llvmVoidType,
      {llvmPointerType /* void *spmat */, llvmPointerType /* void *pos */,
       llvmPointerType /* void *crd */, llvmPointerType /* void *val */,
       llvmPointerType /* void *stream */}};
};
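// Each FunctionCallBuilder member above names a C wrapper from the GPU
// runtime library (e.g. mgpuStreamCreate), together with its LLVM result and
// argument types. FunctionCallBuilder::create (defined further below) declares
// the function into the surrounding module on first use and then emits an
// llvm.call to it, so the mgpu* symbols are resolved at load time against the
// runtime wrapper library.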
class ConvertHostRegisterOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp> {
public:
  ConvertHostRegisterOpToGpuRuntimeCallPattern(
      const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::HostRegisterOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::HostRegisterOp hostRegisterOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
class ConvertHostUnregisterOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::HostUnregisterOp> {
public:
  ConvertHostUnregisterOpToGpuRuntimeCallPattern(
      const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::HostUnregisterOp>(typeConverter) {
  }

private:
  LogicalResult
  matchAndRewrite(gpu::HostUnregisterOp hostUnregisterOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
class ConvertAllocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp> {
public:
  ConvertAllocOpToGpuRuntimeCallPattern(const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::AllocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::AllocOp allocOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
class ConvertDeallocOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp> {
public:
  ConvertDeallocOpToGpuRuntimeCallPattern(
      const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::DeallocOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::DeallocOp deallocOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
class ConvertAsyncYieldToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<async::YieldOp> {
public:
  ConvertAsyncYieldToGpuRuntimeCallPattern(
      const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<async::YieldOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(async::YieldOp yieldOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
class ConvertWaitOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitOpToGpuRuntimeCallPattern(const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
class ConvertWaitAsyncOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp> {
public:
  ConvertWaitAsyncOpToGpuRuntimeCallPattern(
      const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::WaitOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::WaitOp waitOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
class LegalizeLaunchFuncOpPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp> {
public:
  LegalizeLaunchFuncOpPattern(const LLVMTypeConverter &typeConverter,
                              bool kernelBarePtrCallConv,
                              bool kernelIntersperseSizeCallConv)
      : ConvertOpToGpuRuntimeCallPattern<gpu::LaunchFuncOp>(typeConverter),
        kernelBarePtrCallConv(kernelBarePtrCallConv),
        kernelIntersperseSizeCallConv(kernelIntersperseSizeCallConv) {}

private:
  LogicalResult
  matchAndRewrite(gpu::LaunchFuncOp launchOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;

  bool kernelBarePtrCallConv;
  bool kernelIntersperseSizeCallConv;
};
class ConvertMemcpyOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp> {
public:
  ConvertMemcpyOpToGpuRuntimeCallPattern(const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::MemcpyOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::MemcpyOp memcpyOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
class ConvertMemsetOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::MemsetOp> {
public:
  ConvertMemsetOpToGpuRuntimeCallPattern(const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::MemsetOp>(typeConverter) {}

private:
  LogicalResult
  matchAndRewrite(gpu::MemsetOp memsetOp, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
class ConvertSetDefaultDeviceOpToGpuRuntimeCallPattern
    : public ConvertOpToGpuRuntimeCallPattern<gpu::SetDefaultDeviceOp> {
public:
  ConvertSetDefaultDeviceOpToGpuRuntimeCallPattern(
      const LLVMTypeConverter &typeConverter)
      : ConvertOpToGpuRuntimeCallPattern<gpu::SetDefaultDeviceOp>(
            typeConverter) {}

  LogicalResult
  matchAndRewrite(gpu::SetDefaultDeviceOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override;
};
#define DECLARE_CONVERT_OP_TO_GPU_RUNTIME_CALL_PATTERN(op_name)               \
  class Convert##op_name##ToGpuRuntimeCallPattern                             \
      : public ConvertOpToGpuRuntimeCallPattern<gpu::op_name> {               \
  public:                                                                     \
    Convert##op_name##ToGpuRuntimeCallPattern(                                \
        const LLVMTypeConverter &typeConverter)                               \
        : ConvertOpToGpuRuntimeCallPattern<gpu::op_name>(typeConverter) {}    \
                                                                              \
  private:                                                                    \
    LogicalResult                                                             \
    matchAndRewrite(gpu::op_name op, OpAdaptor adaptor,                       \
                    ConversionPatternRewriter &rewriter) const override;      \
  };
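// The macro above is expanded once per sparse runtime op (CreateDnTensorOp,
// CreateCooOp, CreateCsrOp, SpMVOp, SpMMOp, SDDMMOp, the SpGEMM ops, and so
// on); the corresponding matchAndRewrite definitions appear below.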
void GpuToLLVMConversionPass::runOnOperation() {
  MLIRContext *context = &getContext();
  LowerToLLVMOptions options(context);
  options.useBarePtrCallConv = hostBarePtrCallConv;
  RewritePatternSet patterns(context);
  ConversionTarget target(*context);
  target.addLegalDialect<LLVM::LLVMDialect>();
  LLVMTypeConverter converter(context, options);

  // Populate patterns from every dialect that implements the
  // ConvertToLLVMPatternInterface.
  for (Dialect *dialect : context->getLoadedDialects()) {
    auto iface = dyn_cast<ConvertToLLVMPatternInterface>(dialect);
    if (!iface)
      continue;
    iface->populateConvertToLLVMConversionPatterns(target, converter,
                                                   patterns);
  }

  // Keep gpu.module and gpu.binary legal; gpu.launch_func becomes legal once
  // its operands have been lowered.
  target.addLegalOp<gpu::GPUModuleOp, gpu::BinaryOp>();
  target.addDynamicallyLegalOp<gpu::LaunchFuncOp>(
      [&](gpu::LaunchFuncOp op) -> bool { return converter.isLegal(op); });

  populateGpuToLLVMConversionPatterns(converter, patterns,
                                      kernelBarePtrCallConv,
                                      kernelIntersperseSizeCallConv);

  if (failed(applyPartialConversion(getOperation(), target,
                                    std::move(patterns))))
    return signalPassFailure();
}
LLVM::CallOp FunctionCallBuilder::create(Location loc, OpBuilder &builder,
                                         ArrayRef<Value> arguments) const {
  auto module = builder.getBlock()->getParent()->getParentOfType<ModuleOp>();
  // Reuse an existing declaration of the runtime function, or create one at
  // the end of the module.
  auto function = [&] {
    if (auto function = module.lookupSymbol<LLVM::LLVMFuncOp>(functionName))
      return function;
    auto funcBuilder = OpBuilder::atBlockEnd(module.getBody());
    return LLVM::LLVMFuncOp::create(funcBuilder, loc, functionName,
                                    functionType);
  }();
  return LLVM::CallOp::create(builder, loc, function, arguments);
}
  llvm_unreachable("unsupported type");
}

// Maps an MLIR element type to the corresponding cudaDataType_t code.
static int32_t getCuSparseDataTypeFrom(Type type) {
  if (llvm::isa<ComplexType>(type)) {
    // Get the complex element type.
    auto elementType = cast<ComplexType>(type).getElementType();
    if (elementType.isBF16())
      return 15; // CUDA_C_16BF
    if (elementType.isF16())
      return 6; // CUDA_C_16F
    if (elementType.isF32())
      return 4; // CUDA_C_32F
    if (elementType.isF64())
      return 5; // CUDA_C_64F
    if (elementType.isInteger(8))
      return 7; // CUDA_C_8I
    if (elementType.isInteger(16))
      return 21; // CUDA_C_16I
    if (elementType.isInteger(32))
      return 11; // CUDA_C_32I
  }
  if (type.isBF16())
    return 14; // CUDA_R_16BF
  if (type.isF16())
    return 2; // CUDA_R_16F
  if (type.isF32())
    return 0; // CUDA_R_32F
  if (type.isF64())
    return 1; // CUDA_R_64F
  if (type.isInteger(8))
    return 3; // CUDA_R_8I
  if (type.isInteger(16))
    return 20; // CUDA_R_16I
  if (type.isInteger(32))
    return 10; // CUDA_R_32I
  llvm_unreachable("unsupported element type");
}
static gpu::Prune2To4SpMatFlag get2To4PruneFlag(Value spMat) {
  return spMat.getDefiningOp<gpu::Create2To4SpMatOp>().getPruneFlag();
}

// Only matrices produced by gpu.create_2to4_spmat are 2:4 sparse; every other
// sparse-matrix creation op yields a regular matrix.
static bool is2To4Sparsity(Value spMat) {
  if (spMat.getDefiningOp<gpu::Create2To4SpMatOp>())
    return true;
  if (spMat.getDefiningOp<gpu::CreateCooOp>() ||
      spMat.getDefiningOp<gpu::CreateCooAoSOp>() ||
      spMat.getDefiningOp<gpu::CreateCsrOp>() ||
      spMat.getDefiningOp<gpu::CreateCscOp>() ||
      spMat.getDefiningOp<gpu::CreateBsrOp>())
    return false;
  llvm_unreachable("cannot find spmat def");
}
static bool isSpMMCusparseLtOp(Value op) {
  for (Operation *user : op.getUsers()) {
    auto spmmOp = dyn_cast<gpu::SpMMOp>(user);
    if (!spmmOp)
      continue;
    if (is2To4Sparsity(spmmOp.getSpmatA()))
      return true;
  }
  return false;
}

// Returns success if all operands are already of LLVM-compatible type.
static LogicalResult areAllLLVMTypes(Operation *op, ValueRange operands,
                                     ConversionPatternRewriter &rewriter) {
  if (!llvm::all_of(operands, [](Value value) {
        return LLVM::isCompatibleType(value.getType());
      }))
    return rewriter.notifyMatchFailure(
        op, "Cannot convert if operands aren't of LLVM type.");
  return success();
}
static LogicalResult
isAsyncWithOneDependency(ConversionPatternRewriter &rewriter,
                         gpu::AsyncOpInterface op) {
  if (op.getAsyncDependencies().size() != 1)
    return rewriter.notifyMatchFailure(
        op, "Can only convert with exactly one async dependency.");
  if (!op.getAsyncToken())
    return rewriter.notifyMatchFailure(op,
                                       "Can only convert with async token.");
  return success();
}
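// For reference, the async form these helpers accept is the single-token chain
// produced by the GPU dialect (a sketch in MLIR assembly):
//
//   %t0 = gpu.wait async
//   %mem, %t1 = gpu.alloc async [%t0] () : memref<16xf32>
//
// Exactly one async dependency plus a produced token are required so each op
// can be mapped onto a single runtime stream.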
LogicalResult ConvertHostRegisterOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::HostRegisterOp hostRegisterOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  auto *op = hostRegisterOp.getOperation();
  if (failed(areAllLLVMTypes(op, adaptor.getOperands(), rewriter)))
    return failure();
  Location loc = op->getLoc();

  auto memRefType = hostRegisterOp.getValue().getType();
  auto elementType = cast<UnrankedMemRefType>(memRefType).getElementType();
  auto elementSize = getSizeInBytes(loc, elementType, rewriter);

  auto arguments = getTypeConverter()->promoteOperands(
      loc, op->getOperands(), adaptor.getOperands(), rewriter);
  arguments.push_back(elementSize);
  hostRegisterCallBuilder.create(loc, rewriter, arguments);

  rewriter.eraseOp(op);
  return success();
}
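// The net effect (a sketch): gpu.host_register %buf : memref<*xf32> becomes a
// call to mgpuMemHostRegisterMemRef(rank, descriptorPtr, elementSizeBytes),
// with the promoted unranked-memref descriptor passed by pointer.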
LogicalResult ConvertHostUnregisterOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::HostUnregisterOp hostUnregisterOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Operation *op = hostUnregisterOp.getOperation();
  Location loc = op->getLoc();

  auto memRefType = hostUnregisterOp.getValue().getType();
  auto elementType = cast<UnrankedMemRefType>(memRefType).getElementType();
  auto elementSize = getSizeInBytes(loc, elementType, rewriter);

  auto arguments = getTypeConverter()->promoteOperands(
      loc, op->getOperands(), adaptor.getOperands(), rewriter);
  arguments.push_back(elementSize);
  hostUnregisterCallBuilder.create(loc, rewriter, arguments);

  rewriter.eraseOp(op);
  return success();
}
LogicalResult ConvertAllocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::AllocOp allocOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  MemRefType memRefType = allocOp.getType();

  if (failed(areAllLLVMTypes(allocOp, adaptor.getOperands(), rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType))
    return failure();

  auto loc = allocOp.getLoc();

  bool isShared = allocOp.getHostShared();

  if (isShared && allocOp.getAsyncToken())
    return rewriter.notifyMatchFailure(
        allocOp, "Host Shared allocation cannot be done async");

  // Get shape of the memref as values: static sizes are constant values and
  // dynamic sizes are passed to 'alloc' as operands.
  SmallVector<Value, 4> shape;
  SmallVector<Value, 4> strides;
  Value sizeBytes;
  getMemRefDescriptorSizes(loc, memRefType, adaptor.getDynamicSizes(), rewriter,
                           shape, strides, sizeBytes);

  // Allocate the underlying buffer and store a pointer to it in the MemRef
  // descriptor.
  auto nullPtr = mlir::LLVM::ZeroOp::create(rewriter, loc, llvmPointerType);
  Value stream = adaptor.getAsyncDependencies().empty()
                     ? nullPtr
                     : adaptor.getAsyncDependencies().front();

  auto isHostShared = mlir::LLVM::ConstantOp::create(
      rewriter, loc, llvmInt8Type, rewriter.getI8IntegerAttr(isShared));

  Value allocatedPtr =
      allocCallBuilder.create(loc, rewriter, {sizeBytes, stream, isHostShared})
          .getResult();

  // No alignment.
  Value alignedPtr = allocatedPtr;

  // Create the MemRef descriptor.
  auto memRefDescriptor = this->createMemRefDescriptor(
      loc, memRefType, allocatedPtr, alignedPtr, shape, strides, rewriter);

  if (allocOp.getAsyncToken()) {
    // Async alloc: make dependent ops use the same stream.
    rewriter.replaceOp(allocOp, {memRefDescriptor, stream});
  } else {
    rewriter.replaceOp(allocOp, {memRefDescriptor});
  }
  return success();
}
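// The net effect (a sketch): %m, %t = gpu.alloc async [%dep] (%n) : memref<?xf32>
// becomes %ptr = call @mgpuMemAlloc(%sizeBytes, %stream, 0 : i8), with %ptr
// seeding both the allocated and the aligned pointer of the resulting memref
// descriptor, since no extra alignment is performed.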
LogicalResult ConvertDeallocOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DeallocOp deallocOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = deallocOp.getLoc();

  Value pointer =
      MemRefDescriptor(adaptor.getMemref()).allocatedPtr(rewriter, loc);
  Value stream = adaptor.getAsyncDependencies().front();
  deallocCallBuilder.create(loc, rewriter, {pointer, stream});

  rewriter.replaceOp(deallocOp, {stream});
  return success();
}

static bool isGpuAsyncTokenType(Value value) {
  return isa<gpu::AsyncTokenType>(value.getType());
}
LogicalResult ConvertAsyncYieldToGpuRuntimeCallPattern::matchAndRewrite(
    async::YieldOp yieldOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  if (llvm::none_of(yieldOp.getOperands(), isGpuAsyncTokenType))
    return rewriter.notifyMatchFailure(yieldOp, "no gpu async token operand");

  Location loc = yieldOp.getLoc();
  SmallVector<Value, 4> newOperands(adaptor.getOperands());
  llvm::SmallDenseSet<Value> streams;
  for (auto &operand : yieldOp->getOpOperands()) {
    if (!isGpuAsyncTokenType(operand.get()))
      continue;
    auto idx = operand.getOperandNumber();
    auto stream = adaptor.getOperands()[idx];
    auto event = eventCreateCallBuilder.create(loc, rewriter, {}).getResult();
    eventRecordCallBuilder.create(loc, rewriter, {event, stream});
    newOperands[idx] = event;
    streams.insert(stream);
  }
  for (auto stream : streams)
    streamDestroyCallBuilder.create(loc, rewriter, {stream});

  rewriter.modifyOpInPlace(yieldOp, [&] { yieldOp->setOperands(newOperands); });
  return success();
}

// Returns whether `value` is the result of an LLVM call to `functionName`.
static bool isDefinedByCallTo(Value value, StringRef functionName) {
  assert(isa<LLVM::LLVMPointerType>(value.getType()));
  if (auto defOp = value.getDefiningOp<LLVM::CallOp>())
    return *defOp.getCallee() == functionName;
  return false;
}
LogicalResult ConvertWaitOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  if (waitOp.getAsyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Cannot convert async op.");

  Location loc = waitOp.getLoc();

  for (auto operand : adaptor.getOperands()) {
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream.
      streamSynchronizeCallBuilder.create(loc, rewriter, {operand});
      streamDestroyCallBuilder.create(loc, rewriter, {operand});
    } else {
      // Otherwise the converted operand is an event.
      eventSynchronizeCallBuilder.create(loc, rewriter, {operand});
      eventDestroyCallBuilder.create(loc, rewriter, {operand});
    }
  }

  rewriter.eraseOp(waitOp);
  return success();
}
LogicalResult ConvertWaitAsyncOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::WaitOp waitOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  if (!waitOp.getAsyncToken())
    return rewriter.notifyMatchFailure(waitOp, "Can only convert async op.");

  Location loc = waitOp.getLoc();

  auto insertionPoint = rewriter.saveInsertionPoint();
  SmallVector<Value, 4> events;
  for (auto pair :
       llvm::zip(waitOp.getAsyncDependencies(), adaptor.getOperands())) {
    auto operand = std::get<1>(pair);
    if (isDefinedByCallTo(operand, streamCreateCallBuilder.functionName)) {
      // The converted operand's definition created a stream; record an event
      // on it right after its definition.
      auto *defOp = std::get<0>(pair).getDefiningOp();
      rewriter.setInsertionPointAfter(defOp);
      auto event = eventCreateCallBuilder.create(loc, rewriter, {}).getResult();
      eventRecordCallBuilder.create(loc, rewriter, {event, operand});
      events.push_back(event);
    } else {
      // Otherwise the converted operand is already an event.
      events.push_back(operand);
    }
  }
  rewriter.restoreInsertionPoint(insertionPoint);
  auto stream = streamCreateCallBuilder.create(loc, rewriter, {}).getResult();
  for (auto event : events)
    streamWaitEventCallBuilder.create(loc, rewriter, {stream, event});
  for (auto event : events)
    eventDestroyCallBuilder.create(loc, rewriter, {event});
  rewriter.replaceOp(waitOp, {stream});
  return success();
}
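// In short: a synchronous gpu.wait synchronizes and destroys each stream or
// event operand on the host, while the async form above materializes a fresh
// stream that waits on events recorded in the dependencies' streams, so the
// async token can keep flowing through structured control flow.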
LogicalResult LegalizeLaunchFuncOpPattern::matchAndRewrite(
    gpu::LaunchFuncOp launchOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  if (failed(areAllLLVMTypes(launchOp, adaptor.getOperands(), rewriter)))
    return failure();

  if (launchOp.getAsyncDependencies().size() > 1)
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert with more than one async dependency.");

  // Fail when the synchronous version of the op has async dependencies: the
  // lowering destroys the stream, and we do not want to check that there is
  // no use of the stream after this op.
  if (!launchOp.getAsyncToken() && !launchOp.getAsyncDependencies().empty())
    return rewriter.notifyMatchFailure(
        launchOp, "Cannot convert non-async op with async dependencies.");

  Location loc = launchOp.getLoc();

  Value stream = Value();
  if (!adaptor.getAsyncDependencies().empty())
    stream = adaptor.getAsyncDependencies().front();
  // If the async keyword is present and there are no dependencies, create a
  // stream to pass to subsequent operations.
  else if (launchOp.getAsyncToken())
    stream = streamCreateCallBuilder.create(loc, rewriter, {}).getResult();

  // Lower the kernel operands to match kernel parameters.
  OperandRange origArguments = launchOp.getKernelOperands();
  SmallVector<Value, 8> llvmArguments = getTypeConverter()->promoteOperands(
      loc, origArguments, adaptor.getKernelOperands(), rewriter,
      /*useBarePtrCallConv=*/kernelBarePtrCallConv);
  SmallVector<Value, 8> llvmArgumentsWithSizes;

  // Intersperse size information if requested.
  if (kernelIntersperseSizeCallConv) {
    if (origArguments.size() != llvmArguments.size()) {
      return rewriter.notifyMatchFailure(
          launchOp,
          "Cannot add sizes to arguments with one-to-many LLVM IR expansion.");
    }

    llvmArgumentsWithSizes.reserve(llvmArguments.size() * 2);
    for (auto [llvmArg, origArg] : zip_equal(llvmArguments, origArguments)) {
      auto memrefTy = dyn_cast<MemRefType>(origArg.getType());
      if (!memrefTy) {
        return rewriter.notifyMatchFailure(
            launchOp, "Operand to launch op is not a memref.");
      }

      if (!memrefTy.hasStaticShape() ||
          !memrefTy.getElementType().isIntOrFloat()) {
        return rewriter.notifyMatchFailure(
            launchOp, "Operand to launch op is not a memref with a static "
                      "shape and an integer or float element type.");
      }

      unsigned bitwidth = memrefTy.getElementTypeBitWidth();
      if (bitwidth % 8 != 0) {
        return rewriter.notifyMatchFailure(
            launchOp, "Operand to launch op is not a memref with a "
                      "byte-aligned element type.");
      }

      uint64_t staticSize = static_cast<uint64_t>(bitwidth / 8) *
                            static_cast<uint64_t>(memrefTy.getNumElements());

      Value sizeArg = LLVM::ConstantOp::create(
          rewriter, loc, getIndexType(), rewriter.getIndexAttr(staticSize));
      llvmArgumentsWithSizes.push_back(llvmArg);
      llvmArgumentsWithSizes.push_back(sizeArg);
    }
  }

  std::optional<gpu::KernelDim3> clusterSize = std::nullopt;
  if (launchOp.hasClusterSize()) {
    clusterSize =
        gpu::KernelDim3{adaptor.getClusterSizeX(), adaptor.getClusterSizeY(),
                        adaptor.getClusterSizeZ()};
  }
  gpu::LaunchFuncOp::create(
      rewriter, launchOp.getLoc(), launchOp.getKernelAttr(),
      gpu::KernelDim3{adaptor.getGridSizeX(), adaptor.getGridSizeY(),
                      adaptor.getGridSizeZ()},
      gpu::KernelDim3{adaptor.getBlockSizeX(), adaptor.getBlockSizeY(),
                      adaptor.getBlockSizeZ()},
      adaptor.getDynamicSharedMemorySize(),
      llvmArgumentsWithSizes.empty() ? llvmArguments : llvmArgumentsWithSizes,
      stream, clusterSize);
  if (launchOp.getAsyncToken())
    rewriter.replaceOp(launchOp, {stream});
  else
    rewriter.eraseOp(launchOp);
  return success();
}
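// With kernelIntersperseSizeCallConv enabled, every kernel memref argument is
// followed by its byte size. For example (a sketch), a kernel taking a
// memref<4xf32> is launched with (ptr, 16), since 4 elements * 4 bytes = 16;
// only statically shaped memrefs with byte-aligned int/float elements are
// accepted, as checked above.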
static Value bitAndAddrspaceCast(Location loc,
                                 ConversionPatternRewriter &rewriter,
                                 LLVM::LLVMPointerType destinationType,
                                 Value sourcePtr,
                                 const LLVMTypeConverter &typeConverter) {
  auto sourceTy = cast<LLVM::LLVMPointerType>(sourcePtr.getType());
  if (destinationType.getAddressSpace() != sourceTy.getAddressSpace())
    sourcePtr = LLVM::AddrSpaceCastOp::create(
        rewriter, loc,
        LLVM::LLVMPointerType::get(rewriter.getContext(),
                                   destinationType.getAddressSpace()),
        sourcePtr);
  return sourcePtr;
}
LogicalResult ConvertMemcpyOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::MemcpyOp memcpyOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  auto memRefType = cast<MemRefType>(memcpyOp.getSrc().getType());

  if (failed(areAllLLVMTypes(memcpyOp, adaptor.getOperands(), rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, memcpyOp)))
    return failure();

  auto loc = memcpyOp.getLoc();

  MemRefDescriptor srcDesc(adaptor.getSrc());
  Value numElements = getNumElements(rewriter, loc, memRefType, srcDesc);

  // Compute the byte size via a null-based GEP to element `numElements`.
  Type elementPtrType = getElementPtrType(memRefType);
  Value nullPtr = LLVM::ZeroOp::create(rewriter, loc, elementPtrType);
  Value gepPtr = LLVM::GEPOp::create(
      rewriter, loc, elementPtrType,
      typeConverter->convertType(memRefType.getElementType()), nullPtr,
      numElements);
  auto sizeBytes =
      LLVM::PtrToIntOp::create(rewriter, loc, getIndexType(), gepPtr);

  auto src = bitAndAddrspaceCast(loc, rewriter, llvmPointerType,
                                 srcDesc.alignedPtr(rewriter, loc),
                                 *getTypeConverter());
  auto dst = bitAndAddrspaceCast(
      loc, rewriter, llvmPointerType,
      MemRefDescriptor(adaptor.getDst()).alignedPtr(rewriter, loc),
      *getTypeConverter());

  auto stream = adaptor.getAsyncDependencies().front();
  memcpyCallBuilder.create(loc, rewriter, {dst, src, sizeBytes, stream});

  rewriter.replaceOp(memcpyOp, {stream});
  return success();
}
LogicalResult ConvertMemsetOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::MemsetOp memsetOp, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  auto memRefType = cast<MemRefType>(memsetOp.getDst().getType());

  if (failed(areAllLLVMTypes(memsetOp, adaptor.getOperands(), rewriter)) ||
      !isConvertibleAndHasIdentityMaps(memRefType) ||
      failed(isAsyncWithOneDependency(rewriter, memsetOp)))
    return failure();

  auto loc = memsetOp.getLoc();

  Type valueType = adaptor.getValue().getType();
  unsigned bitWidth =
      valueType.isIntOrFloat() ? valueType.getIntOrFloatBitWidth() : 0;
  // Only 16- and 32-bit ints/floats are supported by the runtime memset.
  if (!valueType.isIntOrFloat() || (bitWidth != 16 && bitWidth != 32)) {
    return rewriter.notifyMatchFailure(
        memsetOp, "value must be a 16 or 32 bit int or float");
  }

  unsigned valueTypeWidth = bitWidth;
  Type bitCastType = valueTypeWidth == 32 ? llvmInt32Type : llvmInt16Type;

  MemRefDescriptor dstDesc(adaptor.getDst());
  Value numElements = getNumElements(rewriter, loc, memRefType, dstDesc);

  auto value =
      LLVM::BitcastOp::create(rewriter, loc, bitCastType, adaptor.getValue());
  auto dst = bitAndAddrspaceCast(loc, rewriter, llvmPointerType,
                                 dstDesc.alignedPtr(rewriter, loc),
                                 *getTypeConverter());

  auto stream = adaptor.getAsyncDependencies().front();
  FunctionCallBuilder builder =
      valueTypeWidth == 32 ? memset32CallBuilder : memset16CallBuilder;
  builder.create(loc, rewriter, {dst, value, numElements, stream});

  rewriter.replaceOp(memsetOp, {stream});
  return success();
}
LogicalResult ConvertSetDefaultDeviceOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SetDefaultDeviceOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto call = setDefaultDeviceCallBuilder.create(loc, rewriter,
                                                 {adaptor.getDevIndex()});
  rewriter.replaceOp(op, call);
  return success();
}
template <typename T>
static Value genConstInt32From(OpBuilder &builder, Location loc, T tValue) {
  Type llvmInt32Type = builder.getIntegerType(32);
  return LLVM::ConstantOp::create(builder, loc, llvmInt32Type,
                                  static_cast<int32_t>(tValue));
}

template <typename T>
static Value genConstFloat32From(OpBuilder &builder, Location loc, T tValue) {
  Type llvmFloat32Type = builder.getF32Type();
  return LLVM::ConstantOp::create(
      builder, loc, llvmFloat32Type,
      builder.getF32FloatAttr(static_cast<float>(tValue)));
}
LogicalResult ConvertCreateDnTensorOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateDnTensorOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  Value pTensor =
      MemRefDescriptor(adaptor.getMemref()).allocatedPtr(rewriter, loc);
  Type dType = op.getMemref().getType().getElementType();
  auto dtp = genConstInt32From(rewriter, loc, getCuSparseDataTypeFrom(dType));

  SmallVector<Value, 4> dims;
  for (Value dim : adaptor.getDims()) {
    dims.push_back(dim);
  }

  Value handle;
  if (dims.size() == 2) {
    // Use the cusparseLt create call if the dnmat is used with a 2:4 sparse
    // matrix; cusparseLt expects caller-allocated handle storage (the size
    // below is assumed to match the handle struct in the runtime wrappers).
    if (isSpMMCusparseLtOp(op.getDnTensor())) {
      auto handleSz = LLVM::ConstantOp::create(rewriter, loc, getIndexType(),
                                               rewriter.getIndexAttr(11032));
      handle = LLVM::AllocaOp::create(rewriter, loc, llvmPointerType,
                                      llvmInt8Type, handleSz,
                                      /*alignment=*/16);
      handle = LLVM::BitcastOp::create(rewriter, loc, llvmPointerType, handle);

      createLtDnMatCallBuilder
          .create(loc, rewriter,
                  {handle, dims[0], dims[1], pTensor, dtp, stream})
          .getResult();
    } else {
      handle =
          createDnMatCallBuilder
              .create(loc, rewriter, {dims[0], dims[1], pTensor, dtp, stream})
              .getResult();
    }
  } else {
    assert(dims.size() == 1 && "Only 1D and 2D tensors are supported");
    handle = createDnVecCallBuilder
                 .create(loc, rewriter, {dims[0], pTensor, dtp, stream})
                 .getResult();
  }
  rewriter.replaceOp(op, {handle, stream});
  return success();
}
LogicalResult ConvertDestroyDnTensorOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DestroyDnTensorOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  auto definingOp = op.getDnTensor().getDefiningOp<gpu::CreateDnTensorOp>();
  SmallVector<Value, 4> dims;
  for (Value dim : definingOp.getDims()) {
    dims.push_back(dim);
  }
  if (dims.size() == 2) {
    // Match the create path: 2:4-sparse users go through cusparseLt.
    if (isSpMMCusparseLtOp(op.getDnTensor())) {
      destroyCuSparseLtDnMatBuilder.create(loc, rewriter,
                                           {adaptor.getDnTensor(), stream});
    } else {
      destroyDnMatCallBuilder.create(loc, rewriter,
                                     {adaptor.getDnTensor(), stream});
    }
  } else {
    assert(dims.size() == 1 && "Only 1D and 2D tensors are supported");
    destroyDnVecCallBuilder.create(loc, rewriter,
                                   {adaptor.getDnTensor(), stream});
  }
  rewriter.replaceOp(op, {stream});
  return success();
}
LogicalResult ConvertCreateCooOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateCooOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  Value pRowIdxs =
      MemRefDescriptor(adaptor.getRowIdxs()).allocatedPtr(rewriter, loc);
  Value pColIdxs =
      MemRefDescriptor(adaptor.getColIdxs()).allocatedPtr(rewriter, loc);
  Value pValues =
      MemRefDescriptor(adaptor.getValues()).allocatedPtr(rewriter, loc);
  Type iType =
      llvm::cast<MemRefType>(op.getColIdxs().getType()).getElementType();
  Type dType =
      llvm::cast<MemRefType>(op.getValues().getType()).getElementType();
  auto itp = genConstInt32From(rewriter, loc, getCuSparseIndexTypeFrom(iType));
  auto dtp = genConstInt32From(rewriter, loc, getCuSparseDataTypeFrom(dType));
  auto handle =
      createCooCallBuilder
          .create(loc, rewriter,
                  {adaptor.getRows(), adaptor.getCols(), adaptor.getNnz(),
                   pRowIdxs, pColIdxs, pValues, itp, dtp, stream})
          .getResult();
  rewriter.replaceOp(op, {handle, stream});
  return success();
}
LogicalResult ConvertCreateCooAoSOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateCooAoSOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  Value pIdxs = MemRefDescriptor(adaptor.getIdxs()).allocatedPtr(rewriter, loc);
  Value pValues =
      MemRefDescriptor(adaptor.getValues()).allocatedPtr(rewriter, loc);
  Type iType = llvm::cast<MemRefType>(op.getIdxs().getType()).getElementType();
  Type dType =
      llvm::cast<MemRefType>(op.getValues().getType()).getElementType();
  auto itp = genConstInt32From(rewriter, loc, getCuSparseIndexTypeFrom(iType));
  auto dtp = genConstInt32From(rewriter, loc, getCuSparseDataTypeFrom(dType));
  auto handle =
      createCooAoSCallBuilder
          .create(loc, rewriter,
                  {adaptor.getRows(), adaptor.getCols(), adaptor.getNnz(),
                   pIdxs, pValues, itp, dtp, stream})
          .getResult();
  rewriter.replaceOp(op, {handle, stream});
  return success();
}
LogicalResult ConvertCreateCsrOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateCsrOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  Value pRowPos =
      MemRefDescriptor(adaptor.getRowPos()).allocatedPtr(rewriter, loc);
  Value pColIdxs =
      MemRefDescriptor(adaptor.getColIdxs()).allocatedPtr(rewriter, loc);
  Value pValues =
      MemRefDescriptor(adaptor.getValues()).allocatedPtr(rewriter, loc);
  Type pType =
      llvm::cast<MemRefType>(op.getRowPos().getType()).getElementType();
  Type iType =
      llvm::cast<MemRefType>(op.getColIdxs().getType()).getElementType();
  Type dType =
      llvm::cast<MemRefType>(op.getValues().getType()).getElementType();
  auto ptp = genConstInt32From(rewriter, loc, getCuSparseIndexTypeFrom(pType));
  auto itp = genConstInt32From(rewriter, loc, getCuSparseIndexTypeFrom(iType));
  auto dtp = genConstInt32From(rewriter, loc, getCuSparseDataTypeFrom(dType));
  auto handle =
      createCsrCallBuilder
          .create(loc, rewriter,
                  {adaptor.getRows(), adaptor.getCols(), adaptor.getNnz(),
                   pRowPos, pColIdxs, pValues, ptp, itp, dtp, stream})
          .getResult();
  rewriter.replaceOp(op, {handle, stream});
  return success();
}
LogicalResult ConvertCreate2To4SpMatOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::Create2To4SpMatOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  Value pMat =
      MemRefDescriptor(adaptor.getMemref()).allocatedPtr(rewriter, loc);
  Type dType =
      llvm::cast<MemRefType>(op.getMemref().getType()).getElementType();
  auto dtp = genConstInt32From(rewriter, loc, getCuSparseLtDataTypeFrom(dType));

  // cusparseLt expects caller-allocated handle storage (the size below is
  // assumed to match the spmat handle struct in the runtime wrappers).
  auto handleSz = LLVM::ConstantOp::create(rewriter, loc, getIndexType(),
                                           rewriter.getIndexAttr(44104));
  Value handle = LLVM::AllocaOp::create(
      rewriter, loc, llvmPointerType, llvmInt8Type, handleSz, /*alignment=*/16);
  handle = LLVM::BitcastOp::create(rewriter, loc, llvmPointerType, handle);

  create2To4SpMatCallBuilder
      .create(loc, rewriter,
              {handle, adaptor.getRows(), adaptor.getCols(), pMat, dtp, stream})
      .getResult();
  rewriter.replaceOp(op, {handle, stream});
  return success();
}
LogicalResult ConvertDestroySpMatOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::DestroySpMatOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  // Use the cusparseLt destroy call if the spmat is 2:4 sparse.
  if (is2To4Sparsity(op.getSpmat())) {
    destroyCuSparseLtSpMatBuilder.create(loc, rewriter,
                                         {adaptor.getSpmat(), stream});
  } else {
    destroySpMatCallBuilder.create(loc, rewriter, {adaptor.getSpmat(), stream});
  }
  rewriter.replaceOp(op, {stream});
  return success();
}
LogicalResult ConvertSpMVBufferSizeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMVBufferSizeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto modeA = genConstInt32From(rewriter, loc, adaptor.getModeA());
  auto computeType = genConstInt32From(
      rewriter, loc, getCuSparseDataTypeFrom(adaptor.getComputeType()));
  auto stream = adaptor.getAsyncDependencies().front();
  auto bufferSize = spMVBufferSizeCallBuilder
                        .create(loc, rewriter,
                                {modeA, adaptor.getSpmatA(), adaptor.getDnX(),
                                 adaptor.getDnY(), computeType, stream})
                        .getResult();
  rewriter.replaceOp(op, {bufferSize, stream});
  return success();
}
LogicalResult ConvertSpMVOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMVOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto modeA = genConstInt32From(rewriter, loc, adaptor.getModeA());
  auto computeType = genConstInt32From(
      rewriter, loc, getCuSparseDataTypeFrom(adaptor.getComputeType()));
  auto stream = adaptor.getAsyncDependencies().front();
  Value pBuf =
      MemRefDescriptor(adaptor.getBuffer()).allocatedPtr(rewriter, loc);
  spMVCallBuilder.create(loc, rewriter,
                         {modeA, adaptor.getSpmatA(), adaptor.getDnX(),
                          adaptor.getDnY(), computeType, pBuf, stream});
  rewriter.replaceOp(op, {stream});
  return success();
}
LogicalResult ConvertSpMMBufferSizeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMMBufferSizeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto modeA = genConstInt32From(rewriter, loc, adaptor.getModeA());
  auto modeB = genConstInt32From(rewriter, loc, adaptor.getModeB());
  auto stream = adaptor.getAsyncDependencies().front();
  if (is2To4Sparsity(op.getSpmatA())) {
    auto computeType = genConstInt32From(
        rewriter, loc, getCuSparseLtDataTypeFrom(adaptor.getComputeType()));
    auto pruneFlag =
        genConstInt32From(rewriter, loc, get2To4PruneFlag(op.getSpmatA()));
    // cusparseLt SpMM reports three buffer sizes; allocate stack space for
    // them and load each one back.
    auto three = LLVM::ConstantOp::create(rewriter, loc, getIndexType(),
                                          rewriter.getIndexAttr(3));
    auto bufferSize =
        LLVM::AllocaOp::create(rewriter, loc, llvmPointerType, llvmPointerType,
                               three, /*alignment=*/16);
    createCuSparseLtSpMMBufferSizeBuilder
        .create(loc, rewriter,
                {bufferSize, modeA, modeB, adaptor.getSpmatA(),
                 adaptor.getDnmatB(), adaptor.getDnmatC(), computeType,
                 pruneFlag, stream})
        .getResult();

    auto bufferSizePtr1 = LLVM::GEPOp::create(
        rewriter, loc, llvmPointerType, llvmPointerType, bufferSize,
        ValueRange{LLVM::ConstantOp::create(rewriter, loc, getIndexType(),
                                            rewriter.getIndexAttr(1))});
    auto bufferSizePtr2 = LLVM::GEPOp::create(
        rewriter, loc, llvmPointerType, llvmPointerType, bufferSize,
        ValueRange{LLVM::ConstantOp::create(rewriter, loc, getIndexType(),
                                            rewriter.getIndexAttr(2))});
    auto bufferSize0 =
        LLVM::LoadOp::create(rewriter, loc, llvmInt64Type, bufferSize);
    auto bufferSize1 =
        LLVM::LoadOp::create(rewriter, loc, llvmInt64Type, bufferSizePtr1);
    auto bufferSize2 =
        LLVM::LoadOp::create(rewriter, loc, llvmInt64Type, bufferSizePtr2);

    rewriter.replaceOp(op, {bufferSize0, bufferSize1, bufferSize2, stream});
  } else {
    auto computeType = genConstInt32From(
        rewriter, loc, getCuSparseDataTypeFrom(adaptor.getComputeType()));
    auto bufferSize =
        createSpMMBufferSizeCallBuilder
            .create(loc, rewriter,
                    {modeA, modeB, adaptor.getSpmatA(), adaptor.getDnmatB(),
                     adaptor.getDnmatC(), computeType, stream})
            .getResult();
    rewriter.replaceOp(op, {bufferSize, stream});
  }
  return success();
}
LogicalResult ConvertSDDMMBufferSizeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SDDMMBufferSizeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto modeA = genConstInt32From(rewriter, loc, adaptor.getModeA());
  auto modeB = genConstInt32From(rewriter, loc, adaptor.getModeB());
  auto computeType = genConstInt32From(
      rewriter, loc, getCuSparseDataTypeFrom(adaptor.getComputeType()));
  auto stream = adaptor.getAsyncDependencies().front();
  auto bufferSize =
      createSDDMMBufferSizeCallBuilder
          .create(loc, rewriter,
                  {modeA, modeB, adaptor.getDnmatA(), adaptor.getDnmatB(),
                   adaptor.getSpmatC(), computeType, stream})
          .getResult();
  rewriter.replaceOp(op, {bufferSize, stream});
  return success();
}
LogicalResult ConvertSpMMOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMMOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto modeA = genConstInt32From(rewriter, loc, adaptor.getModeA());
  auto modeB = genConstInt32From(rewriter, loc, adaptor.getModeB());
  auto computeType = genConstInt32From(
      rewriter, loc, getCuSparseDataTypeFrom(adaptor.getComputeType()));
  auto stream = adaptor.getAsyncDependencies().front();

  // A 2:4 sparse A operand is lowered through cusparseLt, which consumes the
  // three buffers produced by the buffer-size op.
  if (is2To4Sparsity(op.getSpmatA())) {
    SmallVector<Value> pBufs;
    for (Value buffer : adaptor.getBuffers()) {
      Value pBuf = MemRefDescriptor(buffer).allocatedPtr(rewriter, loc);
      pBufs.push_back(pBuf);
    }
    createCuSparseLtSpMMBuilder.create(
        loc, rewriter,
        {adaptor.getSpmatA(), adaptor.getDnmatB(), adaptor.getDnmatC(),
         pBufs[0], pBufs[1], pBufs[2], stream});
  } else {
    Value pBuf = MemRefDescriptor(adaptor.getBuffers().front())
                     .allocatedPtr(rewriter, loc);
    createSpMMCallBuilder.create(loc, rewriter,
                                 {modeA, modeB, adaptor.getSpmatA(),
                                  adaptor.getDnmatB(), adaptor.getDnmatC(),
                                  computeType, pBuf, stream});
  }
  rewriter.replaceOp(op, {stream});
  return success();
}
template <typename T>
LogicalResult ConvertSDDMMOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SDDMMOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto modeA = genConstInt32From(rewriter, loc, adaptor.getModeA());
  auto modeB = genConstInt32From(rewriter, loc, adaptor.getModeB());
  auto computeType = genConstInt32From(
      rewriter, loc, getCuSparseDataTypeFrom(adaptor.getComputeType()));
  auto stream = adaptor.getAsyncDependencies().front();
  Value pBuf =
      MemRefDescriptor(adaptor.getBuffer()).allocatedPtr(rewriter, loc);
  createSDDMMCallBuilder.create(loc, rewriter,
                                {modeA, modeB, adaptor.getDnmatA(),
                                 adaptor.getDnmatB(), adaptor.getSpmatC(),
                                 computeType, pBuf, stream});
  rewriter.replaceOp(op, {stream});
  return success();
}
LogicalResult
ConvertSpGEMMCreateDescrOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpGEMMCreateDescrOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  Value descr = createSpGEMMCreateDescrBuilder.create(loc, rewriter, {stream})
                    .getResult();
  rewriter.replaceOp(op, {descr, stream});
  return success();
}
LogicalResult
ConvertSpGEMMDestroyDescrOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpGEMMDestroyDescrOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  createSpGEMMDestroyDescrBuilder.create(loc, rewriter,
                                         {adaptor.getDesc(), stream});
  rewriter.replaceOp(op, {stream});
  return success();
}
LogicalResult
ConvertSpGEMMWorkEstimationOrComputeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpGEMMWorkEstimationOrComputeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto computeType = genConstInt32From(
      rewriter, loc, getCuSparseDataTypeFrom(adaptor.getComputeType()));
  auto modeA = genConstInt32From(rewriter, loc, adaptor.getModeA());
  auto modeB = genConstInt32From(rewriter, loc, adaptor.getModeB());
  auto stream = adaptor.getAsyncDependencies().front();

  Value pBuf =
      MemRefDescriptor(adaptor.getBuffer()).allocatedPtr(rewriter, loc);
  Value bufferSizeNew;

  if (adaptor.getKind() ==
      gpu::SpGEMMWorkEstimationOrComputeKind::WORK_ESTIMATION) {
    bufferSizeNew =
        createSpGEMMWorkEstimationBuilder
            .create(loc, rewriter,
                    {adaptor.getDesc(), modeA, modeB, adaptor.getSpmatA(),
                     adaptor.getSpmatB(), adaptor.getSpmatC(), computeType,
                     adaptor.getBufferSz(), pBuf, stream})
            .getResult();
  } else {
    bufferSizeNew =
        createSpGEMMComputeBuilder
            .create(loc, rewriter,
                    {adaptor.getDesc(), modeA, modeB, adaptor.getSpmatA(),
                     adaptor.getSpmatB(), adaptor.getSpmatC(), computeType,
                     adaptor.getBufferSz(), pBuf, stream})
            .getResult();
  }
  rewriter.replaceOp(op, {bufferSizeNew, stream});
  return success();
}
LogicalResult ConvertSpGEMMCopyOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpGEMMCopyOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto computeType = genConstInt32From(
      rewriter, loc, getCuSparseDataTypeFrom(adaptor.getComputeType()));
  auto modeA = genConstInt32From(rewriter, loc, adaptor.getModeA());
  auto modeB = genConstInt32From(rewriter, loc, adaptor.getModeB());
  auto stream = adaptor.getAsyncDependencies().front();
  createSpGEMMCopyBuilder.create(loc, rewriter,
                                 {adaptor.getDesc(), modeA, modeB,
                                  adaptor.getSpmatA(), adaptor.getSpmatB(),
                                  adaptor.getSpmatC(), computeType, stream});
  rewriter.replaceOp(op, {stream});
  return success();
}
LogicalResult ConvertSpMatGetSizeOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SpMatGetSizeOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();

  // Allocate a three-slot buffer for rows, cols, and nnz, and pass a pointer
  // to each slot to the runtime call.
  auto three = LLVM::ConstantOp::create(rewriter, loc, getIndexType(),
                                        rewriter.getIndexAttr(3));
  auto buffer = LLVM::AllocaOp::create(rewriter, loc, llvmPointerType,
                                       llvmInt64Type, three, /*alignment=*/16);

  auto rowsPtr = LLVM::GEPOp::create(
      rewriter, loc, llvmPointerType, llvmPointerType, buffer,
      ValueRange{LLVM::ConstantOp::create(rewriter, loc, getIndexType(),
                                          rewriter.getIndexAttr(0))});
  auto colsPtr = LLVM::GEPOp::create(
      rewriter, loc, llvmPointerType, llvmPointerType, buffer,
      ValueRange{LLVM::ConstantOp::create(rewriter, loc, getIndexType(),
                                          rewriter.getIndexAttr(1))});
  auto nnzsPtr = LLVM::GEPOp::create(
      rewriter, loc, llvmPointerType, llvmPointerType, buffer,
      ValueRange{LLVM::ConstantOp::create(rewriter, loc, getIndexType(),
                                          rewriter.getIndexAttr(2))});
  createSpMatGetSizeBuilder.create(
      loc, rewriter, {adaptor.getSpmat(), rowsPtr, colsPtr, nnzsPtr, stream});
  auto rows = LLVM::LoadOp::create(rewriter, loc, llvmInt64Type, rowsPtr);
  auto cols = LLVM::LoadOp::create(rewriter, loc, llvmInt64Type, colsPtr);
  auto nnzs = LLVM::LoadOp::create(rewriter, loc, llvmInt64Type, nnzsPtr);

  rewriter.replaceOp(op, {rows, cols, nnzs, stream});
  return success();
}
LogicalResult ConvertSetCsrPointersOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::SetCsrPointersOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  Value pPos =
      MemRefDescriptor(adaptor.getPositions()).allocatedPtr(rewriter, loc);
  Value pCrd =
      MemRefDescriptor(adaptor.getCoordinates()).allocatedPtr(rewriter, loc);
  Value pVal =
      MemRefDescriptor(adaptor.getValues()).allocatedPtr(rewriter, loc);
  createSetCsrPointersBuilder.create(
      loc, rewriter, {adaptor.getSpmat(), pPos, pCrd, pVal, stream});
  rewriter.replaceOp(op, {stream});
  return success();
}
LogicalResult ConvertCreateCscOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateCscOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  Value pColPos =
      MemRefDescriptor(adaptor.getColPos()).allocatedPtr(rewriter, loc);
  Value pRowIdxs =
      MemRefDescriptor(adaptor.getRowIdxs()).allocatedPtr(rewriter, loc);
  Value pValues =
      MemRefDescriptor(adaptor.getValues()).allocatedPtr(rewriter, loc);
  Type pType =
      llvm::cast<MemRefType>(op.getColPos().getType()).getElementType();
  Type iType =
      llvm::cast<MemRefType>(op.getRowIdxs().getType()).getElementType();
  Type dType =
      llvm::cast<MemRefType>(op.getValues().getType()).getElementType();
  auto ptp = genConstInt32From(rewriter, loc, getCuSparseIndexTypeFrom(pType));
  auto itp = genConstInt32From(rewriter, loc, getCuSparseIndexTypeFrom(iType));
  auto dtp = genConstInt32From(rewriter, loc, getCuSparseDataTypeFrom(dType));
  auto handle =
      createCscCallBuilder
          .create(loc, rewriter,
                  {adaptor.getRows(), adaptor.getCols(), adaptor.getNnz(),
                   pColPos, pRowIdxs, pValues, ptp, itp, dtp, stream})
          .getResult();
  rewriter.replaceOp(op, {handle, stream});
  return success();
}
LogicalResult ConvertCreateBsrOpToGpuRuntimeCallPattern::matchAndRewrite(
    gpu::CreateBsrOp op, OpAdaptor adaptor,
    ConversionPatternRewriter &rewriter) const {
  Location loc = op.getLoc();
  auto stream = adaptor.getAsyncDependencies().front();
  Value pRowPos =
      MemRefDescriptor(adaptor.getBRowPos()).allocatedPtr(rewriter, loc);
  Value pColIdxs =
      MemRefDescriptor(adaptor.getBColIdxs()).allocatedPtr(rewriter, loc);
  Value pValues =
      MemRefDescriptor(adaptor.getValues()).allocatedPtr(rewriter, loc);
  Type pType =
      llvm::cast<MemRefType>(op.getBRowPos().getType()).getElementType();
  Type iType =
      llvm::cast<MemRefType>(op.getBColIdxs().getType()).getElementType();
  Type dType =
      llvm::cast<MemRefType>(op.getValues().getType()).getElementType();
  auto ptp = genConstInt32From(rewriter, loc, getCuSparseIndexTypeFrom(pType));
  auto itp = genConstInt32From(rewriter, loc, getCuSparseIndexTypeFrom(iType));
  auto dtp = genConstInt32From(rewriter, loc, getCuSparseDataTypeFrom(dType));
  auto handle =
      createBsrCallBuilder
          .create(loc, rewriter,
                  {adaptor.getBrows(), adaptor.getBcols(), adaptor.getBnnz(),
                   adaptor.getRBlockSize(), adaptor.getCBlockSize(), pRowPos,
                   pColIdxs, pValues, ptp, itp, dtp, stream})
          .getResult();
  rewriter.replaceOp(op, {handle, stream});
  return success();
}
void mlir::populateGpuToLLVMConversionPatterns(
    LLVMTypeConverter &converter, RewritePatternSet &patterns,
    bool kernelBarePtrCallConv, bool kernelIntersperseSizeCallConv) {
  // Lower the GPU dialect's opaque handle types to untyped pointers.
  addOpaquePointerConversion<gpu::AsyncTokenType>(converter);
  addOpaquePointerConversion<gpu::SparseDnTensorHandleType>(converter);
  addOpaquePointerConversion<gpu::SparseSpMatHandleType>(converter);
  addOpaquePointerConversion<gpu::SparseSpGEMMOpHandleType>(converter);

  patterns.add<ConvertAllocOpToGpuRuntimeCallPattern,
               ConvertDeallocOpToGpuRuntimeCallPattern,
               ConvertHostRegisterOpToGpuRuntimeCallPattern,
               ConvertHostUnregisterOpToGpuRuntimeCallPattern,
               ConvertMemcpyOpToGpuRuntimeCallPattern,
               ConvertMemsetOpToGpuRuntimeCallPattern,
               ConvertSetDefaultDeviceOpToGpuRuntimeCallPattern,
               ConvertWaitAsyncOpToGpuRuntimeCallPattern,
               ConvertWaitOpToGpuRuntimeCallPattern,
               ConvertAsyncYieldToGpuRuntimeCallPattern,
               ConvertCreateDnTensorOpToGpuRuntimeCallPattern,
               ConvertDestroyDnTensorOpToGpuRuntimeCallPattern,
               ConvertCreateCooOpToGpuRuntimeCallPattern,
               ConvertCreateCooAoSOpToGpuRuntimeCallPattern,
               ConvertCreateCsrOpToGpuRuntimeCallPattern,
               ConvertCreateCscOpToGpuRuntimeCallPattern,
               ConvertCreateBsrOpToGpuRuntimeCallPattern,
               ConvertCreate2To4SpMatOpToGpuRuntimeCallPattern,
               ConvertDestroySpMatOpToGpuRuntimeCallPattern,
               ConvertSpMVBufferSizeOpToGpuRuntimeCallPattern,
               ConvertSpMVOpToGpuRuntimeCallPattern,
               ConvertSpMMBufferSizeOpToGpuRuntimeCallPattern,
               ConvertSDDMMBufferSizeOpToGpuRuntimeCallPattern,
               ConvertSpMMOpToGpuRuntimeCallPattern,
               ConvertSDDMMOpToGpuRuntimeCallPattern,
               ConvertSpGEMMCreateDescrOpToGpuRuntimeCallPattern,
               ConvertSpGEMMDestroyDescrOpToGpuRuntimeCallPattern,
               ConvertSpGEMMWorkEstimationOrComputeOpToGpuRuntimeCallPattern,
               ConvertSpGEMMCopyOpToGpuRuntimeCallPattern,
               ConvertSpMatGetSizeOpToGpuRuntimeCallPattern,
               ConvertSetCsrPointersOpToGpuRuntimeCallPattern>(converter);
  patterns.add<LegalizeLaunchFuncOpPattern>(converter, kernelBarePtrCallConv,
                                            kernelIntersperseSizeCallConv);
}
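// Example client (a sketch): a custom lowering pipeline can reuse these
// patterns without running the full gpu-to-llvm pass.
//
//   LLVMTypeConverter converter(ctx);
//   RewritePatternSet patterns(ctx);
//   ConversionTarget target(*ctx);
//   target.addLegalDialect<LLVM::LLVMDialect>();
//   populateGpuToLLVMConversionPatterns(converter, patterns);
//   if (failed(applyPartialConversion(module, target, std::move(patterns))))
//     signalPassFailure();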
namespace {
/// Implement the ConvertToLLVM interface on gpu.module.
struct GPUModuleOpConvertToLLVMInterface
    : public ConvertToLLVMOpInterface::ExternalModel<
          GPUModuleOpConvertToLLVMInterface, gpu::GPUModuleOp> {
  /// Get the conversion attributes from the module's target attribute.
  void getConvertToLLVMConversionAttrs(
      Operation *op, SmallVectorImpl<ConvertToLLVMAttrInterface> &attrs) const;
};
} // namespace

void GPUModuleOpConvertToLLVMInterface::getConvertToLLVMConversionAttrs(
    Operation *op, SmallVectorImpl<ConvertToLLVMAttrInterface> &attrs) const {
  auto module = cast<gpu::GPUModuleOp>(op);
  ArrayAttr targetsAttr = module.getTargetsAttr();
  // Bail out if there are no target attributes or more than one target.
  if (!targetsAttr || targetsAttr.size() != 1)
    return;
  if (auto patternAttr = dyn_cast<ConvertToLLVMAttrInterface>(targetsAttr[0]))
    attrs.push_back(patternAttr);
}

void mlir::gpu::registerConvertGpuToLLVMInterface(DialectRegistry &registry) {
  registry.addExtension(+[](MLIRContext *ctx, gpu::GPUDialect *dialect) {
    gpu::GPUModuleOp::attachInterface<GPUModuleOpConvertToLLVMInterface>(*ctx);
  });
}
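// Clients register the interface through the dialect registry (a sketch):
//
//   DialectRegistry registry;
//   registerConvertGpuToLLVMInterface(registry);
//   context.appendDialectRegistry(registry);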