MLIR 23.0.0git
GPUToLLVMSPV.cpp
Go to the documentation of this file.
1//===- GPUToLLVMSPV.cpp - Convert GPU operations to LLVM dialect ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
10
25#include "mlir/IR/Matchers.h"
27#include "mlir/IR/SymbolTable.h"
28#include "mlir/Pass/Pass.h"
29#include "mlir/Support/LLVM.h"
31
32#include "llvm/ADT/TypeSwitch.h"
33#include "llvm/Support/FormatVariadic.h"
34
35#define DEBUG_TYPE "gpu-to-llvm-spv"
36
37using namespace mlir;
38
39namespace mlir {
40#define GEN_PASS_DEF_CONVERTGPUOPSTOLLVMSPVOPS
41#include "mlir/Conversion/Passes.h.inc"
42} // namespace mlir
43
44//===----------------------------------------------------------------------===//
45// Helper Functions
46//===----------------------------------------------------------------------===//
47
48static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
49 StringRef name,
50 ArrayRef<Type> paramTypes,
51 Type resultType, bool isMemNone,
52 bool isConvergent) {
53 auto func = dyn_cast_or_null<LLVM::LLVMFuncOp>(
54 SymbolTable::lookupSymbolIn(symbolTable, name));
55 if (!func) {
56 OpBuilder b(symbolTable->getRegion(0));
57 func = LLVM::LLVMFuncOp::create(
58 b, symbolTable->getLoc(), name,
59 LLVM::LLVMFunctionType::get(resultType, paramTypes));
60 func.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
61 func.setNoUnwind(true);
62 func.setWillReturn(true);
63
64 if (isMemNone) {
65 // no externally observable effects
66 constexpr auto noModRef = mlir::LLVM::ModRefInfo::NoModRef;
67 auto memAttr = b.getAttr<LLVM::MemoryEffectsAttr>(
68 /*other=*/noModRef,
69 /*argMem=*/noModRef, /*inaccessibleMem=*/noModRef,
70 /*errnoMem=*/noModRef,
71 /*targetMem0=*/noModRef,
72 /*targetMem1=*/noModRef);
73 func.setMemoryEffectsAttr(memAttr);
74 }
75
76 func.setConvergent(isConvergent);
77 }
78 return func;
79}
80
81static LLVM::CallOp createSPIRVBuiltinCall(Location loc,
82 ConversionPatternRewriter &rewriter,
83 LLVM::LLVMFuncOp func,
84 ValueRange args) {
85 auto call = LLVM::CallOp::create(rewriter, loc, func, args);
86 call.setCConv(func.getCConv());
87 call.setConvergentAttr(func.getConvergentAttr());
88 call.setNoUnwindAttr(func.getNoUnwindAttr());
89 call.setWillReturnAttr(func.getWillReturnAttr());
90 call.setMemoryEffectsAttr(func.getMemoryEffectsAttr());
91 return call;
92}
93
94namespace {
95//===----------------------------------------------------------------------===//
96// Barriers
97//===----------------------------------------------------------------------===//
98
99/// Replace `gpu.barrier` with an `llvm.call` to `barrier` using
100/// `CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE`, ensuring that all memory
101/// accesses are visible to all work-items in the work-group.
102/// ```
103/// // gpu.barrier
104/// // 3 = CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE
105/// %c3 = llvm.mlir.constant(3: i32) : i32
106/// llvm.call spir_funccc @_Z7barrierj(%c3) : (i32) -> ()
107/// ```
108struct GPUBarrierConversion final : ConvertOpToLLVMPattern<gpu::BarrierOp> {
110
111 LogicalResult
112 matchAndRewrite(gpu::BarrierOp op, OpAdaptor adaptor,
113 ConversionPatternRewriter &rewriter) const final {
114 constexpr StringLiteral funcName = "_Z7barrierj";
115
116 Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
117 assert(moduleOp && "Expecting module");
118 Type flagTy = rewriter.getI32Type();
119 Type voidTy = rewriter.getType<LLVM::LLVMVoidType>();
120 LLVM::LLVMFuncOp func =
121 lookupOrCreateSPIRVFn(moduleOp, funcName, flagTy, voidTy,
122 /*isMemNone=*/false, /*isConvergent=*/true);
123
124 // Values used by SPIR-V backend to represent `CLK_LOCAL_MEM_FENCE` and
125 // `CLK_GLOBAL_MEM_FENCE`. See `llvm/lib/Target/SPIRV/SPIRVBuiltins.td`.
126 constexpr int64_t localMemFenceFlag = 1;
127 constexpr int64_t globalMemFenceFlag = 2;
128 int64_t memFenceFlag = 0;
129 std::optional<ArrayAttr> addressSpaces = adaptor.getAddressSpaces();
130 if (addressSpaces) {
131 for (Attribute attr : addressSpaces.value()) {
132 auto addressSpace = cast<gpu::AddressSpaceAttr>(attr).getValue();
133 switch (addressSpace) {
134 case gpu::AddressSpace::Global:
135 memFenceFlag = memFenceFlag | globalMemFenceFlag;
136 break;
137 case gpu::AddressSpace::Workgroup:
138 memFenceFlag = memFenceFlag | localMemFenceFlag;
139 break;
140 case gpu::AddressSpace::Private:
141 case gpu::AddressSpace::Constant:
142 // Private is thread-local, constant is read-only; no fencing needed.
143 break;
144 }
145 }
146 } else {
147 memFenceFlag = localMemFenceFlag | globalMemFenceFlag;
148 }
149 Location loc = op->getLoc();
150 Value flag = LLVM::ConstantOp::create(rewriter, loc, flagTy, memFenceFlag);
151 rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, flag));
152 return success();
153 }
154};
155
156//===----------------------------------------------------------------------===//
157// SPIR-V Builtins
158//===----------------------------------------------------------------------===//
159
160/// Replace `gpu.*` with an `llvm.call` to the corresponding SPIR-V builtin with
161/// a constant argument for the `dimension` attribute. Return type will depend
162/// on index width option:
163/// ```
164/// // %thread_id_y = gpu.thread_id y
165/// %c1 = llvm.mlir.constant(1: i32) : i32
166/// %0 = llvm.call spir_funccc @_Z12get_local_idj(%c1) : (i32) -> i64
167/// ```
168struct LaunchConfigConversion : ConvertToLLVMPattern {
169 LaunchConfigConversion(StringRef funcName, StringRef rootOpName,
170 MLIRContext *context,
171 const LLVMTypeConverter &typeConverter,
172 PatternBenefit benefit)
173 : ConvertToLLVMPattern(rootOpName, context, typeConverter, benefit),
174 funcName(funcName) {}
175
176 virtual gpu::Dimension getDimension(Operation *op) const = 0;
177
178 LogicalResult
179 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
180 ConversionPatternRewriter &rewriter) const final {
181 Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
182 assert(moduleOp && "Expecting module");
183 Type dimTy = rewriter.getI32Type();
184 Type indexTy = getTypeConverter()->getIndexType();
185 LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(moduleOp, funcName, dimTy,
186 indexTy, /*isMemNone=*/true,
187 /*isConvergent=*/false);
188
189 Location loc = op->getLoc();
190 gpu::Dimension dim = getDimension(op);
191 Value dimVal = LLVM::ConstantOp::create(rewriter, loc, dimTy,
192 static_cast<int64_t>(dim));
193 rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, dimVal));
194 return success();
195 }
196
197 StringRef funcName;
198};
199
/// Binds a concrete launch-config op (`SourceOp`) to its OpenCL builtin name,
/// provided by the per-op `getFuncName` specializations below.
template <typename SourceOp>
struct LaunchConfigOpConversion final : LaunchConfigConversion {
  // Mangled OpenCL builtin name for `SourceOp`; specialized per op below.
  static StringRef getFuncName();

  explicit LaunchConfigOpConversion(const LLVMTypeConverter &typeConverter,
                                    PatternBenefit benefit = 1)
      : LaunchConfigConversion(getFuncName(), SourceOp::getOperationName(),
                               &typeConverter.getContext(), typeConverter,
                               benefit) {}

  gpu::Dimension getDimension(Operation *op) const final {
    return cast<SourceOp>(op).getDimension();
  }
};
214
// Itanium-mangled names of the OpenCL work-item builtins, e.g.
// `_Z12get_group_idj` is `get_group_id(uint)`.
template <>
StringRef LaunchConfigOpConversion<gpu::BlockIdOp>::getFuncName() {
  return "_Z12get_group_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GridDimOp>::getFuncName() {
  return "_Z14get_num_groupsj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::BlockDimOp>::getFuncName() {
  return "_Z14get_local_sizej";
}

template <>
StringRef LaunchConfigOpConversion<gpu::ThreadIdOp>::getFuncName() {
  return "_Z12get_local_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GlobalIdOp>::getFuncName() {
  return "_Z13get_global_idj";
}
239
240//===----------------------------------------------------------------------===//
241// Shuffles
242//===----------------------------------------------------------------------===//
243
244/// Replace `gpu.shuffle` with an `llvm.call` to the corresponding SPIR-V
245/// builtin for `shuffleResult`, keeping `value` and `offset` arguments, and a
246/// `true` constant for the `valid` result type. Conversion will only take place
247/// if `width` is constant and equal to the `subgroup` pass option:
248/// ```
249/// // %0 = gpu.shuffle idx %value, %offset, %width : f64
250/// %0 = llvm.call spir_funccc @_Z17sub_group_shuffledj(%value, %offset)
251/// : (f64, i32) -> f64
252/// ```
253struct GPUShuffleConversion final : ConvertOpToLLVMPattern<gpu::ShuffleOp> {
255
256 static StringRef getBaseName(gpu::ShuffleMode mode) {
257 switch (mode) {
258 case gpu::ShuffleMode::IDX:
259 return "sub_group_shuffle";
260 case gpu::ShuffleMode::XOR:
261 return "sub_group_shuffle_xor";
262 case gpu::ShuffleMode::UP:
263 return "sub_group_shuffle_up";
264 case gpu::ShuffleMode::DOWN:
265 return "sub_group_shuffle_down";
266 }
267 llvm_unreachable("Unhandled shuffle mode");
268 }
269
270 static std::optional<StringRef> getTypeMangling(Type type) {
272 .Case([](Float16Type) { return "Dhj"; })
273 .Case([](Float32Type) { return "fj"; })
274 .Case([](Float64Type) { return "dj"; })
275 .Case([](IntegerType intTy) -> std::optional<StringRef> {
276 switch (intTy.getWidth()) {
277 case 8:
278 return "cj";
279 case 16:
280 return "sj";
281 case 32:
282 return "ij";
283 case 64:
284 return "lj";
285 }
286 return std::nullopt;
287 })
288 .Default(std::nullopt);
289 }
290
291 static std::optional<std::string> getFuncName(gpu::ShuffleMode mode,
292 Type type) {
293 StringRef baseName = getBaseName(mode);
294 std::optional<StringRef> typeMangling = getTypeMangling(type);
295 if (!typeMangling)
296 return std::nullopt;
297 return llvm::formatv("_Z{}{}{}", baseName.size(), baseName,
298 typeMangling.value());
299 }
300
301 /// Get the subgroup size from the target or return a default.
302 static std::optional<int> getSubgroupSize(Operation *op) {
303 auto parentFunc = op->getParentOfType<LLVM::LLVMFuncOp>();
304 if (!parentFunc)
305 return std::nullopt;
306 return parentFunc.getIntelReqdSubGroupSize();
307 }
308
309 static bool hasValidWidth(gpu::ShuffleOp op, int subgroupSize) {
310 llvm::APInt val;
311 Value width = op.getWidth();
312 return matchPattern(width, m_ConstantInt(&val)) && val == subgroupSize;
313 }
314
315 static Value bitcastOrExtBeforeShuffle(Value oldVal, Location loc,
316 ConversionPatternRewriter &rewriter) {
317 return TypeSwitch<Type, Value>(oldVal.getType())
318 .Case([&](BFloat16Type) {
319 return LLVM::BitcastOp::create(rewriter, loc, rewriter.getI16Type(),
320 oldVal);
321 })
322 .Case([&](IntegerType intTy) -> Value {
323 if (intTy.getWidth() == 1)
324 return LLVM::ZExtOp::create(rewriter, loc, rewriter.getI8Type(),
325 oldVal);
326 return oldVal;
327 })
328 .Default(oldVal);
329 }
330
331 static Value bitcastOrTruncAfterShuffle(Value oldVal, Type newTy,
332 Location loc,
333 ConversionPatternRewriter &rewriter) {
334 return TypeSwitch<Type, Value>(newTy)
335 .Case([&](BFloat16Type) {
336 return LLVM::BitcastOp::create(rewriter, loc, newTy, oldVal);
337 })
338 .Case([&](IntegerType intTy) -> Value {
339 if (intTy.getWidth() == 1)
340 return LLVM::TruncOp::create(rewriter, loc, newTy, oldVal);
341 return oldVal;
342 })
343 .Default(oldVal);
344 }
345
346 LogicalResult
347 matchAndRewrite(gpu::ShuffleOp op, OpAdaptor adaptor,
348 ConversionPatternRewriter &rewriter) const final {
349 auto maybeSubgroupSize = getSubgroupSize(op);
350 if (maybeSubgroupSize && !hasValidWidth(op, maybeSubgroupSize.value()))
351 return rewriter.notifyMatchFailure(
352 op, "shuffle width and subgroup size mismatch");
353
354 Location loc = op->getLoc();
355 Value inValue =
356 bitcastOrExtBeforeShuffle(adaptor.getValue(), loc, rewriter);
357 std::optional<std::string> funcName =
358 getFuncName(op.getMode(), inValue.getType());
359 if (!funcName)
360 return rewriter.notifyMatchFailure(op, "unsupported value type");
361
362 Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
363 assert(moduleOp && "Expecting module");
364 Type valueType = inValue.getType();
365 Type offsetType = adaptor.getOffset().getType();
366 Type resultType = valueType;
367 LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(
368 moduleOp, funcName.value(), {valueType, offsetType}, resultType,
369 /*isMemNone=*/false, /*isConvergent=*/true);
370
371 std::array<Value, 2> args{inValue, adaptor.getOffset()};
372 Value result =
373 createSPIRVBuiltinCall(loc, rewriter, func, args).getResult();
374 Value resultOrConversion =
375 bitcastOrTruncAfterShuffle(result, op.getType(0), loc, rewriter);
376
377 Value trueVal =
378 LLVM::ConstantOp::create(rewriter, loc, rewriter.getI1Type(), true);
379 rewriter.replaceOp(op, {resultOrConversion, trueVal});
380 return success();
381 }
382};
383
384class MemorySpaceToOpenCLMemorySpaceConverter final : public TypeConverter {
385public:
386 MemorySpaceToOpenCLMemorySpaceConverter(MLIRContext *ctx) {
387 addConversion([](Type t) { return t; });
388 addConversion([ctx](BaseMemRefType memRefType) -> std::optional<Type> {
389 // Attach global addr space attribute to memrefs with no addr space attr
390 Attribute memSpaceAttr = memRefType.getMemorySpace();
391 if (memSpaceAttr)
392 return std::nullopt;
393
394 unsigned globalAddrspace = storageClassToAddressSpace(
395 spirv::ClientAPI::OpenCL, spirv::StorageClass::CrossWorkgroup);
396 Attribute addrSpaceAttr =
397 IntegerAttr::get(IntegerType::get(ctx, 64), globalAddrspace);
398 if (auto rankedType = dyn_cast<MemRefType>(memRefType)) {
399 return MemRefType::get(memRefType.getShape(),
400 memRefType.getElementType(),
401 rankedType.getLayout(), addrSpaceAttr);
402 }
403 return UnrankedMemRefType::get(memRefType.getElementType(),
404 addrSpaceAttr);
405 });
406 addConversion([this](FunctionType type) {
407 auto inputs = llvm::map_to_vector(
408 type.getInputs(), [this](Type ty) { return convertType(ty); });
409 auto results = llvm::map_to_vector(
410 type.getResults(), [this](Type ty) { return convertType(ty); });
411 return FunctionType::get(type.getContext(), inputs, results);
412 });
413 }
414};
415
416//===----------------------------------------------------------------------===//
417// Subgroup query ops.
418//===----------------------------------------------------------------------===//
419
420template <typename SubgroupOp>
421struct GPUSubgroupOpConversion final : ConvertOpToLLVMPattern<SubgroupOp> {
422 using ConvertOpToLLVMPattern<SubgroupOp>::ConvertOpToLLVMPattern;
424
425 LogicalResult
426 matchAndRewrite(SubgroupOp op, typename SubgroupOp::Adaptor adaptor,
427 ConversionPatternRewriter &rewriter) const final {
428 constexpr StringRef funcName = [] {
429 if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupIdOp>) {
430 return "_Z16get_sub_group_id";
431 } else if constexpr (std::is_same_v<SubgroupOp, gpu::LaneIdOp>) {
432 return "_Z22get_sub_group_local_id";
433 } else if constexpr (std::is_same_v<SubgroupOp, gpu::NumSubgroupsOp>) {
434 return "_Z18get_num_sub_groups";
435 } else if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupSizeOp>) {
436 return "_Z18get_sub_group_size";
437 }
438 }();
439
440 Operation *moduleOp =
441 op->template getParentWithTrait<OpTrait::SymbolTable>();
442 Type resultTy = rewriter.getI32Type();
443 LLVM::LLVMFuncOp func =
444 lookupOrCreateSPIRVFn(moduleOp, funcName, {}, resultTy,
445 /*isMemNone=*/false, /*isConvergent=*/false);
446
447 Location loc = op->getLoc();
448 Value result = createSPIRVBuiltinCall(loc, rewriter, func, {}).getResult();
449
450 Type indexTy = getTypeConverter()->getIndexType();
451 if (resultTy != indexTy) {
452 if (indexTy.getIntOrFloatBitWidth() < resultTy.getIntOrFloatBitWidth()) {
453 return failure();
454 }
455 result = LLVM::ZExtOp::create(rewriter, loc, indexTy, result);
456 }
457
458 rewriter.replaceOp(op, result);
459 return success();
460 }
461};
462
463//===----------------------------------------------------------------------===//
464// GPU To LLVM-SPV Pass.
465//===----------------------------------------------------------------------===//
466
467struct GPUToLLVMSPVConversionPass final
468 : impl::ConvertGpuOpsToLLVMSPVOpsBase<GPUToLLVMSPVConversionPass> {
469 using Base::Base;
470
471 void runOnOperation() final {
472 MLIRContext *context = &getContext();
473 RewritePatternSet patterns(context);
474
475 LowerToLLVMOptions options(context);
476 options.overrideIndexBitwidth(this->use64bitIndex ? 64 : 32);
477 LLVMTypeConverter converter(context, options);
478 LLVMConversionTarget target(*context);
479
480 // Force OpenCL address spaces when they are not present
481 {
482 MemorySpaceToOpenCLMemorySpaceConverter converter(context);
483 AttrTypeReplacer replacer;
484 replacer.addReplacement([&converter](BaseMemRefType origType)
485 -> std::optional<BaseMemRefType> {
486 return converter.convertType<BaseMemRefType>(origType);
487 });
488
489 replacer.recursivelyReplaceElementsIn(getOperation(),
490 /*replaceAttrs=*/true,
491 /*replaceLocs=*/false,
492 /*replaceTypes=*/true);
493 }
494
495 target.addIllegalOp<gpu::BarrierOp, gpu::BlockDimOp, gpu::BlockIdOp,
496 gpu::GPUFuncOp, gpu::GlobalIdOp, gpu::GridDimOp,
497 gpu::LaneIdOp, gpu::NumSubgroupsOp, gpu::ReturnOp,
498 gpu::ShuffleOp, gpu::SubgroupIdOp, gpu::SubgroupSizeOp,
499 gpu::ThreadIdOp, gpu::PrintfOp>();
500
501 populateGpuToLLVMSPVConversionPatterns(converter, patterns);
503 patterns.add<GPUPrintfOpToLLVMCallLowering>(converter, /*addressSpace=*/2,
504 LLVM::cconv::CConv::SPIR_FUNC,
505 "_Z6printfPU3AS2Kcz");
506
507 if (failed(applyPartialConversion(getOperation(), target,
508 std::move(patterns))))
509 signalPassFailure();
510 }
511};
512} // namespace
513
514//===----------------------------------------------------------------------===//
515// GPU To LLVM-SPV Patterns.
516//===----------------------------------------------------------------------===//
517
518namespace mlir {
519namespace {
520static unsigned
521gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace addressSpace) {
522 constexpr spirv::ClientAPI clientAPI = spirv::ClientAPI::OpenCL;
523 return storageClassToAddressSpace(clientAPI,
524 addressSpaceToStorageClass(addressSpace));
525}
526} // namespace
527
529 const LLVMTypeConverter &typeConverter, RewritePatternSet &patterns) {
530 patterns.add<GPUBarrierConversion, GPUReturnOpLowering, GPUShuffleConversion,
531 GPUSubgroupOpConversion<gpu::LaneIdOp>,
532 GPUSubgroupOpConversion<gpu::NumSubgroupsOp>,
533 GPUSubgroupOpConversion<gpu::SubgroupIdOp>,
534 GPUSubgroupOpConversion<gpu::SubgroupSizeOp>,
535 LaunchConfigOpConversion<gpu::BlockDimOp>,
536 LaunchConfigOpConversion<gpu::BlockIdOp>,
537 LaunchConfigOpConversion<gpu::GlobalIdOp>,
538 LaunchConfigOpConversion<gpu::GridDimOp>,
539 LaunchConfigOpConversion<gpu::ThreadIdOp>>(typeConverter);
540 MLIRContext *context = &typeConverter.getContext();
541 unsigned privateAddressSpace =
542 gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Private);
543 unsigned localAddressSpace =
544 gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Workgroup);
545 OperationName llvmFuncOpName(LLVM::LLVMFuncOp::getOperationName(), context);
546 StringAttr kernelBlockSizeAttributeName =
547 LLVM::LLVMFuncOp::getReqdWorkGroupSizeAttrName(llvmFuncOpName);
548 patterns.add<GPUFuncOpLowering>(
549 typeConverter,
551 privateAddressSpace, localAddressSpace,
552 /*kernelAttributeName=*/{}, kernelBlockSizeAttributeName,
553 /*kernelClusterSizeAttributeName=*/{}, LLVM::CConv::SPIR_KERNEL,
554 LLVM::CConv::SPIR_FUNC,
555 /*encodeWorkgroupAttributionsAsArguments=*/true});
556}
557
560 gpuAddressSpaceToOCLAddressSpace);
561}
562} // namespace mlir
return success()
static LLVM::CallOp createSPIRVBuiltinCall(Location loc, ConversionPatternRewriter &rewriter, LLVM::LLVMFuncOp func, ValueRange args)
static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable, StringRef name, ArrayRef< Type > paramTypes, Type resultType, bool isMemNone, bool isConvergent)
b
Return true if permutation is a valid permutation of the outer_dims_perm (case OuterOrInnerPerm::Oute...
b getContext())
static llvm::ManagedStatic< PassManagerOptions > options
ArrayRef< int64_t > getShape() const
Returns the shape of this memref type.
Attribute getMemorySpace() const
Returns the memory space in which data referred to by this memref resides.
Type getElementType() const
Returns the element type of this memref type.
Utility class for operation conversions targeting the LLVM dialect that match exactly one source oper...
Definition Pattern.h:227
ConvertOpToLLVMPattern(const LLVMTypeConverter &typeConverter, PatternBenefit benefit=1)
Definition Pattern.h:233
Base class for operation conversions targeting the LLVM IR dialect.
Definition Pattern.h:106
const LLVMTypeConverter * getTypeConverter() const
Definition Pattern.cpp:29
Conversion from types to the LLVM IR dialect.
MLIRContext & getContext() const
Returns the MLIR context.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition Location.h:76
MLIRContext is the top-level object for a collection of MLIR operations.
Definition MLIRContext.h:63
This class helps build Operations.
Definition Builders.h:209
Operation is the basic unit of execution within MLIR.
Definition Operation.h:88
Region & getRegion(unsigned index)
Returns the region held by this operation at position 'index'.
Definition Operation.h:712
Operation * getParentWithTrait()
Returns the closest surrounding parent operation with trait Trait.
Definition Operation.h:274
Location getLoc()
The source location the operation was defined or derived from.
Definition Operation.h:241
OpTy getParentOfType()
Return the closest surrounding parent operation that is of type 'OpTy'.
Definition Operation.h:256
RewritePatternSet & add(ConstructorArg &&arg, ConstructorArgs &&...args)
Add an instance of each of the pattern types 'Ts' to the pattern list with the given arguments.
static Operation * lookupSymbolIn(Operation *op, StringAttr symbol)
Returns the operation registered with the given symbol name with the regions of 'symbolTableOp'.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition Types.h:74
unsigned getIntOrFloatBitWidth() const
Return the bit width of an integer or a float type, assert failure on other types.
Definition Types.cpp:124
This class provides an abstraction over the different types of ranges over Values.
Definition ValueRange.h:389
Type getType() const
Return the type of this value.
Definition Value.h:105
void recursivelyReplaceElementsIn(Operation *op, bool replaceAttrs=true, bool replaceLocs=false, bool replaceTypes=false)
Replace the elements within the given operation, and all nested operations.
void addReplacement(ReplaceFn< Attribute > fn)
AttrTypeReplacerBase.
detail::InFlightRemark failed(Location loc, RemarkOpts opts)
Report an optimization remark that failed.
Definition Remarks.h:717
Include the generated interface declarations.
bool matchPattern(Value value, const Pattern &pattern)
Entry point for matching a pattern over a Value.
Definition Matchers.h:490
detail::constant_int_value_binder m_ConstantInt(IntegerAttr::ValueType *bind_value)
Matches a constant holding a scalar/vector/tensor integer (splat) and writes the integer value to bin...
Definition Matchers.h:527
unsigned storageClassToAddressSpace(spirv::ClientAPI clientAPI, spirv::StorageClass storageClass)
void populateGpuToLLVMSPVConversionPatterns(const LLVMTypeConverter &converter, RewritePatternSet &patterns)
spirv::StorageClass addressSpaceToStorageClass(gpu::AddressSpace addressSpace)
void populateGpuMemorySpaceAttributeConversions(TypeConverter &typeConverter, const MemorySpaceMapping &mapping)
Populates memory space attribute conversion rules for lowering gpu.address_space to integer values.
llvm::TypeSwitch< T, ResultT > TypeSwitch
Definition LLVM.h:139