//===- GPUToLLVMSPV.cpp - Convert GPU operations to LLVM dialect ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUToLLVMSPV/GPUToLLVMSPVPass.h"

#include "../GPUCommon/GPUOpsLowering.h"
#include "mlir/Conversion/GPUToSPIRV/GPUToSPIRV.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/SPIRVCommon/AttrToLLVMConverter.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/GPU/Transforms/Passes.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"

#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/FormatVariadic.h"

#define DEBUG_TYPE "gpu-to-llvm-spv"

using namespace mlir;

namespace mlir {
#define GEN_PASS_DEF_CONVERTGPUOPSTOLLVMSPVOPS
#include "mlir/Conversion/Passes.h.inc"
} // namespace mlir

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
                                              StringRef name,
                                              ArrayRef<Type> paramTypes,
                                              Type resultType, bool isMemNone,
                                              bool isConvergent) {
  auto func = dyn_cast_or_null<LLVM::LLVMFuncOp>(
      SymbolTable::lookupSymbolIn(symbolTable, name));
  if (!func) {
    OpBuilder b(symbolTable->getRegion(0));
    func = LLVM::LLVMFuncOp::create(
        b, symbolTable->getLoc(), name,
        LLVM::LLVMFunctionType::get(resultType, paramTypes));
    func.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
    func.setNoUnwind(true);
    func.setWillReturn(true);

    if (isMemNone) {
      // No externally observable effects.
      constexpr auto noModRef = mlir::LLVM::ModRefInfo::NoModRef;
      auto memAttr = b.getAttr<LLVM::MemoryEffectsAttr>(
          /*other=*/noModRef,
          /*argMem=*/noModRef, /*inaccessibleMem=*/noModRef,
          /*errnoMem=*/noModRef,
          /*targetMem0=*/noModRef,
          /*targetMem1=*/noModRef);
      func.setMemoryEffectsAttr(memAttr);
    }

    func.setConvergent(isConvergent);
  }
  return func;
}
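// Illustration (not from the original source): for
// `name = "_Z12get_local_idj"`, `paramTypes = {i32}`, `resultType = i64`,
// `isMemNone = true` and `isConvergent = false`, the helper would declare
// roughly:
//
//   llvm.func spir_funccc @_Z12get_local_idj(i32) -> i64
//
// Subsequent lookups with the same name reuse that declaration instead of
// creating a duplicate.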

static LLVM::CallOp createSPIRVBuiltinCall(Location loc,
                                           ConversionPatternRewriter &rewriter,
                                           LLVM::LLVMFuncOp func,
                                           ValueRange args) {
  auto call = LLVM::CallOp::create(rewriter, loc, func, args);
  call.setCConv(func.getCConv());
  call.setConvergentAttr(func.getConvergentAttr());
  call.setNoUnwindAttr(func.getNoUnwindAttr());
  call.setWillReturnAttr(func.getWillReturnAttr());
  call.setMemoryEffectsAttr(func.getMemoryEffectsAttr());
  return call;
}
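// Illustration (not from the original source): for the barrier builtin, the
// generated call site prints roughly as:
//
//   llvm.call spir_funccc @_Z7barrierj(%c3) : (i32) -> ()
//
// Mirroring `convergent`/`no_unwind`/`will_return` and the memory effects on
// the call keeps call-site and callee information consistent for LLVM.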

namespace {
//===----------------------------------------------------------------------===//
// Barriers
//===----------------------------------------------------------------------===//

/// Replace `gpu.barrier` with an `llvm.call` to `barrier` using
/// `CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE`, ensuring that all memory
/// accesses are visible to all work-items in the work-group.
/// ```
/// // gpu.barrier
/// // 3 = CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE
/// %c3 = llvm.mlir.constant(3: i32) : i32
/// llvm.call spir_funccc @_Z7barrierj(%c3) : (i32) -> ()
/// ```
struct GPUBarrierConversion final : ConvertOpToLLVMPattern<gpu::BarrierOp> {
  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(gpu::BarrierOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    constexpr StringLiteral funcName = "_Z7barrierj";

    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type flagTy = rewriter.getI32Type();
    Type voidTy = rewriter.getType<LLVM::LLVMVoidType>();
    LLVM::LLVMFuncOp func =
        lookupOrCreateSPIRVFn(moduleOp, funcName, flagTy, voidTy,
                              /*isMemNone=*/false, /*isConvergent=*/true);

    // Values used by the SPIR-V backend to represent `CLK_LOCAL_MEM_FENCE`
    // and `CLK_GLOBAL_MEM_FENCE`. See `llvm/lib/Target/SPIRV/SPIRVBuiltins.td`.
    constexpr int64_t localMemFenceFlag = 1;
    constexpr int64_t globalMemFenceFlag = 2;
    int64_t memFenceFlag = 0;
    std::optional<ArrayAttr> addressSpaces = adaptor.getAddressSpaces();
    if (addressSpaces) {
      for (Attribute attr : addressSpaces.value()) {
        auto addressSpace = cast<gpu::AddressSpaceAttr>(attr).getValue();
        switch (addressSpace) {
        case gpu::AddressSpace::Global:
          memFenceFlag = memFenceFlag | globalMemFenceFlag;
          break;
        case gpu::AddressSpace::Workgroup:
          memFenceFlag = memFenceFlag | localMemFenceFlag;
          break;
        case gpu::AddressSpace::Private:
          break;
        }
      }
    } else {
      memFenceFlag = localMemFenceFlag | globalMemFenceFlag;
    }
    Location loc = op->getLoc();
    Value flag = LLVM::ConstantOp::create(rewriter, loc, flagTy, memFenceFlag);
    rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, flag));
    return success();
  }
};
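// Illustration (not from the original source): a barrier restricted to
// workgroup (local) memory only needs `CLK_LOCAL_MEM_FENCE`, so, assuming the
// usual `memfence` assembly syntax for the address-space list:
//
//   // gpu.barrier memfence [#gpu.address_space<workgroup>]
//   %c1 = llvm.mlir.constant(1 : i32) : i32
//   llvm.call spir_funccc @_Z7barrierj(%c1) : (i32) -> ()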

//===----------------------------------------------------------------------===//
// SPIR-V Builtins
//===----------------------------------------------------------------------===//

/// Replace `gpu.*` with an `llvm.call` to the corresponding SPIR-V builtin,
/// passing a constant argument for the `dimension` attribute. The return type
/// depends on the index bitwidth option:
/// ```
/// // %thread_id_y = gpu.thread_id y
/// %c1 = llvm.mlir.constant(1: i32) : i32
/// %0 = llvm.call spir_funccc @_Z12get_local_idj(%c1) : (i32) -> i64
/// ```
struct LaunchConfigConversion : ConvertToLLVMPattern {
  LaunchConfigConversion(StringRef funcName, StringRef rootOpName,
                         MLIRContext *context,
                         const LLVMTypeConverter &typeConverter,
                         PatternBenefit benefit)
      : ConvertToLLVMPattern(rootOpName, context, typeConverter, benefit),
        funcName(funcName) {}

  virtual gpu::Dimension getDimension(Operation *op) const = 0;

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const final {
    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type dimTy = rewriter.getI32Type();
    Type indexTy = getTypeConverter()->getIndexType();
    LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(moduleOp, funcName, dimTy,
                                                  indexTy, /*isMemNone=*/true,
                                                  /*isConvergent=*/false);

    Location loc = op->getLoc();
    gpu::Dimension dim = getDimension(op);
    Value dimVal = LLVM::ConstantOp::create(rewriter, loc, dimTy,
                                            static_cast<int64_t>(dim));
    rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, dimVal));
    return success();
  }

  StringRef funcName;
};

template <typename SourceOp>
struct LaunchConfigOpConversion final : LaunchConfigConversion {
  static StringRef getFuncName();

  explicit LaunchConfigOpConversion(const LLVMTypeConverter &typeConverter,
                                    PatternBenefit benefit = 1)
      : LaunchConfigConversion(getFuncName(), SourceOp::getOperationName(),
                               &typeConverter.getContext(), typeConverter,
                               benefit) {}

  gpu::Dimension getDimension(Operation *op) const final {
    return cast<SourceOp>(op).getDimension();
  }
};

template <>
StringRef LaunchConfigOpConversion<gpu::BlockIdOp>::getFuncName() {
  return "_Z12get_group_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GridDimOp>::getFuncName() {
  return "_Z14get_num_groupsj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::BlockDimOp>::getFuncName() {
  return "_Z14get_local_sizej";
}

template <>
StringRef LaunchConfigOpConversion<gpu::ThreadIdOp>::getFuncName() {
  return "_Z12get_local_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GlobalIdOp>::getFuncName() {
  return "_Z13get_global_idj";
}

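// Illustration (not from the original source): with a 32-bit index width,
// `%0 = gpu.block_id y` would lower through
// `LaunchConfigOpConversion<gpu::BlockIdOp>` to:
//
//   %c1 = llvm.mlir.constant(1 : i32) : i32
//   %0 = llvm.call spir_funccc @_Z12get_group_idj(%c1) : (i32) -> i32
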
//===----------------------------------------------------------------------===//
// Shuffles
//===----------------------------------------------------------------------===//

/// Replace `gpu.shuffle` with an `llvm.call` to the corresponding SPIR-V
/// builtin for `shuffleResult`, forwarding the `value` and `offset` arguments
/// and materializing a `true` constant for the `valid` result. Conversion will
/// only take place if `width` is constant and equal to the enclosing
/// function's required subgroup size:
/// ```
/// // %0 = gpu.shuffle idx %value, %offset, %width : f64
/// %0 = llvm.call spir_funccc @_Z17sub_group_shuffledj(%value, %offset)
///     : (f64, i32) -> f64
/// ```
struct GPUShuffleConversion final : ConvertOpToLLVMPattern<gpu::ShuffleOp> {
  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;

  static StringRef getBaseName(gpu::ShuffleMode mode) {
    switch (mode) {
    case gpu::ShuffleMode::IDX:
      return "sub_group_shuffle";
    case gpu::ShuffleMode::XOR:
      return "sub_group_shuffle_xor";
    case gpu::ShuffleMode::UP:
      return "sub_group_shuffle_up";
    case gpu::ShuffleMode::DOWN:
      return "sub_group_shuffle_down";
    }
    llvm_unreachable("Unhandled shuffle mode");
  }

  static std::optional<StringRef> getTypeMangling(Type type) {
    return TypeSwitch<Type, std::optional<StringRef>>(type)
        .Case([](Float16Type) { return "Dhj"; })
        .Case([](Float32Type) { return "fj"; })
        .Case([](Float64Type) { return "dj"; })
        .Case([](IntegerType intTy) -> std::optional<StringRef> {
          switch (intTy.getWidth()) {
          case 8:
            return "cj";
          case 16:
            return "sj";
          case 32:
            return "ij";
          case 64:
            return "lj";
          }
          return std::nullopt;
        })
        .Default(std::nullopt);
  }

  static std::optional<std::string> getFuncName(gpu::ShuffleMode mode,
                                                Type type) {
    StringRef baseName = getBaseName(mode);
    std::optional<StringRef> typeMangling = getTypeMangling(type);
    if (!typeMangling)
      return std::nullopt;
    return llvm::formatv("_Z{}{}{}", baseName.size(), baseName,
                         typeMangling.value());
  }
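  // Illustration (not from the original source): for `mode = XOR` and an f32
  // value, `getBaseName` yields "sub_group_shuffle_xor" (21 characters) and
  // `getTypeMangling` yields "fj", so `getFuncName` assembles the
  // Itanium-style mangled name "_Z21sub_group_shuffle_xorfj".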

  /// Get the subgroup size from the enclosing function's
  /// `intel_reqd_sub_group_size` attribute, if present.
  static std::optional<int> getSubgroupSize(Operation *op) {
    auto parentFunc = op->getParentOfType<LLVM::LLVMFuncOp>();
    if (!parentFunc)
      return std::nullopt;
    return parentFunc.getIntelReqdSubGroupSize();
  }

  static bool hasValidWidth(gpu::ShuffleOp op) {
    llvm::APInt val;
    Value width = op.getWidth();
    return matchPattern(width, m_ConstantInt(&val)) &&
           val == getSubgroupSize(op);
  }

  static Value bitcastOrExtBeforeShuffle(Value oldVal, Location loc,
                                         ConversionPatternRewriter &rewriter) {
    return TypeSwitch<Type, Value>(oldVal.getType())
        .Case([&](BFloat16Type) {
          return LLVM::BitcastOp::create(rewriter, loc, rewriter.getI16Type(),
                                         oldVal);
        })
        .Case([&](IntegerType intTy) -> Value {
          if (intTy.getWidth() == 1)
            return LLVM::ZExtOp::create(rewriter, loc, rewriter.getI8Type(),
                                        oldVal);
          return oldVal;
        })
        .Default(oldVal);
  }

  static Value bitcastOrTruncAfterShuffle(Value oldVal, Type newTy,
                                          Location loc,
                                          ConversionPatternRewriter &rewriter) {
    return TypeSwitch<Type, Value>(newTy)
        .Case([&](BFloat16Type) {
          return LLVM::BitcastOp::create(rewriter, loc, newTy, oldVal);
        })
        .Case([&](IntegerType intTy) -> Value {
          if (intTy.getWidth() == 1)
            return LLVM::TruncOp::create(rewriter, loc, newTy, oldVal);
          return oldVal;
        })
        .Default(oldVal);
  }
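  // Illustration (not from the original source): the two helpers above let
  // types without a native shuffle builtin piggyback on an integer shuffle.
  // A bf16 shuffle would become, roughly:
  //
  //   %cast = llvm.bitcast %val : bf16 to i16
  //   %res = llvm.call spir_funccc @_Z17sub_group_shufflesj(%cast, %offset)
  //       : (i16, i32) -> i16
  //   %out = llvm.bitcast %res : i16 to bf16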

  LogicalResult
  matchAndRewrite(gpu::ShuffleOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    if (!hasValidWidth(op))
      return rewriter.notifyMatchFailure(
          op, "shuffle width and subgroup size mismatch");

    Location loc = op->getLoc();
    Value inValue =
        bitcastOrExtBeforeShuffle(adaptor.getValue(), loc, rewriter);
    std::optional<std::string> funcName =
        getFuncName(op.getMode(), inValue.getType());
    if (!funcName)
      return rewriter.notifyMatchFailure(op, "unsupported value type");

    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type valueType = inValue.getType();
    Type offsetType = adaptor.getOffset().getType();
    Type resultType = valueType;
    LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(
        moduleOp, funcName.value(), {valueType, offsetType}, resultType,
        /*isMemNone=*/false, /*isConvergent=*/true);

    std::array<Value, 2> args{inValue, adaptor.getOffset()};
    Value result =
        createSPIRVBuiltinCall(loc, rewriter, func, args).getResult();
    Value resultOrConversion =
        bitcastOrTruncAfterShuffle(result, op.getType(0), loc, rewriter);

    Value trueVal =
        LLVM::ConstantOp::create(rewriter, loc, rewriter.getI1Type(), true);
    rewriter.replaceOp(op, {resultOrConversion, trueVal});
    return success();
  }
};

class MemorySpaceToOpenCLMemorySpaceConverter final : public TypeConverter {
public:
  MemorySpaceToOpenCLMemorySpaceConverter(MLIRContext *ctx) {
    addConversion([](Type t) { return t; });
    addConversion([ctx](BaseMemRefType memRefType) -> std::optional<Type> {
      // Attach global addr space attribute to memrefs with no addr space attr.
      Attribute memSpaceAttr = memRefType.getMemorySpace();
      if (memSpaceAttr)
        return std::nullopt;

      unsigned globalAddrspace = storageClassToAddressSpace(
          spirv::ClientAPI::OpenCL, spirv::StorageClass::CrossWorkgroup);
      Attribute addrSpaceAttr =
          IntegerAttr::get(IntegerType::get(ctx, 64), globalAddrspace);
      if (auto rankedType = dyn_cast<MemRefType>(memRefType)) {
        return MemRefType::get(memRefType.getShape(),
                               memRefType.getElementType(),
                               rankedType.getLayout(), addrSpaceAttr);
      }
      return UnrankedMemRefType::get(memRefType.getElementType(),
                                     addrSpaceAttr);
    });
    addConversion([this](FunctionType type) {
      auto inputs = llvm::map_to_vector(
          type.getInputs(), [this](Type ty) { return convertType(ty); });
      auto results = llvm::map_to_vector(
          type.getResults(), [this](Type ty) { return convertType(ty); });
      return FunctionType::get(type.getContext(), inputs, results);
    });
  }
};
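// Illustration (not from the original source): with this converter, a memref
// type carrying no memory space, e.g. `memref<8xf32>`, is rewritten to carry
// the OpenCL global (CrossWorkgroup) address space, i.e. `memref<8xf32, 1>`,
// while memrefs that already specify a space are left untouched.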

//===----------------------------------------------------------------------===//
// Subgroup query ops.
//===----------------------------------------------------------------------===//

template <typename SubgroupOp>
struct GPUSubgroupOpConversion final : ConvertOpToLLVMPattern<SubgroupOp> {
  using ConvertOpToLLVMPattern<SubgroupOp>::ConvertOpToLLVMPattern;
  using ConvertToLLVMPattern::getTypeConverter;

  LogicalResult
  matchAndRewrite(SubgroupOp op, typename SubgroupOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    constexpr StringRef funcName = [] {
      if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupIdOp>) {
        return "_Z16get_sub_group_id";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::LaneIdOp>) {
        return "_Z22get_sub_group_local_id";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::NumSubgroupsOp>) {
        return "_Z18get_num_sub_groups";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupSizeOp>) {
        return "_Z18get_sub_group_size";
      }
    }();

    Operation *moduleOp =
        op->template getParentWithTrait<OpTrait::SymbolTable>();
    Type resultTy = rewriter.getI32Type();
    LLVM::LLVMFuncOp func =
        lookupOrCreateSPIRVFn(moduleOp, funcName, {}, resultTy,
                              /*isMemNone=*/false, /*isConvergent=*/false);

    Location loc = op->getLoc();
    Value result = createSPIRVBuiltinCall(loc, rewriter, func, {}).getResult();

    Type indexTy = getTypeConverter()->getIndexType();
    if (resultTy != indexTy) {
      if (indexTy.getIntOrFloatBitWidth() < resultTy.getIntOrFloatBitWidth()) {
        return failure();
      }
      result = LLVM::ZExtOp::create(rewriter, loc, indexTy, result);
    }

    rewriter.replaceOp(op, result);
    return success();
  }
};
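// Illustration (not from the original source): with a 64-bit index width,
// `%0 = gpu.subgroup_id : index` would lower to a builtin call followed by a
// zero-extension to the index type:
//
//   %id = llvm.call spir_funccc @_Z16get_sub_group_id() : () -> i32
//   %0 = llvm.zext %id : i32 to i64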

//===----------------------------------------------------------------------===//
// GPU To LLVM-SPV Pass.
//===----------------------------------------------------------------------===//

struct GPUToLLVMSPVConversionPass final
    : impl::ConvertGpuOpsToLLVMSPVOpsBase<GPUToLLVMSPVConversionPass> {
  using Base::Base;

  void runOnOperation() final {
    MLIRContext *context = &getContext();
    RewritePatternSet patterns(context);

    LowerToLLVMOptions options(context);
    options.overrideIndexBitwidth(this->use64bitIndex ? 64 : 32);
    LLVMTypeConverter converter(context, options);
    LLVMConversionTarget target(*context);

    // Force OpenCL address spaces when they are not present.
    {
      MemorySpaceToOpenCLMemorySpaceConverter converter(context);
      AttrTypeReplacer replacer;
      replacer.addReplacement([&converter](BaseMemRefType origType)
                                  -> std::optional<BaseMemRefType> {
        return converter.convertType<BaseMemRefType>(origType);
      });

      replacer.recursivelyReplaceElementsIn(getOperation(),
                                            /*replaceAttrs=*/true,
                                            /*replaceLocs=*/false,
                                            /*replaceTypes=*/true);
    }

    target.addIllegalOp<gpu::BarrierOp, gpu::BlockDimOp, gpu::BlockIdOp,
                        gpu::GPUFuncOp, gpu::GlobalIdOp, gpu::GridDimOp,
                        gpu::LaneIdOp, gpu::NumSubgroupsOp, gpu::ReturnOp,
                        gpu::ShuffleOp, gpu::SubgroupIdOp, gpu::SubgroupSizeOp,
                        gpu::ThreadIdOp, gpu::PrintfOp>();

    populateGpuToLLVMSPVConversionPatterns(converter, patterns);
    patterns.add<GPUPrintfOpToLLVMCallLowering>(converter, /*addressSpace=*/2,
                                                LLVM::cconv::CConv::SPIR_FUNC,
                                                "_Z6printfPU3AS2Kcz");

    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};
} // namespace
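// Illustration (not from the original source): assuming the pass is registered
// under the usual `convert-gpu-to-llvm-spv` flag, it can be exercised with
// mlir-opt; the `use-64bit-index` option selects the index bitwidth used for
// builtin return values:
//
//   mlir-opt --convert-gpu-to-llvm-spv='use-64bit-index=true' kernel.mlir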

//===----------------------------------------------------------------------===//
// GPU To LLVM-SPV Patterns.
//===----------------------------------------------------------------------===//

namespace mlir {
namespace {
static unsigned
gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace addressSpace) {
  constexpr spirv::ClientAPI clientAPI = spirv::ClientAPI::OpenCL;
  return storageClassToAddressSpace(clientAPI,
                                    addressSpaceToStorageClass(addressSpace));
}
} // namespace
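// Illustration (not from the original source): under the OpenCL client API
// this maps `gpu` address spaces to the conventional SPIR/OpenCL numeric
// spaces: Private (Function) -> 0, Global (CrossWorkgroup) -> 1, and
// Workgroup (Workgroup) -> 3; the printf lowering above uses the constant
// space 2 for its format string.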

void populateGpuToLLVMSPVConversionPatterns(
    const LLVMTypeConverter &typeConverter, RewritePatternSet &patterns) {
  patterns.add<GPUBarrierConversion, GPUReturnOpLowering, GPUShuffleConversion,
               GPUSubgroupOpConversion<gpu::LaneIdOp>,
               GPUSubgroupOpConversion<gpu::NumSubgroupsOp>,
               GPUSubgroupOpConversion<gpu::SubgroupIdOp>,
               GPUSubgroupOpConversion<gpu::SubgroupSizeOp>,
               LaunchConfigOpConversion<gpu::BlockDimOp>,
               LaunchConfigOpConversion<gpu::BlockIdOp>,
               LaunchConfigOpConversion<gpu::GlobalIdOp>,
               LaunchConfigOpConversion<gpu::GridDimOp>,
               LaunchConfigOpConversion<gpu::ThreadIdOp>>(typeConverter);
  MLIRContext *context = &typeConverter.getContext();
  unsigned privateAddressSpace =
      gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Private);
  unsigned localAddressSpace =
      gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Workgroup);
  OperationName llvmFuncOpName(LLVM::LLVMFuncOp::getOperationName(), context);
  StringAttr kernelBlockSizeAttributeName =
      LLVM::LLVMFuncOp::getReqdWorkGroupSizeAttrName(llvmFuncOpName);
  patterns.add<GPUFuncOpLowering>(
      typeConverter,
      GPUFuncOpLoweringOptions{
          privateAddressSpace, localAddressSpace,
          /*kernelAttributeName=*/{}, kernelBlockSizeAttributeName,
          /*kernelClusterSizeAttributeName=*/{}, LLVM::CConv::SPIR_KERNEL,
          LLVM::CConv::SPIR_FUNC,
          /*encodeWorkgroupAttributionsAsArguments=*/true});
}

void populateGpuMemorySpaceAttributeConversions(TypeConverter &typeConverter) {
  populateGpuMemorySpaceAttributeConversions(typeConverter,
                                             gpuAddressSpaceToOCLAddressSpace);
}
} // namespace mlir