//===- GPUToLLVMSPV.cpp - Convert GPU operations to LLVM dialect ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUToLLVMSPV/GPUToLLVMSPVPass.h"

#include "mlir/Conversion/GPUCommon/GPUOpsLowering.h"
#include "mlir/Conversion/GPUToSPIRV/GPUToSPIRV.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/SPIRVCommon/AttrToLLVMConverter.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"

#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/FormatVariadic.h"

#define DEBUG_TYPE "gpu-to-llvm-spv"

using namespace mlir;

namespace mlir {
#define GEN_PASS_DEF_CONVERTGPUOPSTOLLVMSPVOPS
#include "mlir/Conversion/Passes.h.inc"
} // namespace mlir

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
                                              StringRef name,
                                              ArrayRef<Type> paramTypes,
                                              Type resultType, bool isMemNone,
                                              bool isConvergent) {
  auto func = dyn_cast_or_null<LLVM::LLVMFuncOp>(
      SymbolTable::lookupSymbolIn(symbolTable, name));
  if (!func) {
    OpBuilder b(symbolTable->getRegion(0));
    func = LLVM::LLVMFuncOp::create(
        b, symbolTable->getLoc(), name,
        LLVM::LLVMFunctionType::get(resultType, paramTypes));
    func.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
    func.setNoUnwind(true);
    func.setWillReturn(true);

    if (isMemNone) {
      // no externally observable effects
      constexpr auto noModRef = mlir::LLVM::ModRefInfo::NoModRef;
      auto memAttr = b.getAttr<LLVM::MemoryEffectsAttr>(
          /*other=*/noModRef,
          /*argMem=*/noModRef, /*inaccessibleMem=*/noModRef,
          /*errnoMem=*/noModRef,
          /*targetMem0=*/noModRef,
          /*targetMem1=*/noModRef);
      func.setMemoryEffectsAttr(memAttr);
    }

    func.setConvergent(isConvergent);
  }
  return func;
}

static LLVM::CallOp createSPIRVBuiltinCall(Location loc,
                                           ConversionPatternRewriter &rewriter,
                                           LLVM::LLVMFuncOp func,
                                           ValueRange args) {
  auto call = LLVM::CallOp::create(rewriter, loc, func, args);
  call.setCConv(func.getCConv());
  call.setConvergentAttr(func.getConvergentAttr());
  call.setNoUnwindAttr(func.getNoUnwindAttr());
  call.setWillReturnAttr(func.getWillReturnAttr());
  call.setMemoryEffectsAttr(func.getMemoryEffectsAttr());
  return call;
}
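
// As a rough sketch, for the barrier lowering below these two helpers yield
// IR along the following lines (the exact attribute spelling depends on the
// LLVM dialect printer):
//
//   llvm.func spir_funccc @_Z7barrierj(i32) attributes {
//       convergent, no_unwind, will_return}
//   ...
//   llvm.call spir_funccc @_Z7barrierj(%flag) : (i32) -> ()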

namespace {
//===----------------------------------------------------------------------===//
// Barriers
//===----------------------------------------------------------------------===//

/// Replace `gpu.barrier` with an `llvm.call` to `barrier` using
/// `CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE`, ensuring that all memory
/// accesses are visible to all work-items in the work-group.
/// ```
/// // gpu.barrier
/// // 3 = CLK_LOCAL_MEM_FENCE | CLK_GLOBAL_MEM_FENCE
/// %c3 = llvm.mlir.constant(3 : i32) : i32
/// llvm.call spir_funccc @_Z7barrierj(%c3) : (i32) -> ()
/// ```
struct GPUBarrierConversion final : ConvertOpToLLVMPattern<gpu::BarrierOp> {
  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(gpu::BarrierOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    constexpr StringLiteral funcName = "_Z7barrierj";

    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type flagTy = rewriter.getI32Type();
    Type voidTy = rewriter.getType<LLVM::LLVMVoidType>();
    LLVM::LLVMFuncOp func =
        lookupOrCreateSPIRVFn(moduleOp, funcName, flagTy, voidTy,
                              /*isMemNone=*/false, /*isConvergent=*/true);

    // Values used by SPIR-V backend to represent a combination of
    // `CLK_LOCAL_MEM_FENCE` and `CLK_GLOBAL_MEM_FENCE`.
    // See `llvm/lib/Target/SPIRV/SPIRVBuiltins.td`.
    constexpr int64_t localMemFenceFlag = 1;
    constexpr int64_t globalMemFenceFlag = 2;
    constexpr int64_t localGlobalMemFenceFlag =
        localMemFenceFlag | globalMemFenceFlag;
    Location loc = op->getLoc();
    Value flag = LLVM::ConstantOp::create(rewriter, loc, flagTy,
                                          localGlobalMemFenceFlag);
    rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, flag));
    return success();
  }
};

//===----------------------------------------------------------------------===//
// SPIR-V Builtins
//===----------------------------------------------------------------------===//

/// Replace `gpu.*` with an `llvm.call` to the corresponding SPIR-V builtin,
/// passing a constant argument for the `dimension` attribute. The return type
/// depends on the index-width option:
/// ```
/// // %thread_id_y = gpu.thread_id y
/// %c1 = llvm.mlir.constant(1 : i32) : i32
/// %0 = llvm.call spir_funccc @_Z12get_local_idj(%c1) : (i32) -> i64
/// ```
struct LaunchConfigConversion : ConvertToLLVMPattern {
  LaunchConfigConversion(StringRef funcName, StringRef rootOpName,
                         MLIRContext *context,
                         const LLVMTypeConverter &typeConverter,
                         PatternBenefit benefit)
      : ConvertToLLVMPattern(rootOpName, context, typeConverter, benefit),
        funcName(funcName) {}

  virtual gpu::Dimension getDimension(Operation *op) const = 0;

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const final {
    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type dimTy = rewriter.getI32Type();
    Type indexTy = getTypeConverter()->getIndexType();
    LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(moduleOp, funcName, dimTy,
                                                  indexTy, /*isMemNone=*/true,
                                                  /*isConvergent=*/false);

    Location loc = op->getLoc();
    gpu::Dimension dim = getDimension(op);
    Value dimVal = LLVM::ConstantOp::create(rewriter, loc, dimTy,
                                            static_cast<int64_t>(dim));
    rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, dimVal));
    return success();
  }

  StringRef funcName;
};

template <typename SourceOp>
struct LaunchConfigOpConversion final : LaunchConfigConversion {
  static StringRef getFuncName();

  explicit LaunchConfigOpConversion(const LLVMTypeConverter &typeConverter,
                                    PatternBenefit benefit = 1)
      : LaunchConfigConversion(getFuncName(), SourceOp::getOperationName(),
                               &typeConverter.getContext(), typeConverter,
                               benefit) {}

  gpu::Dimension getDimension(Operation *op) const final {
    return cast<SourceOp>(op).getDimension();
  }
};

template <>
StringRef LaunchConfigOpConversion<gpu::BlockIdOp>::getFuncName() {
  return "_Z12get_group_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GridDimOp>::getFuncName() {
  return "_Z14get_num_groupsj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::BlockDimOp>::getFuncName() {
  return "_Z14get_local_sizej";
}

template <>
StringRef LaunchConfigOpConversion<gpu::ThreadIdOp>::getFuncName() {
  return "_Z12get_local_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GlobalIdOp>::getFuncName() {
  return "_Z13get_global_idj";
}
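
// The names above are Itanium-mangled OpenCL builtin signatures taking a
// single `uint` dimension argument; e.g. `_Z12get_local_idj` decodes to
// `get_local_id(unsigned int)` (`12` is the name length, `j` mangles
// `unsigned int`).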

//===----------------------------------------------------------------------===//
// Shuffles
//===----------------------------------------------------------------------===//

/// Replace `gpu.shuffle` with an `llvm.call` to the corresponding SPIR-V
/// builtin for `shuffleResult`, keeping the `value` and `offset` arguments,
/// and a `true` constant for the `valid` result. The conversion only takes
/// place if `width` is constant and equal to the enclosing function's
/// required subgroup size:
/// ```
/// // %0 = gpu.shuffle idx %value, %offset, %width : f64
/// %0 = llvm.call spir_funccc @_Z17sub_group_shuffledj(%value, %offset)
///     : (f64, i32) -> f64
/// ```
struct GPUShuffleConversion final : ConvertOpToLLVMPattern<gpu::ShuffleOp> {
  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;

  static StringRef getBaseName(gpu::ShuffleMode mode) {
    switch (mode) {
    case gpu::ShuffleMode::IDX:
      return "sub_group_shuffle";
    case gpu::ShuffleMode::XOR:
      return "sub_group_shuffle_xor";
    case gpu::ShuffleMode::UP:
      return "sub_group_shuffle_up";
    case gpu::ShuffleMode::DOWN:
      return "sub_group_shuffle_down";
    }
    llvm_unreachable("Unhandled shuffle mode");
  }

  static std::optional<StringRef> getTypeMangling(Type type) {
    return TypeSwitch<Type, std::optional<StringRef>>(type)
        .Case<Float16Type>([](auto) { return "Dhj"; })
        .Case<Float32Type>([](auto) { return "fj"; })
        .Case<Float64Type>([](auto) { return "dj"; })
        .Case<IntegerType>([](auto intTy) -> std::optional<StringRef> {
          switch (intTy.getWidth()) {
          case 8:
            return "cj";
          case 16:
            return "sj";
          case 32:
            return "ij";
          case 64:
            return "lj";
          }
          return std::nullopt;
        })
        .Default(std::nullopt);
  }

  static std::optional<std::string> getFuncName(gpu::ShuffleMode mode,
                                                Type type) {
    StringRef baseName = getBaseName(mode);
    std::optional<StringRef> typeMangling = getTypeMangling(type);
    if (!typeMangling)
      return std::nullopt;
    return llvm::formatv("_Z{}{}{}", baseName.size(), baseName,
                         typeMangling.value());
  }

  /// Get the required subgroup size from the enclosing function's
  /// `intel_reqd_sub_group_size` attribute, if present.
  static std::optional<int> getSubgroupSize(Operation *op) {
    auto parentFunc = op->getParentOfType<LLVM::LLVMFuncOp>();
    if (!parentFunc)
      return std::nullopt;
    return parentFunc.getIntelReqdSubGroupSize();
  }
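
  /// The shuffle lowering only fires when the shuffle width is a compile-time
  /// constant equal to this required subgroup size; e.g. a kernel annotated
  /// with `intel_reqd_sub_group_size = 16` only matches width-16 shuffles.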
  static bool hasValidWidth(gpu::ShuffleOp op) {
    llvm::APInt val;
    Value width = op.getWidth();
    return matchPattern(width, m_ConstantInt(&val)) &&
           val == getSubgroupSize(op);
  }

  static Value bitcastOrExtBeforeShuffle(Value oldVal, Location loc,
                                         ConversionPatternRewriter &rewriter) {
    return TypeSwitch<Type, Value>(oldVal.getType())
        .Case([&](BFloat16Type) {
          return LLVM::BitcastOp::create(rewriter, loc, rewriter.getI16Type(),
                                         oldVal);
        })
        .Case([&](IntegerType intTy) -> Value {
          if (intTy.getWidth() == 1)
            return LLVM::ZExtOp::create(rewriter, loc, rewriter.getI8Type(),
                                        oldVal);
          return oldVal;
        })
        .Default(oldVal);
  }

  static Value bitcastOrTruncAfterShuffle(Value oldVal, Type newTy,
                                          Location loc,
                                          ConversionPatternRewriter &rewriter) {
    return TypeSwitch<Type, Value>(newTy)
        .Case([&](BFloat16Type) {
          return LLVM::BitcastOp::create(rewriter, loc, newTy, oldVal);
        })
        .Case([&](IntegerType intTy) -> Value {
          if (intTy.getWidth() == 1)
            return LLVM::TruncOp::create(rewriter, loc, newTy, oldVal);
          return oldVal;
        })
        .Default(oldVal);
  }

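  // `bf16` and `i1` have no OpenCL shuffle builtins, so the helpers above
  // bitcast to `i16` / zero-extend to `i8` around the call; e.g. a `bf16`
  // shuffle becomes, as a sketch:
  //   %0 = llvm.bitcast %value : bf16 to i16
  //   %1 = llvm.call spir_funccc @_Z17sub_group_shufflesj(%0, %offset)
  //       : (i16, i32) -> i16
  //   %2 = llvm.bitcast %1 : i16 to bf16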
  LogicalResult
  matchAndRewrite(gpu::ShuffleOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    if (!hasValidWidth(op))
      return rewriter.notifyMatchFailure(
          op, "shuffle width and subgroup size mismatch");

    Location loc = op->getLoc();
    Value inValue =
        bitcastOrExtBeforeShuffle(adaptor.getValue(), loc, rewriter);
    std::optional<std::string> funcName =
        getFuncName(op.getMode(), inValue.getType());
    if (!funcName)
      return rewriter.notifyMatchFailure(op, "unsupported value type");

    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type valueType = inValue.getType();
    Type offsetType = adaptor.getOffset().getType();
    Type resultType = valueType;
    LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(
        moduleOp, funcName.value(), {valueType, offsetType}, resultType,
        /*isMemNone=*/false, /*isConvergent=*/true);

    std::array<Value, 2> args{inValue, adaptor.getOffset()};
    Value result =
        createSPIRVBuiltinCall(loc, rewriter, func, args).getResult();
    Value resultOrConversion =
        bitcastOrTruncAfterShuffle(result, op.getType(0), loc, rewriter);

    Value trueVal =
        LLVM::ConstantOp::create(rewriter, loc, rewriter.getI1Type(), true);
    rewriter.replaceOp(op, {resultOrConversion, trueVal});
    return success();
  }
};

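/// Converter attaching the OpenCL "global" (`CrossWorkgroup`) address space to
/// memrefs that carry no memory space attribute; e.g. under the OpenCL
/// mapping, `memref<8xf32>` becomes `memref<8xf32, 1>`.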
class MemorySpaceToOpenCLMemorySpaceConverter final : public TypeConverter {
public:
  MemorySpaceToOpenCLMemorySpaceConverter(MLIRContext *ctx) {
    addConversion([](Type t) { return t; });
    addConversion([ctx](BaseMemRefType memRefType) -> std::optional<Type> {
      // Attach global addr space attribute to memrefs with no addr space attr.
      Attribute memSpaceAttr = memRefType.getMemorySpace();
      if (memSpaceAttr)
        return std::nullopt;

      unsigned globalAddrspace = storageClassToAddressSpace(
          spirv::ClientAPI::OpenCL, spirv::StorageClass::CrossWorkgroup);
      Attribute addrSpaceAttr =
          IntegerAttr::get(IntegerType::get(ctx, 64), globalAddrspace);
      if (auto rankedType = dyn_cast<MemRefType>(memRefType)) {
        return MemRefType::get(memRefType.getShape(),
                               memRefType.getElementType(),
                               rankedType.getLayout(), addrSpaceAttr);
      }
      return UnrankedMemRefType::get(memRefType.getElementType(),
                                     addrSpaceAttr);
    });
    addConversion([this](FunctionType type) {
      auto inputs = llvm::map_to_vector(
          type.getInputs(), [this](Type ty) { return convertType(ty); });
      auto results = llvm::map_to_vector(
          type.getResults(), [this](Type ty) { return convertType(ty); });
      return FunctionType::get(type.getContext(), inputs, results);
    });
  }
};

//===----------------------------------------------------------------------===//
// Subgroup query ops.
//===----------------------------------------------------------------------===//

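/// Replace a subgroup query op with a call to the corresponding SPIR-V
/// builtin, zero-extending the `i32` result to the index type when the latter
/// is wider; e.g., with 64-bit indices, as a sketch:
/// ```
/// // %sg_id = gpu.subgroup_id : index
/// %0 = llvm.call spir_funccc @_Z16get_sub_group_id() : () -> i32
/// %1 = llvm.zext %0 : i32 to i64
/// ```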
template <typename SubgroupOp>
struct GPUSubgroupOpConversion final : ConvertOpToLLVMPattern<SubgroupOp> {
  using ConvertOpToLLVMPattern<SubgroupOp>::ConvertOpToLLVMPattern;
  using ConvertToLLVMPattern::getTypeConverter;

  LogicalResult
  matchAndRewrite(SubgroupOp op, typename SubgroupOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    constexpr StringRef funcName = [] {
      if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupIdOp>) {
        return "_Z16get_sub_group_id";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::LaneIdOp>) {
        return "_Z22get_sub_group_local_id";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::NumSubgroupsOp>) {
        return "_Z18get_num_sub_groups";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupSizeOp>) {
        return "_Z18get_sub_group_size";
      }
    }();

    Operation *moduleOp =
        op->template getParentWithTrait<OpTrait::SymbolTable>();
    Type resultTy = rewriter.getI32Type();
    LLVM::LLVMFuncOp func =
        lookupOrCreateSPIRVFn(moduleOp, funcName, {}, resultTy,
                              /*isMemNone=*/false, /*isConvergent=*/false);

    Location loc = op->getLoc();
    Value result = createSPIRVBuiltinCall(loc, rewriter, func, {}).getResult();

    Type indexTy = getTypeConverter()->getIndexType();
    if (resultTy != indexTy) {
      if (indexTy.getIntOrFloatBitWidth() < resultTy.getIntOrFloatBitWidth()) {
        return failure();
      }
      result = LLVM::ZExtOp::create(rewriter, loc, indexTy, result);
    }

    rewriter.replaceOp(op, result);
    return success();
  }
};

//===----------------------------------------------------------------------===//
// GPU To LLVM-SPV Pass.
//===----------------------------------------------------------------------===//

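// Driven from the pass pipeline generated from Passes.td; e.g. something like
// `mlir-opt --convert-gpu-to-llvm-spv='use-64bit-index=true'` (the exact flag
// spellings come from the tablegen pass definition).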
struct GPUToLLVMSPVConversionPass final
    : impl::ConvertGpuOpsToLLVMSPVOpsBase<GPUToLLVMSPVConversionPass> {
  using Base::Base;

  void runOnOperation() final {
    MLIRContext *context = &getContext();
    RewritePatternSet patterns(context);

    LowerToLLVMOptions options(context);
    options.overrideIndexBitwidth(this->use64bitIndex ? 64 : 32);
    LLVMTypeConverter converter(context, options);
    LLVMConversionTarget target(*context);

    // Force OpenCL address spaces when they are not present.
    {
      MemorySpaceToOpenCLMemorySpaceConverter converter(context);
      AttrTypeReplacer replacer;
      replacer.addReplacement([&converter](BaseMemRefType origType)
                                  -> std::optional<BaseMemRefType> {
        return converter.convertType<BaseMemRefType>(origType);
      });

      replacer.recursivelyReplaceElementsIn(getOperation(),
                                            /*replaceAttrs=*/true,
                                            /*replaceLocs=*/false,
                                            /*replaceTypes=*/true);
    }

    target.addIllegalOp<gpu::BarrierOp, gpu::BlockDimOp, gpu::BlockIdOp,
                        gpu::GPUFuncOp, gpu::GlobalIdOp, gpu::GridDimOp,
                        gpu::LaneIdOp, gpu::NumSubgroupsOp, gpu::ReturnOp,
                        gpu::ShuffleOp, gpu::SubgroupIdOp, gpu::SubgroupSizeOp,
                        gpu::ThreadIdOp, gpu::PrintfOp>();

    populateGpuToLLVMSPVConversionPatterns(converter, patterns);
    populateGpuMemorySpaceAttributeConversions(converter);
    patterns.add<GPUPrintfOpToLLVMCallLowering>(converter, /*addressSpace=*/2,
                                                LLVM::cconv::CConv::SPIR_FUNC,
                                                "_Z6printfPU3AS2Kcz");

    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// GPU To LLVM-SPV Patterns.
//===----------------------------------------------------------------------===//

namespace mlir {
namespace {
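/// Map `gpu.address_space` values to OpenCL LLVM address spaces by
/// round-tripping through SPIR-V storage classes; e.g. `Workgroup` ends up as
/// OpenCL "local" (3) and `Private` as "private" (0) under this scheme.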
static unsigned
gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace addressSpace) {
  constexpr spirv::ClientAPI clientAPI = spirv::ClientAPI::OpenCL;
  return storageClassToAddressSpace(clientAPI,
                                    addressSpaceToStorageClass(addressSpace));
}
} // namespace

void populateGpuToLLVMSPVConversionPatterns(
    const LLVMTypeConverter &typeConverter, RewritePatternSet &patterns) {
  patterns.add<GPUBarrierConversion, GPUReturnOpLowering, GPUShuffleConversion,
               GPUSubgroupOpConversion<gpu::LaneIdOp>,
               GPUSubgroupOpConversion<gpu::NumSubgroupsOp>,
               GPUSubgroupOpConversion<gpu::SubgroupIdOp>,
               GPUSubgroupOpConversion<gpu::SubgroupSizeOp>,
               LaunchConfigOpConversion<gpu::BlockDimOp>,
               LaunchConfigOpConversion<gpu::BlockIdOp>,
               LaunchConfigOpConversion<gpu::GlobalIdOp>,
               LaunchConfigOpConversion<gpu::GridDimOp>,
               LaunchConfigOpConversion<gpu::ThreadIdOp>>(typeConverter);
  MLIRContext *context = &typeConverter.getContext();
  unsigned privateAddressSpace =
      gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Private);
  unsigned localAddressSpace =
      gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Workgroup);
  OperationName llvmFuncOpName(LLVM::LLVMFuncOp::getOperationName(), context);
  StringAttr kernelBlockSizeAttributeName =
      LLVM::LLVMFuncOp::getReqdWorkGroupSizeAttrName(llvmFuncOpName);
  patterns.add<GPUFuncOpLowering>(
      typeConverter,
      GPUFuncOpLoweringOptions{
          privateAddressSpace, localAddressSpace,
          /*kernelAttributeName=*/{}, kernelBlockSizeAttributeName,
          LLVM::CConv::SPIR_KERNEL, LLVM::CConv::SPIR_FUNC,
          /*encodeWorkgroupAttributionsAsArguments=*/true});
}

void populateGpuMemorySpaceAttributeConversions(
    TypeConverter &typeConverter) {
  populateGpuMemorySpaceAttributeConversions(typeConverter,
                                             gpuAddressSpaceToOCLAddressSpace);
}
} // namespace mlir