MLIR 22.0.0git
GPUToLLVMSPV.cpp
Go to the documentation of this file.
1//===- GPUToLLVMSPV.cpp - Convert GPU operations to LLVM dialect ----------===//
2//
3// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4// See https://llvm.org/LICENSE.txt for license information.
5// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6//
7//===----------------------------------------------------------------------===//
8
10
25#include "mlir/IR/Matchers.h"
27#include "mlir/IR/SymbolTable.h"
28#include "mlir/Pass/Pass.h"
29#include "mlir/Support/LLVM.h"
31
32#include "llvm/ADT/TypeSwitch.h"
33#include "llvm/Support/FormatVariadic.h"
34
35#define DEBUG_TYPE "gpu-to-llvm-spv"
36
37using namespace mlir;
38
39namespace mlir {
40#define GEN_PASS_DEF_CONVERTGPUOPSTOLLVMSPVOPS
41#include "mlir/Conversion/Passes.h.inc"
42} // namespace mlir
43
44//===----------------------------------------------------------------------===//
45// Helper Functions
46//===----------------------------------------------------------------------===//
47
48static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
49 StringRef name,
50 ArrayRef<Type> paramTypes,
51 Type resultType, bool isMemNone,
52 bool isConvergent) {
53 auto func = dyn_cast_or_null<LLVM::LLVMFuncOp>(
54 SymbolTable::lookupSymbolIn(symbolTable, name));
55 if (!func) {
56 OpBuilder b(symbolTable->getRegion(0));
57 func = LLVM::LLVMFuncOp::create(
58 b, symbolTable->getLoc(), name,
59 LLVM::LLVMFunctionType::get(resultType, paramTypes));
60 func.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
61 func.setNoUnwind(true);
62 func.setWillReturn(true);
63
64 if (isMemNone) {
65 // no externally observable effects
66 constexpr auto noModRef = mlir::LLVM::ModRefInfo::NoModRef;
67 auto memAttr = b.getAttr<LLVM::MemoryEffectsAttr>(
68 /*other=*/noModRef,
69 /*argMem=*/noModRef, /*inaccessibleMem=*/noModRef);
70 func.setMemoryEffectsAttr(memAttr);
71 }
72
73 func.setConvergent(isConvergent);
74 }
75 return func;
76}
77
78static LLVM::CallOp createSPIRVBuiltinCall(Location loc,
79 ConversionPatternRewriter &rewriter,
80 LLVM::LLVMFuncOp func,
81 ValueRange args) {
82 auto call = LLVM::CallOp::create(rewriter, loc, func, args);
83 call.setCConv(func.getCConv());
84 call.setConvergentAttr(func.getConvergentAttr());
85 call.setNoUnwindAttr(func.getNoUnwindAttr());
86 call.setWillReturnAttr(func.getWillReturnAttr());
87 call.setMemoryEffectsAttr(func.getMemoryEffectsAttr());
88 return call;
89}
90
91namespace {
92//===----------------------------------------------------------------------===//
93// Barriers
94//===----------------------------------------------------------------------===//
95
96/// Replace `gpu.barrier` with an `llvm.call` to `barrier` with
97/// `CLK_LOCAL_MEM_FENCE` argument, indicating work-group memory scope:
98/// ```
99/// // gpu.barrier
100/// %c1 = llvm.mlir.constant(1: i32) : i32
101/// llvm.call spir_funccc @_Z7barrierj(%c1) : (i32) -> ()
102/// ```
103struct GPUBarrierConversion final : ConvertOpToLLVMPattern<gpu::BarrierOp> {
105
106 LogicalResult
107 matchAndRewrite(gpu::BarrierOp op, OpAdaptor adaptor,
108 ConversionPatternRewriter &rewriter) const final {
109 constexpr StringLiteral funcName = "_Z7barrierj";
110
111 Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
112 assert(moduleOp && "Expecting module");
113 Type flagTy = rewriter.getI32Type();
114 Type voidTy = rewriter.getType<LLVM::LLVMVoidType>();
115 LLVM::LLVMFuncOp func =
116 lookupOrCreateSPIRVFn(moduleOp, funcName, flagTy, voidTy,
117 /*isMemNone=*/false, /*isConvergent=*/true);
118
119 // Value used by SPIR-V backend to represent `CLK_LOCAL_MEM_FENCE`.
120 // See `llvm/lib/Target/SPIRV/SPIRVBuiltins.td`.
121 constexpr int64_t localMemFenceFlag = 1;
122 Location loc = op->getLoc();
123 Value flag =
124 LLVM::ConstantOp::create(rewriter, loc, flagTy, localMemFenceFlag);
125 rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, flag));
126 return success();
127 }
128};
129
130//===----------------------------------------------------------------------===//
131// SPIR-V Builtins
132//===----------------------------------------------------------------------===//
133
134/// Replace `gpu.*` with an `llvm.call` to the corresponding SPIR-V builtin with
135/// a constant argument for the `dimension` attribute. Return type will depend
136/// on index width option:
137/// ```
138/// // %thread_id_y = gpu.thread_id y
139/// %c1 = llvm.mlir.constant(1: i32) : i32
140/// %0 = llvm.call spir_funccc @_Z12get_local_idj(%c1) : (i32) -> i64
141/// ```
142struct LaunchConfigConversion : ConvertToLLVMPattern {
143 LaunchConfigConversion(StringRef funcName, StringRef rootOpName,
144 MLIRContext *context,
145 const LLVMTypeConverter &typeConverter,
146 PatternBenefit benefit)
147 : ConvertToLLVMPattern(rootOpName, context, typeConverter, benefit),
148 funcName(funcName) {}
149
150 virtual gpu::Dimension getDimension(Operation *op) const = 0;
151
152 LogicalResult
153 matchAndRewrite(Operation *op, ArrayRef<Value> operands,
154 ConversionPatternRewriter &rewriter) const final {
155 Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
156 assert(moduleOp && "Expecting module");
157 Type dimTy = rewriter.getI32Type();
158 Type indexTy = getTypeConverter()->getIndexType();
159 LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(moduleOp, funcName, dimTy,
160 indexTy, /*isMemNone=*/true,
161 /*isConvergent=*/false);
162
163 Location loc = op->getLoc();
164 gpu::Dimension dim = getDimension(op);
165 Value dimVal = LLVM::ConstantOp::create(rewriter, loc, dimTy,
166 static_cast<int64_t>(dim));
167 rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, dimVal));
168 return success();
169 }
170
171 StringRef funcName;
172};
173
/// Concrete launch-config pattern for one `gpu` op. `getFuncName` is
/// specialized per op (below) to the mangled OpenCL builtin name.
template <typename SourceOp>
struct LaunchConfigOpConversion final : LaunchConfigConversion {
  static StringRef getFuncName();

  explicit LaunchConfigOpConversion(const LLVMTypeConverter &typeConverter,
                                    PatternBenefit benefit = 1)
      : LaunchConfigConversion(getFuncName(), SourceOp::getOperationName(),
                               &typeConverter.getContext(), typeConverter,
                               benefit) {}

  gpu::Dimension getDimension(Operation *op) const final {
    // The matched op is known to be `SourceOp`; read its `dimension` attr.
    return cast<SourceOp>(op).getDimension();
  }
};
188
// Mangled OpenCL builtin name for each launch-configuration op. The trailing
// `j` in each mangled name is the single `uint` dimension parameter.
template <>
StringRef LaunchConfigOpConversion<gpu::BlockIdOp>::getFuncName() {
  return "_Z12get_group_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GridDimOp>::getFuncName() {
  return "_Z14get_num_groupsj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::BlockDimOp>::getFuncName() {
  return "_Z14get_local_sizej";
}

template <>
StringRef LaunchConfigOpConversion<gpu::ThreadIdOp>::getFuncName() {
  return "_Z12get_local_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GlobalIdOp>::getFuncName() {
  return "_Z13get_global_idj";
}
213
214//===----------------------------------------------------------------------===//
215// Shuffles
216//===----------------------------------------------------------------------===//
217
218/// Replace `gpu.shuffle` with an `llvm.call` to the corresponding SPIR-V
219/// builtin for `shuffleResult`, keeping `value` and `offset` arguments, and a
220/// `true` constant for the `valid` result type. Conversion will only take place
221/// if `width` is constant and equal to the `subgroup` pass option:
222/// ```
223/// // %0 = gpu.shuffle idx %value, %offset, %width : f64
224/// %0 = llvm.call spir_funccc @_Z17sub_group_shuffledj(%value, %offset)
225/// : (f64, i32) -> f64
226/// ```
227struct GPUShuffleConversion final : ConvertOpToLLVMPattern<gpu::ShuffleOp> {
229
230 static StringRef getBaseName(gpu::ShuffleMode mode) {
231 switch (mode) {
232 case gpu::ShuffleMode::IDX:
233 return "sub_group_shuffle";
234 case gpu::ShuffleMode::XOR:
235 return "sub_group_shuffle_xor";
236 case gpu::ShuffleMode::UP:
237 return "sub_group_shuffle_up";
238 case gpu::ShuffleMode::DOWN:
239 return "sub_group_shuffle_down";
240 }
241 llvm_unreachable("Unhandled shuffle mode");
242 }
243
244 static std::optional<StringRef> getTypeMangling(Type type) {
246 .Case<Float16Type>([](auto) { return "Dhj"; })
247 .Case<Float32Type>([](auto) { return "fj"; })
248 .Case<Float64Type>([](auto) { return "dj"; })
249 .Case<IntegerType>([](auto intTy) -> std::optional<StringRef> {
250 switch (intTy.getWidth()) {
251 case 8:
252 return "cj";
253 case 16:
254 return "sj";
255 case 32:
256 return "ij";
257 case 64:
258 return "lj";
259 }
260 return std::nullopt;
261 })
262 .Default(std::nullopt);
263 }
264
265 static std::optional<std::string> getFuncName(gpu::ShuffleMode mode,
266 Type type) {
267 StringRef baseName = getBaseName(mode);
268 std::optional<StringRef> typeMangling = getTypeMangling(type);
269 if (!typeMangling)
270 return std::nullopt;
271 return llvm::formatv("_Z{}{}{}", baseName.size(), baseName,
272 typeMangling.value());
273 }
274
275 /// Get the subgroup size from the target or return a default.
276 static std::optional<int> getSubgroupSize(Operation *op) {
277 auto parentFunc = op->getParentOfType<LLVM::LLVMFuncOp>();
278 if (!parentFunc)
279 return std::nullopt;
280 return parentFunc.getIntelReqdSubGroupSize();
281 }
282
283 static bool hasValidWidth(gpu::ShuffleOp op) {
284 llvm::APInt val;
285 Value width = op.getWidth();
286 return matchPattern(width, m_ConstantInt(&val)) &&
287 val == getSubgroupSize(op);
288 }
289
290 static Value bitcastOrExtBeforeShuffle(Value oldVal, Location loc,
291 ConversionPatternRewriter &rewriter) {
292 return TypeSwitch<Type, Value>(oldVal.getType())
293 .Case([&](BFloat16Type) {
294 return LLVM::BitcastOp::create(rewriter, loc, rewriter.getI16Type(),
295 oldVal);
296 })
297 .Case([&](IntegerType intTy) -> Value {
298 if (intTy.getWidth() == 1)
299 return LLVM::ZExtOp::create(rewriter, loc, rewriter.getI8Type(),
300 oldVal);
301 return oldVal;
302 })
303 .Default(oldVal);
304 }
305
306 static Value bitcastOrTruncAfterShuffle(Value oldVal, Type newTy,
307 Location loc,
308 ConversionPatternRewriter &rewriter) {
309 return TypeSwitch<Type, Value>(newTy)
310 .Case([&](BFloat16Type) {
311 return LLVM::BitcastOp::create(rewriter, loc, newTy, oldVal);
312 })
313 .Case([&](IntegerType intTy) -> Value {
314 if (intTy.getWidth() == 1)
315 return LLVM::TruncOp::create(rewriter, loc, newTy, oldVal);
316 return oldVal;
317 })
318 .Default(oldVal);
319 }
320
321 LogicalResult
322 matchAndRewrite(gpu::ShuffleOp op, OpAdaptor adaptor,
323 ConversionPatternRewriter &rewriter) const final {
324 if (!hasValidWidth(op))
325 return rewriter.notifyMatchFailure(
326 op, "shuffle width and subgroup size mismatch");
327
328 Location loc = op->getLoc();
329 Value inValue =
330 bitcastOrExtBeforeShuffle(adaptor.getValue(), loc, rewriter);
331 std::optional<std::string> funcName =
332 getFuncName(op.getMode(), inValue.getType());
333 if (!funcName)
334 return rewriter.notifyMatchFailure(op, "unsupported value type");
335
336 Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
337 assert(moduleOp && "Expecting module");
338 Type valueType = inValue.getType();
339 Type offsetType = adaptor.getOffset().getType();
340 Type resultType = valueType;
341 LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(
342 moduleOp, funcName.value(), {valueType, offsetType}, resultType,
343 /*isMemNone=*/false, /*isConvergent=*/true);
344
345 std::array<Value, 2> args{inValue, adaptor.getOffset()};
346 Value result =
347 createSPIRVBuiltinCall(loc, rewriter, func, args).getResult();
348 Value resultOrConversion =
349 bitcastOrTruncAfterShuffle(result, op.getType(0), loc, rewriter);
350
351 Value trueVal =
352 LLVM::ConstantOp::create(rewriter, loc, rewriter.getI1Type(), true);
353 rewriter.replaceOp(op, {resultOrConversion, trueVal});
354 return success();
355 }
356};
357
/// Type converter that assigns the OpenCL global (CrossWorkgroup) address
/// space to memrefs that carry no memory-space attribute. Memrefs that
/// already have a memory space, and all non-memref types, are left unchanged.
class MemorySpaceToOpenCLMemorySpaceConverter final : public TypeConverter {
public:
  MemorySpaceToOpenCLMemorySpaceConverter(MLIRContext *ctx) {
    // Identity conversion for any type not handled below.
    addConversion([](Type t) { return t; });
    addConversion([ctx](BaseMemRefType memRefType) -> std::optional<Type> {
      // Attach global addr space attribute to memrefs with no addr space attr
      Attribute memSpaceAttr = memRefType.getMemorySpace();
      if (memSpaceAttr)
        // std::nullopt: this conversion does not apply; another registered
        // conversion handles the type.
        return std::nullopt;

      // OpenCL numeric address space for CrossWorkgroup (global) memory.
      unsigned globalAddrspace = storageClassToAddressSpace(
          spirv::ClientAPI::OpenCL, spirv::StorageClass::CrossWorkgroup);
      Attribute addrSpaceAttr =
          IntegerAttr::get(IntegerType::get(ctx, 64), globalAddrspace);
      // Rebuild the memref with the address space attached, preserving
      // shape/element type (and layout for the ranked case).
      if (auto rankedType = dyn_cast<MemRefType>(memRefType)) {
        return MemRefType::get(memRefType.getShape(),
                               memRefType.getElementType(),
                               rankedType.getLayout(), addrSpaceAttr);
      }
      return UnrankedMemRefType::get(memRefType.getElementType(),
                                     addrSpaceAttr);
    });
    // Convert function types by converting each input and result type.
    addConversion([this](FunctionType type) {
      auto inputs = llvm::map_to_vector(
          type.getInputs(), [this](Type ty) { return convertType(ty); });
      auto results = llvm::map_to_vector(
          type.getResults(), [this](Type ty) { return convertType(ty); });
      return FunctionType::get(type.getContext(), inputs, results);
    });
  }
};
389
390//===----------------------------------------------------------------------===//
391// Subgroup query ops.
392//===----------------------------------------------------------------------===//
393
394template <typename SubgroupOp>
395struct GPUSubgroupOpConversion final : ConvertOpToLLVMPattern<SubgroupOp> {
396 using ConvertOpToLLVMPattern<SubgroupOp>::ConvertOpToLLVMPattern;
398
399 LogicalResult
400 matchAndRewrite(SubgroupOp op, typename SubgroupOp::Adaptor adaptor,
401 ConversionPatternRewriter &rewriter) const final {
402 constexpr StringRef funcName = [] {
403 if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupIdOp>) {
404 return "_Z16get_sub_group_id";
405 } else if constexpr (std::is_same_v<SubgroupOp, gpu::LaneIdOp>) {
406 return "_Z22get_sub_group_local_id";
407 } else if constexpr (std::is_same_v<SubgroupOp, gpu::NumSubgroupsOp>) {
408 return "_Z18get_num_sub_groups";
409 } else if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupSizeOp>) {
410 return "_Z18get_sub_group_size";
411 }
412 }();
413
414 Operation *moduleOp =
415 op->template getParentWithTrait<OpTrait::SymbolTable>();
416 Type resultTy = rewriter.getI32Type();
417 LLVM::LLVMFuncOp func =
418 lookupOrCreateSPIRVFn(moduleOp, funcName, {}, resultTy,
419 /*isMemNone=*/false, /*isConvergent=*/false);
420
421 Location loc = op->getLoc();
422 Value result = createSPIRVBuiltinCall(loc, rewriter, func, {}).getResult();
423
424 Type indexTy = getTypeConverter()->getIndexType();
425 if (resultTy != indexTy) {
426 if (indexTy.getIntOrFloatBitWidth() < resultTy.getIntOrFloatBitWidth()) {
427 return failure();
428 }
429 result = LLVM::ZExtOp::create(rewriter, loc, indexTy, result);
430 }
431
432 rewriter.replaceOp(op, result);
433 return success();
434 }
435};
436
437//===----------------------------------------------------------------------===//
438// GPU To LLVM-SPV Pass.
439//===----------------------------------------------------------------------===//
440
441struct GPUToLLVMSPVConversionPass final
442 : impl::ConvertGpuOpsToLLVMSPVOpsBase<GPUToLLVMSPVConversionPass> {
443 using Base::Base;
444
445 void runOnOperation() final {
446 MLIRContext *context = &getContext();
447 RewritePatternSet patterns(context);
448
449 LowerToLLVMOptions options(context);
450 options.overrideIndexBitwidth(this->use64bitIndex ? 64 : 32);
451 LLVMTypeConverter converter(context, options);
452 LLVMConversionTarget target(*context);
453
454 // Force OpenCL address spaces when they are not present
455 {
456 MemorySpaceToOpenCLMemorySpaceConverter converter(context);
457 AttrTypeReplacer replacer;
458 replacer.addReplacement([&converter](BaseMemRefType origType)
459 -> std::optional<BaseMemRefType> {
460 return converter.convertType<BaseMemRefType>(origType);
461 });
462
463 replacer.recursivelyReplaceElementsIn(getOperation(),
464 /*replaceAttrs=*/true,
465 /*replaceLocs=*/false,
466 /*replaceTypes=*/true);
467 }
468
469 target.addIllegalOp<gpu::BarrierOp, gpu::BlockDimOp, gpu::BlockIdOp,
470 gpu::GPUFuncOp, gpu::GlobalIdOp, gpu::GridDimOp,
471 gpu::LaneIdOp, gpu::NumSubgroupsOp, gpu::ReturnOp,
472 gpu::ShuffleOp, gpu::SubgroupIdOp, gpu::SubgroupSizeOp,
473 gpu::ThreadIdOp, gpu::PrintfOp>();
474
477 patterns.add<GPUPrintfOpToLLVMCallLowering>(converter, /*addressSpace=*/2,
478 LLVM::cconv::CConv::SPIR_FUNC,
479 "_Z6printfPU3AS2Kcz");
480
481 if (failed(applyPartialConversion(getOperation(), target,
482 std::move(patterns))))
483 signalPassFailure();
484 }
485};
486} // namespace
487
488//===----------------------------------------------------------------------===//
489// GPU To LLVM-SPV Patterns.
490//===----------------------------------------------------------------------===//
491
492namespace mlir {
493namespace {
494static unsigned
495gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace addressSpace) {
496 constexpr spirv::ClientAPI clientAPI = spirv::ClientAPI::OpenCL;
497 return storageClassToAddressSpace(clientAPI,
498 addressSpaceToStorageClass(addressSpace));
499}
500} // namespace
501
503 const LLVMTypeConverter &typeConverter, RewritePatternSet &patterns) {
504 patterns.add<GPUBarrierConversion, GPUReturnOpLowering, GPUShuffleConversion,
505 GPUSubgroupOpConversion<gpu::LaneIdOp>,
506 GPUSubgroupOpConversion<gpu::NumSubgroupsOp>,
507 GPUSubgroupOpConversion<gpu::SubgroupIdOp>,
508 GPUSubgroupOpConversion<gpu::SubgroupSizeOp>,
509 LaunchConfigOpConversion<gpu::BlockDimOp>,
510 LaunchConfigOpConversion<gpu::BlockIdOp>,
511 LaunchConfigOpConversion<gpu::GlobalIdOp>,
512 LaunchConfigOpConversion<gpu::GridDimOp>,
513 LaunchConfigOpConversion<gpu::ThreadIdOp>>(typeConverter);
514 MLIRContext *context = &typeConverter.getContext();
515 unsigned privateAddressSpace =
516 gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Private);
517 unsigned localAddressSpace =
518 gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Workgroup);
519 OperationName llvmFuncOpName(LLVM::LLVMFuncOp::getOperationName(), context);
520 StringAttr kernelBlockSizeAttributeName =
521 LLVM::LLVMFuncOp::getReqdWorkGroupSizeAttrName(llvmFuncOpName);
523 typeConverter,
525 privateAddressSpace, localAddressSpace,
526 /*kernelAttributeName=*/{}, kernelBlockSizeAttributeName,
527 LLVM::CConv::SPIR_KERNEL, LLVM::CConv::SPIR_FUNC,
528 /*encodeWorkgroupAttributionsAsArguments=*/true});
529}
530
533 gpuAddressSpaceToOCLAddressSpace);
534}
535} // namespace mlir
return success()
static LLVM::CallOp createSPIRVBuiltinCall(Location loc, ConversionPatternRewriter &rewriter, LLVM::LLVMFuncOp func, ValueRange args)
static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable, StringRef name, ArrayRef< Type > paramTypes, Type resultType, bool isMemNone, bool isConvergent)
b
Return true if permutation is a valid permutation of the outer_dims_perm (case OuterOrInnerPerm::Oute...
b getContext())
static llvm::ManagedStatic< PassManagerOptions > options
ArrayRef< int64_t > getShape() const
Returns the shape of this memref type.
Attribute getMemorySpace() const
Returns the memory space in which data referred to by this memref resides.
Type getElementType() const
Returns the element type of this memref type.
Utility class for operation conversions targeting the LLVM dialect that match exactly one source oper...
Definition Pattern.h:209
ConvertOpToLLVMPattern(const LLVMTypeConverter &typeConverter, PatternBenefit benefit=1)
Definition Pattern.h:215
Base class for operation conversions targeting the LLVM IR dialect.
Definition Pattern.h:88
const LLVMTypeConverter * getTypeConverter() const
Definition Pattern.cpp:27
Conversion from types to the LLVM IR dialect.
MLIRContext & getContext() const
Returns the MLIR context.
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition Location.h:76
MLIRContext is the top-level object for a collection of MLIR operations.
Definition MLIRContext.h:63
This class helps build Operations.
Definition Builders.h:207
Operation is the basic unit of execution within MLIR.
Definition Operation.h:88
Region & getRegion(unsigned index)
Returns the region held by this operation at position 'index'.
Definition Operation.h:686
Operation * getParentWithTrait()
Returns the closest surrounding parent operation with trait Trait.
Definition Operation.h:248
Location getLoc()
The source location the operation was defined or derived from.
Definition Operation.h:223
OpTy getParentOfType()
Return the closest surrounding parent operation that is of type 'OpTy'.
Definition Operation.h:238
static Operation * lookupSymbolIn(Operation *op, StringAttr symbol)
Returns the operation registered with the given symbol name with the regions of 'symbolTableOp'.
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition Types.h:74
unsigned getIntOrFloatBitWidth() const
Return the bit width of an integer or a float type, assert failure on other types.
Definition Types.cpp:122
This class provides an abstraction over the different types of ranges over Values.
Definition ValueRange.h:387
Type getType() const
Return the type of this value.
Definition Value.h:105
void recursivelyReplaceElementsIn(Operation *op, bool replaceAttrs=true, bool replaceLocs=false, bool replaceTypes=false)
Replace the elements within the given operation, and all nested operations.
void addReplacement(ReplaceFn< Attribute > fn)
AttrTypeReplacerBase.
detail::InFlightRemark failed(Location loc, RemarkOpts opts)
Report an optimization remark that failed.
Definition Remarks.h:561
Include the generated interface declarations.
bool matchPattern(Value value, const Pattern &pattern)
Entry point for matching a pattern over a Value.
Definition Matchers.h:490
detail::constant_int_value_binder m_ConstantInt(IntegerAttr::ValueType *bind_value)
Matches a constant holding a scalar/vector/tensor integer (splat) and writes the integer value to bin...
Definition Matchers.h:527
unsigned storageClassToAddressSpace(spirv::ClientAPI clientAPI, spirv::StorageClass storageClass)
void populateGpuToLLVMSPVConversionPatterns(const LLVMTypeConverter &converter, RewritePatternSet &patterns)
spirv::StorageClass addressSpaceToStorageClass(gpu::AddressSpace addressSpace)
const FrozenRewritePatternSet & patterns
void populateGpuMemorySpaceAttributeConversions(TypeConverter &typeConverter, const MemorySpaceMapping &mapping)
Populates memory space attribute conversion rules for lowering gpu.address_space to integer values.
llvm::TypeSwitch< T, ResultT > TypeSwitch
Definition LLVM.h:144