//===- GPUToLLVMSPV.cpp - Convert GPU operations to LLVM dialect ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUToLLVMSPV/GPUToLLVMSPVPass.h"

#include "../GPUCommon/GPUOpsLowering.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/SPIRVCommon/AttrToLLVMConverter.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMAttrs.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMTypes.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVAttributes.h"
#include "mlir/Dialect/SPIRV/IR/SPIRVDialect.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"

#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/FormatVariadic.h"

#define DEBUG_TYPE "gpu-to-llvm-spv"

using namespace mlir;

namespace mlir {
#define GEN_PASS_DEF_CONVERTGPUOPSTOLLVMSPVOPS
#include "mlir/Conversion/Passes.h.inc"
} // namespace mlir

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
                                              StringRef name,
                                              ArrayRef<Type> paramTypes,
                                              Type resultType, bool isMemNone,
                                              bool isConvergent) {
  auto func = dyn_cast_or_null<LLVM::LLVMFuncOp>(
      SymbolTable::lookupSymbolIn(symbolTable, name));
  if (!func) {
    OpBuilder b(symbolTable->getRegion(0));
    func = b.create<LLVM::LLVMFuncOp>(
        symbolTable->getLoc(), name,
        LLVM::LLVMFunctionType::get(resultType, paramTypes));
    func.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
    func.setNoUnwind(true);
    func.setWillReturn(true);

    if (isMemNone) {
      // No externally observable effects.
      constexpr auto noModRef = mlir::LLVM::ModRefInfo::NoModRef;
      auto memAttr = b.getAttr<LLVM::MemoryEffectsAttr>(
          /*other=*/noModRef,
          /*argMem=*/noModRef, /*inaccessibleMem=*/noModRef);
      func.setMemoryEffectsAttr(memAttr);
    }

    func.setConvergent(isConvergent);
  }
  return func;
}
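
// For illustration: looking up `_Z7barrierj` in an empty module should create
// a declaration roughly like the following (exact attribute spelling depends
// on the MLIR version):
//
//   llvm.func spir_funccc @_Z7barrierj(i32) attributes {
//     convergent, no_unwind, will_return}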

static LLVM::CallOp createSPIRVBuiltinCall(Location loc,
                                           ConversionPatternRewriter &rewriter,
                                           LLVM::LLVMFuncOp func,
                                           ValueRange args) {
  auto call = rewriter.create<LLVM::CallOp>(loc, func, args);
  call.setCConv(func.getCConv());
  call.setConvergentAttr(func.getConvergentAttr());
  call.setNoUnwindAttr(func.getNoUnwindAttr());
  call.setWillReturnAttr(func.getWillReturnAttr());
  call.setMemoryEffectsAttr(func.getMemoryEffectsAttr());
  return call;
}
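
// The mirrored attributes keep each call site consistent with its callee, so
// a barrier call, for example, should print roughly as:
//
//   llvm.call spir_funccc @_Z7barrierj(%flag) {convergent, no_unwind,
//     will_return} : (i32) -> ()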

namespace {
//===----------------------------------------------------------------------===//
// Barriers
//===----------------------------------------------------------------------===//

/// Replace `gpu.barrier` with an `llvm.call` to `barrier` with
/// `CLK_LOCAL_MEM_FENCE` argument, indicating work-group memory scope:
/// ```
/// // gpu.barrier
/// %c1 = llvm.mlir.constant(1: i32) : i32
/// llvm.call spir_funccc @_Z7barrierj(%c1) : (i32) -> ()
/// ```
struct GPUBarrierConversion final : ConvertOpToLLVMPattern<gpu::BarrierOp> {
  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(gpu::BarrierOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    constexpr StringLiteral funcName = "_Z7barrierj";

    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type flagTy = rewriter.getI32Type();
    Type voidTy = rewriter.getType<LLVM::LLVMVoidType>();
    LLVM::LLVMFuncOp func =
        lookupOrCreateSPIRVFn(moduleOp, funcName, flagTy, voidTy,
                              /*isMemNone=*/false, /*isConvergent=*/true);

    // Value used by the SPIR-V backend to represent `CLK_LOCAL_MEM_FENCE`.
    // See `llvm/lib/Target/SPIRV/SPIRVBuiltins.td`.
    constexpr int64_t localMemFenceFlag = 1;
    Location loc = op->getLoc();
    Value flag =
        rewriter.create<LLVM::ConstantOp>(loc, flagTy, localMemFenceFlag);
    rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, flag));
    return success();
  }
};

//===----------------------------------------------------------------------===//
// SPIR-V Builtins
//===----------------------------------------------------------------------===//

/// Replace `gpu.*` with an `llvm.call` to the corresponding SPIR-V builtin
/// with a constant argument for the `dimension` attribute. The return type
/// depends on the index width option:
/// ```
/// // %thread_id_y = gpu.thread_id y
/// %c1 = llvm.mlir.constant(1: i32) : i32
/// %0 = llvm.call spir_funccc @_Z12get_local_idj(%c1) : (i32) -> i64
/// ```
struct LaunchConfigConversion : ConvertToLLVMPattern {
  LaunchConfigConversion(StringRef funcName, StringRef rootOpName,
                         MLIRContext *context,
                         const LLVMTypeConverter &typeConverter,
                         PatternBenefit benefit)
      : ConvertToLLVMPattern(rootOpName, context, typeConverter, benefit),
        funcName(funcName) {}

  virtual gpu::Dimension getDimension(Operation *op) const = 0;

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const final {
    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type dimTy = rewriter.getI32Type();
    Type indexTy = getTypeConverter()->getIndexType();
    LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(moduleOp, funcName, dimTy,
                                                  indexTy, /*isMemNone=*/true,
                                                  /*isConvergent=*/false);

    Location loc = op->getLoc();
    gpu::Dimension dim = getDimension(op);
    Value dimVal = rewriter.create<LLVM::ConstantOp>(
        loc, dimTy, static_cast<int64_t>(dim));
    rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, dimVal));
    return success();
  }

  StringRef funcName;
};

template <typename SourceOp>
struct LaunchConfigOpConversion final : LaunchConfigConversion {
  static StringRef getFuncName();

  explicit LaunchConfigOpConversion(const LLVMTypeConverter &typeConverter,
                                    PatternBenefit benefit = 1)
      : LaunchConfigConversion(getFuncName(), SourceOp::getOperationName(),
                               &typeConverter.getContext(), typeConverter,
                               benefit) {}

  gpu::Dimension getDimension(Operation *op) const final {
    return cast<SourceOp>(op).getDimension();
  }
};

template <>
StringRef LaunchConfigOpConversion<gpu::BlockIdOp>::getFuncName() {
  return "_Z12get_group_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GridDimOp>::getFuncName() {
  return "_Z14get_num_groupsj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::BlockDimOp>::getFuncName() {
  return "_Z14get_local_sizej";
}

template <>
StringRef LaunchConfigOpConversion<gpu::ThreadIdOp>::getFuncName() {
  return "_Z12get_local_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GlobalIdOp>::getFuncName() {
  return "_Z13get_global_idj";
}
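
// All of the above are Itanium manglings of the OpenCL builtins taking one
// `unsigned int` (`j`) parameter; e.g. `_Z12get_local_idj` decodes to
// `get_local_id(unsigned int)`, with 12 being the length of `get_local_id`.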

//===----------------------------------------------------------------------===//
// Shuffles
//===----------------------------------------------------------------------===//

/// Replace `gpu.shuffle` with an `llvm.call` to the corresponding SPIR-V
/// builtin for `shuffleResult`, keeping `value` and `offset` arguments, and a
/// `true` constant for the `valid` result type. Conversion will only take
/// place if `width` is constant and equal to the `subgroup` pass option:
/// ```
/// // %0 = gpu.shuffle idx %value, %offset, %width : f64
/// %0 = llvm.call spir_funccc @_Z17sub_group_shuffledj(%value, %offset)
///     : (f64, i32) -> f64
/// ```
struct GPUShuffleConversion final : ConvertOpToLLVMPattern<gpu::ShuffleOp> {
  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;

  static StringRef getBaseName(gpu::ShuffleMode mode) {
    switch (mode) {
    case gpu::ShuffleMode::IDX:
      return "sub_group_shuffle";
    case gpu::ShuffleMode::XOR:
      return "sub_group_shuffle_xor";
    case gpu::ShuffleMode::UP:
      return "sub_group_shuffle_up";
    case gpu::ShuffleMode::DOWN:
      return "sub_group_shuffle_down";
    }
    llvm_unreachable("Unhandled shuffle mode");
  }

  static std::optional<StringRef> getTypeMangling(Type type) {
    return TypeSwitch<Type, std::optional<StringRef>>(type)
        .Case<Float16Type>([](auto) { return "Dhj"; })
        .Case<Float32Type>([](auto) { return "fj"; })
        .Case<Float64Type>([](auto) { return "dj"; })
        .Case<IntegerType>([](auto intTy) -> std::optional<StringRef> {
          switch (intTy.getWidth()) {
          case 8:
            return "cj";
          case 16:
            return "sj";
          case 32:
            return "ij";
          case 64:
            return "lj";
          }
          return std::nullopt;
        })
        .Default([](auto) { return std::nullopt; });
  }

  static std::optional<std::string> getFuncName(gpu::ShuffleOp op) {
    StringRef baseName = getBaseName(op.getMode());
    std::optional<StringRef> typeMangling = getTypeMangling(op.getType(0));
    if (!typeMangling)
      return std::nullopt;
    return llvm::formatv("_Z{0}{1}{2}", baseName.size(), baseName,
                         typeMangling.value());
  }
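
  // E.g., `gpu.shuffle idx` on an f32 value yields `_Z17sub_group_shufflefj`,
  // the Itanium mangling of `sub_group_shuffle(float, unsigned int)`:
  // 17 = strlen("sub_group_shuffle"), `f` = the float value, `j` = the
  // unsigned offset.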

  /// Get the subgroup size from the target or return a default.
  static std::optional<int> getSubgroupSize(Operation *op) {
    auto parentFunc = op->getParentOfType<LLVM::LLVMFuncOp>();
    if (!parentFunc)
      return std::nullopt;
    return parentFunc.getIntelReqdSubGroupSize();
  }

  static bool hasValidWidth(gpu::ShuffleOp op) {
    llvm::APInt val;
    Value width = op.getWidth();
    return matchPattern(width, m_ConstantInt(&val)) &&
           val == getSubgroupSize(op);
  }

  LogicalResult
  matchAndRewrite(gpu::ShuffleOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    if (!hasValidWidth(op))
      return rewriter.notifyMatchFailure(
          op, "shuffle width and subgroup size mismatch");

    std::optional<std::string> funcName = getFuncName(op);
    if (!funcName)
      return rewriter.notifyMatchFailure(op, "unsupported value type");

    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type valueType = adaptor.getValue().getType();
    Type offsetType = adaptor.getOffset().getType();
    Type resultType = valueType;
    LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(
        moduleOp, funcName.value(), {valueType, offsetType}, resultType,
        /*isMemNone=*/false, /*isConvergent=*/true);

    Location loc = op->getLoc();
    std::array<Value, 2> args{adaptor.getValue(), adaptor.getOffset()};
    Value result =
        createSPIRVBuiltinCall(loc, rewriter, func, args).getResult();
    Value trueVal =
        rewriter.create<LLVM::ConstantOp>(loc, rewriter.getI1Type(), true);
    rewriter.replaceOp(op, {result, trueVal});
    return success();
  }
};
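
// For illustration, an `xor` shuffle of an i32 value should lower roughly as
// follows (assuming the width matches the subgroup size):
//
//   // %shfl, %valid = gpu.shuffle xor %value, %offset, %width : i32
//   %0 = llvm.call spir_funccc @_Z21sub_group_shuffle_xorij(%value, %offset)
//       : (i32, i32) -> i32
//   %true = llvm.mlir.constant(true) : i1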

class MemorySpaceToOpenCLMemorySpaceConverter final : public TypeConverter {
public:
  MemorySpaceToOpenCLMemorySpaceConverter(MLIRContext *ctx) {
    addConversion([](Type t) { return t; });
    addConversion([ctx](BaseMemRefType memRefType) -> std::optional<Type> {
      // Attach global addr space attribute to memrefs with no addr space attr.
      Attribute memSpaceAttr = memRefType.getMemorySpace();
      if (memSpaceAttr)
        return std::nullopt;

      unsigned globalAddrspace = storageClassToAddressSpace(
          spirv::ClientAPI::OpenCL, spirv::StorageClass::CrossWorkgroup);
      Attribute addrSpaceAttr =
          IntegerAttr::get(IntegerType::get(ctx, 64), globalAddrspace);
      if (auto rankedType = dyn_cast<MemRefType>(memRefType)) {
        return MemRefType::get(memRefType.getShape(),
                               memRefType.getElementType(),
                               rankedType.getLayout(), addrSpaceAttr);
      }
      return UnrankedMemRefType::get(memRefType.getElementType(),
                                     addrSpaceAttr);
    });
    addConversion([this](FunctionType type) {
      auto inputs = llvm::map_to_vector(
          type.getInputs(), [this](Type ty) { return convertType(ty); });
      auto results = llvm::map_to_vector(
          type.getResults(), [this](Type ty) { return convertType(ty); });
      return FunctionType::get(type.getContext(), inputs, results);
    });
  }
};
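
// E.g., `memref<8xf32>` without a memory space becomes `memref<8xf32, 1>`,
// since the OpenCL client API maps the `CrossWorkgroup` (global) storage
// class to address space 1.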

//===----------------------------------------------------------------------===//
// Subgroup query ops.
//===----------------------------------------------------------------------===//

template <typename SubgroupOp>
struct GPUSubgroupOpConversion final : ConvertOpToLLVMPattern<SubgroupOp> {
  using ConvertOpToLLVMPattern<SubgroupOp>::ConvertOpToLLVMPattern;
  using ConvertToLLVMPattern::getTypeConverter;

  LogicalResult
  matchAndRewrite(SubgroupOp op, typename SubgroupOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    constexpr StringRef funcName = [] {
      if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupIdOp>) {
        return "_Z16get_sub_group_id";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::LaneIdOp>) {
        return "_Z22get_sub_group_local_id";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::NumSubgroupsOp>) {
        return "_Z18get_num_sub_groups";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupSizeOp>) {
        return "_Z18get_sub_group_size";
      }
    }();

    Operation *moduleOp =
        op->template getParentWithTrait<OpTrait::SymbolTable>();
    Type resultTy = rewriter.getI32Type();
    LLVM::LLVMFuncOp func =
        lookupOrCreateSPIRVFn(moduleOp, funcName, {}, resultTy,
                              /*isMemNone=*/false, /*isConvergent=*/false);

    Location loc = op->getLoc();
    Value result = createSPIRVBuiltinCall(loc, rewriter, func, {}).getResult();

    Type indexTy = getTypeConverter()->getIndexType();
    if (resultTy != indexTy) {
      if (indexTy.getIntOrFloatBitWidth() < resultTy.getIntOrFloatBitWidth()) {
        return failure();
      }
      result = rewriter.create<LLVM::ZExtOp>(loc, indexTy, result);
    }

    rewriter.replaceOp(op, result);
    return success();
  }
};
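
// For illustration, `gpu.subgroup_id` with 64-bit indices should lower
// roughly as follows (the builtin returns i32, hence the zero-extension):
//
//   %0 = llvm.call spir_funccc @_Z16get_sub_group_id() : () -> i32
//   %1 = llvm.zext %0 : i32 to i64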

//===----------------------------------------------------------------------===//
// GPU To LLVM-SPV Pass.
//===----------------------------------------------------------------------===//

struct GPUToLLVMSPVConversionPass final
    : impl::ConvertGpuOpsToLLVMSPVOpsBase<GPUToLLVMSPVConversionPass> {
  using Base::Base;

  void runOnOperation() final {
    MLIRContext *context = &getContext();
    RewritePatternSet patterns(context);

    LowerToLLVMOptions options(context);
    options.overrideIndexBitwidth(this->use64bitIndex ? 64 : 32);
    LLVMTypeConverter converter(context, options);
    LLVMConversionTarget target(*context);

    // Force OpenCL address spaces when they are not present.
    {
      MemorySpaceToOpenCLMemorySpaceConverter converter(context);
      AttrTypeReplacer replacer;
      replacer.addReplacement([&converter](BaseMemRefType origType)
                                  -> std::optional<BaseMemRefType> {
        return converter.convertType<BaseMemRefType>(origType);
      });

      replacer.recursivelyReplaceElementsIn(getOperation(),
                                            /*replaceAttrs=*/true,
                                            /*replaceLocs=*/false,
                                            /*replaceTypes=*/true);
    }

    target.addIllegalOp<gpu::BarrierOp, gpu::BlockDimOp, gpu::BlockIdOp,
                        gpu::GPUFuncOp, gpu::GlobalIdOp, gpu::GridDimOp,
                        gpu::LaneIdOp, gpu::NumSubgroupsOp, gpu::ReturnOp,
                        gpu::ShuffleOp, gpu::SubgroupIdOp, gpu::SubgroupSizeOp,
                        gpu::ThreadIdOp>();

    populateGpuToLLVMSPVConversionPatterns(converter, patterns);
    populateGpuMemorySpaceAttributeConversions(converter);

    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};
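
// The generated base class above registers the pass; assuming the flag names
// from Passes.td, an invocation should look roughly like:
//
//   mlir-opt --convert-gpu-to-llvm-spv='use-64bit-index=true' kernel.mlir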
} // namespace

//===----------------------------------------------------------------------===//
// GPU To LLVM-SPV Patterns.
//===----------------------------------------------------------------------===//

namespace mlir {
namespace {
static unsigned
gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace addressSpace) {
  constexpr spirv::ClientAPI clientAPI = spirv::ClientAPI::OpenCL;
  return storageClassToAddressSpace(clientAPI,
                                    addressSpaceToStorageClass(addressSpace));
}
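
// Under the OpenCL client API this maps `gpu.address_space` as follows:
// Private -> Function -> 0, Global -> CrossWorkgroup -> 1, and
// Workgroup -> Workgroup -> 3.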
} // namespace

void populateGpuToLLVMSPVConversionPatterns(
    const LLVMTypeConverter &typeConverter, RewritePatternSet &patterns) {
  patterns.add<GPUBarrierConversion, GPUReturnOpLowering, GPUShuffleConversion,
               GPUSubgroupOpConversion<gpu::LaneIdOp>,
               GPUSubgroupOpConversion<gpu::NumSubgroupsOp>,
               GPUSubgroupOpConversion<gpu::SubgroupIdOp>,
               GPUSubgroupOpConversion<gpu::SubgroupSizeOp>,
               LaunchConfigOpConversion<gpu::BlockDimOp>,
               LaunchConfigOpConversion<gpu::BlockIdOp>,
               LaunchConfigOpConversion<gpu::GlobalIdOp>,
               LaunchConfigOpConversion<gpu::GridDimOp>,
               LaunchConfigOpConversion<gpu::ThreadIdOp>>(typeConverter);
  MLIRContext *context = &typeConverter.getContext();
  unsigned privateAddressSpace =
      gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Private);
  unsigned localAddressSpace =
      gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Workgroup);
  OperationName llvmFuncOpName(LLVM::LLVMFuncOp::getOperationName(), context);
  StringAttr kernelBlockSizeAttributeName =
      LLVM::LLVMFuncOp::getReqdWorkGroupSizeAttrName(llvmFuncOpName);
  patterns.add<GPUFuncOpLowering>(
      typeConverter,
      GPUFuncOpLoweringOptions{
          privateAddressSpace, localAddressSpace,
          /*kernelAttributeName=*/{}, kernelBlockSizeAttributeName,
          LLVM::CConv::SPIR_KERNEL, LLVM::CConv::SPIR_FUNC,
          /*encodeWorkgroupAttributionsAsArguments=*/true});
}
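
// Minimal driver sketch for using the populate function outside of the pass,
// mirroring `runOnOperation` above (`ctx` is a hypothetical MLIRContext *):
//
//   LowerToLLVMOptions options(ctx);
//   LLVMTypeConverter converter(ctx, options);
//   RewritePatternSet patterns(ctx);
//   populateGpuToLLVMSPVConversionPatterns(converter, patterns);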

void populateGpuMemorySpaceAttributeConversions(TypeConverter &typeConverter) {
  populateGpuMemorySpaceAttributeConversions(typeConverter,
                                             gpuAddressSpaceToOCLAddressSpace);
}
} // namespace mlir