//===- GPUToLLVMSPV.cpp - Convert GPU operations to LLVM dialect ----------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Conversion/GPUToLLVMSPV/GPUToLLVMSPVPass.h"

#include "../GPUCommon/GPUOpsLowering.h"
#include "mlir/Conversion/GPUCommon/GPUCommonPass.h"
#include "mlir/Conversion/LLVMCommon/ConversionTarget.h"
#include "mlir/Conversion/LLVMCommon/LoweringOptions.h"
#include "mlir/Conversion/LLVMCommon/Pattern.h"
#include "mlir/Conversion/LLVMCommon/TypeConverter.h"
#include "mlir/Conversion/SPIRVCommon/AttrToLLVMConverter.h"
#include "mlir/Dialect/GPU/IR/GPUDialect.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/SPIRV/IR/TargetAndABI.h"
#include "mlir/IR/BuiltinTypes.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/PatternMatch.h"
#include "mlir/IR/SymbolTable.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"
#include "mlir/Transforms/DialectConversion.h"

#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/FormatVariadic.h"

#define DEBUG_TYPE "gpu-to-llvm-spv"

using namespace mlir;

namespace mlir {
#define GEN_PASS_DEF_CONVERTGPUOPSTOLLVMSPVOPS
#include "mlir/Conversion/Passes.h.inc"
} // namespace mlir

//===----------------------------------------------------------------------===//
// Helper Functions
//===----------------------------------------------------------------------===//

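// SPIR-V builtins are declared with Itanium-mangled OpenCL names. For
// illustration, "_Z12get_local_idj" decomposes as "_Z" (mangling prefix),
// "12" (length of "get_local_id"), and "j" (one unsigned int parameter).
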
static LLVM::LLVMFuncOp lookupOrCreateSPIRVFn(Operation *symbolTable,
                                              StringRef name,
                                              ArrayRef<Type> paramTypes,
                                              Type resultType, bool isMemNone,
                                              bool isConvergent) {
  auto func = dyn_cast_or_null<LLVM::LLVMFuncOp>(
      SymbolTable::lookupSymbolIn(symbolTable, name));
  if (!func) {
    OpBuilder b(symbolTable->getRegion(0));
    func = b.create<LLVM::LLVMFuncOp>(
        symbolTable->getLoc(), name,
        LLVM::LLVMFunctionType::get(resultType, paramTypes));
    func.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
    func.setNoUnwind(true);
    func.setWillReturn(true);

    if (isMemNone) {
      // no externally observable effects
      constexpr auto noModRef = mlir::LLVM::ModRefInfo::NoModRef;
      auto memAttr = b.getAttr<LLVM::MemoryEffectsAttr>(
          /*other=*/noModRef,
          /*argMem=*/noModRef, /*inaccessibleMem=*/noModRef);
      func.setMemoryEffectsAttr(memAttr);
    }

    func.setConvergent(isConvergent);
  }
  return func;
}

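// Builds a call to `func`, mirroring the callee's calling convention and
// attributes on the call site so later passes see consistent convergence and
// memory-effect information.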
static LLVM::CallOp createSPIRVBuiltinCall(Location loc,
                                           ConversionPatternRewriter &rewriter,
                                           LLVM::LLVMFuncOp func,
                                           ValueRange args) {
  auto call = rewriter.create<LLVM::CallOp>(loc, func, args);
  call.setCConv(func.getCConv());
  call.setConvergentAttr(func.getConvergentAttr());
  call.setNoUnwindAttr(func.getNoUnwindAttr());
  call.setWillReturnAttr(func.getWillReturnAttr());
  call.setMemoryEffectsAttr(func.getMemoryEffectsAttr());
  return call;
}

namespace {
//===----------------------------------------------------------------------===//
// Barriers
//===----------------------------------------------------------------------===//

/// Replace `gpu.barrier` with an `llvm.call` to `barrier` with a
/// `CLK_LOCAL_MEM_FENCE` argument, indicating work-group memory scope:
/// ```
/// // gpu.barrier
/// %c1 = llvm.mlir.constant(1: i32) : i32
/// llvm.call spir_funccc @_Z7barrierj(%c1) : (i32) -> ()
/// ```
struct GPUBarrierConversion final : ConvertOpToLLVMPattern<gpu::BarrierOp> {
  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;

  LogicalResult
  matchAndRewrite(gpu::BarrierOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    constexpr StringLiteral funcName = "_Z7barrierj";

    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type flagTy = rewriter.getI32Type();
    Type voidTy = rewriter.getType<LLVM::LLVMVoidType>();
    LLVM::LLVMFuncOp func =
        lookupOrCreateSPIRVFn(moduleOp, funcName, flagTy, voidTy,
                              /*isMemNone=*/false, /*isConvergent=*/true);

    // Value used by SPIR-V backend to represent `CLK_LOCAL_MEM_FENCE`.
    // See `llvm/lib/Target/SPIRV/SPIRVBuiltins.td`.
    constexpr int64_t localMemFenceFlag = 1;
    Location loc = op->getLoc();
    Value flag =
        rewriter.create<LLVM::ConstantOp>(loc, flagTy, localMemFenceFlag);
    rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, flag));
    return success();
  }
};

//===----------------------------------------------------------------------===//
// SPIR-V Builtins
//===----------------------------------------------------------------------===//

/// Replace `gpu.*` with an `llvm.call` to the corresponding SPIR-V builtin
/// with a constant argument for the `dimension` attribute. The return type
/// depends on the index bitwidth option:
/// ```
/// // %thread_id_y = gpu.thread_id y
/// %c1 = llvm.mlir.constant(1: i32) : i32
/// %0 = llvm.call spir_funccc @_Z12get_local_idj(%c1) : (i32) -> i64
/// ```
struct LaunchConfigConversion : ConvertToLLVMPattern {
  LaunchConfigConversion(StringRef funcName, StringRef rootOpName,
                         MLIRContext *context,
                         const LLVMTypeConverter &typeConverter,
                         PatternBenefit benefit)
      : ConvertToLLVMPattern(rootOpName, context, typeConverter, benefit),
        funcName(funcName) {}

  virtual gpu::Dimension getDimension(Operation *op) const = 0;

  LogicalResult
  matchAndRewrite(Operation *op, ArrayRef<Value> operands,
                  ConversionPatternRewriter &rewriter) const final {
    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type dimTy = rewriter.getI32Type();
    Type indexTy = getTypeConverter()->getIndexType();
    LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(moduleOp, funcName, dimTy,
                                                  indexTy, /*isMemNone=*/true,
                                                  /*isConvergent=*/false);

    Location loc = op->getLoc();
    gpu::Dimension dim = getDimension(op);
    Value dimVal = rewriter.create<LLVM::ConstantOp>(
        loc, dimTy, static_cast<int64_t>(dim));
    rewriter.replaceOp(op, createSPIRVBuiltinCall(loc, rewriter, func, dimVal));
    return success();
  }

  StringRef funcName;
};

template <typename SourceOp>
struct LaunchConfigOpConversion final : LaunchConfigConversion {
  static StringRef getFuncName();

  explicit LaunchConfigOpConversion(const LLVMTypeConverter &typeConverter,
                                    PatternBenefit benefit = 1)
      : LaunchConfigConversion(getFuncName(), SourceOp::getOperationName(),
                               &typeConverter.getContext(), typeConverter,
                               benefit) {}

  gpu::Dimension getDimension(Operation *op) const final {
    return cast<SourceOp>(op).getDimension();
  }
};

template <>
StringRef LaunchConfigOpConversion<gpu::BlockIdOp>::getFuncName() {
  return "_Z12get_group_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GridDimOp>::getFuncName() {
  return "_Z14get_num_groupsj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::BlockDimOp>::getFuncName() {
  return "_Z14get_local_sizej";
}

template <>
StringRef LaunchConfigOpConversion<gpu::ThreadIdOp>::getFuncName() {
  return "_Z12get_local_idj";
}

template <>
StringRef LaunchConfigOpConversion<gpu::GlobalIdOp>::getFuncName() {
  return "_Z13get_global_idj";
}

//===----------------------------------------------------------------------===//
// Shuffles
//===----------------------------------------------------------------------===//

/// Replace `gpu.shuffle` with an `llvm.call` to the corresponding SPIR-V
/// builtin for `shuffleResult`, keeping the `value` and `offset` arguments and
/// materializing a `true` constant for the `valid` result. Conversion only
/// takes place if `width` is a compile-time constant equal to the target's
/// subgroup size:
/// ```
/// // %0 = gpu.shuffle idx %value, %offset, %width : f64
/// %0 = llvm.call spir_funccc @_Z17sub_group_shuffledj(%value, %offset)
///     : (f64, i32) -> f64
/// ```
struct GPUShuffleConversion final : ConvertOpToLLVMPattern<gpu::ShuffleOp> {
  using ConvertOpToLLVMPattern::ConvertOpToLLVMPattern;

  static StringRef getBaseName(gpu::ShuffleMode mode) {
    switch (mode) {
    case gpu::ShuffleMode::IDX:
      return "sub_group_shuffle";
    case gpu::ShuffleMode::XOR:
      return "sub_group_shuffle_xor";
    case gpu::ShuffleMode::UP:
      return "sub_group_shuffle_up";
    case gpu::ShuffleMode::DOWN:
      return "sub_group_shuffle_down";
    }
    llvm_unreachable("Unhandled shuffle mode");
  }

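  // Itanium type codes used in the mangled name: "Dh" = half, "f" = float,
  // "d" = double, and "c"/"s"/"i"/"l" = 8/16/32/64-bit integers; the trailing
  // "j" encodes the builtin's unsigned int offset parameter.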
  static std::optional<StringRef> getTypeMangling(Type type) {
    return TypeSwitch<Type, std::optional<StringRef>>(type)
        .Case<Float16Type>([](auto) { return "Dhj"; })
        .Case<Float32Type>([](auto) { return "fj"; })
        .Case<Float64Type>([](auto) { return "dj"; })
        .Case<IntegerType>([](auto intTy) -> std::optional<StringRef> {
          switch (intTy.getWidth()) {
          case 8:
            return "cj";
          case 16:
            return "sj";
          case 32:
            return "ij";
          case 64:
            return "lj";
          }
          return std::nullopt;
        })
        .Default([](auto) { return std::nullopt; });
  }

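  // Worked example: an `xor` shuffle of an f32 value mangles to
  // "_Z21sub_group_shuffle_xorfj" (21 = length of "sub_group_shuffle_xor",
  // "f" = float value, "j" = unsigned int offset).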
  static std::optional<std::string> getFuncName(gpu::ShuffleOp op) {
    StringRef baseName = getBaseName(op.getMode());
    std::optional<StringRef> typeMangling = getTypeMangling(op.getType(0));
    if (!typeMangling)
      return std::nullopt;
    return llvm::formatv("_Z{0}{1}{2}", baseName.size(), baseName,
                         typeMangling.value());
  }

  /// Get the subgroup size from the target or return a default.
  static int getSubgroupSize(Operation *op) {
    return spirv::lookupTargetEnvOrDefault(op)
        .getResourceLimits()
        .getSubgroupSize();
  }

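  // The sub-group shuffle builtins operate across the entire subgroup, so the
  // op's `width` must be a constant matching the target's subgroup size for
  // the lowering to be valid.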
  static bool hasValidWidth(gpu::ShuffleOp op) {
    llvm::APInt val;
    Value width = op.getWidth();
    return matchPattern(width, m_ConstantInt(&val)) &&
           val == getSubgroupSize(op);
  }

  LogicalResult
  matchAndRewrite(gpu::ShuffleOp op, OpAdaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    if (!hasValidWidth(op))
      return rewriter.notifyMatchFailure(
          op, "shuffle width and subgroup size mismatch");

    std::optional<std::string> funcName = getFuncName(op);
    if (!funcName)
      return rewriter.notifyMatchFailure(op, "unsupported value type");

    Operation *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
    assert(moduleOp && "Expecting module");
    Type valueType = adaptor.getValue().getType();
    Type offsetType = adaptor.getOffset().getType();
    Type resultType = valueType;
    LLVM::LLVMFuncOp func = lookupOrCreateSPIRVFn(
        moduleOp, funcName.value(), {valueType, offsetType}, resultType,
        /*isMemNone=*/false, /*isConvergent=*/true);

    Location loc = op->getLoc();
    std::array<Value, 2> args{adaptor.getValue(), adaptor.getOffset()};
    Value result =
        createSPIRVBuiltinCall(loc, rewriter, func, args).getResult();
    Value trueVal =
        rewriter.create<LLVM::ConstantOp>(loc, rewriter.getI1Type(), true);
    rewriter.replaceOp(op, {result, trueVal});
    return success();
  }
};

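/// Converts memref types without a memory space to the OpenCL global
/// (`CrossWorkgroup`) address space, e.g. `memref<10xf32>` becomes
/// `memref<10xf32, 1>`. Memrefs that already carry a memory space attribute
/// are left unchanged.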
class MemorySpaceToOpenCLMemorySpaceConverter final : public TypeConverter {
public:
  MemorySpaceToOpenCLMemorySpaceConverter(MLIRContext *ctx) {
    addConversion([](Type t) { return t; });
    addConversion([ctx](BaseMemRefType memRefType) -> std::optional<Type> {
      // Attach global addr space attribute to memrefs with no addr space attr
      Attribute memSpaceAttr = memRefType.getMemorySpace();
      if (memSpaceAttr)
        return std::nullopt;

      unsigned globalAddrspace = storageClassToAddressSpace(
          spirv::ClientAPI::OpenCL, spirv::StorageClass::CrossWorkgroup);
      Attribute addrSpaceAttr =
          IntegerAttr::get(IntegerType::get(ctx, 64), globalAddrspace);
      if (auto rankedType = dyn_cast<MemRefType>(memRefType)) {
        return MemRefType::get(memRefType.getShape(),
                               memRefType.getElementType(),
                               rankedType.getLayout(), addrSpaceAttr);
      }
      return UnrankedMemRefType::get(memRefType.getElementType(),
                                     addrSpaceAttr);
    });
    addConversion([this](FunctionType type) {
      auto inputs = llvm::map_to_vector(
          type.getInputs(), [this](Type ty) { return convertType(ty); });
      auto results = llvm::map_to_vector(
          type.getResults(), [this](Type ty) { return convertType(ty); });
      return FunctionType::get(type.getContext(), inputs, results);
    });
  }
};

//===----------------------------------------------------------------------===//
// Subgroup query ops.
//===----------------------------------------------------------------------===//

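/// Replace a subgroup query op with an `llvm.call` to the corresponding
/// SPIR-V builtin, zero-extending the `i32` result when the target index type
/// is wider. E.g., with 64-bit indices:
/// ```
/// // %0 = gpu.subgroup_id : index
/// %1 = llvm.call spir_funccc @_Z16get_sub_group_id() : () -> i32
/// %0 = llvm.zext %1 : i32 to i64
/// ```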
template <typename SubgroupOp>
struct GPUSubgroupOpConversion final : ConvertOpToLLVMPattern<SubgroupOp> {
  using ConvertOpToLLVMPattern<SubgroupOp>::ConvertOpToLLVMPattern;
  using ConvertToLLVMPattern::getTypeConverter;

  LogicalResult
  matchAndRewrite(SubgroupOp op, typename SubgroupOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const final {
    constexpr StringRef funcName = [] {
      if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupIdOp>) {
        return "_Z16get_sub_group_id";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::LaneIdOp>) {
        return "_Z22get_sub_group_local_id";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::NumSubgroupsOp>) {
        return "_Z18get_num_sub_groups";
      } else if constexpr (std::is_same_v<SubgroupOp, gpu::SubgroupSizeOp>) {
        return "_Z18get_sub_group_size";
      }
    }();

    Operation *moduleOp =
        op->template getParentWithTrait<OpTrait::SymbolTable>();
    Type resultTy = rewriter.getI32Type();
    LLVM::LLVMFuncOp func =
        lookupOrCreateSPIRVFn(moduleOp, funcName, {}, resultTy,
                              /*isMemNone=*/false, /*isConvergent=*/false);

    Location loc = op->getLoc();
    Value result = createSPIRVBuiltinCall(loc, rewriter, func, {}).getResult();

    Type indexTy = getTypeConverter()->getIndexType();
    if (resultTy != indexTy) {
      if (indexTy.getIntOrFloatBitWidth() < resultTy.getIntOrFloatBitWidth()) {
        return failure();
      }
      result = rewriter.create<LLVM::ZExtOp>(loc, indexTy, result);
    }

    rewriter.replaceOp(op, result);
    return success();
  }
};

//===----------------------------------------------------------------------===//
// GPU To LLVM-SPV Pass.
//===----------------------------------------------------------------------===//

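/// Lowers `gpu` dialect ops to LLVM dialect ops calling SPIR-V builtins. The
/// index bitwidth follows the pass option when set; memrefs without an
/// explicit memory space are first rewritten into OpenCL address spaces.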
struct GPUToLLVMSPVConversionPass final
    : impl::ConvertGpuOpsToLLVMSPVOpsBase<GPUToLLVMSPVConversionPass> {
  using Base::Base;

  void runOnOperation() final {
    MLIRContext *context = &getContext();
    RewritePatternSet patterns(context);

    LowerToLLVMOptions options(context);
    if (indexBitwidth != kDeriveIndexBitwidthFromDataLayout)
      options.overrideIndexBitwidth(indexBitwidth);

    LLVMTypeConverter converter(context, options);
    LLVMConversionTarget target(*context);

    // Force OpenCL address spaces when they are not present
    {
      MemorySpaceToOpenCLMemorySpaceConverter converter(context);
      AttrTypeReplacer replacer;
      replacer.addReplacement([&converter](BaseMemRefType origType)
                                  -> std::optional<BaseMemRefType> {
        return converter.convertType<BaseMemRefType>(origType);
      });

      replacer.recursivelyReplaceElementsIn(getOperation(),
                                            /*replaceAttrs=*/true,
                                            /*replaceLocs=*/false,
                                            /*replaceTypes=*/true);
    }

    target.addIllegalOp<gpu::BarrierOp, gpu::BlockDimOp, gpu::BlockIdOp,
                        gpu::GPUFuncOp, gpu::GlobalIdOp, gpu::GridDimOp,
                        gpu::LaneIdOp, gpu::NumSubgroupsOp, gpu::ReturnOp,
                        gpu::ShuffleOp, gpu::SubgroupIdOp, gpu::SubgroupSizeOp,
                        gpu::ThreadIdOp>();

    populateGpuToLLVMSPVConversionPatterns(converter, patterns);
    populateGpuMemorySpaceAttributeConversions(converter);

    if (failed(applyPartialConversion(getOperation(), target,
                                      std::move(patterns))))
      signalPassFailure();
  }
};
} // namespace

//===----------------------------------------------------------------------===//
// GPU To LLVM-SPV Patterns.
//===----------------------------------------------------------------------===//

namespace mlir {
namespace {
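/// Map a `gpu` address space to the corresponding OpenCL address space by
/// going through the SPIR-V storage class, e.g.
/// gpu::AddressSpace::Workgroup -> spirv::StorageClass::Workgroup -> 3
/// (OpenCL "local").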
static unsigned
gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace addressSpace) {
  constexpr spirv::ClientAPI clientAPI = spirv::ClientAPI::OpenCL;
  return storageClassToAddressSpace(clientAPI,
                                    addressSpaceToStorageClass(addressSpace));
}
} // namespace

void populateGpuToLLVMSPVConversionPatterns(
    const LLVMTypeConverter &typeConverter, RewritePatternSet &patterns) {
  patterns.add<GPUBarrierConversion, GPUReturnOpLowering, GPUShuffleConversion,
               GPUSubgroupOpConversion<gpu::LaneIdOp>,
               GPUSubgroupOpConversion<gpu::NumSubgroupsOp>,
               GPUSubgroupOpConversion<gpu::SubgroupIdOp>,
               GPUSubgroupOpConversion<gpu::SubgroupSizeOp>,
               LaunchConfigOpConversion<gpu::BlockDimOp>,
               LaunchConfigOpConversion<gpu::BlockIdOp>,
               LaunchConfigOpConversion<gpu::GlobalIdOp>,
               LaunchConfigOpConversion<gpu::GridDimOp>,
               LaunchConfigOpConversion<gpu::ThreadIdOp>>(typeConverter);
  MLIRContext *context = &typeConverter.getContext();
  unsigned privateAddressSpace =
      gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Private);
  unsigned localAddressSpace =
      gpuAddressSpaceToOCLAddressSpace(gpu::AddressSpace::Workgroup);
  OperationName llvmFuncOpName(LLVM::LLVMFuncOp::getOperationName(), context);
  StringAttr kernelBlockSizeAttributeName =
      LLVM::LLVMFuncOp::getReqdWorkGroupSizeAttrName(llvmFuncOpName);
  patterns.add<GPUFuncOpLowering>(
      typeConverter,
      GPUFuncOpLoweringOptions{
          privateAddressSpace, localAddressSpace,
          /*kernelAttributeName=*/{}, kernelBlockSizeAttributeName,
          LLVM::CConv::SPIR_KERNEL, LLVM::CConv::SPIR_FUNC,
          /*encodeWorkgroupAttributionsAsArguments=*/true});
}

void populateGpuMemorySpaceAttributeConversions(
    TypeConverter &typeConverter) {
  populateGpuMemorySpaceAttributeConversions(typeConverter,
                                             gpuAddressSpaceToOCLAddressSpace);
}
} // namespace mlir