MLIR  20.0.0git
XeGPUOps.cpp
Go to the documentation of this file.
1 //===- XeGPUOps.cpp - MLIR XeGPU ops implementation -------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 
12 #include "mlir/IR/Builders.h"
13 #include "mlir/IR/TypeUtilities.h"
14 
15 #include "llvm/Support/Debug.h"
16 
17 #define DEBUG_TYPE "xegpu"
18 
19 namespace mlir {
20 namespace xegpu {
21 
23  SmallVector<int64_t> &shape) {
24  SmallVector<int64_t> old = shape;
25  for (size_t i = 0; i < trans.size(); i++)
26  shape[i] = old[trans[i]];
27 }
28 
29 template <typename T>
30 static std::string makeString(T array, bool breakline = false) {
31  std::string buf;
32  buf.clear();
33  llvm::raw_string_ostream os(buf);
34  os << "[";
35  for (size_t i = 1; i < array.size(); i++) {
36  os << array[i - 1] << ", ";
37  if (breakline)
38  os << "\n\t\t";
39  }
40  os << array.back() << "]";
41  return buf;
42 }
43 
46  if (auto ty = llvm::dyn_cast<ShapedType>(type))
47  shape = SmallVector<int64_t>(ty.getShape());
48  else
49  shape.push_back(1);
50  return shape;
51 }
52 
53 static int64_t getRankOf(Value val) {
54  auto type = val.getType();
55  if (auto ty = llvm::dyn_cast<ShapedType>(type))
56  return ty.getRank();
57  return 0;
58 }
59 
60 static bool isReadHintOrNone(const CachePolicyAttr &attr) {
61  if (!attr)
62  return true;
63  auto kind = attr.getValue();
64  return kind == CachePolicy::CACHED || kind == CachePolicy::UNCACHED ||
65  kind == CachePolicy::STREAMING || kind == CachePolicy::READ_INVALIDATE;
66 }
67 
68 static bool isWriteHintOrNone(const CachePolicyAttr &attr) {
69  if (!attr)
70  return true;
71  auto kind = attr.getValue();
72  return kind == CachePolicy::CACHED || kind == CachePolicy::UNCACHED ||
73  kind == CachePolicy::WRITE_BACK || kind == CachePolicy::WRITE_THROUGH;
74 }
75 
76 //===----------------------------------------------------------------------===//
77 // XeGPU_CreateNdDescOp
78 //===----------------------------------------------------------------------===//
79 void CreateNdDescOp::build(OpBuilder &builder, OperationState &state,
80  Type tdesc, TypedValue<MemRefType> source,
82  [[maybe_unused]] auto ty = source.getType();
83  assert(ty.hasStaticShape() && offsets.size() == (size_t)ty.getRank());
84 
85  llvm::SmallVector<int64_t> staticOffsets;
86  llvm::SmallVector<Value> dynamicOffsets;
87  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets);
88 
89  build(builder, state, tdesc, source, dynamicOffsets /* dynamic offsets */,
90  ValueRange({}) /* empty dynamic shape */,
91  ValueRange({}) /* empty dynamic strides */,
92  staticOffsets /* const offsets */, {} /* empty const shape*/,
93  {} /* empty const strides*/);
94 }
95 
// Builder for the integer-pointer-source form: the source carries no shape
// information, so offsets, shape, and strides must all be supplied as mixed
// static/dynamic lists.
void CreateNdDescOp::build(OpBuilder &builder, OperationState &state,
                           Type tdesc, TypedValue<IntegerType> source,
  // NOTE(review): the remaining parameter lines are missing from this
  // listing; presumably three llvm::ArrayRef<OpFoldResult> parameters
  // (offsets, shape, strides) — confirm against the declaration.
  // All three lists must be non-empty and have matching rank.
  assert(shape.size() && offsets.size() && strides.size() &&
         shape.size() == strides.size() && shape.size() == offsets.size());

  llvm::SmallVector<int64_t> staticOffsets;
  llvm::SmallVector<int64_t> staticShape;
  llvm::SmallVector<int64_t> staticStrides;
  llvm::SmallVector<Value> dynamicOffsets;
  llvm::SmallVector<Value> dynamicShape;
  llvm::SmallVector<Value> dynamicStrides;

  // Partition each mixed list into its dynamic (Value) and static
  // (constant) components.
  dispatchIndexOpFoldResults(offsets, dynamicOffsets, staticOffsets);
  dispatchIndexOpFoldResults(shape, dynamicShape, staticShape);
  dispatchIndexOpFoldResults(strides, dynamicStrides, staticStrides);

  auto staticOffsetsAttr = builder.getDenseI64ArrayAttr(staticOffsets);
  auto staticShapeAttr = builder.getDenseI64ArrayAttr(staticShape);
  auto staticStridesAttr = builder.getDenseI64ArrayAttr(staticStrides);

  build(builder, state, tdesc, source, dynamicOffsets, dynamicShape,
        dynamicStrides, staticOffsetsAttr, staticShapeAttr, staticStridesAttr);
}
122 
// Verifies a CreateNdDescOp: memory-space agreement between source and
// result, rank/element-type agreement with a memref source, result rank
// limits, and that the result is a non-scattered block descriptor.
LogicalResult CreateNdDescOp::verify() {
  // The number of mixed offsets defines the reference rank for the checks.
  auto rank = (int64_t)getMixedOffsets().size();
  bool invalidRank = false;
  bool invalidElemTy = false;

  // Memory space of created TensorDesc should match with the source.
  // Both source and TensorDesc are considered for global memory by default,
  // if the memory scope attr is not specified. If source is an integer,
  // it is considered as ptr to global memory.
  auto srcMemorySpace = getSourceMemorySpace();
  auto tdescMemorySpace = static_cast<unsigned>(getType().getMemorySpace());
  if (srcMemorySpace != tdescMemorySpace)
    return emitOpError("Memory space mismatch.")
           << " Source: " << srcMemorySpace
           << ", TensorDesc: " << tdescMemorySpace;

  // check source type matches the rank if it is a memref.
  // It also should have the same ElementType as TensorDesc.
  auto memrefTy = dyn_cast<MemRefType>(getSourceType());
  if (memrefTy) {
    invalidRank |= (memrefTy.getRank() != rank);
    invalidElemTy |= memrefTy.getElementType() != getElementType();
  }

  // Mismatches among shape, strides, and offsets are already handled by
  // OffsetSizeAndStrideOpInterface, so they are not checked here.
  if (invalidRank)
    return emitOpError(
        "Expecting the rank of shape, strides, offsets, and source (if source "
        "is a memref) should match with each other.");

  // check result TensorDesc rank
  invalidRank = (getType().getRank() > 2 || getType().getRank() > rank);

  if (invalidRank)
    return emitOpError(
        "Expecting the TensorDesc rank is up to 2 and not greater than the "
        "ranks of shape, strides, offsets or the memref source.");

  if (invalidElemTy)
    return emitOpError("TensorDesc should have the same element "
                       "type with the source if it is a memref.\n");

  if (getType().isScattered())
    return emitOpError("Expects a non-scattered TensorDesc.\n");

  // 2D block descriptors cannot live in shared local memory.
  if (getType().getRank() == 2 &&
      tdescMemorySpace == static_cast<unsigned>(MemorySpace::SLM))
    return emitOpError("SLM is not supported for 2D Block TensorDesc.\n");

  return success();
}
176 
177 //===----------------------------------------------------------------------===//
178 // XeGPU_PrefetchNdOp
179 //===----------------------------------------------------------------------===//
180 LogicalResult PrefetchNdOp::verify() {
181  auto tdescTy = getTensorDescType();
182  if (tdescTy.isScattered())
183  return emitOpError("Expects a non-scattered TensorDesc.\n");
184 
185  if (!isReadHintOrNone(getL1HintAttr()))
186  return emitOpError("invlid l1_hint: ") << getL1HintAttr();
187 
188  if (!isReadHintOrNone(getL2HintAttr()))
189  return emitOpError("invlid l2_hint: ") << getL2HintAttr();
190 
191  if (!isReadHintOrNone(getL3HintAttr()))
192  return emitOpError("invlid l3_hint: ") << getL3HintAttr();
193 
194  return success();
195 }
196 
197 //===----------------------------------------------------------------------===//
198 // XeGPU_LoadNdOp
199 //===----------------------------------------------------------------------===//
200 LogicalResult LoadNdOp::verify() {
201  auto tdescTy = getTensorDescType();
202  auto valueTy = getType();
203 
204  if (tdescTy.getRank() > 2)
205  return emitOpError("Expecting a 1D/2D TensorDesc.\n");
206 
207  if (tdescTy.isScattered())
208  return emitOpError("Expects a non-scattered TensorDesc.\n");
209 
210  if (!valueTy)
211  return emitOpError("Invalid result, it should be a VectorType.\n");
212 
213  if (!isReadHintOrNone(getL1HintAttr()))
214  return emitOpError("invlid l1_hint: ") << getL1HintAttr();
215 
216  if (!isReadHintOrNone(getL2HintAttr()))
217  return emitOpError("invlid l2_hint: ") << getL2HintAttr();
218 
219  if (!isReadHintOrNone(getL3HintAttr()))
220  return emitOpError("invlid l3_hint: ") << getL3HintAttr();
221 
222  auto array_len = tdescTy.getArrayLength();
223  auto tdescShape = getShapeOf(tdescTy);
224  auto valueShape = getShapeOf(valueTy);
225 
226  if (getTranspose()) {
227  auto trans = getTranspose().value();
228 
229  // Make sure the transpose value is valid.
230  bool valid = std::all_of(trans.begin(), trans.end(), [&](int t) {
231  return t >= 0 && t < tdescTy.getRank();
232  });
233 
234  if (valid)
235  transpose(trans, tdescShape);
236  else
237  emitWarning("Invalid transpose attr. It is ignored.");
238  }
239 
240  if (getPacked()) {
241  if (tdescTy.getRank() == 2) {
242  const int axis = 0;
243  auto vnni_factor = valueShape.back();
244  tdescShape[axis] /= vnni_factor;
245  tdescShape.push_back(vnni_factor);
246  } else {
247  emitWarning("Invalid Packed Attr. It is ignored (available for 2D "
248  "TensorDesc only).");
249  }
250  }
251 
252  if (array_len > 1) {
253  auto it = tdescShape.begin();
254  tdescShape.insert(it, array_len);
255  }
256 
257  if (tdescShape != valueShape)
258  return emitOpError() << "Result shape doesn't match TensorDesc shape."
259  << "The expected shape is " << makeString(tdescShape)
260  << ". But the given shape is "
261  << makeString(valueShape) << ".\n";
262  return success();
263 }
264 
265 //===----------------------------------------------------------------------===//
266 // XeGPU_StoreNdOp
267 //===----------------------------------------------------------------------===//
268 LogicalResult StoreNdOp::verify() {
269  auto dstTy = getTensorDescType(); // Tile
270  auto valTy = getValueType(); // Vector
271 
272  if (dstTy.getRank() > 2)
273  return emitOpError("Expecting a 1D/2D TensorDesc.\n");
274 
275  if (dstTy.isScattered())
276  return emitOpError("Expects a non-scattered TensorDesc.\n");
277 
278  if (!valTy)
279  return emitOpError("Exepcting a VectorType result.\n");
280 
281  if (!isWriteHintOrNone(getL1HintAttr()))
282  return emitOpError("invlid l1_hint: ") << getL1HintAttr();
283 
284  if (!isWriteHintOrNone(getL2HintAttr()))
285  return emitOpError("invlid l2_hint: ") << getL2HintAttr();
286 
287  if (!isWriteHintOrNone(getL3HintAttr()))
288  return emitOpError("invlid l3_hint: ") << getL3HintAttr();
289 
290  return success();
291 }
292 
293 //===----------------------------------------------------------------------===//
294 // XeGPU_UpdateNDOffsetOp
295 //===----------------------------------------------------------------------===//
296 LogicalResult UpdateNdOffsetOp::verify() {
297  auto ty = getTensorDescType();
298  if (ty.isScattered())
299  return emitOpError("Expects a non-scattered TensorDesc.\n");
300 
301  // number of offsets specified must match the rank of the tensor descriptor
302  if (ty.getRank() != (int64_t)getNumOffsets()) {
303  return emitOpError("Invalid number of offsets.");
304  }
305  return success();
306 }
307 
308 //===----------------------------------------------------------------------===//
309 // XeGPU_CreateDescOp
310 //===----------------------------------------------------------------------===//
311 
312 void CreateDescOp::build(OpBuilder &builder, OperationState &state,
313  TensorDescType TensorDesc, Value source,
315  auto loc = source.getLoc();
316  int64_t size = static_cast<int64_t>(offsets.size());
317  auto type = VectorType::get(size, builder.getIndexType());
318  auto values = getValueOrCreateConstantIndexOp(builder, loc, offsets);
319  auto offset = builder.create<vector::FromElementsOp>(loc, type, values);
320  build(builder, state, TensorDesc, source, offset);
321 }
322 
323 void CreateDescOp::build(OpBuilder &builder, OperationState &state,
324  TensorDescType TensorDesc, Value source,
325  llvm::ArrayRef<int64_t> offsets) {
326  auto ofrs = getAsIndexOpFoldResult(builder.getContext(), offsets);
327  build(builder, state, TensorDesc, source, ofrs);
328 }
329 
330 LogicalResult CreateDescOp::verify() {
331  auto tdescTy = getTensorDescType();
332 
333  if (getRankOf(getSource()) > 1)
334  return emitOpError(
335  "Expecting the source is a 1D memref or pointer (uint64_t).");
336 
337  if (!tdescTy.isScattered())
338  return emitOpError("Expects a scattered TensorDesc.\n");
339 
340  // Memory space of created TensorDesc should match with the source.
341  // Both source and TensorDesc are considered for global memory by default,
342  // if the memory scope attr is not specified. If source is an integer,
343  // it is considered as ptr to global memory.
344  auto srcMemorySpace = getSourceMemorySpace();
345  auto tdescMemorySpace = static_cast<unsigned>(tdescTy.getMemorySpace());
346  if (srcMemorySpace != tdescMemorySpace)
347  return emitOpError("Memory space mismatch.")
348  << " Source: " << srcMemorySpace
349  << ", TensorDesc: " << tdescMemorySpace;
350 
351  auto chunkSize = tdescTy.getChunkSize();
352 
353  // check chunk_size
354  llvm::SmallVector<int64_t> supportedChunkSizes = {1, 2, 3, 4, 8,
355  16, 32, 64, 128, 256};
356  if (!llvm::is_contained(supportedChunkSizes, chunkSize))
357  return emitOpError("Invalid chunk_size. Supported values are 1, 2, 3, 4, "
358  "8, 16, 32, 64, 128, or 256.");
359 
360  // check total size
361  auto elemBits = tdescTy.getElementType().getIntOrFloatBitWidth();
362  auto bitsPerLane = elemBits * chunkSize;
363  if (chunkSize > 1 && bitsPerLane % 32) {
364  // For 8-bit and 16-bit data, the hardware only supports chunk size of 1.
365  // For 32-bit data, the hardware can support larger larger chunk size. So
366  // we can bitcast 8-bit/16-bit data to 32-bit data for better performance.
367  // But this requires the total size is 32 bit aligned to make the
368  // optimization work.
369  return emitOpError(
370  "access size (chunk_size * sizeof(elemTy)) should be 32-bit aligned.");
371  }
372 
373  auto lscConstraints = 512 * 8; // each access is upto 512 bytes.
374  if (elemBits * tdescTy.getNumElements() > lscConstraints)
375  return emitOpError("total access size (simd_lanes * chunk_size * "
376  "sizeof(elemTy)) is upto 512 bytes.");
377 
378  SmallVector<int64_t> shape({(int64_t)getNumOffsets()});
379  if (chunkSize != 1)
380  shape.push_back(chunkSize);
381 
382  auto tdescShape = getShapeOf(tdescTy);
383  if (shape != tdescShape)
384  return emitOpError("Incorrect TensorDesc shape. ")
385  << "Expected is " << makeString(shape) << "\n";
386 
387  return success();
388 }
389 
390 //===----------------------------------------------------------------------===//
391 // XeGPU_PrefetchOp
392 //===----------------------------------------------------------------------===//
393 LogicalResult PrefetchOp::verify() {
394  auto tdescTy = getTensorDescType();
395  if (!tdescTy.isScattered())
396  return emitOpError("Expects a scattered TensorDesc.\n");
397 
398  if (!isReadHintOrNone(getL1HintAttr()))
399  return emitOpError("invlid l1_hint: ") << getL1HintAttr();
400 
401  if (!isReadHintOrNone(getL2HintAttr()))
402  return emitOpError("invlid l2_hint: ") << getL2HintAttr();
403 
404  if (!isReadHintOrNone(getL3HintAttr()))
405  return emitOpError("invlid l3_hint: ") << getL3HintAttr();
406 
407  return success();
408 }
409 
410 //===----------------------------------------------------------------------===//
411 // XeGPU_LoadGatherOp
412 //===----------------------------------------------------------------------===//
413 LogicalResult LoadGatherOp::verify() {
414  auto tdescTy = getTensorDescType();
415  auto maskTy = getMaskType();
416  auto valueTy = getValueType();
417 
418  if (!tdescTy.isScattered())
419  return emitOpError("Expects a scattered TensorDesc.\n");
420 
421  if (!isReadHintOrNone(getL1HintAttr()))
422  return emitOpError("invlid l1_hint: ") << getL1HintAttr();
423 
424  if (!isReadHintOrNone(getL2HintAttr()))
425  return emitOpError("invlid l2_hint: ") << getL2HintAttr();
426 
427  if (!isReadHintOrNone(getL3HintAttr()))
428  return emitOpError("invlid l3_hint: ") << getL3HintAttr();
429 
430  auto tdescElemTy = tdescTy.getElementType();
431  auto valueElemTy = getElementType();
432  if (tdescElemTy != valueElemTy)
433  return emitOpError(
434  "Value should have the same element type as TensorDesc.");
435 
436  auto maskShape = getShapeOf(maskTy);
437  auto valueShape = getShapeOf(valueTy);
438  auto tdescShape = getShapeOf(tdescTy);
439 
440  if (tdescShape[0] != maskShape[0])
441  return emitOpError("dim-0 of the Mask and TensorDesc should be the same.");
442 
443  if (tdescTy.getRank() == 2) {
444  if (!getTransposeAttr())
445  return emitOpError("load_gather has to be transposed.");
446  transpose({1, 0}, tdescShape);
447  }
448 
449  if (valueShape != tdescShape)
450  return emitOpError("Unexpected result shape")
451  << "(Expected shape: " << makeString(tdescShape)
452  << ", Given shape: " << makeString(valueShape) << ").\n";
453 
454  return success();
455 }
456 
457 //===----------------------------------------------------------------------===//
458 // XeGPU_StoreScatterOp
459 //===----------------------------------------------------------------------===//
460 LogicalResult StoreScatterOp::verify() {
461  auto tdescTy = getTensorDescType();
462  if (!tdescTy.isScattered())
463  return emitOpError("Expects a scattered TensorDesc.\n");
464 
465  if (!isWriteHintOrNone(getL1HintAttr()))
466  return emitOpError("invlid l1_hint: ") << getL1HintAttr();
467 
468  if (!isWriteHintOrNone(getL2HintAttr()))
469  return emitOpError("invlid l2_hint: ") << getL2HintAttr();
470 
471  if (!isWriteHintOrNone(getL3HintAttr()))
472  return emitOpError("invlid l3_hint: ") << getL3HintAttr();
473 
474  auto maskTy = getMaskType();
475  auto valueTy = getValueType();
476  auto maskShape = getShapeOf(maskTy);
477  auto tdescShape = getShapeOf(tdescTy);
478  auto valueShape = getShapeOf(valueTy);
479  if (tdescShape[0] != maskShape[0])
480  return emitOpError("dim-0 of the Mask and TensorDesc should be the same.");
481 
482  if (tdescTy.getRank() == 2) {
483  if (!getTransposeAttr())
484  return emitOpError("load_gather has to be transposed.");
485  transpose({1, 0}, tdescShape);
486  }
487 
488  if (valueShape != tdescShape)
489  return emitOpError("Unexpected value shape")
490  << "(Expected shape: " << makeString(tdescShape)
491  << ", Given shape: " << makeString(valueShape) << ").\n";
492 
493  return success();
494 }
495 
496 //===----------------------------------------------------------------------===//
497 // XeGPU_UpdateOffsetOp
498 //===----------------------------------------------------------------------===//
499 void UpdateOffsetOp::build(OpBuilder &builder, OperationState &state,
500  mlir::Value tensorDesc,
502  auto tdescTy = mlir::dyn_cast<TensorDescType>(tensorDesc.getType());
503  assert(tdescTy && "Expecting the source is a TensorDescType value.");
504  auto loc = tensorDesc.getLoc();
505  int64_t size = static_cast<int64_t>(offsets.size());
506  auto type = VectorType::get({size}, builder.getIndexType());
507  auto values = getValueOrCreateConstantIndexOp(builder, loc, offsets);
508  auto offset = builder.create<vector::FromElementsOp>(loc, type, values);
509  build(builder, state, tdescTy, tensorDesc, offset);
510 }
511 
512 void UpdateOffsetOp::build(OpBuilder &builder, OperationState &state,
513  Value tensorDesc, llvm::ArrayRef<int64_t> offsets) {
514  auto ofrs = getAsIndexOpFoldResult(builder.getContext(), offsets);
515  build(builder, state, tensorDesc, ofrs);
516 }
517 
518 //===----------------------------------------------------------------------===//
519 // XeGPU_DpasOp
520 //===----------------------------------------------------------------------===//
521 LogicalResult DpasOp::verify() {
522  int64_t lhsRank = getLhsType().getRank();
523  int64_t rhsRank = getRhsType().getRank();
524 
525  if (lhsRank != 2 || (rhsRank != 2 && rhsRank != 3))
526  return emitOpError("expecting lhs to be a 2D vector, and rhs to be either "
527  "2D or 3D (packed) vector.");
528 
529  auto lhsShape = getLhsType().getShape();
530  auto rhsShape = getRhsType().getShape();
531  auto bK = rhsRank == 3 ? rhsShape[0] * rhsShape[2] : rhsShape[0];
532  if (bK != lhsShape[1])
533  return emitOpError("K-dimension mismatch.");
534 
535  return success();
536 }
537 
538 } // namespace xegpu
539 } // namespace mlir
540 
541 #include <mlir/Dialect/XeGPU/IR/XeGPUEnums.cpp.inc>
542 #define GET_OP_CLASSES
543 #include <mlir/Dialect/XeGPU/IR/XeGPU.cpp.inc>
static Type getElementType(Type type, ArrayRef< int32_t > indices, function_ref< InFlightDiagnostic(StringRef)> emitErrorFn)
Walks the given type hierarchy with the given indices, potentially down to component granularity,...
Definition: SPIRVOps.cpp:215
This class helps build Operations.
Definition: Builders.h:215
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition: Types.h:74
This class provides an abstraction over the different types of ranges over Values.
Definition: ValueRange.h:381
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:96
Type getType() const
Return the type of this value.
Definition: Value.h:129
Location getLoc() const
Return the location of this value.
Definition: Value.cpp:26
static std::string makeString(T array, bool breakline=false)
Definition: XeGPUOps.cpp:30
static int64_t getRankOf(Value val)
Definition: XeGPUOps.cpp:53
static bool isWriteHintOrNone(const CachePolicyAttr &attr)
Definition: XeGPUOps.cpp:68
static bool isReadHintOrNone(const CachePolicyAttr &attr)
Definition: XeGPUOps.cpp:60
static void transpose(llvm::ArrayRef< int64_t > trans, SmallVector< int64_t > &shape)
Definition: XeGPUOps.cpp:22
static SmallVector< int64_t > getShapeOf(Type type)
Definition: XeGPUOps.cpp:44
Include the generated interface declarations.
InFlightDiagnostic emitWarning(Location loc)
Utility method to emit a warning message using this location.
OpFoldResult getAsIndexOpFoldResult(MLIRContext *ctx, int64_t val)
Convert int64_t to integer attributes of index type and return them as OpFoldResult.
Type getType(OpFoldResult ofr)
Returns the int type of the integer in ofr.
Definition: Utils.cpp:305
std::conditional_t< std::is_same_v< Ty, mlir::Type >, mlir::Value, detail::TypedValue< Ty > > TypedValue
If Ty is mlir::Type this will select Value instead of having a wrapper around it.
Definition: Value.h:498
void dispatchIndexOpFoldResults(ArrayRef< OpFoldResult > ofrs, SmallVectorImpl< Value > &dynamicVec, SmallVectorImpl< int64_t > &staticVec)
Helper function to dispatch multiple OpFoldResults according to the behavior of dispatchIndexOpFoldRe...
Value getValueOrCreateConstantIndexOp(OpBuilder &b, Location loc, OpFoldResult ofr)
Converts an OpFoldResult to a Value.
Definition: Utils.cpp:112
auto get(MLIRContext *context, Ts &&...params)
Helper method that injects context only if needed, this helps unify some of the attribute constructio...
LogicalResult verify(Operation *op, bool verifyRecursively=true)
Perform (potentially expensive) checks of invariants, used to detect compiler bugs,...
Definition: Verifier.cpp:426
This represents an operation in an abstracted form, suitable for use with the builder APIs.