//===---- XeGPUBlocking.cpp ---- XeGPU Blocking Pass ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/XeGPU/Transforms/Passes.h"

#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/Dialect/XeGPU/Transforms/Transforms.h"
#include "mlir/Dialect/XeGPU/Utils/XeGPUUtils.h"
#include "mlir/Interfaces/LoopLikeInterface.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/DialectConversion.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DebugLog.h"

namespace mlir {
namespace xegpu {
#define GEN_PASS_DEF_XEGPUBLOCKING
#include "mlir/Dialect/XeGPU/Transforms/Passes.h.inc"
} // namespace xegpu
} // namespace mlir

#define DEBUG_TYPE "xegpu-blocking"

using namespace mlir;

namespace {

// Resolve the unrealized conversion cast ops generated when doing SCF
// structural type conversion. Such a cast takes one of two forms: an N:1
// vector cast or a 1:N vector cast. vector::insert_strided_slice ops are used
// to handle the first case, and vector::extract_strided_slice ops are used to
// handle the second case.
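// Illustrative example (hypothetical IR): an N:1 cast such as
//   %r = builtin.unrealized_conversion_cast %a, %b
//          : vector<8x16xf32>, vector<8x16xf32> to vector<16x16xf32>
// is rebuilt by inserting %a and %b into a vector<16x16xf32> with
// vector.insert_strided_slice ops, while the inverse 1:N cast is rebuilt with
// vector.extract_strided_slice ops.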
static void
resolveUnrealizedConversionCastOp(UnrealizedConversionCastOp castOp) {
  ValueRange inputs = castOp.getInputs();
  ValueRange outputs = castOp.getOutputs();

  auto hasIdenticalVectorTypes = [](ValueRange values) {
    auto types = values.getTypes();
    return llvm::all_of(types, [&](Type type) {
      return isa<VectorType>(type) && type == types.front();
    });
  };

  // We are only interested in the case where all inputs and outputs have
  // identical vector types.
  if (!hasIdenticalVectorTypes(inputs) || !hasIdenticalVectorTypes(outputs)) {
    LDBG() << "skip unrealized conversion cast op not emulating pack/unpack.";
    return;
  }

  VectorType outputTy = dyn_cast<VectorType>(outputs[0].getType());
  OpBuilder builder(castOp);
  if (inputs.size() > 1 && outputs.size() == 1) {
    // The castOp is emulating an unpack op.
    ArrayRef<int64_t> shape = outputTy.getShape();
    Value result = xegpu::createVectorWithShapeFromValues(
        builder, castOp.getLoc(), inputs, shape);
    castOp->replaceAllUsesWith(ValueRange(result));
    castOp->erase();
  } else if (castOp.getNumResults() > 1 && castOp.getNumOperands() == 1) {
    // The castOp is emulating a pack op.
    ArrayRef<int64_t> tileShape = outputTy.getShape();
    SmallVector<Value> results = xegpu::extractVectorsWithShapeFromValue(
        builder, castOp.getLoc(), inputs[0], tileShape);
    castOp->replaceAllUsesWith(results);
    castOp->erase();
  }
}

// This pattern lowers ConvertLayoutOp by removing the inst_data field from its
// layout attributes. Because both the producer and consumer operations handle
// data partitioning based on their own inst_data, while keeping the original
// input and output shapes, ConvertLayoutOp does not need to manage inst_data.
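// Illustrative example (hypothetical layouts): a convert_layout op whose input
// and target layouts are of the form
//   #xegpu.layout<inst_data = [8, 16], lane_layout = [1, 16], lane_data = [1, 1]>
// is recreated with the same layouts minus the inst_data field, e.g.
//   #xegpu.layout<lane_layout = [1, 16], lane_data = [1, 1]>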
struct ConvertLayoutOpPattern
    : public OpRewritePattern<xegpu::ConvertLayoutOp> {
  using OpRewritePattern<xegpu::ConvertLayoutOp>::OpRewritePattern;
  LogicalResult matchAndRewrite(xegpu::ConvertLayoutOp op,
                                PatternRewriter &rewriter) const override {
    xegpu::DistributeLayoutAttr input_layout = op.getInputLayoutAttr();
    xegpu::DistributeLayoutAttr target_layout = op.getTargetLayoutAttr();
    if (input_layout.getInstDataAsInt().empty() ||
        target_layout.getInstDataAsInt().empty())
      return rewriter.notifyMatchFailure(op, "Not a target ConvertLayoutOp.");

    input_layout = input_layout.dropInstData();
    target_layout = target_layout.dropInstData();
    auto newOp = rewriter.createOrFold<xegpu::ConvertLayoutOp>(
        op.getLoc(), op.getType(), op.getSource(), input_layout, target_layout);
    rewriter.replaceOp(op, newOp);
    return success();
  }
};

//===------------------------------------------------------------------------===//
// The XeGPUBlockingPass leverages the unroll patterns for XeGPU and Vector ops
// to partition operations that process large shapes into multiple operations
// on smaller shapes, as specified by the inst_data in the layout attribute.
// This enables each resulting operation to be efficiently mapped to a hardware
// instruction.
//===------------------------------------------------------------------------===//
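//
// Illustrative example (hypothetical IR): with a layout whose inst_data is
// [8, 16], a single
//   %v = xegpu.load_nd %tdesc : !xegpu.tensor_desc<16x32xf16, ...> -> vector<16x32xf16>
// is rewritten into four loads producing vector<8x16xf16> values, which are
// recombined via the pack/unpack casts resolved at the end of the pass.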

class XeGPUBlockingPass final
    : public xegpu::impl::XeGPUBlockingBase<XeGPUBlockingPass> {
public:
  void runOnOperation() override;

private:
  // Get the tile shape for a given OpOperand or OpResult by examining the
  // corresponding layout attribute. If the layout is not present or is not a
  // subgroup-level layout, return std::nullopt.
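  // For example (illustrative values): an operand of type vector<32x32xf16>
  // carrying a subgroup-level layout with inst_data = [8, 16] yields [8, 16];
  // the same operand with a subgroup-level layout lacking inst_data yields the
  // full shape [32, 32].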
  template <typename T,
            typename = std::enable_if_t<std::is_same_v<T, OpOperand> ||
                                        std::is_same_v<T, OpResult>>>
  std::optional<SmallVector<int64_t>>
  getTileShape(const T &operandOrResult) const;

  // Get the tile shape for a given operation.
  std::optional<SmallVector<int64_t>> getTileShape(Operation *op) const;

  // Determine if the operation requires unrolling. Return false if all
  // operands and results have tile shapes identical to their original types.
  // Otherwise, return true.
  bool needsUnroll(Operation *op) const;
};
} // namespace

template <typename T, typename>
std::optional<SmallVector<int64_t>>
XeGPUBlockingPass::getTileShape(const T &operandOrResult) const {
  Value value;
  if constexpr (std::is_same_v<T, OpOperand>)
    value = operandOrResult.get();
  else
    value = (Value)operandOrResult;

  xegpu::DistributeLayoutAttr layout =
      xegpu::getDistributeLayoutAttr(operandOrResult);
  if (layout && layout.isForSubgroup()) {
    if (!layout.getInstDataAsInt().empty())
      return layout.getInstDataAsInt();

    if (auto type = dyn_cast<ShapedType>(value.getType()))
      return llvm::to_vector(type.getShape());
  }
  LDBG() << "failed to getTileShape for: " << value;
  return std::nullopt;
}

std::optional<SmallVector<int64_t>>
XeGPUBlockingPass::getTileShape(Operation *op) const {
  if (isa<xegpu::CreateNdDescOp, xegpu::UpdateNdOffsetOp, xegpu::CreateDescOp,
          xegpu::UpdateOffsetOp>(op))
    return getTileShape(op->getOpResult(0));
  if (isa<xegpu::PrefetchNdOp, xegpu::LoadNdOp, xegpu::PrefetchOp,
          xegpu::LoadGatherOp>(op))
    return getTileShape(op->getOpOperand(0));
  if (isa<xegpu::StoreNdOp, xegpu::StoreScatterOp>(op))
    return getTileShape(op->getOpOperand(1));

  if (isa<xegpu::DpasOp>(op)) {
    std::optional<SmallVector<int64_t>> aTile =
        getTileShape(op->getOpOperand(0));
    std::optional<SmallVector<int64_t>> bTile =
        getTileShape(op->getOpOperand(1));

    if (!aTile || aTile->size() != 2 || !bTile || bTile->size() != 2)
      return std::nullopt;

    // Semantic check for A and B.
    if ((*aTile)[1] != (*bTile)[0])
      return std::nullopt;

    // Semantic check for C.
    if (op->getNumOperands() == 3) {
      std::optional<SmallVector<int64_t>> cTile =
          getTileShape(op->getOpOperand(2));
      int64_t expectedCTile[2] = {(*aTile)[0], (*bTile)[1]};
      if (!cTile || !llvm::equal(*cTile, expectedCTile))
        return std::nullopt;
    }

    return SmallVector<int64_t>({(*aTile)[0], (*aTile)[1], (*bTile)[1]});
  }

  if (OpTrait::hasElementwiseMappableTraits(op) && op->getNumResults() == 1)
    return getTileShape(op->getOpResult(0));

  if (isa<vector::MultiDimReductionOp>(op))
    return getTileShape(op->getOpOperand(0));

  if (isa<vector::TransposeOp, vector::BroadcastOp>(op))
    return getTileShape(op->getOpResult(0));

  return std::nullopt;
}

bool XeGPUBlockingPass::needsUnroll(Operation *op) const {
  // Skip the op if any of its operands or results has a workgroup-level
  // layout.
  bool hasWgLayoutOperands =
      llvm::any_of(op->getOpOperands(), [](OpOperand &opr) {
        xegpu::DistributeLayoutAttr layout =
            xegpu::getDistributeLayoutAttr(opr);
        return layout && layout.isForWorkgroup();
      });
  bool hasWgLayoutResults =
      llvm::any_of(op->getOpResults(), [](OpResult result) {
        xegpu::DistributeLayoutAttr layout =
            xegpu::getDistributeLayoutAttr(result);
        return layout && layout.isForWorkgroup();
      });
  if (hasWgLayoutOperands || hasWgLayoutResults) {
    LDBG() << "skip unrolling for op with workgroup level layout: " << *op;
    return false;
  }

  auto isUnrollable = [](Value value, ArrayRef<int64_t> tileShape) {
    Type valTy = value.getType();
    if (auto tdescTy = dyn_cast<xegpu::TensorDescType>(valTy)) {
      xegpu::DistributeLayoutAttr layout = tdescTy.getLayoutAttr();
      return layout && !layout.getInstDataAsInt().empty();
    }
    auto shapedType = dyn_cast<ShapedType>(valTy);
    return shapedType && !llvm::equal(tileShape, shapedType.getShape());
  };

  bool hasUnrollableOperands =
      llvm::any_of(op->getOpOperands(), [&](OpOperand &opr) {
        std::optional<SmallVector<int64_t>> tileShape = getTileShape(opr);
        return tileShape.has_value() && isUnrollable(opr.get(), *tileShape);
      });
  bool hasUnrollableResults =
      llvm::any_of(op->getOpResults(), [&](OpResult result) {
        std::optional<SmallVector<int64_t>> tileShape = getTileShape(result);
        return tileShape.has_value() && isUnrollable(result, *tileShape);
      });
  return hasUnrollableOperands || hasUnrollableResults;
}

void XeGPUBlockingPass::runOnOperation() {
  MLIRContext *ctx = &getContext();
  Operation *op = getOperation();

  // Preserve the LayoutAttr for each operand in the owner's DictionaryAttr.
  // This ensures that the LayoutAttr remains accessible even if the defining
  // operation is replaced.
  xegpu::setDistributeLayoutAttrs(
      op, [](Value v) { return xegpu::getDistributeLayoutAttr(v); });

  auto getTileShapeAndCount = [](llvm::ArrayRef<int64_t> shape,
                                 xegpu::LayoutAttr layout) {
    int count = 1;
    SmallVector<int64_t> tileShape(shape);
    if (layout && layout.getInstData()) {
      DenseI32ArrayAttr instData = layout.getInstData();
      tileShape = llvm::to_vector_of<int64_t>(instData.asArrayRef());
      count = computeProduct(shape) / computeProduct(tileShape);
    }
    return std::make_pair(tileShape, count);
  };
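  // For example (illustrative values): shape = [32, 32] with inst_data =
  // [8, 16] yields tileShape = [8, 16] and count = (32 * 32) / (8 * 16) = 8.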

  // Perform type conversion for SCF control flow ops.
  TypeConverter converter;
  converter.addConversion([](Type type) -> Type { return type; });
  converter.addConversion(
      [&](RankedTensorType type,
          SmallVectorImpl<Type> &result) -> std::optional<LogicalResult> {
        Type elemTy = type.getElementType();
        ArrayRef<int64_t> shape = type.getShape();

        auto layout =
            llvm::dyn_cast_if_present<xegpu::LayoutAttr>(type.getEncoding());
        if (layout && layout.isForWorkgroup())
          return failure();

        int count;
        SmallVector<int64_t> subShape;
        std::tie(subShape, count) = getTileShapeAndCount(shape, layout);
        auto newTy = VectorType::get(subShape, elemTy);
        result.append(count, newTy);
        return success();
      });
  converter.addConversion(
      [&](xegpu::TensorDescType type,
          SmallVectorImpl<Type> &result) -> std::optional<LogicalResult> {
        Type elemTy = type.getElementType();
        ArrayRef<int64_t> shape = type.getShape();

        xegpu::LayoutAttr layout = type.getLayoutAttr();
        if (layout && layout.isForWorkgroup())
          return failure();

        int count;
        SmallVector<int64_t> subShape;
        std::tie(subShape, count) = getTileShapeAndCount(shape, layout);

        if (layout)
          layout = layout.dropInstData();

        auto newTy = xegpu::TensorDescType::get(
            type.getContext(), subShape, elemTy, type.getEncoding(), layout);
        result.append(count, newTy);
        return success();
      });

  xegpu::doSCFStructuralTypeConversionWithTensorType(op, converter);

  xegpu::UnrollOptions options;
  options.setFilterConstraint(
      [&](Operation *op) -> LogicalResult { return success(needsUnroll(op)); });

  options.setNativeShapeFn([&](Operation *op) { return getTileShape(op); });

  options.setUnrolledTypesFn([&](ShapedType type, ArrayRef<int64_t> tileShape) {
    Type elemTy = type.getElementType();
    Type newTy;

    if (auto tdescTy = dyn_cast<xegpu::TensorDescType>(type)) {

      Attribute encoding = tdescTy.getEncoding();
      // If the encoding is a ScatterTensorDescAttr, we may need to adjust the
      // chunk size based on the inst_data.
      if (tdescTy.isScattered()) {
        int64_t chunkSize = tdescTy.getChunkSizeAsInt();

        if (chunkSize > 1) {
          int64_t blockedChunkSize = chunkSize;
          auto instData = tdescTy.getLayoutAttr().getInstData();
          if (!instData.empty())
            blockedChunkSize = instData.asArrayRef().back();

          // Create a new encoding attribute with the adjusted chunk_size.
          auto newEncoding = xegpu::ScatterTensorDescAttr::get(
              ctx, tdescTy.getMemorySpace(), blockedChunkSize);

          encoding = newEncoding;
        }
      }

      newTy =
          xegpu::TensorDescType::get(ctx, tileShape, elemTy, encoding,
                                     tdescTy.getLayoutAttr().dropInstData());
    } else {
      newTy = type.clone(tileShape, elemTy);
    }

    std::optional<SmallVector<int64_t>> ratio =
        computeShapeRatio(type.getShape(), tileShape);
    assert(ratio && "The shape of the type must be a multiple of tileShape.");
    return SmallVector<Type>(computeProduct(*ratio), newTy);
  });
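  // For example (illustrative values): unrolling a vector<32x32xf16> with
  // tileShape [8, 16] yields computeProduct([4, 2]) = 8 copies of
  // vector<8x16xf16>; for a scattered tensor_desc with chunk_size = 4 whose
  // inst_data ends in 2, the rebuilt encoding uses chunk_size = 2.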

  RewritePatternSet patterns(ctx);
  patterns.add<ConvertLayoutOpPattern>(ctx);

  vector::UnrollVectorOptions vectorOptions;
  vectorOptions.setNativeShapeFn(options.nativeShape);

  xegpu::populateXeGPUUnrollPatterns(patterns, options);
  vector::populateVectorUnrollPatterns(patterns, vectorOptions);

  (void)applyPatternsGreedily(op, std::move(patterns));

  op->walk([](Operation *op) {
    // Remove the layout attributes cached per operand.
    for (OpOperand &opr : op->getOpOperands()) {
      std::string name = xegpu::getLayoutName(opr);
      if (op->hasAttrOfType<xegpu::LayoutAttr>(name))
        op->removeAttr(name);
    }

    // Update the layout attributes per result.
    for (OpResult result : op->getOpResults()) {
      std::string name = xegpu::getLayoutName(result);
      if (auto layout = op->getAttrOfType<xegpu::LayoutAttr>(name)) {
        op->removeAttr(name);
        if (!isa<LoopLikeOpInterface>(op))
          xegpu::setDistributeLayoutAttr(result, layout.dropInstData());
      }
    }

    // Resolve unrealized conversion cast ops emulating pack/unpack.
    if (auto castOp = dyn_cast<UnrealizedConversionCastOp>(op))
      resolveUnrealizedConversionCastOp(castOp);
  });
}
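
// A minimal usage sketch (assumptions: the pass argument and factory function
// below follow the conventional names generated from Passes.td):
//   mlir-opt --xegpu-blocking input.mlir
// or, from C++:
//   pm.addPass(xegpu::createXeGPUBlockingPass());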