//===---- XeGPUBlocking.cpp ---- XeGPU Blocking Pass ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/XeGPU/Transforms/Passes.h"

#include "mlir/Dialect/Vector/Transforms/VectorTransforms.h"
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/Dialect/XeGPU/Transforms/Transforms.h"
#include "mlir/Dialect/XeGPU/Utils/XeGPUUtils.h"
#include "mlir/Interfaces/LoopLikeInterface.h"
#include "mlir/Pass/PassManager.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DebugLog.h"

namespace mlir {
namespace xegpu {
#define GEN_PASS_DEF_XEGPUBLOCKING
#include "mlir/Dialect/XeGPU/Transforms/Passes.h.inc"
} // namespace xegpu
} // namespace mlir

#define DEBUG_TYPE "xegpu-blocking"

using namespace mlir;

namespace {

// Resolve the unrealized_conversion_cast ops generated by the SCF structural
// type conversion. Such casts come in two forms: N:1 vector casts and 1:N
// vector casts. vector::insert_strided_slice ops are used to resolve the
// first case, and vector::extract_strided_slice ops the second.
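// Illustrative example (shapes chosen for exposition, not taken from the pass):
//   N:1 (unpack): %r = builtin.unrealized_conversion_cast %a, %b
//                   : vector<8x16xf16>, vector<8x16xf16> to vector<8x32xf16>
//     becomes vector.insert_strided_slice ops assembling the vector<8x32xf16>.
//   1:N (pack):   %a, %b = builtin.unrealized_conversion_cast %v
//                   : vector<8x32xf16> to vector<8x16xf16>, vector<8x16xf16>
//     becomes vector.extract_strided_slice ops slicing the vector<8x32xf16>.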
static void
resolveUnrealizedConversionCastOp(UnrealizedConversionCastOp castOp) {
  ValueRange inputs = castOp.getInputs();
  ValueRange outputs = castOp.getOutputs();

  auto hasIdenticalVectorTypes = [](ValueRange values) {
    auto types = values.getTypes();
    return llvm::all_of(types, [&](Type type) {
      return isa<VectorType>(type) && type == types.front();
    });
  };

  // We are only interested in the case where all inputs and outputs have
  // identical VectorTypes.
  if (!hasIdenticalVectorTypes(inputs) || !hasIdenticalVectorTypes(outputs)) {
    LDBG() << "skip unrealized conversion cast op not emulating pack/unpack.";
    return;
  }

  VectorType outputTy = dyn_cast<VectorType>(outputs[0].getType());
  OpBuilder builder(castOp);
  if (inputs.size() > 1 && outputs.size() == 1) {
    // The castOp is emulating an unpack op.
    ArrayRef<int64_t> shape = outputTy.getShape();
    Value result = xegpu::createVectorWithShapeFromValues(
        builder, castOp.getLoc(), inputs, shape);
    castOp->replaceAllUsesWith(ValueRange(result));
    castOp->erase();
  } else if (castOp.getNumResults() > 1 && castOp.getNumOperands() == 1) {
    // The castOp is emulating a pack op.
    ArrayRef<int64_t> tileShape = outputTy.getShape();
    SmallVector<Value> results = xegpu::extractVectorsWithShapeFromValue(
        builder, castOp.getLoc(), inputs[0], tileShape);
    castOp->replaceAllUsesWith(results);
    castOp->erase();
  }
}

// This pattern lowers ConvertLayoutOp by removing the inst_data field from its
// layout attributes. Because both the producer and the consumer operations
// partition data according to their own inst_data while preserving the
// original input and output shapes, ConvertLayoutOp itself does not need to
// manage inst_data.
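// Illustrative example (layout values chosen for exposition): an input layout
// such as
//   #xegpu.layout<inst_data = [8, 16], lane_layout = [1, 16], lane_data = [1, 1]>
// is rewritten to
//   #xegpu.layout<lane_layout = [1, 16], lane_data = [1, 1]>
// and likewise for the target layout; the op's value shapes are unchanged.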
struct ConvertLayoutOpPattern
    : public OpRewritePattern<xegpu::ConvertLayoutOp> {
  using OpRewritePattern::OpRewritePattern;
  LogicalResult matchAndRewrite(xegpu::ConvertLayoutOp op,
                                PatternRewriter &rewriter) const override {
    xegpu::DistributeLayoutAttr inputLayout = op.getInputLayoutAttr();
    xegpu::DistributeLayoutAttr targetLayout = op.getTargetLayoutAttr();
    if (inputLayout.getEffectiveInstDataAsInt().empty() ||
        targetLayout.getEffectiveInstDataAsInt().empty())
      return rewriter.notifyMatchFailure(op, "Not a target ConvertLayoutOp.");

    inputLayout = inputLayout.dropInstData();
    targetLayout = targetLayout.dropInstData();
    auto newOp = rewriter.createOrFold<xegpu::ConvertLayoutOp>(
        op.getLoc(), op.getType(), op.getSource(), inputLayout, targetLayout);
    rewriter.replaceOp(op, newOp);
    return success();
  }
};

//===------------------------------------------------------------------------===//
// The XeGPUBlockingPass leverages the unroll patterns for XeGPU and Vector ops
// to partition operations that process large shapes into multiple operations on
// smaller shapes, as specified by the inst_data in the layout attribute. This
// enables each resulting operation to be efficiently mapped to a hardware
// instruction.
//===------------------------------------------------------------------------===//
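// Illustrative example (shapes and layout values chosen for exposition, not
// taken from a specific test): a xegpu.load_nd producing vector<32x32xf16>
// whose layout carries inst_data = [8, 16] is unrolled into eight loads, each
// producing vector<8x16xf16>; the pieces are then reassembled so consumers
// still observe the original 32x32 value shape.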

class XeGPUBlockingPass final
    : public xegpu::impl::XeGPUBlockingBase<XeGPUBlockingPass> {
public:
  void runOnOperation() override;

private:
  // Get the tile shape for a given OpOperand or OpResult by examining the
  // corresponding layout attribute. If the layout is not present or is not a
  // subgroup level layout, it returns std::nullopt.
  template <typename T,
            typename = std::enable_if_t<std::is_same_v<T, OpOperand> ||
                                        std::is_same_v<T, OpResult>>>
  std::optional<SmallVector<int64_t>>
  getTileShape(const T &operandOrResult) const;

  // Get the tile shape for a given operation.
  std::optional<SmallVector<int64_t>> getTileShape(Operation *op) const;

  // Determine if the operation requires unrolling. Return false if all
  // operands and results have tile shapes identical to their original types.
  // Otherwise, return true.
  bool needsUnroll(Operation *op) const;
};
} // namespace

template <typename T, typename>
std::optional<SmallVector<int64_t>>
XeGPUBlockingPass::getTileShape(const T &operandOrResult) const {
  Value value;
  if constexpr (std::is_same_v<T, OpOperand>)
    value = operandOrResult.get();
  else
    value = (Value)operandOrResult;

  xegpu::DistributeLayoutAttr layout =
      xegpu::getDistributeLayoutAttr(operandOrResult);
  if (layout && layout.isForSubgroup()) {
    if (!layout.getEffectiveInstDataAsInt().empty())
      return layout.getEffectiveInstDataAsInt();

    if (auto type = dyn_cast<ShapedType>(value.getType()))
      return llvm::to_vector(type.getShape());
  }
  LDBG() << "failed to getTileShape for: " << value;
  return std::nullopt;
}

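// Worked example for xegpu.dpas (illustrative values): if operand A is tiled
// as [8, 16] and operand B as [16, 16], the checks in the DpasOp branch below
// require aTile[1] == bTile[0] (and, when an accumulator C is present,
// cTile == {aTile[0], bTile[1]}), and the op-level tile shape returned is
// {8, 16, 16}, i.e. {M, K, N}.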
std::optional<SmallVector<int64_t>>
XeGPUBlockingPass::getTileShape(Operation *op) const {
  if (isa<xegpu::CreateNdDescOp, xegpu::UpdateNdOffsetOp, xegpu::CreateDescOp,
          xegpu::UpdateOffsetOp, xegpu::LoadMatrixOp>(op))
    return getTileShape(op->getOpResult(0));
  if (isa<xegpu::PrefetchNdOp, xegpu::LoadNdOp, xegpu::PrefetchOp,
          xegpu::LoadGatherOp, xegpu::StoreMatrixOp>(op))
    return getTileShape(op->getOpOperand(0));
  if (isa<xegpu::StoreNdOp, xegpu::StoreScatterOp>(op))
    return getTileShape(op->getOpOperand(1));

  if (isa<xegpu::DpasOp>(op)) {
    std::optional<SmallVector<int64_t>> aTile =
        getTileShape(op->getOpOperand(0));
    std::optional<SmallVector<int64_t>> bTile =
        getTileShape(op->getOpOperand(1));

    if (!aTile || aTile->size() != 2 || !bTile || bTile->size() != 2)
      return std::nullopt;

    // Semantic check for A and B.
    if ((*aTile)[1] != (*bTile)[0])
      return std::nullopt;

    // Semantic check for C.
    if (op->getNumOperands() == 3) {
      std::optional<SmallVector<int64_t>> cTile =
          getTileShape(op->getOpOperand(2));
      int64_t expectedCTile[2] = {(*aTile)[0], (*bTile)[1]};
      if (!cTile || !llvm::equal(*cTile, expectedCTile))
        return std::nullopt;
    }

    return SmallVector<int64_t>({(*aTile)[0], (*aTile)[1], (*bTile)[1]});
  }

  if (OpTrait::hasElementwiseMappableTraits(op) && op->getNumResults() == 1)
    return getTileShape(op->getOpResult(0));

  if (isa<vector::MultiDimReductionOp>(op))
    return getTileShape(op->getOpOperand(0));

  if (isa<vector::TransposeOp, vector::BroadcastOp>(op))
    return getTileShape(op->getOpResult(0));

  return std::nullopt;
}

bool XeGPUBlockingPass::needsUnroll(Operation *op) const {
  // Skip the op if any of its operands or results has workgroup level layouts.
  bool hasWgLayoutOperands =
      llvm::any_of(op->getOpOperands(), [](OpOperand &opr) {
        xegpu::DistributeLayoutAttr layout =
            xegpu::getDistributeLayoutAttr(opr);
        return layout && layout.isForWorkgroup();
      });
  bool hasWgLayoutResults =
      llvm::any_of(op->getOpResults(), [](OpResult result) {
        xegpu::DistributeLayoutAttr layout =
            xegpu::getDistributeLayoutAttr(result);
        return layout && layout.isForWorkgroup();
      });
  if (hasWgLayoutOperands || hasWgLayoutResults) {
    LDBG() << "skip unrolling for op with workgroup level layout: " << *op;
    return false;
  }

  auto isUnrollable = [](Value value, ArrayRef<int64_t> tileShape) {
    Type valTy = value.getType();
    if (auto tdescTy = dyn_cast<xegpu::TensorDescType>(valTy)) {
      xegpu::DistributeLayoutAttr layout = tdescTy.getLayoutAttr();
      return layout && !layout.getEffectiveInstDataAsInt().empty();
    }
    auto shapedType = dyn_cast<ShapedType>(valTy);
    return shapedType && !llvm::equal(tileShape, shapedType.getShape());
  };

  bool hasUnrollableOperands =
      llvm::any_of(op->getOpOperands(), [&](OpOperand &opr) {
        std::optional<SmallVector<int64_t>> tileShape = getTileShape(opr);
        return tileShape.has_value() && isUnrollable(opr.get(), *tileShape);
      });
  bool hasUnrollableResults =
      llvm::any_of(op->getOpResults(), [&](OpResult result) {
        std::optional<SmallVector<int64_t>> tileShape = getTileShape(result);
        return tileShape.has_value() && isUnrollable(result, *tileShape);
      });
  return hasUnrollableOperands || hasUnrollableResults;
}

void XeGPUBlockingPass::runOnOperation() {
  MLIRContext *ctx = &getContext();
  Operation *op = getOperation();

  // Preserve the LayoutAttr for each operand to the owner's DictionaryAttr.
  // This ensures that the LayoutAttr remains accessible even if the defining
  // operation is replaced.
  xegpu::setDistributeLayoutAttrs(
      op, [](Value v) { return xegpu::getDistributeLayoutAttr(v); });

  auto getTileShapeAndCount = [](llvm::ArrayRef<int64_t> shape,
                                 xegpu::LayoutAttr layout) {
    int count = 1;
    SmallVector<int64_t> tileShape(shape);
    if (layout && layout.getInstData()) {
      DenseI32ArrayAttr instData = layout.getInstData();
      tileShape = llvm::to_vector_of<int64_t>(instData.asArrayRef());
      count = computeProduct(shape) / computeProduct(tileShape);
    }
    return std::make_pair(tileShape, count);
  };
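
  // Worked example for getTileShapeAndCount (illustrative numbers):
  // shape = [32, 32] with inst_data = [8, 16] yields tileShape = [8, 16]
  // and count = (32 * 32) / (8 * 16) = 8.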

  // Perform type conversion for SCF control flow ops.
  TypeConverter converter;
  converter.addConversion([](Type type) -> Type { return type; });
  converter.addConversion(
      [&](RankedTensorType type,
          SmallVectorImpl<Type> &result) -> std::optional<LogicalResult> {
        Type elemTy = type.getElementType();
        ArrayRef<int64_t> shape = type.getShape();

        auto layout =
            llvm::dyn_cast_if_present<xegpu::LayoutAttr>(type.getEncoding());
        if (layout && layout.isForWorkgroup())
          return failure();

        int count;
        SmallVector<int64_t> subShape;
        std::tie(subShape, count) = getTileShapeAndCount(shape, layout);
        auto newTy = VectorType::get(subShape, elemTy);
        result.append(count, newTy);
        return success();
      });
  converter.addConversion(
      [&](xegpu::TensorDescType type,
          SmallVectorImpl<Type> &result) -> std::optional<LogicalResult> {
        Type elemTy = type.getElementType();
        ArrayRef<int64_t> shape = type.getShape();

        xegpu::LayoutAttr layout = type.getLayoutAttr();
        if (layout && layout.isForWorkgroup())
          return failure();

        int count;
        SmallVector<int64_t> subShape;
        std::tie(subShape, count) = getTileShapeAndCount(shape, layout);

        if (layout)
          layout = layout.dropInstData();

        auto newTy = xegpu::TensorDescType::get(
            type.getContext(), subShape, elemTy, type.getEncoding(), layout);
        result.append(count, newTy);
        return success();
      });
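
  // Illustrative example for the conversions above (shapes chosen for
  // exposition, not taken from a specific test): a value of type
  // tensor<32x32xf16> whose encoding is a LayoutAttr with inst_data = [8, 16]
  // is converted into eight vector<8x16xf16> values.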

  xegpu::doSCFStructuralTypeConversionWithTensorType(op, converter);

  xegpu::UnrollOptions options;
  options.setFilterConstraint(
      [&](Operation *op) -> LogicalResult { return success(needsUnroll(op)); });

  options.setNativeShapeFn([&](Operation *op) { return getTileShape(op); });

  options.setUnrolledTypesFn([&](ShapedType type, ArrayRef<int64_t> tileShape) {
    Type elemTy = type.getElementType();
    Type newTy;

    if (auto tdescTy = dyn_cast<xegpu::TensorDescType>(type)) {

      Attribute encoding = tdescTy.getEncoding();
      // If the encoding is a ScatterTensorDescAttr, we need to
      // potentially adjust the chunk size based on the inst_data.
      if (tdescTy.isScattered()) {
        int64_t chunkSize = tdescTy.getChunkSizeAsInt();

        if (chunkSize > 1) {
          int64_t blockedChunkSize = chunkSize;
          auto instData = tdescTy.getLayoutAttr().getInstData();
          if (!instData.empty())
            blockedChunkSize = instData.asArrayRef().back();

          // Create a new attribute with the adjusted chunk_size.
          auto newEncoding = xegpu::ScatterTensorDescAttr::get(
              ctx, tdescTy.getMemorySpace(), blockedChunkSize);

          encoding = newEncoding;
        }
      }

      newTy =
          xegpu::TensorDescType::get(ctx, tileShape, elemTy, encoding,
                                     tdescTy.getLayoutAttr().dropInstData());
    } else {
      newTy = type.clone(tileShape, elemTy);
    }

    std::optional<SmallVector<int64_t>> ratio =
        computeShapeRatio(type.getShape(), tileShape);
    assert(ratio && "The shape of the type must be a multiple of tileShape.");
    return SmallVector<Type>(computeProduct(*ratio), newTy);
  });
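
  // Worked example (illustrative): unrolling a vector<32x32xf16> with
  // tileShape = [8, 16] gives computeShapeRatio = [4, 2], so the callback
  // above returns computeProduct([4, 2]) = 8 copies of vector<8x16xf16>.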

  RewritePatternSet patterns(ctx);
  patterns.add<ConvertLayoutOpPattern>(ctx);

  vector::UnrollVectorOptions vectorOptions;
  vectorOptions.setNativeShapeFn(options.nativeShape);

  xegpu::populateXeGPUUnrollPatterns(patterns, options);
  vector::populateVectorUnrollPatterns(patterns, vectorOptions);

  (void)applyPatternsGreedily(op, std::move(patterns));

  op->walk([](Operation *op) {
    // Remove the layout attributes cached per operand.
    for (OpOperand &opr : op->getOpOperands()) {
      std::string name = xegpu::getLayoutName(opr);
      if (op->hasAttrOfType<xegpu::LayoutAttr>(name))
        op->removeAttr(name);
    }

    // Update the layout attributes per result.
    for (OpResult result : op->getOpResults()) {
      std::string name = xegpu::getLayoutName(result);
      if (auto layout = op->getAttrOfType<xegpu::LayoutAttr>(name)) {
        op->removeAttr(name);
        if (!isa<LoopLikeOpInterface>(op))
          xegpu::setDistributeLayoutAttr(result, layout.dropInstData());
      }
    }

    // Resolve unrealized conversion cast ops emulating pack/unpack.
    if (auto castOp = dyn_cast<UnrealizedConversionCastOp>(op))
      resolveUnrealizedConversionCastOp(castOp);
  });
}