//===---- XeGPUBlocking.cpp ---- XeGPU Blocking Pass ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/XeGPU/Transforms/Passes.h"

#include "mlir/Dialect/Vector/Transforms/VectorRewritePatterns.h"
#include "mlir/Dialect/XeGPU/IR/XeGPU.h"
#include "mlir/Dialect/XeGPU/Transforms/Transforms.h"
#include "mlir/Dialect/XeGPU/Utils/XeGPUUtils.h"
#include "mlir/Interfaces/LoopLikeInterface.h"
#include "mlir/Transforms/GreedyPatternRewriteDriver.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Support/DebugLog.h"

namespace mlir {
namespace xegpu {
#define GEN_PASS_DEF_XEGPUBLOCKING
#include "mlir/Dialect/XeGPU/Transforms/Passes.h.inc"
} // namespace xegpu
} // namespace mlir

#define DEBUG_TYPE "xegpu-blocking"

using namespace mlir;

namespace {

// Resolve the unrealized conversion cast ops generated when doing SCF
// structural type conversion. They come in two forms: N:1 vector casts
// and 1:N vector casts. vector::insert_strided_slice ops are used to
// reassemble the vectors in the first case, and
// vector::extract_strided_slice ops to split them in the second case.
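// For illustration (schematic IR, not taken from an actual test), an N:1
// cast such as
//   %r = builtin.unrealized_conversion_cast %a, %b
//        : vector<8x16xf16>, vector<8x16xf16> to vector<16x16xf16>
// is rewritten into vector.insert_strided_slice ops that assemble %a and
// %b into a single vector<16x16xf16>; the 1:N direction is rewritten into
// the corresponding vector.extract_strided_slice ops.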
static void
resolveUnrealizedConversionCastOp(UnrealizedConversionCastOp castOp) {
  ValueRange inputs = castOp.getInputs();
  ValueRange outputs = castOp.getOutputs();

  auto hasIdenticalVectorTypes = [](ValueRange values) {
    auto types = values.getTypes();
    return llvm::all_of(types, [&](Type type) {
      return isa<VectorType>(type) && type == types.front();
    });
  };

  // We are only interested in the case where all inputs and outputs have
  // identical VectorTypes.
  if (!hasIdenticalVectorTypes(inputs) || !hasIdenticalVectorTypes(outputs)) {
    LDBG() << "skip unrealized conversion cast op not emulating pack/unpack.";
    return;
  }

  VectorType outputTy = dyn_cast<VectorType>(outputs[0].getType());
  OpBuilder builder(castOp);
  if (inputs.size() > 1 && outputs.size() == 1) {
    // The castOp is emulating an unpack op.
    ArrayRef<int64_t> shape = outputTy.getShape();
    Value result = xegpu::createVectorWithShapeFromValues(
        builder, castOp.getLoc(), inputs, shape);
    castOp->replaceAllUsesWith(ValueRange(result));
    castOp->erase();
  } else if (castOp.getNumResults() > 1 && castOp.getNumOperands() == 1) {
    // The castOp is emulating a pack op.
    ArrayRef<int64_t> tileShape = outputTy.getShape();
    SmallVector<Value> results = xegpu::extractVectorsWithShapeFromValue(
        builder, castOp.getLoc(), inputs[0], tileShape);
    castOp->replaceAllUsesWith(results);
    castOp->erase();
  }
}

// This pattern lowers ConvertLayoutOp by removing the inst_data field from
// its layout attributes. Because both the producer and the consumer
// partition data according to their own inst_data while preserving the
// original input and output shapes, ConvertLayoutOp itself does not need
// to manage inst_data.
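// E.g. (schematic; layout parameters chosen for illustration): input and
// target layouts such as
//   #xegpu.layout<inst_data = [8, 16], lane_layout = [1, 16], lane_data = [1, 1]>
// are rewritten to the same layouts with the inst_data field dropped.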
struct ConvertLayoutOpPattern
    : public OpRewritePattern<xegpu::ConvertLayoutOp> {
  using OpRewritePattern<xegpu::ConvertLayoutOp>::OpRewritePattern;
  LogicalResult matchAndRewrite(xegpu::ConvertLayoutOp op,
                                PatternRewriter &rewriter) const override {
    xegpu::DistributeLayoutAttr inputLayout = op.getInputLayoutAttr();
    xegpu::DistributeLayoutAttr targetLayout = op.getTargetLayoutAttr();
    if (inputLayout.getEffectiveInstDataAsInt().empty() ||
        targetLayout.getEffectiveInstDataAsInt().empty())
      return rewriter.notifyMatchFailure(op, "Not a target ConvertLayoutOp.");

    inputLayout = inputLayout.dropInstData();
    targetLayout = targetLayout.dropInstData();
    auto newOp = rewriter.createOrFold<xegpu::ConvertLayoutOp>(
        op.getLoc(), op.getType(), op.getSource(), inputLayout, targetLayout);
    rewriter.replaceOp(op, newOp);
    return success();
  }
};

//===------------------------------------------------------------------------===//
// The XeGPUBlockingPass leverages the unroll patterns for XeGPU and Vector ops
// to partition operations that process large shapes into multiple operations
// on smaller shapes, as specified by the inst_data in the layout attribute.
// This enables each resulting operation to be efficiently mapped to a hardware
// instruction.
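//
// For example (schematic IR, shapes chosen for illustration): with a layout
// carrying inst_data = [8, 16], a load such as
//   %0 = xegpu.load_nd %tdesc : !xegpu.tensor_desc<16x32xf16, #layout>
//        -> vector<16x32xf16>
// is unrolled into four 8x16 loads, each mappable to a single hardware
// instruction, whose results are reassembled with
// vector.insert_strided_slice ops.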
//===------------------------------------------------------------------------===//

class XeGPUBlockingPass final
    : public xegpu::impl::XeGPUBlockingBase<XeGPUBlockingPass> {
public:
  void runOnOperation() override;

private:
  // Get the tile shape for a given OpOperand or OpResult by examining the
  // corresponding layout attribute. If the layout is not present or is not a
  // subgroup-level layout, it returns std::nullopt.
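  // E.g. (illustrative): a value of type vector<32x32xf16> whose subgroup
  // layout carries inst_data = [8, 16] yields [8, 16]; with a subgroup
  // layout lacking inst_data, the full shape [32, 32] is returned.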
  template <typename T,
            typename = std::enable_if_t<std::is_same_v<T, OpOperand> ||
                                        std::is_same_v<T, OpResult>>>
  std::optional<SmallVector<int64_t>>
  getTileShape(const T &operandOrResult) const;

  // Get the tile shape for a given operation.
  std::optional<SmallVector<int64_t>> getTileShape(Operation *op) const;

  // Determine if the operation requires unrolling. Return false if all
  // operands and results have tile shapes identical to their original types;
  // otherwise, return true.
  bool needsUnroll(Operation *op) const;
};
} // namespace

template <typename T, typename>
std::optional<SmallVector<int64_t>>
XeGPUBlockingPass::getTileShape(const T &operandOrResult) const {
  Value value;
  if constexpr (std::is_same_v<T, OpOperand>) {
    value = operandOrResult.get();
  } else {
    value = (Value)operandOrResult;
  }

  xegpu::DistributeLayoutAttr layout =
      xegpu::getDistributeLayoutAttr(operandOrResult);
  if (layout && layout.isForSubgroup()) {
    if (!layout.getEffectiveInstDataAsInt().empty()) {
      SmallVector<int64_t> instData = layout.getEffectiveInstDataAsInt();
      return instData;
    }
    if (auto type = dyn_cast<ShapedType>(value.getType()))
      return llvm::to_vector(type.getShape());
  }
  LDBG() << "failed to getTileShape for: " << value;
  return std::nullopt;
}

std::optional<SmallVector<int64_t>>
XeGPUBlockingPass::getTileShape(Operation *op) const {
  if (isa<xegpu::CreateNdDescOp, xegpu::UpdateNdOffsetOp, xegpu::CreateDescOp,
          xegpu::UpdateOffsetOp, xegpu::LoadMatrixOp>(op))
    return getTileShape(op->getOpResult(0));
  if (isa<xegpu::PrefetchNdOp, xegpu::LoadNdOp, xegpu::PrefetchOp,
          xegpu::StoreMatrixOp>(op))
    return getTileShape(op->getOpOperand(0));
  if (isa<xegpu::StoreNdOp>(op))
    return getTileShape(op->getOpOperand(1));

  // Handle LoadGatherOp and StoreScatterOp (with and without offset).
  if (auto loadGatherOp = dyn_cast<xegpu::LoadGatherOp>(op)) {
    if (loadGatherOp.getOffsets())
      return getTileShape(loadGatherOp->getOpResult(0));
    else
      return getTileShape(loadGatherOp->getOpOperand(0));
  }

  if (auto storeScatterOp = dyn_cast<xegpu::StoreScatterOp>(op))
    return getTileShape(storeScatterOp.getOffsets()
                            ? storeScatterOp->getOpOperand(0)
                            : storeScatterOp->getOpOperand(1));

  if (isa<xegpu::DpasOp>(op)) {
    std::optional<SmallVector<int64_t>> aTile =
        getTileShape(op->getOpOperand(0));
    std::optional<SmallVector<int64_t>> bTile =
        getTileShape(op->getOpOperand(1));

    if (!aTile || aTile->size() != 2 || !bTile || bTile->size() != 2)
      return std::nullopt;

    // Semantic check for A and B: their K dimensions must match.
    if ((*aTile)[1] != (*bTile)[0])
      return std::nullopt;

    // Semantic check for C: its tile must be M x N.
    if (op->getNumOperands() == 3) {
      std::optional<SmallVector<int64_t>> cTile =
          getTileShape(op->getOpOperand(2));
      int64_t expectedCTile[2] = {(*aTile)[0], (*bTile)[1]};
      if (!cTile || !llvm::equal(*cTile, expectedCTile))
        return std::nullopt;
    }

    return SmallVector<int64_t>({(*aTile)[0], (*aTile)[1], (*bTile)[1]});
  }
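  // For instance (illustrative numbers): aTile = [8, 16] and bTile = [16, 16]
  // satisfy the checks above and produce the tile shape [8, 16, 16], i.e.
  // {M, K, N} of a single DPAS instruction.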

  if (OpTrait::hasElementwiseMappableTraits(op) && op->getNumResults() == 1)
    return getTileShape(op->getOpResult(0));

  if (isa<vector::MultiDimReductionOp>(op))
    return getTileShape(op->getOpOperand(0));

  if (isa<vector::TransposeOp, vector::BroadcastOp, vector::StepOp,
          vector::ConstantMaskOp, vector::CreateMaskOp>(op))
    return getTileShape(op->getOpResult(0));

  return std::nullopt;
}

bool XeGPUBlockingPass::needsUnroll(Operation *op) const {
  // Skip the op if any of its operands or results has a workgroup-level
  // layout.
  bool hasWgLayoutOperands =
      llvm::any_of(op->getOpOperands(), [](OpOperand &opr) {
        xegpu::DistributeLayoutAttr layout =
            xegpu::getDistributeLayoutAttr(opr);
        return layout && layout.isForWorkgroup();
      });
  bool hasWgLayoutResults =
      llvm::any_of(op->getOpResults(), [](OpResult result) {
        xegpu::DistributeLayoutAttr layout =
            xegpu::getDistributeLayoutAttr(result);
        return layout && layout.isForWorkgroup();
      });
  if (hasWgLayoutOperands || hasWgLayoutResults) {
    LDBG() << "skip unrolling for op with workgroup level layout: " << *op;
    return false;
  }

  auto isUnrollable = [](Value value, ArrayRef<int64_t> tileShape) {
    Type valTy = value.getType();
    if (auto tdescTy = dyn_cast<xegpu::TensorDescType>(valTy)) {
      xegpu::DistributeLayoutAttr layout = tdescTy.getLayoutAttr();
      return layout && !layout.getEffectiveInstDataAsInt().empty();
    }
    auto shapedType = dyn_cast<ShapedType>(valTy);
    return shapedType && !llvm::equal(tileShape, shapedType.getShape());
  };

  bool hasUnrollableOperands =
      llvm::any_of(op->getOpOperands(), [&](OpOperand &opr) {
        std::optional<SmallVector<int64_t>> tileShape = getTileShape(opr);
        return tileShape.has_value() && isUnrollable(opr.get(), *tileShape);
      });
  bool hasUnrollableResults =
      llvm::any_of(op->getOpResults(), [&](OpResult result) {
        std::optional<SmallVector<int64_t>> tileShape = getTileShape(result);
        return tileShape.has_value() && isUnrollable(result, *tileShape);
      });
  return hasUnrollableOperands || hasUnrollableResults;
}

void XeGPUBlockingPass::runOnOperation() {
  MLIRContext *ctx = &getContext();
  Operation *op = getOperation();

  if (!xegpu::recoverTemporaryLayouts(op)) {
    signalPassFailure();
    return;
  }

  auto getTileShapeAndCount = [](llvm::ArrayRef<int64_t> shape,
                                 xegpu::LayoutAttr layout) {
    int count = 1;
    SmallVector<int64_t> tileShape(shape);
    if (layout && layout.getInstData()) {
      DenseI32ArrayAttr instData = layout.getInstData();
      tileShape = llvm::to_vector_of<int64_t>(instData.asArrayRef());
      count = computeProduct(shape) / computeProduct(tileShape);
    }
    return std::make_pair(tileShape, count);
  };
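  // E.g. (illustrative numbers): shape = [32, 32] with inst_data = [8, 16]
  // gives tileShape = [8, 16] and count = (32 * 32) / (8 * 16) = 8.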

  // Perform type conversion for SCF control flow ops.
  TypeConverter converter;
  converter.addConversion([](Type type) -> Type { return type; });
  converter.addConversion(
      [&](RankedTensorType type,
          SmallVectorImpl<Type> &result) -> std::optional<LogicalResult> {
        Type elemTy = type.getElementType();
        ArrayRef<int64_t> shape = type.getShape();

        auto layout =
            llvm::dyn_cast_if_present<xegpu::LayoutAttr>(type.getEncoding());
        if (layout && layout.isForWorkgroup())
          return failure();

        int count;
        SmallVector<int64_t> subShape;
        std::tie(subShape, count) = getTileShapeAndCount(shape, layout);
        auto newTy = VectorType::get(subShape, elemTy);
        result.append(count, newTy);
        return success();
      });
  converter.addConversion(
      [&](xegpu::TensorDescType type,
          SmallVectorImpl<Type> &result) -> std::optional<LogicalResult> {
        Type elemTy = type.getElementType();
        ArrayRef<int64_t> shape = type.getShape();

        xegpu::LayoutAttr layout = type.getLayoutAttr();
        if (layout && layout.isForWorkgroup())
          return failure();

        int count;
        SmallVector<int64_t> subShape;
        std::tie(subShape, count) = getTileShapeAndCount(shape, layout);

        if (layout)
          layout = layout.dropInstData();

        auto newTy = xegpu::TensorDescType::get(
            type.getContext(), subShape, elemTy, type.getEncoding(), layout);
        result.append(count, newTy);
        return success();
      });
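  // E.g. (illustrative): with inst_data = [8, 16], an SCF value of type
  // tensor<32x32xf16, #layout> is converted to 8 values of type
  // vector<8x16xf16>; the unrealized_conversion_cast ops introduced by this
  // conversion are resolved at the end of the pass.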

  xegpu::doSCFStructuralTypeConversionWithTensorType(op, converter);

  xegpu::UnrollOptions options;
  options.setFilterConstraint(
      [&](Operation *op) -> LogicalResult { return success(needsUnroll(op)); });

  options.setNativeShapeFn([&](Operation *op) { return getTileShape(op); });

  options.setUnrolledTypesFn([&](ShapedType type, ArrayRef<int64_t> tileShape,
                                 bool returnSingleType = false) {
    Type elemTy = type.getElementType();
    Type newTy;

    if (auto tdescTy = dyn_cast<xegpu::TensorDescType>(type)) {

      Attribute encoding = tdescTy.getEncoding();
      // If the encoding is a ScatterTensorDescAttr, we may need to adjust
      // the chunk size based on the inst_data.
      if (tdescTy.isScattered()) {
        int64_t chunkSize = tdescTy.getChunkSizeAsInt();

        if (chunkSize > 1) {
          int64_t blockedChunkSize = chunkSize;
          auto instData = tdescTy.getLayoutAttr().getInstData();
          if (!instData.empty())
            blockedChunkSize = instData.asArrayRef().back();

          // Create a new attribute with the blocked chunk_size.
          auto newEncoding = xegpu::ScatterTensorDescAttr::get(
              ctx, tdescTy.getMemorySpace(), blockedChunkSize);
          encoding = newEncoding;
        }
      }
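      // E.g. (illustrative numbers): chunk_size = 8 with inst_data = [16, 4]
      // is blocked to chunk_size = 4, the innermost inst_data dimension.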

      newTy =
          xegpu::TensorDescType::get(ctx, tileShape, elemTy, encoding,
                                     tdescTy.getLayoutAttr().dropInstData());
    } else {
      newTy = VectorType::get(tileShape, elemTy);
    }

    if (returnSingleType)
      return SmallVector<Type>{newTy};
    std::optional<SmallVector<int64_t>> ratio =
        computeShapeRatio(type.getShape(), tileShape);
    assert(ratio && "The shape of the type must be a multiple of tileShape.");
    return SmallVector<Type>(computeProduct(*ratio), newTy);
  });

  RewritePatternSet patterns(ctx);
  patterns.add<ConvertLayoutOpPattern>(ctx);

  vector::UnrollVectorOptions vectorOptions;
  vectorOptions.setNativeShapeFn(options.nativeShape);

  xegpu::populateXeGPUUnrollPatterns(patterns, options);
  vector::populateVectorUnrollPatterns(patterns, vectorOptions);

  (void)applyPatternsGreedily(op, std::move(patterns));

  op->walk([](Operation *op) {
    // Remove the layout attributes cached per operand.
    for (OpOperand &opr : op->getOpOperands()) {
      std::string name = xegpu::getTemporaryLayoutName(opr);
      if (op->hasAttrOfType<xegpu::DistributeLayoutAttr>(name))
        op->removeAttr(name);
    }

    // Update the layout attributes per result.
    for (OpResult result : op->getOpResults()) {
      std::string name = xegpu::getTemporaryLayoutName(result);
      if (auto layout = op->getAttrOfType<xegpu::DistributeLayoutAttr>(name)) {
        op->removeAttr(name);
        if (!isa<LoopLikeOpInterface>(op))
          xegpu::setDistributeLayoutAttr(result, layout.dropInstData());
      }
    }

    // Resolve unrealized conversion cast ops emulating pack/unpack.
    if (auto castOp = dyn_cast<UnrealizedConversionCastOp>(op))
      resolveUnrealizedConversionCastOp(castOp);
  });
}