MLIR 23.0.0git
XeGPULayoutImpl.cpp
Go to the documentation of this file.
1//===---- XeGPULayoutImpl.cpp - MLIR Utilities for XeGPUOps
2//------------------===//
3//
4// Part of the MLIR Project, under the Apache License v2.0 with LLVM Exceptions.
5// See https://llvm.org/LICENSE.txt for license information.
6// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
7//
8//===----------------------------------------------------------------------===//
9//
10// This file implements layout utility functions for XeGPU dialect
11// transformation.
12//
13//===----------------------------------------------------------------------===//
14
22#include "mlir/IR/Builders.h"
23#include "mlir/IR/Operation.h"
24#include "mlir/IR/ValueRange.h"
27#include "llvm/Support/FormatVariadic.h"
28#include <cstdint>
29#include <numeric>
30
31using namespace mlir;
32
34 op->walk([&](Operation *nestOp) {
35 for (OpOperand &opr : nestOp->getOpOperands()) {
36 auto layout = getDistributeLayoutAttr(opr.get());
37 setDistributeLayoutAttr(opr, layout);
38 }
39
40 for (OpResult result : nestOp->getOpResults()) {
41 auto layout = getDistributeLayoutAttr(result);
43 }
44 });
45}
46
50 out.reserve(attrs.size());
51
52 for (auto attr : attrs) {
53 if (auto dist = dyn_cast<xegpu::DistributeLayoutAttr>(attr.getValue())) {
54 auto newLayout = dist.dropSgLayoutAndData();
55 if (newLayout)
56 out.emplace_back(attr.getName(), newLayout);
57 } else {
58 out.push_back(attr);
59 }
60 }
61
62 return out;
63}
64
68 out.reserve(attrs.size());
69
70 for (auto attr : attrs) {
71 if (auto dist = dyn_cast<xegpu::DistributeLayoutAttr>(attr.getValue())) {
72 auto newLayout = dist.dropInstData();
73 if (newLayout)
74 out.emplace_back(attr.getName(), newLayout);
75 } else {
76 out.push_back(attr);
77 }
78 }
79
80 return out;
81}
82
83// Attach layout attributes to all vector-type operands of operations within
84// the given operation's region. Reports an error if any vector operand lacks
85// a layout attribute.
87 auto result = rootOp->walk([&](Operation *op) {
88 for (OpOperand &operand : op->getOpOperands()) {
89 // Layouts are needed for vector type only.
90 if (!isa<VectorType>(operand.get().getType()))
91 continue;
92 // Skip block arguments since they don't have defining ops to attach
93 // layout attributes to.
94 if (isa<BlockArgument>(operand.get()))
95 continue;
96 auto layout = xegpu::getDistributeLayoutAttr(operand.get());
97 if (!layout) {
98 op->emitWarning("Could not find layout attribute for operand ")
99 << operand.getOperandNumber() << " of operation " << op->getName();
100 continue;
101 }
102 xegpu::setTemporaryLayout(operand, layout);
103 }
104 return WalkResult::advance();
105 });
106 return !result.wasInterrupted();
107}
108
109template <typename T, typename>
110void xegpu::removeLayoutAttr(const T &operandOrResult) {
111 Operation *owner = operandOrResult.getOwner();
112 std::string name = xegpu::getTemporaryLayoutName(operandOrResult);
113 if (owner->hasAttrOfType<DistributeLayoutAttr>(name))
114 owner->removeAttr(name);
115}
116
117// Explicit instantiation for OpResult
118template void
120
121// Explicit instantiation for OpOperand
122template void
124
126 op->walk([&](Operation *nestOp) {
127 // Remove all attributes of DistributeLayoutAttr type
128 SmallVector<StringAttr> attrsToRemove;
129 for (auto namedAttr : nestOp->getAttrs()) {
130 if (isa<DistributeLayoutAttr>(namedAttr.getValue()))
131 attrsToRemove.push_back(namedAttr.getName());
132 }
133 for (auto attrName : attrsToRemove)
134 nestOp->removeAttr(attrName);
135 });
136}
137
138/// Infers the source layout attribute for a broadcast operation given the
139/// result layout attribute, result shape, source shape.
140xegpu::DistributeLayoutAttr
141xegpu::inferBroadcastSourceLayout(xegpu::DistributeLayoutAttr resLayout,
142 ArrayRef<int64_t> resShape,
143 ArrayRef<int64_t> srcShape) {
144
145 SmallVector<int64_t> bcastDims;
146 size_t dimDiff = resShape.size() - srcShape.size();
147 auto bcastSourceLayout = resLayout;
148 for (size_t i = dimDiff; i < resShape.size(); i++) {
149 if ((srcShape[i - dimDiff] == 1) && (resShape[i] != 1))
150 bcastDims.push_back(i);
151 }
152
153 // the sg_layout and lane_layout for unit dimensions are preserved so it can
154 // be propagate to producer op so potentially used by the multi-reduction op.
155 if (!bcastDims.empty())
156 bcastSourceLayout = bcastSourceLayout.setUnitDimData(bcastDims);
157
158 if (dimDiff > 0) {
159 SmallVector<int64_t> sliceDims;
160 for (size_t i = 0; i < dimDiff; i++)
161 sliceDims.push_back(i);
162 bcastSourceLayout = xegpu::SliceAttr::get(
163 resLayout.getContext(), bcastSourceLayout,
164 DenseI64ArrayAttr::get(resLayout.getContext(), sliceDims));
165 }
166 return bcastSourceLayout;
167}
168
169/// Infers the source layout attribute for a reduction operation given the
170/// result layout attribute and reduced dims.
171xegpu::DistributeLayoutAttr
172xegpu::inferMultiReductionSourceLayout(xegpu::DistributeLayoutAttr resLayout,
173 SmallVector<int64_t> reduceDims) {
174
175 assert(isa<xegpu::SliceAttr>(resLayout) &&
176 "reduction result layout must be slice layout");
177
178 xegpu::SliceAttr sliceLayout = dyn_cast<xegpu::SliceAttr>(resLayout);
179
180 assert((reduceDims == sliceLayout.getDims().asArrayRef()) &&
181 "reduction dims must match with slice dims");
182
183 return sliceLayout.getParent();
184}
185
186/// Infers the source layout attribute for a transpose operation given the
187/// result layout attribute and permutation.
188xegpu::DistributeLayoutAttr
189xegpu::inferTransposeSourceLayout(xegpu::DistributeLayoutAttr resLayout,
190 ArrayRef<int64_t> permutation) {
191 return resLayout.transposeDims(permutation);
192}
193
194/// Infers the source layout attribute for a bitcast operation given the
195/// result layout attribute, result element type bitwidth, and source element
196/// type bitwidth.
197xegpu::DistributeLayoutAttr
198xegpu::inferBitCastSourceLayout(xegpu::DistributeLayoutAttr resLayout,
199 int resElemTyBitWidth, int srcElemTyBitWidth) {
200
201 SmallVector<int64_t> sgData = resLayout.getEffectiveSgDataAsInt();
202 SmallVector<int64_t> instData = resLayout.getEffectiveInstDataAsInt();
203 SmallVector<int64_t> laneData = resLayout.getEffectiveLaneDataAsInt();
204 size_t sgDataSize = sgData.size();
205 size_t instDataSize = instData.size();
206 size_t laneDataSize = laneData.size();
207 int64_t sgDataValue = -1;
208 int64_t instDataValue = -1;
209 int64_t laneDataValue = -1;
210 int64_t dim = resLayout.getRank() - 1;
211
212 if (srcElemTyBitWidth <= resElemTyBitWidth) {
213 int bitWidthRatio = resElemTyBitWidth / srcElemTyBitWidth;
214 if (sgDataSize)
215 sgDataValue = sgData.back() * bitWidthRatio;
216 if (instDataSize)
217 instDataValue = instData.back() * bitWidthRatio;
218 if (laneDataSize)
219 laneDataValue = laneData.back() * bitWidthRatio;
220 } else {
221 int bitWidthRatio = srcElemTyBitWidth / resElemTyBitWidth;
222 if (sgDataSize) {
223 assert((sgData.back() % bitWidthRatio) == 0 &&
224 "sgData not divisible by bitWidthRatio");
225 sgDataValue = sgData.back() / bitWidthRatio;
226 }
227 if (instDataSize) {
228 assert((instData.back() % bitWidthRatio) == 0 &&
229 "instData not divisible by bitWidthRatio");
230 instDataValue = instData.back() / bitWidthRatio;
231 }
232 if (laneDataSize) {
233 assert((laneData.back() % bitWidthRatio) == 0 &&
234 "laneData not divisible by bitWidthRatio");
235 laneDataValue = laneData.back() / bitWidthRatio;
236 }
237 }
238
239 xegpu::DistributeLayoutAttr finalSrcLayout;
240 finalSrcLayout =
241 resLayout.setDimData(dim, sgDataValue, instDataValue, laneDataValue);
242
243 return finalSrcLayout;
244}
245
246/// Infers the source layout attribute for an insert strided slice operation
247/// given the result layout attribute, result shape, and source shape. Removes
248/// leading dimensions from the result layout to match the source shape size.
249xegpu::DistributeLayoutAttr xegpu::inferInsertStridedSliceSourceLayout(
250 xegpu::DistributeLayoutAttr resLayout, ArrayRef<int64_t> resShape,
251 ArrayRef<int64_t> srcShape) {
252
253 int srcShapeSize = srcShape.size();
254 int resShapeSize = resShape.size();
255 int dimDiff = resShapeSize - srcShapeSize;
256
257 if (dimDiff > 0) {
258 // assert that the leading dimensions being sliced off are not distributed
259 // (i.e. sg_layout and lane_layout for those dimensions are all 1)
260 auto resSgLayout = resLayout.getEffectiveSgLayoutAsInt();
261 auto resLaneLayout = resLayout.getEffectiveLaneLayoutAsInt();
262 for (int i = 0; i < dimDiff; i++) {
263 assert((resSgLayout.size() == 0 || resSgLayout[i] == 1) &&
264 (resLaneLayout.size() == 0 || resLaneLayout[i] == 1) &&
265 "Leading dimensions being sliced off must not be distributed");
266 }
267 return resLayout.dropDims(llvm::to_vector(llvm::seq<int64_t>(0, dimDiff)));
268 }
269 return resLayout;
270}
271
272/// Infers the source layout attribute for a shape cast operation given the
273/// result layout attribute, result shape, and source shape.
274xegpu::DistributeLayoutAttr
275xegpu::inferShapeCastSourceLayout(xegpu::DistributeLayoutAttr resLayout,
276 ArrayRef<int64_t> resShape,
277 ArrayRef<int64_t> srcShape) {
278
279 // There are three use cases:
280 // 1. expand dims of low-rank dimensions (e.g., 1D to 2D): to set up the
281 // tensor before broadcast
282 // 2. split dim of a high-rank dimension (e.g., 1D to 2D): to setup tensor
283 // for multi-stage reduction
284 // 3. combines all dims to a single dim and put in the innermost dim in 2d as
285 // [1, combinedData] or [combinedData]. Say, [2, 4, 8] -> [1, 64] or [64]
286 // Use cases are only supported after workgroup distribution,
287 // like cross-sg reduction saves multidimension data to
288 // 1D slm buffer, shapecast inserted by cse/canonicalization passes.
289
290 // Use case 1: Shapes only differ by expanding unit dimensions, for broadcast
291 SmallVector<int64_t> expandedUnitDims;
292
293 if (xegpu::matchUnitDimExpansion(srcShape, resShape, expandedUnitDims)) {
294 // create a slice layout for the source by removing the expanded unit dims
295 auto sliceDimsAttr = DenseI64ArrayAttr::get(
296 resLayout.getContext(), ArrayRef<int64_t>(expandedUnitDims));
297 auto srcLayout =
298 xegpu::SliceAttr::get(resLayout.getContext(), resLayout, sliceDimsAttr);
299 return srcLayout;
300 }
301
302 // Use case 2: Dim split from source to result, for multi-stage reduction
303 SmallVector<SmallVector<int64_t>> splitDimGroups;
304 if (xegpu::matchSplitDimExpansion(srcShape, resShape, splitDimGroups)) {
305 auto srcLayout = resLayout;
306 for (const auto &dimGroup : splitDimGroups)
307 srcLayout = srcLayout.collapseDims(dimGroup);
308
309 return srcLayout;
310 }
311
312 // Use case 3: Collaspse to innermost dim, for cross-sg reduction to SLM
313 auto matchCollapseToInnermostDim = [&](ArrayRef<int64_t> src,
314 ArrayRef<int64_t> dst) -> bool {
315 // only one non-unit dim in dst which is the innermost dim
316 if ((dst.size() != 2) && (dst.size() != 1))
317 return false;
318 int64_t srcSize = std::accumulate(src.begin(), src.end(), 1LL,
319 std::multiplies<int64_t>());
320 if (dst.size() == 1)
321 return (dst[0] == srcSize);
322 return (dst[0] == 1) && (dst[1] == srcSize);
323 };
324
325 if (matchCollapseToInnermostDim(srcShape, resShape)) {
326 int srcShapeSize = srcShape.size();
327 int resShapeSize = resShape.size();
328 auto context = resLayout.getContext();
329 auto resInstData = resLayout.getEffectiveInstDataAsInt();
330 auto resLaneLayout = resLayout.getEffectiveLaneLayoutAsInt();
331 auto resLaneData = resLayout.getEffectiveLaneDataAsInt();
332
333 // Extract layout info from result's innermost dimension and apply to
334 // source's innermost dimension while setting all other dimensions to 1.
335 // The inferred layout is restricted by srcShape to ensure it fits within
336 // the source dimensions.
337 // Examples 1:
338 // srcShape=[8, 16, 32], resShape=[1, 4096]
339 // resInstData=[1, 16]
340 // -> inferredInstData=[1, 1, min(16, 32)]=[1, 1, 16]
341 // Examples 2:
342 // srcShape=[4, 8, 64], resShape=[2048]
343 // resLaneLayout=[16], resLaneData=[2]
344 // -> inferredLaneLayout=[1, 1, 16]
345 // -> inferredLaneData=[1, 1, min(2, 64/16)]=[1, 1, 2]
346
347 if (resInstData.size() != 0) {
348 // assert resInstData must be 1 for all but the innermost dim
349 for (int i = 0; i < resShapeSize - 1; i++) {
350 assert(resInstData[i] == 1 &&
351 "only innermost dim can have non-unit instData");
352 }
353 SmallVector<int> inferredInstData(srcShapeSize, 1);
354 inferredInstData[srcShapeSize - 1] =
355 std::min(resInstData[resShapeSize - 1], srcShape[srcShapeSize - 1]);
356 return xegpu::LayoutAttr::get(context, inferredInstData);
357 }
358
359 if (resLaneLayout.size() != 0) {
360 for (int i = 0; i < resShapeSize - 1; i++) {
361 assert(resLaneData[i] == 1 &&
362 "only innermost dim can have non-unit instData");
363 }
364 assert(srcShape.back() % resLaneLayout.back() == 0 &&
365 "source innermost dim must be >= result lane layout");
366 SmallVector<int> inferredLaneLayout(srcShapeSize, 1);
367 SmallVector<int> inferredLaneData(srcShapeSize, 1);
368 inferredLaneLayout.back() = resLaneLayout.back();
369 inferredLaneData.back() = std::min(
370 resLaneData.back(), srcShape.back() / inferredLaneLayout.back());
371 return xegpu::LayoutAttr::get(context, inferredLaneLayout,
372 inferredLaneData);
373 }
374 }
375 llvm_unreachable("running into unsupported shape cast scenarios");
376 return nullptr;
377}
378
379/// Sets up layout for reduction operations by creating a SliceAttr for the
380/// result.
381///
382/// Algorithm Overview:
383/// This function attempts to construct a source layout that, when sliced along
384/// reduction dimensions, produces a result layout compatible with the
385/// consumer layout.
386///
387/// For subgroup layouts, it first tries to align the source layout's subgroup
388/// layout and data with the consumer's layout on non-reduction dimensions.
389/// Then, it distributes remaining subgroups across reduction dimensions. This
390/// avoids subgroup data redistribution overhead between the reduced result and
391/// its consumer. When the consumer layout is a slice layout, it attempts to
392/// reuse the slice layout's parent layout for the source to further minimize
393/// potential data redistribution.
394///
/// InstData requires {1, ..., min(maxReduceVectorSize, srcShape), subgroupSize}
396/// Lane Layout requires {1, ..., 1, subgroupSize}
397/// Lane data requires {1, ..., min(maxReduceVectorSize, srcShape), 1}
398///
399/// Examples:
400/// 1. Subgroup layout - Row reduction on 2D tensor:
401/// srcShape=[32, 128], reductionDims=[1], resShape=[32], subgroupSize=16,
402/// workgroupSize=32
403/// * Consumer Layout:
404/// #xegpu.slice<#xegpu.layout<sg_layout=[4, 8], sg_data=[8, 8]>, dims =
405/// [1]>}
///    * Result Layout:
407/// #xegpu.slice<#xegpu.layout<sg_layout=[4, 8],sg_data=[8, 16]>, dims =
408/// [1]>}
409/// Note that the sg_layout is reused but sg_data needs to be adjusted to
410/// evenly distribute the source tensor tile among the reduction dim.
411///
412/// 2. Subgroup layout - Same example above but consumer doesn't have a
413/// reusable slice layout.
414/// * Consumer Layout:
415/// #xegpu.layout<sgLayout=[32], sgData=[1]>
416/// * Result Layout:
417/// #xegpu.slice<#xegpu.layout<sgLayout=[32,1], sgData=[1, 64]>, dims =
418/// [1]>}
419/// * Consumer Layout:
420/// #xegpu.slice<#xegpu.layout<sgLayout=[8, 2, 4], sgData=[4, 64, 32]>,
421/// dims = [1, 2]>}
422/// * Result Layout:
423/// #xegpu.slice<#xegpu.layout<sgLayout=[8,4], sgData=[4, 32]>, dims =
424/// [1]>}
425/// Note that the consumer's layout can't be directly reused as is.
426/// So the algorithm distributes all subgroups on non reduction dimensions
427/// first and then distribute remaining subgroups on the reduction
428/// dimension.
429///
/// 3. InstData layout - Column reduction:
///    srcShape=[32, 64], reductionDims=[0], subgroupSize=16
///    Result: instData=[1, 16] (maxReduceVectorSize=1, subgroupSize on
///    innermost)
///
/// 4. Lane layout - Multi-dimensional reduction:
///    srcShape=[16, 32, 64], reductionDims=[1], subgroupSize=16
///    Result: laneLayout=[1, 1, 16], laneData=[1, 1, 1]
///    (subgroupSize on innermost dim, max vector size on reduction dim)
439
441 xegpu::LayoutKind layoutKind, VectorType srcVecTy,
442 DistributeLayoutAttr consumerLayout, SmallVector<int64_t> reductionDims,
443 const xegpu::uArch::uArch *uArch) {
444
445 auto srcShape = srcVecTy.getShape();
446 int srcRank = srcShape.size();
447 auto context = consumerLayout.getContext();
448
449 // Reduction layout requires at least 2D tensors
450 if (srcRank < 2)
451 return nullptr;
452
453 // Helper lambda to convert int64 vectors to int32 DenseArrayAttr
454 auto toInt32Attr = [&](ArrayRef<int64_t> vec) {
455 SmallVector<int32_t> vec32(vec.begin(), vec.end());
456 return DenseI32ArrayAttr::get(context, vec32);
457 };
458
459 const int workgroupSize = consumerLayout.getNumSubgroups();
460 const int subgroupSize = uArch->getSubgroupSize();
461 int64_t maxReduceVectorSize = 1; // could extend to spirv vector Size
462
463 SmallVector<int64_t> consumerSgLayout =
464 consumerLayout.getEffectiveSgLayoutAsInt();
465 SmallVector<int64_t> consumerLaneLayout =
466 consumerLayout.getEffectiveLaneLayoutAsInt();
467 SmallVector<int64_t> consumerOrder = consumerLayout.getEffectiveOrderAsInt();
468 DenseI32ArrayAttr orderAttr = consumerLayout.getOrder();
469
470 xegpu::DistributeLayoutAttr srcLayout;
471 if (layoutKind == xegpu::LayoutKind::Subgroup) {
472 xegpu::SliceAttr consumerSliceLayout =
473 dyn_cast<xegpu::SliceAttr>(consumerLayout);
474 if (consumerSliceLayout &&
475 consumerSliceLayout.getDims().asArrayRef().equals(reductionDims)) {
476 srcLayout = consumerSliceLayout.getParent();
477 SmallVector<int64_t> sgLayoutFromConsumer =
478 srcLayout.getEffectiveSgLayoutAsInt();
479 auto srcSgData = computeShapeRatio(srcShape, sgLayoutFromConsumer);
480 if (srcSgData)
481 for (int dim = 0; dim < srcRank; dim++) {
482 srcLayout = srcLayout.setDimData(dim, srcSgData.value()[dim], -1, -1);
483 }
484 } else {
485
486 SmallVector<int64_t> sgLayout(srcRank), sgData(srcRank), order(srcRank);
487 int remainingSgCount = workgroupSize;
488 int consumerIdx = 0;
489
490 // First pass: Match consumer's layout on non-reduction dimensions
491 for (int i = 0; i < srcRank; i++) {
492 if (!llvm::is_contained(reductionDims, i) &&
493 consumerIdx < static_cast<int>(consumerSgLayout.size())) {
494 sgLayout[i] = consumerSgLayout[consumerIdx];
495 assert((srcShape[i] % sgLayout[i] == 0) &&
496 "source shape not divisible by consumer sg_layout");
497 sgData[i] = srcShape[i] / sgLayout[i];
498 remainingSgCount /= sgLayout[i];
499 order[i] = consumerOrder[consumerIdx];
500 consumerIdx++;
501 }
502 }
503
504 // Second pass: Distribute remaining subgroups across reduction dimensions
505 int64_t remainOrder = consumerSgLayout.size();
506 for (int i = 0; i < srcRank; i++) {
507 if (llvm::is_contained(reductionDims, i)) {
508 sgLayout[i] =
509 std::min(srcShape[i], static_cast<int64_t>(remainingSgCount));
510 assert((srcShape[i] % sgLayout[i] == 0) &&
511 "source shape not divisible by sg_layout");
512 sgData[i] = srcShape[i] / sgLayout[i];
513 remainingSgCount /= sgLayout[i];
514 order[i] = remainOrder++;
515 }
516 }
517
518 assert(remainingSgCount == 1 && "not all subgroups distributed");
519 srcLayout = xegpu::LayoutAttr::get(
520 context, toInt32Attr(sgLayout), toInt32Attr(sgData),
521 /*inst_data =*/nullptr, /*lane_layout =*/nullptr,
522 /*lane_data =*/nullptr, /*order =*/
523 (!orderAttr || orderAttr.empty()) ? nullptr : toInt32Attr(order));
524 }
525 } else if (layoutKind == xegpu::LayoutKind::InstData) {
526
527 SmallVector<int64_t> instData(srcRank, 1);
528 instData[srcRank - 2] =
529 std::min(maxReduceVectorSize, srcShape[srcRank - 2]);
530 instData[srcRank - 1] =
531 std::min(static_cast<int64_t>(subgroupSize), srcShape[srcRank - 1]);
532 srcLayout = xegpu::LayoutAttr::get(context, toInt32Attr(instData));
533
534 } else if (layoutKind == xegpu::LayoutKind::Lane) {
535
536 SmallVector<int64_t> laneLayout(srcRank, 1), laneData(srcRank, 1);
537 laneLayout[srcRank - 1] =
538 std::min(static_cast<int64_t>(subgroupSize), srcShape[srcRank - 1]);
539 laneData[srcRank - 2] =
540 std::min(maxReduceVectorSize, srcShape[srcRank - 2]);
541 srcLayout = xegpu::LayoutAttr::get(context, toInt32Attr(laneLayout),
542 toInt32Attr(laneData));
543 }
544
545 return xegpu::SliceAttr::get(context, srcLayout,
546 DenseI64ArrayAttr::get(context, reductionDims));
547}
548
549/// Sets up the result layout for a bitcast operation.
550/// When casting to a smaller bitwidth, adjusts the layout dimensions (sgData,
551/// instData, or laneData) by multiplying by the bitwidth ratio to ensure the
552/// result layout can be correctly divided back to the source layout during
553/// inference.
554///
555/// Examples:
556/// 1. Casting f32 -> f16 (32-bit to 16-bit, bitWidthRatio = 2):
557/// Consumer layout: instData=[1, 16], subgroupSize=16
558/// Source shape: [8, 32]
559/// Result layout: instData=[1, 32] (16 * 2)
560/// The innermost dimension is multiplied by 2 to maintain consistency.
561///
562/// 2. Casting f32 -> i8 (32-bit to 8-bit, bitWidthRatio = 4):
563/// Consumer instData=[1, 16], subgroupSize=16
564/// Source shape: [4, 128]
565/// adjust the instData from [1, 16] to [1, 16 * 4 = 64]
566///
567/// 3. Casting i8 -> i32 (8-bit to 32-bit, bitWidthRatio = 1/4):
568/// Consumer layout: laneLayout=[1, 16], laneData=[1, 4]
569/// No adjustment needed - returns consumer layout directly.
570///
571xegpu::DistributeLayoutAttr xegpu::setupBitCastResultLayout(
572 xegpu::LayoutKind layoutKind, VectorType srcVecTy, VectorType resVecTy,
573 DistributeLayoutAttr consumerLayout, const xegpu::uArch::uArch *uArch) {
574
575 int srcElemTyBitWidth = srcVecTy.getElementType().getIntOrFloatBitWidth();
576 int resElemTyBitWidth = resVecTy.getElementType().getIntOrFloatBitWidth();
577
578 ArrayRef<int64_t> srcShape = srcVecTy.getShape();
579 SmallVector<int64_t> sgData = consumerLayout.getEffectiveSgDataAsInt();
580 SmallVector<int64_t> instData = consumerLayout.getEffectiveInstDataAsInt();
581 SmallVector<int64_t> laneData = consumerLayout.getEffectiveLaneDataAsInt();
582 assert(consumerLayout.getRank() == static_cast<int64_t>(srcShape.size()) &&
583 "laneData must be available for all dimensions");
584 size_t dim = srcShape.size() - 1;
585 int64_t sgDataValue = -1;
586 int64_t instDataValue = -1;
587 int64_t laneDataValue = -1;
588 const int subgroupSize = uArch->getSubgroupSize();
589
590 if (srcElemTyBitWidth > resElemTyBitWidth) {
591 // When casting to a smaller bitwidth, multiply the result layout
592 // accordingly to ensure it can be divided by the ratio back to the
593 // source layout.
594 int bitWidthRatio = srcElemTyBitWidth / resElemTyBitWidth;
595 int innermostDimLaneLayout = subgroupSize;
596 if (layoutKind == xegpu::LayoutKind::Subgroup) {
597 sgDataValue = sgData[dim];
598 } else if (layoutKind == xegpu::LayoutKind::InstData) {
599 instDataValue = instData[dim];
600 // Adjust instDataValue so it still fits within an instruction after
601 // dividing by bitWidthRatio
602 while ((instDataValue <= srcShape[dim]) &&
603 (instDataValue % (innermostDimLaneLayout * bitWidthRatio) != 0))
604 instDataValue *= 2;
605 assert((srcShape[dim] % instDataValue) == 0 &&
606 "srcShape, instData, and lanelayout for innermost must be 2^n !");
607 } else if (layoutKind == xegpu::LayoutKind::Lane) {
608 laneDataValue = laneData[dim];
609 while ((laneDataValue <= srcShape[dim]) &&
610 (laneDataValue % bitWidthRatio != 0))
611 laneDataValue *= 2;
612 }
613 // Now set only instData and laneData, preserving sgData
614 xegpu::DistributeLayoutAttr resLayout;
615 resLayout = consumerLayout.setDimData(dim, sgDataValue, instDataValue,
616 laneDataValue);
617 return resLayout;
618 }
619 return consumerLayout;
620}
621
622/// Sets up the result layout for an insert strided slice operation.
623/// Creates a result layout based on the specified layout kind (InstData or
624/// Lane).
625xegpu::DistributeLayoutAttr xegpu::setupInsertStridedSliceResultLayout(
626 xegpu::LayoutKind layoutKind, VectorType srcVectorTy,
627 VectorType resVectorTy, xegpu::DistributeLayoutAttr consumerLayout,
628 const xegpu::uArch::uArch *uArch) {
629
630 xegpu::DistributeLayoutAttr requiredResLayout;
631 SmallVector<int64_t> consumerInstData =
632 consumerLayout.getEffectiveInstDataAsInt();
633 SmallVector<int64_t> consumerLaneData =
634 consumerLayout.getEffectiveLaneDataAsInt();
635 SmallVector<int64_t> consumerLaneLayout =
636 consumerLayout.getEffectiveLaneLayoutAsInt();
637 ArrayRef<int64_t> srcShape = srcVectorTy.getShape();
638 int64_t instDataValue = -1;
639 int64_t laneDataValue = -1;
640
641 requiredResLayout = consumerLayout;
642 int srcRank = srcShape.size();
643
644 if (layoutKind == xegpu::LayoutKind::Subgroup) {
645 assert(true &&
646 "subgroup layout assignment not supported for insertStridedSlice.");
647 } else if (layoutKind == xegpu::LayoutKind::InstData) {
648 for (int dim = 0; dim < srcRank; dim++) {
649 instDataValue = std::min(srcShape[dim], consumerInstData[dim]);
650 requiredResLayout =
651 requiredResLayout.setDimData(dim, -1, instDataValue, -1);
652 }
653 } else if (layoutKind == xegpu::LayoutKind::Lane) {
654 for (int dim = 0; dim < srcRank; dim++) {
655 assert(srcShape[dim] % consumerLaneLayout[dim] == 0 &&
656 "srcShape must be divisible by laneLayout for all dimensions");
657 laneDataValue = std::min(srcShape[dim] / consumerLaneLayout[dim],
658 consumerLaneData[dim]);
659
660 requiredResLayout =
661 requiredResLayout.setDimData(dim, -1, -1, laneDataValue);
662 }
663 }
664 return requiredResLayout;
665}
666
667/// Sets up the anchor layout for load gather and load matrix operation.
668/// load matrix lowers to load gather and 1d block load. All of them share the
669/// same layout setup logic.
670/// For Subgroup layout, uses the consumer layout directly.
671/// non-chunked loads:
672/// InstData = {1, ..., min(consumer, maxLaneLoadSize * subgroupSize)}
673/// LaneLayout = {1, ..., subgroupSize}
674/// lane_data = {1, ..., min(consumer, maxLaneLoadSize)}
675/// chunked loads:
676/// InstData = {subgroupSize, min(consumer, maxLaneLoadSize)}
677/// LaneLayout = {subgroupSize, 1}
678/// lane_data={1,min(consumer, maxLaneLoadSize)}
679static xegpu::DistributeLayoutAttr setupGenericLoadAnchorLayout(
680 xegpu::LayoutKind layoutKind, mlir::MLIRContext *context,
681 xegpu::DistributeLayoutAttr consumerLayout, bool isChunkedLoad,
682 int maxChunkSize, ArrayRef<int64_t> resShape, int subgroupSize) {
683
684 if (layoutKind == xegpu::LayoutKind::Subgroup)
685 return consumerLayout;
686
687 SmallVector<int64_t> consumerInstData =
688 consumerLayout.getEffectiveInstDataAsInt();
689 SmallVector<int64_t> consumerLaneData =
690 consumerLayout.getEffectiveLaneDataAsInt();
691
692 SmallVector<int> instData(resShape.size(), 1);
693 SmallVector<int> laneLayout(resShape.size(), 1);
694 SmallVector<int> laneData(resShape.size(), 1);
695
696 if (!isChunkedLoad) {
697 if (layoutKind == xegpu::LayoutKind::InstData) {
698 instData.back() = std::min(static_cast<int>(consumerInstData.back()),
699 maxChunkSize * subgroupSize);
700 return xegpu::LayoutAttr::get(context, instData);
701 } else if (layoutKind == xegpu::LayoutKind::Lane) {
702 laneData.back() =
703 std::min(static_cast<int>(consumerLaneData.back()), maxChunkSize);
704 laneLayout.back() = std::min(static_cast<int64_t>(subgroupSize),
705 resShape.back() / laneData.back());
706 return xegpu::LayoutAttr::get(context, laneLayout, laneData);
707 }
708 } else {
709 assert(resShape.size() == 2 && "Chunked Store must access 2D tensor tile.");
710 if (layoutKind == xegpu::LayoutKind::InstData) {
711 instData[0] = subgroupSize;
712 instData[1] =
713 std::min(static_cast<int>(consumerInstData[1]), maxChunkSize);
714 return xegpu::LayoutAttr::get(context, instData);
715 } else if (layoutKind == xegpu::LayoutKind::Lane) {
716 laneLayout[0] = subgroupSize;
717 laneData[1] =
718 std::min(static_cast<int>(consumerLaneData[1]), maxChunkSize);
719 return xegpu::LayoutAttr::get(context, laneLayout, laneData);
720 }
721 }
722 return nullptr;
723}
724
725/// Sets up the anchor layout for a load gather operation.
726xegpu::DistributeLayoutAttr xegpu::setupLoadGatherAnchorLayout(
727 xegpu::LayoutKind layoutKind, VectorType resVecTy, int chunkSize,
728 xegpu::DistributeLayoutAttr consumerLayout, const uArch::uArch *uArch) {
729
730 const int subgroupSize = uArch->getSubgroupSize();
731 ArrayRef<int64_t> resShape = resVecTy.getShape();
732 auto context = resVecTy.getContext();
733 auto elemBitWidth = resVecTy.getElementType().getIntOrFloatBitWidth();
734
735 const auto *uArchInstruction =
736 dyn_cast<xegpu::uArch::LoadGatherInstructionInterface>(
738 int maxChunkSize = uArchInstruction->getMaxLaneLoadSize(elemBitWidth);
739
740 return setupGenericLoadAnchorLayout(layoutKind, context, consumerLayout,
741 (chunkSize > 1), maxChunkSize, resShape,
742 subgroupSize);
743}
744
745/// Sets up the anchor layout for load matrix operation.
746/// TODO: enhance load matrix to indicate lowering to chunked load or not.
747xegpu::DistributeLayoutAttr
749 VectorType resVecTy,
750 xegpu::DistributeLayoutAttr consumerLayout,
751 const xegpu::uArch::uArch *uArch) {
752
753 const int subgroupSize = uArch->getSubgroupSize();
754 ArrayRef<int64_t> resShape = resVecTy.getShape();
755 auto context = resVecTy.getContext();
756 auto elemBitWidth = resVecTy.getElementType().getIntOrFloatBitWidth();
757
758 const auto *uArchInstruction =
759 dyn_cast<xegpu::uArch::LoadGatherInstructionInterface>(
761 int maxChunkSize = uArchInstruction->getMaxLaneLoadSize(elemBitWidth);
762 return setupGenericLoadAnchorLayout(layoutKind, context, consumerLayout,
763 false, maxChunkSize, resShape,
764 subgroupSize);
765}
766
767/// Sets up the anchor layout for store scatter and store matrix operation.
768/// store matrix lowers to store scatter and 1d block store. All of them share
769/// the same layout setup logic. For Subgroup layout, not support yet.
770/// non-chunked stores:
771/// InstData = {1, ..., subgroupSize}
772/// LaneLayout = {1, ..., subgroupSize}
773/// lane_data = {1, ..., 1}
774/// chunked stores:
775/// InstData = {subgroupSize, min(srcVec, maxLaneStoreSize)}
776/// LaneLayout = {subgroupSize, 1}
777/// lane_data={1,min(srcVec, maxLaneStoreSize)}
778static xegpu::DistributeLayoutAttr
780 mlir::MLIRContext *context, bool isChunkedStore,
781 int maxChunkSize, ArrayRef<int64_t> srcShape,
782 int subgroupSize) {
783
784 int srcShapeSize = srcShape.size();
785 SmallVector<int> instData(srcShapeSize, 1);
786 SmallVector<int> laneLayout(srcShapeSize, 1);
787 SmallVector<int> laneData(srcShapeSize, 1);
788
789 if (layoutKind == xegpu::LayoutKind::Subgroup) {
790 assert(true &&
791 "subgroup layout assignment not supported for storeScatter.");
792 return nullptr;
793 }
794
795 if (!isChunkedStore) {
796 if (layoutKind == xegpu::LayoutKind::InstData) {
797 instData[srcShapeSize - 1] =
798 std::min(subgroupSize, static_cast<int>(srcShape.back()));
799 return xegpu::LayoutAttr::get(context, instData);
800 } else if (layoutKind == xegpu::LayoutKind::Lane) {
801 laneLayout[srcShapeSize - 1] =
802 std::min(subgroupSize, static_cast<int>(srcShape.back()));
803 return xegpu::LayoutAttr::get(context, laneLayout, laneData);
804 }
805 } else {
806 assert(srcShapeSize == 2 && "Chunked Store must access 2D tensor tile.");
807 if (layoutKind == xegpu::LayoutKind::InstData) {
808 instData[0] = subgroupSize;
809 instData[1] = std::min(static_cast<int>(srcShape[1]), maxChunkSize);
810 return xegpu::LayoutAttr::get(context, instData);
811 } else if (layoutKind == xegpu::LayoutKind::Lane) {
812 laneLayout[0] = subgroupSize;
813 laneData[1] = std::min(static_cast<int>(srcShape[1]), maxChunkSize);
814 return xegpu::LayoutAttr::get(context, laneLayout, laneData);
815 }
816 }
817 return nullptr;
818}
819
820/// Sets up the anchor layout for a store scatter operation.
821xegpu::DistributeLayoutAttr
823 VectorType srcVecTy, int chunkSize,
824 const uArch::uArch *uArch) {
825
826 const int subgroupSize = uArch->getSubgroupSize();
827 ArrayRef<int64_t> srcShape = srcVecTy.getShape();
828 auto context = srcVecTy.getContext();
829 auto elemBitWidth = srcVecTy.getElementType().getIntOrFloatBitWidth();
830
831 const auto *uArchInstruction =
832 dyn_cast<xegpu::uArch::StoreScatterInstructionInterface>(
834 int maxChunkSize = uArchInstruction->getMaxLaneStoreSize(elemBitWidth);
835 return setupGenericStoreAnchorLayout(layoutKind, context, (chunkSize > 1),
836 maxChunkSize, srcShape, subgroupSize);
837}
838
839/// Sets up the anchor layout for a store matrix operation.
840xegpu::DistributeLayoutAttr
842 VectorType srcVecTy,
843 const xegpu::uArch::uArch *uArch) {
844
845 const int subgroupSize = uArch->getSubgroupSize();
846 ArrayRef<int64_t> srcShape = srcVecTy.getShape();
847 auto context = srcVecTy.getContext();
848 auto elemBitWidth = srcVecTy.getElementType().getIntOrFloatBitWidth();
849
850 const auto *uArchInstruction =
851 dyn_cast<xegpu::uArch::StoreScatterInstructionInterface>(
853 int maxChunkSize = uArchInstruction->getMaxLaneStoreSize(elemBitWidth);
854
855 return setupGenericStoreAnchorLayout(layoutKind, context, false, maxChunkSize,
856 srcShape, subgroupSize);
857}
858
859// This function returns the default lane layout for a given vector type.
860// - `packingSize` means multiple consecutive elements can be accessed
861// together as a single unit.
862// - `vnni` means data packing is column-wise (i.e., 2x1xf16 with vnni vs.
863// 1x2xf16 w/o vnni).
864template <typename RankedTy>
865static xegpu::LayoutAttr getDefaultLaneLayout2DBlockIo(
866 RankedTy ty, const xegpu::uArch::uArch *uArch,
867 std::optional<unsigned> packingSize = std::nullopt, bool vnni = false) {
868 // Expecting a 1D or 2D vector.
869 assert(((ty.getRank() == 1 && !vnni) || ty.getRank() == 2) &&
870 "Expected 1D non-vnni or 2D vector.");
871 // Expecting int or float element type.
872 assert(ty.getElementType().isIntOrFloat() &&
873 "Expected int or float element type.");
874
875 auto context = ty.getContext();
876 auto rank = ty.getRank();
877 SmallVector<int> laneLayout(rank, 1);
878 SmallVector<int> laneData(rank, 1);
879 if (packingSize.has_value()) {
880 unsigned bitwidth = ty.getElementType().getIntOrFloatBitWidth();
881 int &laneDataPos = vnni ? laneData[rank - 2] : laneData.back();
882 laneDataPos = bitwidth < *packingSize ? *packingSize / bitwidth : 1;
883 }
884 laneLayout.back() = uArch->getSubgroupSize();
885 return xegpu::LayoutAttr::get(context, laneLayout, laneData);
886}
887
888// This function returns all layouts for the given sgCount, whose sgData:
889// 1. Evenly divides the wgShape.
890// 2. Is a multiple of instData.
891// Example:
892// wgShape = [128, 64], instData = [8, 16], sgCount = 32
893// Returns layouts:
894// [(8,4), (16,2)], which correspond to sgData [16,16] and [8,32].
895using LayoutRepresentation = std::pair<int64_t, int64_t>;
898 int64_t sgCount) {
900 for (int sgLayout0 = 1; sgLayout0 <= sgCount; ++sgLayout0) {
901 if (sgCount % sgLayout0)
902 continue;
903 int64_t sgLayout1 = sgCount / sgLayout0;
904 int64_t sgData0 = wgShape[0] / sgLayout0;
905 int64_t sgData1 = wgShape[1] / sgLayout1;
906 if ((wgShape[0] % sgLayout0 || wgShape[1] % sgLayout1) ||
907 (sgData0 % instData[0] || sgData1 % instData[1]))
908 continue;
909 candidates.emplace_back(sgLayout0, sgLayout1);
910 }
911 // Sort primarily by how balanced they are
912 // (i.e., minimize the absolute difference between the two dimensions), and
913 // secondarily by the first dimension in ascending order.
914 llvm::sort(candidates, [](const LayoutRepresentation &lhs,
915 const LayoutRepresentation &rhs) {
916 int diffLhs = std::abs(lhs.first - lhs.second);
917 int diffRhs = std::abs(rhs.first - rhs.second);
918 if (diffLhs != diffRhs)
919 return diffLhs < diffRhs;
920 return lhs.first < rhs.first;
921 });
922 return candidates;
923}
924
925/// Sets up the anchor layouts for dpas operands (A, B, and C/D).
926/// The numSg and consumerLayout (optional) are only used by sg layout
927/// creation.
928std::optional<
929 std::tuple<xegpu::DistributeLayoutAttr, xegpu::DistributeLayoutAttr,
930 xegpu::DistributeLayoutAttr>>
931xegpu::setupDpasLayout(xegpu::LayoutKind layoutKind, VectorType aTy,
932 VectorType bTy, VectorType cdTy,
933 xegpu::DistributeLayoutAttr consumerLayout,
934 const xegpu::uArch::uArch *uArch, int numSg) {
935 auto context = aTy.getContext();
936 const auto *uArchInstruction =
937 dyn_cast<xegpu::uArch::SubgroupMatrixMultiplyAcc>(uArch->getInstruction(
939
940 auto getInstDataVectors = [&]()
941 -> std::optional<std::tuple<SmallVector<int64_t>, SmallVector<int64_t>,
943 const int subgroupSize = uArch->getSubgroupSize();
944 const unsigned dataALen = aTy.getShape().front();
945 auto supportedALen = uArchInstruction->getSupportedM(aTy.getElementType());
946 const int maxALen =
947 xegpu::getLargestDivisor(dataALen, ArrayRef<unsigned>(supportedALen));
948
949 const unsigned dataBLen = bTy.getShape().back();
950 auto supportedBLen = uArchInstruction->getSupportedN(bTy.getElementType());
951 const int maxBLen =
952 xegpu::getLargestDivisor(dataBLen, ArrayRef<unsigned>(supportedBLen));
953
954 auto supportedCLen = uArchInstruction->getSupportedN(cdTy.getElementType());
955 const int maxCLen =
956 xegpu::getLargestDivisor(dataBLen, ArrayRef<unsigned>(supportedCLen));
957 if (maxALen == -1 || maxBLen == -1 || maxCLen == -1)
958 return std::nullopt;
959
960 SmallVector<int64_t> instDataA(aTy.getRank(), 1);
961 instDataA[aTy.getRank() - 2] = maxALen;
962 instDataA[aTy.getRank() - 1] = subgroupSize;
963 SmallVector<int64_t> instDataB(bTy.getRank(), 1);
964 instDataB[bTy.getRank() - 2] = subgroupSize;
965 instDataB[bTy.getRank() - 1] = maxBLen;
966 SmallVector<int64_t> instDataCD(cdTy.getRank(), 1);
967 instDataCD[cdTy.getRank() - 2] = maxALen;
968 instDataCD[cdTy.getRank() - 1] = maxCLen;
969 return std::make_tuple(instDataA, instDataB, instDataCD);
970 };
971
972 if (layoutKind == xegpu::LayoutKind::Subgroup) {
973 assert(numSg > 0 &&
974 "Number of subgroups must be provided for sg layout creation.");
975 auto instDataVecs = getInstDataVectors();
976 if (!instDataVecs)
977 return std::nullopt;
978 auto [instDataA, instDataB, instDataCD] = *instDataVecs;
979 assert(instDataA.size() == 2 && instDataB.size() == 2 &&
980 instDataCD.size() == 2 &&
981 "Sg layout creation expects valid 2D inst data");
982
983 std::optional<LayoutRepresentation> consumerSgLayout = std::nullopt;
984 if (consumerLayout && consumerLayout.isForWorkgroup()) {
985 SmallVector<int64_t> sgLayoutD =
986 consumerLayout.getEffectiveSgLayoutAsInt();
987 consumerSgLayout = std::make_pair(sgLayoutD[0], sgLayoutD[1]);
988 }
989
990 // Step 1. Get all valid layouts for A, B and C/D operands.
991 // Order them from most balanced to least balanced.
992 auto layoutsA = getValidLayouts(aTy.getShape(), instDataA, numSg);
993 auto layoutsB = getValidLayouts(bTy.getShape(), instDataB, numSg);
994 auto layoutsCD = getValidLayouts(cdTy.getShape(), instDataCD, numSg);
995 if (layoutsA.empty() || layoutsB.empty() || layoutsCD.empty())
996 return std::nullopt;
997
998 // Step 2. If the consumer layout can be reused for all operands, that
999 // layout is chosen. Otherwise, pick the most balanced subgroup layout
1000 // that is valid for A, B and C (if present) operands
1001 llvm::DenseSet<LayoutRepresentation> setA(layoutsA.begin(), layoutsA.end());
1002 llvm::DenseSet<LayoutRepresentation> setCD(layoutsCD.begin(),
1003 layoutsCD.end());
1004 std::optional<LayoutRepresentation> bestPick;
1005 for (auto &sgLayout : layoutsB) {
1006 if (setA.contains(sgLayout) && setCD.contains(sgLayout)) {
1007 // Is in (A and B and CD) and matches consumer -> best pick
1008 if (consumerSgLayout.has_value() && sgLayout == *consumerSgLayout) {
1009 bestPick = sgLayout;
1010 break;
1011 }
1012 // Is in (A and B and CD) layoutsB is ordered from most
1013 // balanced to least. So the first one we see is the most balanced
1014 // one, remember it and later only update if there is one that matches
1015 // the consumer.
1016 if (!bestPick)
1017 bestPick = sgLayout;
1018 }
1019 }
1020 // Step 3. If there is no subgroup layout compatible with A, B and C (if
1021 // present) operands, we fail.
1022 if (!bestPick)
1023 return std::nullopt;
1024 SmallVector<int> sgLayout = {static_cast<int>(bestPick->first),
1025 static_cast<int>(bestPick->second)};
1026 SmallVector<int> sgDataA = {
1027 static_cast<int>(aTy.getShape()[0] / sgLayout[0]),
1028 static_cast<int>(aTy.getShape()[1] / sgLayout[1])};
1029 SmallVector<int> sgDataB = {
1030 static_cast<int>(bTy.getShape()[0] / sgLayout[0]),
1031 static_cast<int>(bTy.getShape()[1] / sgLayout[1])};
1032 SmallVector<int> sgDataCD = {
1033 static_cast<int>(cdTy.getShape()[0] / sgLayout[0]),
1034 static_cast<int>(cdTy.getShape()[1] / sgLayout[1])};
1035
1036 auto dpasALayout = xegpu::LayoutAttr::get(
1037 context, DenseI32ArrayAttr::get(context, sgLayout),
1038 DenseI32ArrayAttr::get(context, sgDataA),
1039 /*inst_data =*/nullptr, /*lane_layout =*/nullptr,
1040 /*lane_data =*/nullptr, /*order =*/nullptr);
1041
1042 auto dpasBLayout = xegpu::LayoutAttr::get(
1043 context, DenseI32ArrayAttr::get(context, sgLayout),
1044 DenseI32ArrayAttr::get(context, sgDataB),
1045 /*inst_data =*/nullptr, /*lane_layout =*/nullptr,
1046 /*lane_data =*/nullptr, /*order =*/nullptr);
1047
1048 auto dpasCDLayout = xegpu::LayoutAttr::get(
1049 context, DenseI32ArrayAttr::get(context, sgLayout),
1050 DenseI32ArrayAttr::get(context, sgDataCD),
1051 /*inst_data =*/nullptr, /*lane_layout =*/nullptr,
1052 /*lane_data =*/nullptr, /*order =*/nullptr);
1053 return std::make_tuple(dpasALayout, dpasBLayout, dpasCDLayout);
1054 } else if (layoutKind == xegpu::LayoutKind::InstData) {
1055 auto instDataVecs = getInstDataVectors();
1056 if (!instDataVecs)
1057 return std::nullopt;
1058 auto [instDataA, instDataB, instDataCD] = *instDataVecs;
1059 return std::make_tuple(
1060 xegpu::LayoutAttr::get(
1061 context, SmallVector<int>(instDataA.begin(), instDataA.end())),
1062 xegpu::LayoutAttr::get(
1063 context, SmallVector<int>(instDataB.begin(), instDataB.end())),
1064 xegpu::LayoutAttr::get(
1065 context, SmallVector<int>(instDataCD.begin(), instDataCD.end())));
1066 } else if (layoutKind == xegpu::LayoutKind::Lane) {
1067 auto aLayout = getDefaultLaneLayout2DBlockIo(
1068 aTy, uArch, uArchInstruction->getPackedFormatBitSizeA());
1069 auto bLayout = getDefaultLaneLayout2DBlockIo(
1070 bTy, uArch, uArchInstruction->getPackedFormatBitSizeB(), true);
1071 auto cdLayout = getDefaultLaneLayout2DBlockIo(
1072 cdTy, uArch /*, packingSize = std::nullopt */);
1073 return std::make_tuple(aLayout, bLayout, cdLayout);
1074 }
1075 return std::nullopt;
1076}
1077
1078xegpu::DistributeLayoutAttr xegpu::getConsumerLayoutAt(OpOperand &operand) {
1079 Operation *op = operand.getOwner();
1080 unsigned idx = operand.getOperandNumber();
1081 xegpu::DistributeLayoutAttr resLayout;
1082 if (op->getNumResults() == 1 && isa<VectorType>(op->getResult(0).getType()))
1083 resLayout = xegpu::getDistributeLayoutAttr(op->getResult(0));
1084
1085 // For vector::BroadcastOp, infer the source layout from the result layout.
1086 if (auto broadcast = dyn_cast<vector::BroadcastOp>(op)) {
1087 if (!resLayout)
1088 return xegpu::DistributeLayoutAttr();
1089 auto srcTy = dyn_cast<VectorType>(broadcast.getSourceType());
1090 if (!srcTy)
1091 return xegpu::DistributeLayoutAttr();
1093 resLayout, broadcast.getResultVectorType().getShape(),
1094 srcTy.getShape());
1095 }
1096
1097 // For vector::MultiDimReductionOp, infer source layout from result layout
1098 // using reduction dims. Acc operand is expected to have the same layout as
1099 // the result.
1100 if (auto reduction = dyn_cast<vector::MultiDimReductionOp>(op)) {
1101 if (!resLayout)
1102 return xegpu::DistributeLayoutAttr();
1103 if (idx == 0) {
1104 SmallVector<int64_t> reductionDims(reduction.getReductionDims());
1105 return xegpu::inferMultiReductionSourceLayout(resLayout, reductionDims);
1106 }
1107 if (idx == 1)
1108 return resLayout;
1109 }
1110
1111 // For vector::BitCastOp, infer source layout from result layout using
1112 // element type bitwidths.
1113 if (auto bitcast = dyn_cast<vector::BitCastOp>(op)) {
1114 if (!resLayout)
1115 return xegpu::DistributeLayoutAttr();
1116 int resElemBitWidth =
1117 bitcast.getResultVectorType().getElementType().getIntOrFloatBitWidth();
1118 int srcElemBitWidth =
1119 bitcast.getSourceVectorType().getElementType().getIntOrFloatBitWidth();
1120 return xegpu::inferBitCastSourceLayout(resLayout, resElemBitWidth,
1121 srcElemBitWidth);
1122 }
1123
1124 // For vector::ShapeCastOp, infer source layout from result layout using
1125 // shapes.
1126 if (auto shapeCast = dyn_cast<vector::ShapeCastOp>(op)) {
1127 if (!resLayout)
1128 return xegpu::DistributeLayoutAttr();
1130 resLayout, shapeCast.getResultVectorType().getShape(),
1131 shapeCast.getSourceVectorType().getShape());
1132 }
1133
1134 // For vector::InsertStridedSliceOp, infer source layout from result layout.
1135 // Dest vector must have the same layout as the result.
1136 if (auto insertSlice = dyn_cast<vector::InsertStridedSliceOp>(op)) {
1137 if (!resLayout)
1138 return xegpu::DistributeLayoutAttr();
1139 if (idx == 0)
1141 resLayout, insertSlice.getDestVectorType().getShape(),
1142 insertSlice.getSourceVectorType().getShape());
1143 if (idx == 1)
1144 return resLayout;
1145 }
1146
1147 // For vector::TransposeOp, infer source layout from result layout using
1148 // permutation.
1149 if (auto transpose = dyn_cast<vector::TransposeOp>(op)) {
1150 if (!resLayout)
1151 return xegpu::DistributeLayoutAttr();
1152 return xegpu::inferTransposeSourceLayout(resLayout,
1153 transpose.getPermutation());
1154 }
1155
1156 // For elementwise operations, all operands must have the same layout as the
1157 // result.
1159 if (!resLayout)
1160 return xegpu::DistributeLayoutAttr();
1161 return resLayout;
1162 }
1163 // TODO: Handle more cases as needed here.
1164 // By default, assume no layout conflict and return the current layout of
1165 // the operand.
1166 return xegpu::getDistributeLayoutAttr(operand.get());
1167}
lhs
static Value broadcast(Location loc, Value toBroadcast, unsigned numElements, const TypeConverter &typeConverter, ConversionPatternRewriter &rewriter)
Broadcasts the value to vector with numElements number of elements.
std::pair< int64_t, int64_t > LayoutRepresentation
static xegpu::DistributeLayoutAttr setupGenericStoreAnchorLayout(xegpu::LayoutKind layoutKind, mlir::MLIRContext *context, bool isChunkedStore, int maxChunkSize, ArrayRef< int64_t > srcShape, int subgroupSize)
Sets up the anchor layout for store scatter and store matrix operation.
static SmallVector< LayoutRepresentation > getValidLayouts(ArrayRef< int64_t > wgShape, ArrayRef< int64_t > instData, int64_t sgCount)
static xegpu::LayoutAttr getDefaultLaneLayout2DBlockIo(RankedTy ty, const xegpu::uArch::uArch *uArch, std::optional< unsigned > packingSize=std::nullopt, bool vnni=false)
static xegpu::DistributeLayoutAttr setupGenericLoadAnchorLayout(xegpu::LayoutKind layoutKind, mlir::MLIRContext *context, xegpu::DistributeLayoutAttr consumerLayout, bool isChunkedLoad, int maxChunkSize, ArrayRef< int64_t > resShape, int subgroupSize)
Sets up the anchor layout for load gather and load matrix operation.
IRValueT get() const
Return the current value being used by this operand.
MLIRContext is the top-level object for a collection of MLIR operations.
Definition MLIRContext.h:63
This class represents an operand of an operation.
Definition Value.h:254
unsigned getOperandNumber()
Return which operand this is in the OpOperand list of the Operation.
Definition Value.cpp:226
This is a value defined by a result of an operation.
Definition Value.h:454
Operation is the basic unit of execution within MLIR.
Definition Operation.h:88
bool hasAttrOfType(NameT &&name)
Definition Operation.h:604
InFlightDiagnostic emitWarning(const Twine &message={})
Emit a warning about this operation, reporting up to any diagnostic handlers that may be listening.
ArrayRef< NamedAttribute > getAttrs()
Return all of the attributes on this operation.
Definition Operation.h:541
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Definition Operation.h:436
MutableArrayRef< OpOperand > getOpOperands()
Definition Operation.h:412
OperationName getName()
The name of an operation is the key identifier for it.
Definition Operation.h:119
std::enable_if_t< llvm::function_traits< std::decay_t< FnT > >::num_args==1, RetT > walk(FnT &&callback)
Walk the operation by calling the callback for each nested operation (including this one),...
Definition Operation.h:826
result_range getOpResults()
Definition Operation.h:449
Attribute removeAttr(StringAttr name)
Remove the attribute with the specified name if it exists.
Definition Operation.h:629
unsigned getNumResults()
Return the number of results held by this operation.
Definition Operation.h:433
Type getType() const
Return the type of this value.
Definition Value.h:105
static WalkResult advance()
Definition WalkResult.h:47
static DenseArrayAttrImpl get(MLIRContext *context, ArrayRef< int64_t > content)
Operation * getOwner() const
Return the owner of this operand.
Definition UseDefLists.h:38
bool hasElementwiseMappableTraits(Operation *op)
Together, Elementwise, Scalarizable, Vectorizable, and Tensorizable provide an easy way for scalar op...
DistributeLayoutAttr inferShapeCastSourceLayout(DistributeLayoutAttr resLayout, ArrayRef< int64_t > resShape, ArrayRef< int64_t > srcShape)
Infers the source layout attribute for a shape cast operation given the result layout attribute,...
DistributeLayoutAttr inferTransposeSourceLayout(DistributeLayoutAttr resLayout, ArrayRef< int64_t > permutation)
Infers the source layout attribute for a transpose operation given the result layout attribute and pe...
SliceAttr setupMultiReductionResultLayout(LayoutKind layoutKind, VectorType srcVectorTy, DistributeLayoutAttr consumerLayout, SmallVector< int64_t > reductionDims, const uArch::uArch *uArch)
Sets up layout for reduction operations by creating a SliceAttr for the result.
DistributeLayoutAttr inferInsertStridedSliceSourceLayout(DistributeLayoutAttr resLayout, ArrayRef< int64_t > resShape, ArrayRef< int64_t > srcShape)
Infers the source layout attribute for an insert strided slice operation given the result layout attr...
void setTemporaryLayout(const T &operandOrResult, const DistributeLayoutAttr layout)
std::optional< std::tuple< DistributeLayoutAttr, DistributeLayoutAttr, DistributeLayoutAttr > > setupDpasLayout(LayoutKind layoutKind, VectorType aTy, VectorType bTy, VectorType cdTy, DistributeLayoutAttr consumerLayout, const uArch::uArch *uArch, int numSg)
Sets up the anchor layouts for a dpas operands (A, B, and C/D).
LayoutKind
Specifies the level of a layout hierarchy for comparison or propagation.
Definition XeGPU.h:32
void setDistributeLayoutAttr(const OpResult &Result, const DistributeLayoutAttr layout)
[to-be-deprecated] Sets the DistributeLayoutAttr for a given OpResult user should use setAnchorLayout...
SmallVector< NamedAttribute > dropInstDataOnAttrs(ArrayRef< NamedAttribute > attrs)
Updates the NamedAttribute sequence by dropping inst-data information from any DistributeLayoutAttr f...
bool matchUnitDimExpansion(ArrayRef< int64_t > src, ArrayRef< int64_t > dst, SmallVector< int64_t > &expandedUnitDims)
DistributeLayoutAttr setupLoadMatrixAnchorLayout(LayoutKind layoutKind, VectorType vectorTy, DistributeLayoutAttr consumerLayout, const uArch::uArch *uArch)
Sets up the anchor layout for load matrix operation.
int getLargestDivisor(T dim, ArrayRef< T > candidates, ArrayRef< T > candidateMultiples={})
Helper Function to find a proper instruction multiple for the user-supplied sg-level data shape (dive...
bool recoverTemporaryLayouts(Operation *rootOp)
Attach layout attributes to all vector-type operands of operations within the given operation's neste...
DistributeLayoutAttr inferBroadcastSourceLayout(DistributeLayoutAttr resLayout, ArrayRef< int64_t > resShape, ArrayRef< int64_t > srcShape)
Infers the source layout attribute for a broadcast operation given the result layout attribute,...
DistributeLayoutAttr setupStoreScatterAnchorLayout(LayoutKind layoutKind, VectorType vectorTy, int chunkSize, const uArch::uArch *uArch)
Sets up the anchor layout for a store scatter operation.
void recoverTemporaryLayoutsDeprecated(Operation *op)
[to-be-deprecated] Set the DistributeLayoutAttr for each OpOperand and OpResult of of the given opera...
bool matchSplitDimExpansion(ArrayRef< int64_t > src, ArrayRef< int64_t > dst, SmallVector< SmallVector< int64_t > > &splitDimGroups)
DistributeLayoutAttr setupBitCastResultLayout(LayoutKind layoutKind, VectorType srcVectorTy, VectorType resVectorTy, DistributeLayoutAttr consumerLayout, const uArch::uArch *uArch)
Setup the result layout attribute for a bitcast operation based on element type bitwidths.
void removeLayoutAttr(const T &operandOrResult)
Removes the LayoutAttr for a given OpOperand or OpResult if it exists.
DistributeLayoutAttr getDistributeLayoutAttr(const Value value)
Retrieves the DistributeLayoutAttr associated with a given Value.
SmallVector< NamedAttribute > dropSgLayoutAndDataOnAttrs(ArrayRef< NamedAttribute > attrs)
Updates the NamedAttribute sequence by dropping sg-layout and sg-data information from any Distribute...
std::string getTemporaryLayoutName(const OpOperand &operand)
Return the attribute name for the OpOperand to attach DistributeLayoutAttr.
DistributeLayoutAttr inferBitCastSourceLayout(DistributeLayoutAttr resLayout, int resElemTyBitWidth, int srcElemTyBitWidth)
Infers the source layout attribute for a bitcast operation given the result layout attribute,...
DistributeLayoutAttr setupInsertStridedSliceResultLayout(LayoutKind layoutKind, VectorType srcVectorTy, VectorType resVectorTy, DistributeLayoutAttr consumerLayout, const uArch::uArch *uArch)
Sets up the result layout for an insert strided slice operation.
xegpu::DistributeLayoutAttr getConsumerLayoutAt(OpOperand &operand)
Gets the expected layout for a given consumer operand.
void removeLayoutAttrs(Operation *op)
Removes the DistributeLayoutAttr for each OpOperand and OpResult of the given operation if they exist...
DistributeLayoutAttr inferMultiReductionSourceLayout(DistributeLayoutAttr resLayout, SmallVector< int64_t > reduceDims)
Infers the source layout attribute for a reduction operation given the result layout attribute and re...
DistributeLayoutAttr setupLoadGatherAnchorLayout(LayoutKind layoutKind, VectorType vectorTy, int chunkSize, DistributeLayoutAttr consumerLayout, const uArch::uArch *uArch)
Sets up the anchor layout for a load gather operation.
DistributeLayoutAttr setupStoreMatrixAnchorLayout(LayoutKind layoutKind, VectorType vectorTy, const uArch::uArch *uArch)
Sets up the anchor layout for a store matrix operation.
Include the generated interface declarations.
detail::DenseArrayAttrImpl< int32_t > DenseI32ArrayAttr
std::optional< SmallVector< int64_t > > computeShapeRatio(ArrayRef< int64_t > shape, ArrayRef< int64_t > subShape)
Return the multi-dimensional integral ratio of subShape to the trailing dimensions of shape.
virtual int getSubgroupSize() const =0
uArch(StringRef name, StringRef description, llvm::ArrayRef< const Instruction * > instructionRegistry)
Definition uArchBase.h:151
const Instruction * getInstruction(InstructionKind instKind) const
Definition uArchBase.h:163