//===-- XeVMToLLVM.cpp - XeVM to LLVM dialect conversion --------*- C++ -*-===//
//
// This file is licensed under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
8
#include "mlir/Conversion/XeVMToLLVM/XeVMToLLVM.h"

#include "mlir/Dialect/LLVMIR/FunctionCallUtils.h"
#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Dialect/LLVMIR/XeVMDialect.h"
#include "mlir/IR/Matchers.h"
#include "mlir/IR/Types.h"
#include "mlir/Pass/Pass.h"
#include "mlir/Support/LLVM.h"

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/Support/FormatVariadic.h"

namespace mlir {
#define GEN_PASS_DEF_CONVERTXEVMTOLLVMPASS
#include "mlir/Conversion/Passes.h.inc"
} // namespace mlir

using namespace mlir;
using namespace xevm;
35
36namespace {
37
38struct LLVMFuncAttributeOptions {
39 bool isConvergent = false;
40 bool isNoUnwind = false;
41 bool isWillReturn = false;
42 LLVM::MemoryEffectsAttr memEffectsAttr{};
43};
44static constexpr LLVMFuncAttributeOptions noUnwindAttrs = {
45 false, true, false, {}};
46static constexpr LLVMFuncAttributeOptions noUnwindWillReturnAttrs = {
47 false, true, true, {}};
48static constexpr LLVMFuncAttributeOptions convergentNoUnwindWillReturnAttrs = {
49 true, true, true, {}};
50
51std::string getTypeMangling(Type ty, bool isUnsigned = false) {
53 .Case([isUnsigned](VectorType ty) -> std::string {
54 return "Dv" + std::to_string(ty.getNumElements()) + "_" +
55 getTypeMangling(ty.getElementType(), isUnsigned);
56 })
57 .Case([](Float16Type) -> std::string { return "Dh"; })
58 .Case([](Float32Type) -> std::string { return "f"; })
59 .Case([](Float64Type) -> std::string { return "d"; })
60 .Case([isUnsigned](IntegerType ty) -> std::string {
61 switch (ty.getWidth()) {
62 case 8:
63 return isUnsigned ? "h" : "c";
64 case 16:
65 return isUnsigned ? "t" : "s";
66 case 32:
67 return isUnsigned ? "j" : "i";
68 case 64:
69 return isUnsigned ? "m" : "l";
70 default:
71 llvm_unreachable("unhandled integer type");
72 }
73 })
74 .DefaultUnreachable("unhandled type for mangling");
75}
76
77std::string mangle(StringRef baseName, ArrayRef<Type> types,
78 ArrayRef<bool> isUnsigned = {}) {
79 assert((isUnsigned.empty() || isUnsigned.size() == types.size()) &&
80 "Signedness info doesn't match");
81 std::string s;
82 llvm::raw_string_ostream os(s);
83 llvm::SmallDenseMap<Type, unsigned> substitutions;
84 os << "_Z" << baseName.size() << baseName;
85 for (auto [idx, type] : llvm::enumerate(types)) {
86 auto it = substitutions.find(type);
87 if (it != substitutions.end()) {
88 os << "S";
89 // First substitution is `S_`, second is `S0_`, and so on.
90 if (unsigned firstIdx = it->getSecond(); firstIdx > 0)
91 os << firstIdx - 1;
92 os << "_";
93 } else {
94 if (!type.isIntOrFloat())
95 substitutions[type] = substitutions.size();
96 os << getTypeMangling(type, isUnsigned.empty() ? false : isUnsigned[idx]);
97 }
98 }
99 return os.str();
100}
101
102std::string builtinElemType(ElemType elemType) {
103 switch (elemType) {
104 case ElemType::BF8:
105 return "bf8";
106 case ElemType::F8:
107 return "hf8";
108 case ElemType::BF16:
109 return "bf";
110 case ElemType::F16:
111 return "hf";
112 case ElemType::F32:
113 return "f";
114 default:
115 return stringifyElemType(elemType).str();
116 }
117}
118
119static int32_t getL1CacheControl(LoadCacheControl cc) {
120 int32_t control = 0;
121 switch (cc) {
122 case LoadCacheControl::USE_DEFAULT:
123 control = -1;
124 break;
125 case LoadCacheControl::L1C_L2UC_L3UC:
126 case LoadCacheControl::L1C_L2UC_L3C:
127 case LoadCacheControl::L1C_L2C_L3UC:
128 case LoadCacheControl::L1C_L2C_L3C:
129 control = 1;
130 break;
131 case LoadCacheControl::L1S_L2UC_L3UC:
132 case LoadCacheControl::L1S_L2UC_L3C:
133 case LoadCacheControl::L1S_L2C_L3UC:
134 case LoadCacheControl::L1S_L2C_L3C:
135 control = 2;
136 break;
137 case LoadCacheControl::INVALIDATE_READ:
138 control = 3;
139 break;
140 default:
141 break;
142 }
143 return control;
144}
145
146static int32_t getL1CacheControl(StoreCacheControl cc) {
147 int32_t control = 0;
148 switch (cc) {
149 case StoreCacheControl::USE_DEFAULT:
150 control = -1;
151 break;
152 case StoreCacheControl::L1WT_L2UC_L3UC:
153 case StoreCacheControl::L1WT_L2UC_L3WB:
154 case StoreCacheControl::L1WT_L2WB_L3UC:
155 case StoreCacheControl::L1WT_L2WB_L3WB:
156 control = 1;
157 break;
158 case StoreCacheControl::L1WB_L2UC_L3UC:
159 case StoreCacheControl::L1WB_L2WB_L3UC:
160 case StoreCacheControl::L1WB_L2UC_L3WB:
161 control = 2;
162 break;
163 case StoreCacheControl::L1S_L2UC_L3UC:
164 case StoreCacheControl::L1S_L2UC_L3WB:
165 case StoreCacheControl::L1S_L2WB_L3UC:
166 case StoreCacheControl::L1S_L2WB_L3WB:
167 control = 3;
168 break;
169 default:
170 break;
171 }
172 return control;
173}
174
175static int32_t getL3CacheControl(LoadCacheControl cc) {
176 int32_t control = 0;
177 switch (cc) {
178 case LoadCacheControl::USE_DEFAULT:
179 control = -1;
180 break;
181 case LoadCacheControl::L1UC_L2UC_L3C:
182 case LoadCacheControl::L1UC_L2C_L3C:
183 case LoadCacheControl::L1C_L2UC_L3C:
184 case LoadCacheControl::L1C_L2C_L3C:
185 case LoadCacheControl::L1S_L2UC_L3C:
186 case LoadCacheControl::L1S_L2C_L3C:
187 control = 1;
188 break;
189 case LoadCacheControl::INVALIDATE_READ:
190 control = 3;
191 break;
192 default:
193 break;
194 }
195 return control;
196}
197
198static int32_t getL3CacheControl(StoreCacheControl cc) {
199 int32_t control = 0;
200 switch (cc) {
201 case StoreCacheControl::USE_DEFAULT:
202 control = -1;
203 break;
204 case StoreCacheControl::L1UC_L2UC_L3WB:
205 case StoreCacheControl::L1UC_L2WB_L3WB:
206 case StoreCacheControl::L1WT_L2UC_L3WB:
207 case StoreCacheControl::L1WT_L2WB_L3WB:
208 case StoreCacheControl::L1S_L2UC_L3WB:
209 case StoreCacheControl::L1S_L2WB_L3WB:
210 case StoreCacheControl::L1WB_L2UC_L3WB:
211 control = 2;
212 break;
213 default:
214 break;
215 }
216 return control;
217}
218
219static std::optional<LoadCacheControl> getCacheControl(PrefetchOp op) {
220 return op.getCacheControl();
221}
222
223static std::optional<LoadCacheControl> getCacheControl(BlockLoad2dOp op) {
224 return op.getCacheControl();
225}
226
227static std::optional<LoadCacheControl> getCacheControl(BlockLoadOp op) {
228 return op.getCacheControl();
229}
230
231static std::optional<LoadCacheControl> getCacheControl(BlockPrefetch2dOp op) {
232 return op.getCacheControl();
233}
234
235static std::optional<StoreCacheControl> getCacheControl(BlockStore2dOp op) {
236 return op.getCacheControl();
237}
238
239static std::optional<StoreCacheControl> getCacheControl(BlockStoreOp op) {
240 return op.getCacheControl();
241}
242
243static std::optional<LoadCacheControl> getCacheControl(LLVM::LoadOp op) {
244 if (op->hasAttr("cache_control")) {
245 auto attr = op->getAttrOfType<xevm::LoadCacheControlAttr>("cache_control");
246 if (!attr)
247 return std::nullopt;
248 return std::optional<LoadCacheControl>(attr.getValue());
249 }
250 return std::nullopt;
251}
252
253static std::optional<StoreCacheControl> getCacheControl(LLVM::StoreOp op) {
254 if (op->hasAttr("cache_control")) {
255 auto attr = op->getAttrOfType<xevm::StoreCacheControlAttr>("cache_control");
256 if (!attr)
257 return std::nullopt;
258 return std::optional<StoreCacheControl>(attr.getValue());
259 }
260 return std::nullopt;
261}
262
263template <typename OpType>
264int32_t getL1CacheControl(OpType op) {
265 return getL1CacheControl(*getCacheControl(op));
266}
267
268template <typename OpType>
269int32_t getL3CacheControl(OpType op) {
270 return getL3CacheControl(*getCacheControl(op));
271}
272
273template <typename OpType>
274static std::optional<ArrayAttr>
275getCacheControlMetadata(ConversionPatternRewriter &rewriter, OpType op) {
276 if (!getCacheControl(op))
277 return {};
278
279 constexpr int32_t decorationCacheControlArity{3};
280 constexpr int32_t loadCacheControlKey{6442};
281 constexpr int32_t storeCacheControlKey{6443};
282 constexpr bool isLoad = std::is_same_v<OpType, BlockLoad2dOp> ||
283 std::is_same_v<OpType, BlockPrefetch2dOp> ||
284 std::is_same_v<OpType, LLVM::LoadOp> ||
285 std::is_same_v<OpType, BlockLoadOp> ||
286 std::is_same_v<OpType, PrefetchOp>;
287
288 // If the cache control is USE_DEFAULT, then we don’t emit any metadata.
289 // Assert that if one of the L1 or L3 cache control values is USE_DEFAULT
290 // (represented as -1), then both must be USE_DEFAULT; otherwise there is a
291 // bug.
292 assert(((getL1CacheControl<OpType>(op) == -1) ==
293 (getL3CacheControl<OpType>(op) == -1)) &&
294 "If one of L1 or L3 cache control is USE_DEFAULT, both must be "
295 "USE_DEFAULT");
296
297 if (getL1CacheControl<OpType>(op) == -1 &&
298 getL3CacheControl<OpType>(op) == -1)
299 return {};
300 const int32_t controlKey{isLoad ? loadCacheControlKey : storeCacheControlKey};
302 controlKey, 0, getL1CacheControl<OpType>(op)};
304 controlKey, 1, getL3CacheControl<OpType>(op)};
305 auto arrayAttrL1 = rewriter.getI32ArrayAttr(decorationsL1);
306 auto arrayAttrL3 = rewriter.getI32ArrayAttr(decorationsL3);
307
308 SmallVector<Attribute, 2> combinedAttrs = {arrayAttrL1, arrayAttrL3};
309 return rewriter.getArrayAttr(combinedAttrs);
310}
311
//===----------------------------------------------------------------------===//
// Cache control annotation utilities
//
// Instead of attaching cache control as MLIR attributes and handling them
// during LLVM translation, we directly emit llvm.intr.ptr.annotation op in
// MLIR.
//===----------------------------------------------------------------------===//
320/// Build one cache-control payload string per attribute.
321///
322/// Each Attribute is expected to be an ArrayAttr of 3 IntegerAttr values:
323/// [SPIR-V decoration token, cache level, cache control value]
324///
325/// A single entry produces a string like: {6442:"0,1"}
326/// where the quote characters (0x22) will appear as \22 in LLVM IR textual
327/// form.
329buildCacheControlPayloads(ArrayRef<Attribute> attrs) {
331 llvm::StringMap<bool> seen;
332
333 for (Attribute a : attrs) {
334 auto arr = dyn_cast<ArrayAttr>(a);
335 if (!arr)
336 continue;
337
338 auto vals = arr.getValue();
339 assert(vals.size() == 3 &&
340 "Expected exactly 3 integer values (Token, CacheLevel, "
341 "ControlValue) in cache control attribute.");
342
343 auto tokenAttr = dyn_cast<IntegerAttr>(vals[0]);
344 auto secondAttr = dyn_cast<IntegerAttr>(vals[1]);
345 auto thirdAttr = dyn_cast<IntegerAttr>(vals[2]);
346
347 if (!tokenAttr || !secondAttr || !thirdAttr)
348 continue;
349
350 // Produce: {SPIR-V decoration token:"L1 cache control,L3 cache control"}
351 // The quote char (0x22) is embedded literally; LLVM IR prints it as \22.
352 std::string entry =
353 llvm::formatv("{{{0}:\"{1},{2}\"}", tokenAttr.getValue().getZExtValue(),
354 secondAttr.getValue().getZExtValue(),
355 thirdAttr.getValue().getZExtValue());
356
357 // Deduplicate identical annotations.
358 if (!seen.insert({entry, true}).second)
359 continue;
360
361 payloads.push_back(std::move(entry));
362 }
363 return payloads;
364}
365/// Counter for generating unique global variable names.
366static std::atomic<uint64_t> globalNameCounter{0};
367
368/// Get or create a global metadata string and return a !llvm.ptr<1> value
369/// pointing to it. The AddressOfOp is created at the current rewriter
370/// insertion point; the GlobalOp is created at the module start.
371static Value createMetadataStringPtr(ConversionPatternRewriter &rewriter,
372 Operation *moduleOp, Location loc,
373 StringRef value, StringRef nameHint) {
374 // Build null-terminated string.
375 std::string strWithNull = value.str();
376 strWithNull.push_back('\0');
377 StringRef strRef(strWithNull.data(), strWithNull.size());
378
379 auto as1PtrTy = LLVM::LLVMPointerType::get(rewriter.getContext(), 1);
380
381 // Search for an existing global with the same content.
382 for (auto &op : moduleOp->getRegion(0).front()) {
383 if (auto existingGlobal = dyn_cast<LLVM::GlobalOp>(&op)) {
384 if (!existingGlobal.getSection() ||
385 *existingGlobal.getSection() != "llvm.metadata")
386 continue;
387 if (auto strAttr =
388 dyn_cast_or_null<StringAttr>(existingGlobal.getValueOrNull())) {
389 if (strAttr.getValue() == strRef) {
390 return LLVM::AddressOfOp::create(rewriter, loc, as1PtrTy,
391 existingGlobal.getSymName());
392 }
393 }
394 }
395 }
396
397 // Create new global at module start.
398 auto i8Type = rewriter.getI8Type();
399 auto arrayType = LLVM::LLVMArrayType::get(i8Type, strWithNull.size());
400 std::string globalName =
401 llvm::formatv("{0}.{1}", nameHint,
402 globalNameCounter.fetch_add(1, std::memory_order_relaxed))
403 .str();
404
405 {
406 OpBuilder::InsertionGuard guard(rewriter);
407 rewriter.setInsertionPointToStart(&moduleOp->getRegion(0).front());
408
409 auto globalOp =
410 LLVM::GlobalOp::create(rewriter, loc, arrayType,
411 /*isConstant=*/true, LLVM::Linkage::Private,
412 globalName, rewriter.getStringAttr(strRef));
413 globalOp.setSection(StringRef("llvm.metadata"));
414 globalOp.setUnnamedAddr(LLVM::UnnamedAddr::Global);
415 globalOp.setAlignment(1);
416 globalOp.setAddrSpace(1);
417 }
418 // InsertionGuard restores the original insertion point here.
419
420 return LLVM::AddressOfOp::create(rewriter, loc, as1PtrTy, globalName);
421}
422
423/// Annotate a pointer value with cache control metadata by emitting chained
424/// `llvm.intr.ptr.annotation` ops (LLVM::PtrAnnotation).
425///
426/// This is the MLIR-level equivalent of handleDecorationCacheControl() from
427/// the LLVM translation layer. For each cache control attribute, it emits:
428///
429/// %ann = llvm.intr.ptr.annotation %ptr, @".str.cachecontrol.N",
430/// @".str.file.N", 0, null : !llvm.ptr<AS>
431///
432/// Multiple annotations are chained: the result of each annotation op is
433/// fed as the pointer input to the next one.
434///
435/// \param rewriter The pattern rewriter.
436/// \param loc Source location for created ops.
437/// \param ptr The pointer value to annotate.
438/// \param cacheControls The cache control ArrayAttr (from
439/// getCacheControlMetadata).
440/// \param moduleOp The enclosing module (for creating globals).
441/// \returns The annotated pointer value (or the original ptr if no
442/// annotations).
443static Value annotatePtrWithCacheControl(ConversionPatternRewriter &rewriter,
444 Location loc, Value ptr,
445 ArrayAttr cacheControls,
446 Operation *moduleOp) {
447 SmallVector<std::string> payloads =
448 buildCacheControlPayloads(cacheControls.getValue());
449 if (payloads.empty())
450 return ptr;
451
452 auto ptrType = cast<LLVM::LLVMPointerType>(ptr.getType());
453 auto as1PtrTy = LLVM::LLVMPointerType::get(rewriter.getContext(), 1);
454 auto i32Ty = rewriter.getI32Type();
455
456 // Create shared constants for all annotations on this pointer.
457 Value fileStr =
458 createMetadataStringPtr(rewriter, moduleOp, loc, "", ".str.file");
459 Value lineVal = LLVM::ConstantOp::create(rewriter, loc, i32Ty, 0);
460 Value nullAS1 = LLVM::ZeroOp::create(rewriter, loc, as1PtrTy);
461
462 // Chain: each annotation takes the result of the previous one as its
463 // pointer operand.
464 Value curPtr = ptr;
465 for (const std::string &payload : payloads) {
466 Value annStr = createMetadataStringPtr(rewriter, moduleOp, loc, payload,
467 ".str.cachecontrol");
468 auto annOp = LLVM::PtrAnnotation::create(rewriter, loc, ptrType, curPtr,
469 annStr, fileStr, lineVal, nullAS1);
470 curPtr = annOp.getResult();
471 }
472
473 return curPtr;
474}
475
476/// Helper to apply cache control annotation on a pointer operand of a call.
477/// Replaces the pointer argument of the call with an annotated version.
478///
479/// For operations that produce a call (like block load/store/prefetch), the
480/// pointer is typically the first argument. This function:
481/// 1. Builds the annotation chain on the pointer.
482/// 2. Replaces the pointer operand in the provided args list.
483///
484/// \param rewriter The pattern rewriter.
485/// \param loc Source location.
486/// \param ptr The original pointer value (first arg to the call).
487/// \param cacheControls The cache control metadata.
488/// \param moduleOp The enclosing module.
489/// \param args The argument list (modified in place: args[ptrIdx] is
490/// replaced).
491/// \param ptrIdx Index of the pointer in the args list (default 0).
492template <typename OpType>
493static void
494applyCacheControlAnnotation(ConversionPatternRewriter &rewriter, Location loc,
495 OpType op, SmallVectorImpl<Value> &args,
496 Operation *moduleOp, unsigned ptrIdx = 0) {
497 std::optional<ArrayAttr> optCacheControls =
498 getCacheControlMetadata(rewriter, op);
499 if (!optCacheControls)
500 return;
501
502 Value annotatedPtr = annotatePtrWithCacheControl(rewriter, loc, args[ptrIdx],
503 *optCacheControls, moduleOp);
504 args[ptrIdx] = annotatedPtr;
505}
506
//===----------------------------------------------------------------------===//
// End cache control annotation utilities
//===----------------------------------------------------------------------===//
511static LLVM::CallOp createDeviceFunctionCall(
512 ConversionPatternRewriter &rewriter, StringRef funcName, Type retType,
513 ArrayRef<Type> argTypes, ArrayRef<Value> args,
514 mlir::ArrayRef<std::pair<unsigned, mlir::StringRef>> paramAttrs,
515 LLVMFuncAttributeOptions funcAttributeOptions, Operation *op) {
516 auto *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
517 assert(moduleOp && "Expecting module");
518 Location loc = op->getLoc();
519
520 auto funcOpRes =
521 LLVM::lookupOrCreateFn(rewriter, moduleOp, funcName, argTypes, retType);
522 assert(!failed(funcOpRes));
523 LLVM::LLVMFuncOp funcOp = funcOpRes.value();
524 funcOp.setCConv(LLVM::cconv::CConv::SPIR_FUNC);
525 funcOp.setConvergent(funcAttributeOptions.isConvergent);
526 funcOp.setNoUnwind(funcAttributeOptions.isNoUnwind);
527 funcOp.setWillReturn(funcAttributeOptions.isWillReturn);
528
529 if (funcAttributeOptions.memEffectsAttr)
530 funcOp.setMemoryEffectsAttr(funcAttributeOptions.memEffectsAttr);
531
532 for (auto [idx, attrName] : paramAttrs)
533 funcOp.setArgAttr(idx, attrName, rewriter.getUnitAttr());
534
535 auto callOp = LLVM::CallOp::create(rewriter, loc, funcOp, args);
536 callOp->setAttrs(funcOp->getAttrs());
537
538 return callOp;
539}
540
541static unsigned getNumOperandsPerDword(xevm::ElemType pTy) {
542 switch (pTy) {
543 case xevm::ElemType::F32:
544 case xevm::ElemType::TF32:
545 return 1;
546 case xevm::ElemType::BF16:
547 case xevm::ElemType::F16:
548 return 2;
549 case xevm::ElemType::U8:
550 case xevm::ElemType::S8:
551 case xevm::ElemType::BF8:
552 case xevm::ElemType::F8:
553 return 4;
554 case xevm::ElemType::E2M1:
555 case xevm::ElemType::U4:
556 case xevm::ElemType::S4:
557 return 8;
558 default:
559 llvm_unreachable("unsupported xevm::ElemType");
560 }
561}
562
563class MMAToOCLPattern : public OpConversionPattern<xevm::MMAOp> {
564 using OpConversionPattern::OpConversionPattern;
565 LogicalResult
566 matchAndRewrite(xevm::MMAOp op, xevm::MMAOp::Adaptor adaptor,
567 ConversionPatternRewriter &rewriter) const override {
568 if (!op.getC()) {
569 return rewriter.notifyMatchFailure(op, "OCL requires C operand");
570 }
571 auto precisionA = op.getTypes().getA();
572 auto precisionB = op.getTypes().getB();
573 auto precisionC = op.getTypes().getC();
574 auto precisionD = op.getTypes().getD();
575 if (precisionC != precisionD) {
576 return rewriter.notifyMatchFailure(op, "type of C and D need to match");
577 }
578 if (precisionC != xevm::ElemType::S32 &&
579 precisionC != xevm::ElemType::F32 &&
580 precisionC != xevm::ElemType::F16 &&
581 precisionC != xevm::ElemType::BF16) {
582 return rewriter.notifyMatchFailure(
583 op, "type of C and D must be S32, F32, F16 or BF16");
584 }
585 if (precisionA == xevm::ElemType::S32 ||
586 precisionA == xevm::ElemType::F32) {
587 return rewriter.notifyMatchFailure(op, "type of A cannot be S32 or F32");
588 }
589 if (precisionB == xevm::ElemType::S32 ||
590 precisionB == xevm::ElemType::F32) {
591 return rewriter.notifyMatchFailure(op, "type of B cannot be S32 or F32");
592 }
593 constexpr uint32_t bitWidthPackedA{16};
594 constexpr uint32_t bitWidthPackedB{32};
595 auto loc = op.getLoc();
596
597 auto castIfNeeded = [&](Value val, Type packedType) -> Value {
598 VectorType origTy = cast<VectorType>(val.getType());
599 const uint32_t vecBitSize =
600 origTy.getNumElements() *
601 origTy.getElementType().getIntOrFloatBitWidth();
602 VectorType newTy = VectorType::get(
603 vecBitSize / packedType.getIntOrFloatBitWidth(), packedType);
604 if (origTy != newTy)
605 val = LLVM::BitcastOp::create(rewriter, loc, newTy, val);
606 return val;
607 };
608
609 Value a = op.getA();
610 Type packedAType = (op.getTypes().getA() == xevm::ElemType::TF32)
611 ? cast<Type>(rewriter.getF32Type())
612 : rewriter.getIntegerType(bitWidthPackedA);
613 a = castIfNeeded(a, packedAType);
614
615 Value b = op.getB();
616 Type packedBType = (op.getTypes().getB() == xevm::ElemType::TF32)
617 ? cast<Type>(rewriter.getF32Type())
618 : rewriter.getIntegerType(bitWidthPackedB);
619 b = castIfNeeded(b, packedBType);
620
621 Value c = op.getC();
622 VectorType cOrigTy = cast<VectorType>(c.getType());
623 VectorType resOrigTy = cast<VectorType>(op->getResultTypes()[0]);
624 assert(cOrigTy == resOrigTy && "Accumulator and result type mismatch");
625 // OCL builtins encode bfloat16 as int16
626 VectorType cTy =
627 cOrigTy.getElementType().isBF16()
628 ? VectorType::get(cOrigTy.getShape(), rewriter.getIntegerType(16))
629 : cOrigTy;
630 VectorType resTy = cTy;
631 if (cOrigTy != cTy)
632 c = LLVM::BitcastOp::create(rewriter, loc, cTy, c);
633
634 constexpr int32_t systolicDepth{8};
635 std::string fnName =
636 llvm::formatv("intel_sub_group_{0}_{1}_matrix_mad_k{2}",
637 stringifyElemType(op.getTypes().getA()).str(),
638 stringifyElemType(op.getTypes().getB()).str(),
639 systolicDepth *
640 getNumOperandsPerDword(op.getTypes().getA()))
641 .str();
642 SmallVector<Type> argTypes{a.getType(), b.getType(), cTy};
643 fnName = mangle(fnName, argTypes);
644 SmallVector<Value> args{a, b, c};
645
646 auto memAttr = rewriter.getAttr<LLVM::MemoryEffectsAttr>(
647 /*other=*/LLVM::ModRefInfo::NoModRef,
648 /*argMem=*/LLVM::ModRefInfo::NoModRef,
649 /*inaccessibleMem=*/LLVM::ModRefInfo::NoModRef,
650 /*errnoMem=*/LLVM::ModRefInfo::NoModRef,
651 /*targetMem0=*/LLVM::ModRefInfo::NoModRef,
652 /*targetMem1=*/LLVM::ModRefInfo::NoModRef);
653 auto funcAttrs = convergentNoUnwindWillReturnAttrs;
654 funcAttrs.memEffectsAttr = memAttr;
655 Value result =
656 createDeviceFunctionCall(rewriter, fnName, resTy, argTypes, args, {},
657 funcAttrs, op.getOperation())
658 ->getResult(0);
659
660 if (resOrigTy != resTy)
661 result = LLVM::BitcastOp::create(rewriter, loc, resOrigTy, result);
662
663 rewriter.replaceOp(op, result);
664 return success();
665 }
666};
667
668class PrefetchToOCLPattern : public OpConversionPattern<PrefetchOp> {
669 using OpConversionPattern::OpConversionPattern;
670 LogicalResult
671 matchAndRewrite(PrefetchOp op, PrefetchOp::Adaptor adaptor,
672 ConversionPatternRewriter &rewriter) const override {
673 auto loc = op.getLoc();
674 auto *moduleOp = op->getParentWithTrait<OpTrait::SymbolTable>();
675
676 const std::string fnName{"_Z8prefetchPU3AS1Kcm"};
677 Value one =
678 LLVM::ConstantOp::create(rewriter, loc, rewriter.getI64Type(), 1);
679 SmallVector<Value> args{op.getPtr(), one};
680
681 // Annotate pointer with cache control before passing to the call.
682 applyCacheControlAnnotation(rewriter, loc, op, args, moduleOp,
683 /*ptrIdx=*/0);
684
685 SmallVector<Type> argTypes;
686 for (auto arg : args)
687 argTypes.push_back(arg.getType());
688 auto funcAttr = noUnwindAttrs;
689 auto memAttr = rewriter.getAttr<LLVM::MemoryEffectsAttr>(
690 /*other=*/LLVM::ModRefInfo::NoModRef,
691 /*argMem=*/LLVM::ModRefInfo::Ref,
692 /*inaccessibleMem=*/LLVM::ModRefInfo::NoModRef,
693 /*errnoMem=*/LLVM::ModRefInfo::NoModRef,
694 /*targetMem0=*/LLVM::ModRefInfo::NoModRef,
695 /*targetMem1=*/LLVM::ModRefInfo::NoModRef);
696 funcAttr.memEffectsAttr = memAttr;
697
698 createDeviceFunctionCall(rewriter, fnName,
699 LLVM::LLVMVoidType::get(rewriter.getContext()),
700 argTypes, args, {}, funcAttr, op.getOperation());
701 rewriter.eraseOp(op);
702 return success();
703 }
704};
705
706class MemfenceToOCLPattern : public OpConversionPattern<MemfenceOp> {
707 using OpConversionPattern::OpConversionPattern;
708 LogicalResult
709 matchAndRewrite(MemfenceOp op, MemfenceOp::Adaptor adaptor,
710 ConversionPatternRewriter &rewriter) const override {
711 auto loc = op.getLoc();
712 const std::string fnName{"atomic_work_item_fence"};
713 int memScope, addrSpace;
714 switch (op.getAddrspace()) {
715 case xevm::AddrSpace::SHARED:
716 addrSpace = 1; // CLK_LOCAL_MEM_FENCE
717 break;
718 case xevm::AddrSpace::GLOBAL:
719 addrSpace = 2; // CLK_GLOBAL_MEM_FENCE
720 break;
721 default:
722 // GENERIC is not supported in OpenCL
723 return rewriter.notifyMatchFailure(
724 op, "Fence only supports global and shared address spaces.");
725 }
726 switch (op.getScope()) {
727 case xevm::MemScope::WORKGROUP:
728 memScope = 1;
729 break;
730 case xevm::MemScope::DEVICE:
731 memScope = 2;
732 break;
733 default:
734 // CLUSTER and SYSTEM are not supported in OpenCL
735 return rewriter.notifyMatchFailure(
736 op, "Fence only supports workgroup and device memory scopes.");
737 }
738 Type i32Type = rewriter.getI32Type();
739 Value acqRel = LLVM::ConstantOp::create(rewriter, loc, i32Type, 4);
740 Value memScopeConst =
741 LLVM::ConstantOp::create(rewriter, loc, i32Type, memScope);
742 Value addrSpaceConst =
743 LLVM::ConstantOp::create(rewriter, loc, i32Type, addrSpace);
744 SmallVector<Value> args{addrSpaceConst, acqRel, memScopeConst};
745 SmallVector<Type> argTypes{3, i32Type};
746 createDeviceFunctionCall(rewriter, mangle(fnName, argTypes),
747 LLVM::LLVMVoidType::get(rewriter.getContext()),
748 argTypes, args, {}, noUnwindAttrs,
749 op.getOperation());
750 rewriter.eraseOp(op);
751 return success();
752 }
753};
754template <typename OpType>
755class LoadStorePrefetchToOCLPattern : public OpConversionPattern<OpType> {
756 using OpConversionPattern<OpType>::OpConversionPattern;
757 LogicalResult
758 matchAndRewrite(OpType op, typename OpType::Adaptor adaptor,
759 ConversionPatternRewriter &rewriter) const override {
760 constexpr bool isLoad = std::is_same_v<OpType, BlockLoad2dOp>;
761 constexpr bool isPrefetch = std::is_same_v<OpType, BlockPrefetch2dOp>;
762
763 auto loc = op.getLoc();
764 auto *moduleOp = op->template getParentWithTrait<OpTrait::SymbolTable>();
765 VectorType vecType;
766 bool packReg = false;
767 bool transpose = false;
768 if constexpr (isLoad) {
769 vecType = op.getRes().getType();
770 packReg = op.getPackRegister();
771 transpose = op.getTranspose();
772 } else if constexpr (!isPrefetch) {
773 vecType = op.getStoredVal().getType();
774 }
775
776 auto i32Type = rewriter.getI32Type();
777 Value byteCoord =
778 LLVM::UndefOp::create(rewriter, loc, VectorType::get(2, i32Type));
779 Value zero = LLVM::ConstantOp::create(rewriter, loc, i32Type, 0);
780 Value one = LLVM::ConstantOp::create(rewriter, loc, i32Type, 1);
781 byteCoord = LLVM::InsertElementOp::create(
782 rewriter, loc, VectorType::get(2, i32Type), byteCoord, op.getX(), zero);
783 byteCoord = LLVM::InsertElementOp::create(
784 rewriter, loc, VectorType::get(2, i32Type), byteCoord, op.getY(), one);
785 SmallVector<Value> args{op.getPtr(), op.getBaseWidth(), op.getBaseHeight(),
786 op.getBasePitch(), byteCoord};
787
788 // Annotate pointer (args[0]) with cache control before the call.
789 applyCacheControlAnnotation(rewriter, loc, op, args, moduleOp,
790 /*ptrIdx=*/0);
791
792 SmallVector<Type> retTypes;
793 Value spvLoadDstPtr;
794 std::string funcName{"intel_sub_group_2d_block_"};
795 std::string bitWidthId;
796 LLVMFuncAttributeOptions funcAttr{noUnwindWillReturnAttrs};
797 SmallVector<std::pair<unsigned, StringRef>, 4> paramAttrs;
798 if constexpr (isPrefetch) { // Prefetch
799 funcName += "prefetch";
800 paramAttrs = {std::make_pair(0, LLVM::LLVMDialect::getNonNullAttrName())};
801 auto memAttr = rewriter.getAttr<LLVM::MemoryEffectsAttr>(
802 /*other=*/LLVM::ModRefInfo::NoModRef,
803 /*argMem=*/LLVM::ModRefInfo::Ref,
804 /*inaccessibleMem=*/LLVM::ModRefInfo::NoModRef,
805 /*errnoMem=*/LLVM::ModRefInfo::NoModRef,
806 /*targetMem0=*/LLVM::ModRefInfo::NoModRef,
807 /*targetMem1=*/LLVM::ModRefInfo::NoModRef);
808 funcAttr = noUnwindAttrs;
809 funcAttr.memEffectsAttr = memAttr;
810 } else {
811 auto vecElemType = vecType.getElementType();
812 auto vecElemBitWidth = vecElemType.getIntOrFloatBitWidth();
813 Value numElems = LLVM::ConstantOp::create(rewriter, loc, i32Type,
814 vecType.getNumElements());
815 auto dstOrSrcPtr = LLVM::AllocaOp::create(
816 rewriter, loc, LLVM::LLVMPointerType::get(rewriter.getContext()),
817 vecElemType, numElems);
818 args.push_back(dstOrSrcPtr);
819 if constexpr (isLoad) { // Load
820 funcName += "read";
821 bitWidthId = getTypeMangling(vecElemType, /*isUnsigned=*/true);
822 if (packReg)
823 funcName += "_transform";
824 else if (transpose)
825 funcName += "_transpose";
826 spvLoadDstPtr = dstOrSrcPtr;
827 retTypes.push_back(vecType);
828 paramAttrs = {
829 std::make_pair(0, LLVM::LLVMDialect::getNonNullAttrName()),
830 std::make_pair(0, LLVM::LLVMDialect::getReadonlyAttrName()),
831 std::make_pair(5, LLVM::LLVMDialect::getNonNullAttrName()),
832 std::make_pair(5, LLVM::LLVMDialect::getWriteOnlyAttrName()),
833 };
834 } else { // Store
835 funcName += "write";
836 bitWidthId = (vecElemBitWidth == 32)
837 ? "j"
838 : ((vecElemBitWidth == 16) ? "t" : "h");
839 LLVM::StoreOp::create(rewriter, loc, op.getStoredVal(), dstOrSrcPtr);
840 paramAttrs = {
841 std::make_pair(0, LLVM::LLVMDialect::getNonNullAttrName()),
842 std::make_pair(0, LLVM::LLVMDialect::getWriteOnlyAttrName()),
843 std::make_pair(5, LLVM::LLVMDialect::getNonNullAttrName()),
844 std::make_pair(5, LLVM::LLVMDialect::getReadonlyAttrName()),
845 };
846 }
847 }
848
849 funcName =
850 llvm::formatv("{0}_{1}b_{2}r{3}x{4}c", funcName, op.getElemSizeInBits(),
851 op.getTileHeight(), op.getTileWidth(), op.getVBlocks())
852 .str();
853 std::string prefetchCode("");
854 if (!isPrefetch)
855 prefetchCode += "P";
856 funcName = llvm::formatv("_Z{0}{1}PU3AS1viiiDv2_i{2}{3}", funcName.size(),
857 funcName, prefetchCode, bitWidthId)
858 .str();
859 SmallVector<Type> argTypes;
860 for (auto arg : args) {
861 argTypes.push_back(arg.getType());
862 }
863 createDeviceFunctionCall(
864 rewriter, funcName, LLVM::LLVMVoidType::get(rewriter.getContext()),
865 argTypes, args, paramAttrs, funcAttr, op.getOperation());
866
867 if constexpr (isLoad)
868 rewriter.replaceOp(
869 op, LLVM::LoadOp::create(rewriter, loc, vecType, spvLoadDstPtr));
870 else
871 rewriter.eraseOp(op);
872 return success();
873 }
874};
875
876template <typename OpType>
877class BlockLoadStore1DToOCLPattern : public OpConversionPattern<OpType> {
878 using OpConversionPattern<OpType>::OpConversionPattern;
879 LogicalResult
880 matchAndRewrite(OpType op, typename OpType::Adaptor adaptor,
881 ConversionPatternRewriter &rewriter) const override {
882 constexpr bool isStore = std::is_same_v<OpType, xevm::BlockStoreOp>;
883 auto loc = op.getLoc();
884 auto *moduleOp = op->template getParentWithTrait<OpTrait::SymbolTable>();
885
886 // Get OpenCL function name
887 // https://registry.khronos.org/OpenCL/extensions/
888 // intel/cl_intel_subgroup_local_block_io.html
889 std::string funcName{"intel_sub_group_block_"};
890 // Value or Result type can be vector or scalar
891 Type valOrResTy;
892 if constexpr (isStore) {
893 funcName += "write_u";
894 valOrResTy = op.getVal().getType();
895 } else {
896 funcName += "read_u";
897 valOrResTy = op.getType();
898 }
899 // Get element type of the vector/scalar
900 VectorType vecTy = dyn_cast<VectorType>(valOrResTy);
901 Type elemType = vecTy ? vecTy.getElementType() : valOrResTy;
902 funcName += getTypeMangling(elemType);
903 if (vecTy)
904 funcName += std::to_string(vecTy.getNumElements());
905 SmallVector<Type, 2> argTypes{};
906 // XeVM BlockLoad/StoreOp always use signless integer types
907 // but OpenCL builtins expect unsigned types
908 // use unsigned types for mangling
909 SmallVector<bool, 2> isUnsigned{};
910 // arg0: pointer to the src/dst address
911 // arg1 - only if store : vector to store
912 // Prepare arguments
913 SmallVector<Value, 2> args{};
914 args.push_back(op.getPtr());
915 argTypes.push_back(op.getPtr().getType());
916 isUnsigned.push_back(true);
917
918 // Annotate pointer (args[0]) with cache control.
919 applyCacheControlAnnotation(rewriter, loc, op, args, moduleOp,
920 /*ptrIdx=*/0);
921 // Update argTypes[0] in case the pointer type changed (it shouldn't
922 // change type, but the value is now the annotated pointer).
923 argTypes[0] = args[0].getType();
924
925 Type retType;
926 if constexpr (isStore) {
927 args.push_back(op.getVal());
928 argTypes.push_back(op.getVal().getType());
929 isUnsigned.push_back(true);
930 retType = LLVM::LLVMVoidType::get(rewriter.getContext());
931 } else {
932 retType = valOrResTy;
933 }
934 funcName = std::string("_Z") + std::to_string(funcName.size()) + funcName +
935 "PU3AS" +
936 std::to_string(op.getPtr().getType().getAddressSpace());
937 funcName += getTypeMangling(elemType, /*isUnsigned=*/true);
938 if constexpr (isStore)
939 funcName += getTypeMangling(valOrResTy, /*isUnsigned=*/true);
940 LLVMFuncAttributeOptions funcAttr{noUnwindWillReturnAttrs};
941
942 LLVM::CallOp call =
943 createDeviceFunctionCall(rewriter, funcName, retType, argTypes, args,
944 {}, funcAttr, op.getOperation());
945
946 if constexpr (isStore)
947 rewriter.eraseOp(op);
948 else
949 rewriter.replaceOp(op, call->getResult(0));
950 return success();
951 }
952};
953
954template <typename OpType>
955class LLVMLoadStoreToOCLPattern : public OpConversionPattern<OpType> {
956 using OpConversionPattern<OpType>::OpConversionPattern;
957 LogicalResult
958 matchAndRewrite(OpType op, typename OpType::Adaptor adaptor,
959 ConversionPatternRewriter &rewriter) const override {
960 if (!op->hasAttr("cache_control"))
961 return failure();
962
963 auto *moduleOp = op->template getParentWithTrait<OpTrait::SymbolTable>();
964 std::optional<ArrayAttr> optCacheControls =
965 getCacheControlMetadata(rewriter, op);
966 if (!optCacheControls) {
967 rewriter.modifyOpInPlace(op, [&]() { op->removeAttr("cache_control"); });
968 return success();
969 }
970
971 // Determine which operand is the pointer.
972 constexpr bool isStore = std::is_same_v<OpType, LLVM::StoreOp>;
973 unsigned ptrIdx = isStore ? 1 : 0;
974 Value ptr = op->getOperand(ptrIdx);
975
976 // Emit annotation intrinsic calls on the pointer.
977 Value annotatedPtr = annotatePtrWithCacheControl(
978 rewriter, op->getLoc(), ptr, *optCacheControls, moduleOp);
979
980 // Replace the pointer operand with the annotated one.
981 rewriter.modifyOpInPlace(op, [&]() {
982 op->setOperand(ptrIdx, annotatedPtr);
983 op->removeAttr("cache_control");
984 });
985 return success();
986 }
987};
988
989//===----------------------------------------------------------------------===//
990// GPU index id operations
991//===----------------------------------------------------------------------===//
992/*
993// Launch Config ops
994// dimidx - x, y, z - is fixed to i32
995// return type is set by XeVM type converter
996// get_local_id
997xevm::WorkitemIdXOp;
998xevm::WorkitemIdYOp;
999xevm::WorkitemIdZOp;
1000// get_local_size
1001xevm::WorkgroupDimXOp;
1002xevm::WorkgroupDimYOp;
1003xevm::WorkgroupDimZOp;
1004// get_group_id
1005xevm::WorkgroupIdXOp;
1006xevm::WorkgroupIdYOp;
1007xevm::WorkgroupIdZOp;
1008// get_num_groups
1009xevm::GridDimXOp;
1010xevm::GridDimYOp;
1011xevm::GridDimZOp;
1012// get_global_id : to be added if needed
1013*/
1014
1015// Helpers to get the OpenCL function name and dimension argument for each op.
1016static std::pair<StringRef, int64_t> getConfig(xevm::WorkitemIdXOp) {
1017 return {"get_local_id", 0};
1018}
1019static std::pair<StringRef, int64_t> getConfig(xevm::WorkitemIdYOp) {
1020 return {"get_local_id", 1};
1021}
1022static std::pair<StringRef, int64_t> getConfig(xevm::WorkitemIdZOp) {
1023 return {"get_local_id", 2};
1024}
1025static std::pair<StringRef, int64_t> getConfig(xevm::WorkgroupDimXOp) {
1026 return {"get_local_size", 0};
1027}
1028static std::pair<StringRef, int64_t> getConfig(xevm::WorkgroupDimYOp) {
1029 return {"get_local_size", 1};
1030}
1031static std::pair<StringRef, int64_t> getConfig(xevm::WorkgroupDimZOp) {
1032 return {"get_local_size", 2};
1033}
1034static std::pair<StringRef, int64_t> getConfig(xevm::WorkgroupIdXOp) {
1035 return {"get_group_id", 0};
1036}
1037static std::pair<StringRef, int64_t> getConfig(xevm::WorkgroupIdYOp) {
1038 return {"get_group_id", 1};
1039}
1040static std::pair<StringRef, int64_t> getConfig(xevm::WorkgroupIdZOp) {
1041 return {"get_group_id", 2};
1042}
1043static std::pair<StringRef, int64_t> getConfig(xevm::GridDimXOp) {
1044 return {"get_num_groups", 0};
1045}
1046static std::pair<StringRef, int64_t> getConfig(xevm::GridDimYOp) {
1047 return {"get_num_groups", 1};
1048}
1049static std::pair<StringRef, int64_t> getConfig(xevm::GridDimZOp) {
1050 return {"get_num_groups", 2};
1051}
1052/// Replace `xevm.*` with an `llvm.call` to the corresponding OpenCL func with
1053/// a constant argument for the dimension - x, y or z.
1054template <typename OpType>
1055class LaunchConfigOpToOCLPattern : public OpConversionPattern<OpType> {
1056 using OpConversionPattern<OpType>::OpConversionPattern;
1057 LogicalResult
1058 matchAndRewrite(OpType op, typename OpType::Adaptor adaptor,
1059 ConversionPatternRewriter &rewriter) const override {
1060 Location loc = op->getLoc();
1061 auto [baseName, dim] = getConfig(op);
1062 Type dimTy = rewriter.getI32Type();
1063 Value dimVal = LLVM::ConstantOp::create(rewriter, loc, dimTy,
1064 static_cast<int64_t>(dim));
1065 std::string func = mangle(baseName, {dimTy}, {true});
1066 Type resTy = op.getType();
1067 auto call =
1068 createDeviceFunctionCall(rewriter, func, resTy, {dimTy}, {dimVal}, {},
1069 noUnwindWillReturnAttrs, op.getOperation());
1070 constexpr auto noModRef = LLVM::ModRefInfo::NoModRef;
1071 auto memAttr = rewriter.getAttr<LLVM::MemoryEffectsAttr>(
1072 /*other=*/noModRef,
1073 /*argMem=*/noModRef, /*inaccessibleMem=*/noModRef,
1074 /*errnoMem=*/noModRef,
1075 /*targetMem0=*/noModRef,
1076 /*targetMem1=*/noModRef);
1077 call.setMemoryEffectsAttr(memAttr);
1078 rewriter.replaceOp(op, call);
1079 return success();
1080 }
1081};
1082
1083/*
1084// Subgroup ops
1085// get_sub_group_local_id
1086xevm::LaneIdOp;
1087// get_sub_group_id
1088xevm::SubgroupIdOp;
1089// get_sub_group_size
1090xevm::SubgroupSizeOp;
1091// get_num_sub_groups : to be added if needed
1092*/
1093
1094// Helpers to get the OpenCL function name for each op.
// get_sub_group_local_id: index of the work-item within its subgroup.
static StringRef getConfig(xevm::LaneIdOp) { return "get_sub_group_local_id"; }
// get_sub_group_id: index of the subgroup within the work-group.
static StringRef getConfig(xevm::SubgroupIdOp) { return "get_sub_group_id"; }
// get_sub_group_size: number of work-items in the current subgroup.
static StringRef getConfig(xevm::SubgroupSizeOp) {
  return "get_sub_group_size";
}
1100template <typename OpType>
1101class SubgroupOpWorkitemOpToOCLPattern : public OpConversionPattern<OpType> {
1102 using OpConversionPattern<OpType>::OpConversionPattern;
1103 LogicalResult
1104 matchAndRewrite(OpType op, typename OpType::Adaptor adaptor,
1105 ConversionPatternRewriter &rewriter) const override {
1106 std::string func = mangle(getConfig(op).str(), {});
1107 Type resTy = op.getType();
1108 auto call =
1109 createDeviceFunctionCall(rewriter, func, resTy, {}, {}, {},
1110 noUnwindWillReturnAttrs, op.getOperation());
1111 constexpr auto noModRef = LLVM::ModRefInfo::NoModRef;
1112 auto memAttr = rewriter.getAttr<LLVM::MemoryEffectsAttr>(
1113 /*other=*/noModRef,
1114 /*argMem=*/noModRef, /*inaccessibleMem=*/noModRef,
1115 /*errnoMem=*/noModRef,
1116 /*targetMem0=*/noModRef,
1117 /*targetMem1=*/noModRef);
1118 call.setMemoryEffectsAttr(memAttr);
1119 rewriter.replaceOp(op, call);
1120 return success();
1121 }
1122};
1123
/// Lower `xevm.truncf` — down-conversion of a 16-element float vector to an
/// 8-bit (fp8) or 4-bit (fp4) float element type — to Intel OpenCL builtins
/// (`__builtin_IB_dnscl_*`, `__builtin_IB_bftof_16`,
/// `__builtin_IB_hftobf8_16`, `__builtin_IB_hftohf8_16`).
class TruncfToOCLPattern : public OpConversionPattern<TruncfOp> {
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(TruncfOp op, TruncfOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // Supported source and result types are restricted for now.
    auto srcEtype = op.getSrcEtype().getEtype();
    auto dstEtype = op.getDstEtype().getEtype();
    // Currently only 16 input elements are supported as
    // - Any vector beyond 16 elements not a valid OpenCL vector.
    // - 2D block load can only load up to 16 16bit elements per lane.
    //   Widest load is 8x16xi32 with 16 lanes, which is 16 16bit
    //   elements per lane.
    // - mma_mx A and B operands need more than 16 elements per lane
    //
    // Conversion is done in batches depending on the dst type.
    // batch_size =
    //   16 if dst type == fp8
    //   8  if dst type == fp4
    // For num_elem > batch_size
    //   convert batch of batch_size
    //   cast batch to i32 elem type vector
    //   concat batches by shufflevector
    // For num_elem = batch_size
    //   use API for conversion
    // Scalar case is not supported until usage case become clear.
    auto vecSrcTy = dyn_cast<VectorType>(op.getSrc().getType());
    if (!vecSrcTy) {
      return rewriter.notifyMatchFailure(op, "Scalar src is not supported.");
    }
    if (vecSrcTy.getNumElements() != 16)
      return rewriter.notifyMatchFailure(
          op, "Only vector src of 16 elements is supported");
    auto vecDstTy = dyn_cast<VectorType>(op.getDst().getType());
    if (!vecDstTy)
      return rewriter.notifyMatchFailure(op, "Scalar dst is not supported.");
    Value src = op.getSrc();
    // The conversion builtins are pure: no memory is read or written.
    auto memAttr = rewriter.getAttr<LLVM::MemoryEffectsAttr>(
        /*other=*/LLVM::ModRefInfo::NoModRef,
        /*argMem=*/LLVM::ModRefInfo::NoModRef,
        /*inaccessibleMem=*/LLVM::ModRefInfo::NoModRef,
        /*errnoMem=*/LLVM::ModRefInfo::NoModRef,
        /*targetMem0=*/LLVM::ModRefInfo::NoModRef,
        /*targetMem1=*/LLVM::ModRefInfo::NoModRef);
    auto funcAttrs = convergentNoUnwindWillReturnAttrs;
    funcAttrs.memEffectsAttr = memAttr;

    // Handle the case where dst type is fp4 first.
    if (dstEtype == TruncfDstElemTypes::E2M1) {
      // Convert 8 elements at a time.
      // To convert 8 elements, vector<8xf16>:
      // Use:
      // uint __builtin_IB_dnscl_hf16(uint, uint, 1, 0)
      // uint __builtin_IB_dnscl_hf16(uint, uint, 1, 3)
      // llvm.or
      // View the 16 f16 lanes as 8 i32 lanes (two f16 per i32).
      Value cast = LLVM::BitcastOp::create(
          rewriter, op.getLoc(), VectorType::get(8, rewriter.getI32Type()),
          src);

      std::string fnName = "__builtin_IB_dnscl_";
      fnName += (srcEtype == TruncfSrcElemTypes::F16) ? "hf16" : "bf16";
      // Emit one __builtin_IB_dnscl_* call: extract lanes idx0/idx1 from
      // `input` and pass them along with the dstTy/mode selector values.
      // NOTE(review): the exact meaning of the dstTy/mode operands follows
      // the Intel builtin contract — confirm against the IGC builtin docs.
      auto genDnscl = [&](Value input, Value idx0, Value idx1, Value dstTy,
                          Value mode) -> Value {
        Value arg1 =
            LLVM::ExtractElementOp::create(rewriter, op.getLoc(), input, idx0)
                ->getResult(0);
        Value arg2 =
            LLVM::ExtractElementOp::create(rewriter, op.getLoc(), input, idx1)
                ->getResult(0);
        SmallVector<Type> argTypes{arg1.getType(), arg2.getType(),
                                   dstTy.getType(), mode.getType()};
        SmallVector<Value> args{arg1, arg2, dstTy, mode};
        Value dnscl = createDeviceFunctionCall(
                          rewriter, fnName, rewriter.getI32Type(), argTypes,
                          args, {}, funcAttrs, op.getOperation())
                          ->getResult(0);
        return dnscl;
      };

      // i32 constants used both as lane indices and as selector operands.
      Value zero = LLVM::ConstantOp::create(rewriter, op.getLoc(),
                                            rewriter.getI32Type(), 0);
      Value one = LLVM::ConstantOp::create(rewriter, op.getLoc(),
                                           rewriter.getI32Type(), 1);
      Value two = LLVM::ConstantOp::create(rewriter, op.getLoc(),
                                           rewriter.getI32Type(), 2);
      Value three = LLVM::ConstantOp::create(rewriter, op.getLoc(),
                                             rewriter.getI32Type(), 3);
      // First 8 source elements -> one packed i32, combined via llvm.or.
      Value even = genDnscl(cast, zero, two, one, zero);
      Value odd = genDnscl(cast, one, three, one, two);
      Value firstHalf = LLVM::OrOp::create(rewriter, op.getLoc(), even, odd);
      Value four = LLVM::ConstantOp::create(rewriter, op.getLoc(),
                                            rewriter.getI32Type(), 4);
      Value five = LLVM::ConstantOp::create(rewriter, op.getLoc(),
                                            rewriter.getI32Type(), 5);
      Value six = LLVM::ConstantOp::create(rewriter, op.getLoc(),
                                           rewriter.getI32Type(), 6);
      Value seven = LLVM::ConstantOp::create(rewriter, op.getLoc(),
                                             rewriter.getI32Type(), 7);
      // Remaining 8 source elements -> second packed i32.
      even = genDnscl(cast, four, six, one, zero);
      odd = genDnscl(cast, five, seven, one, two);
      Value secondHalf = LLVM::OrOp::create(rewriter, op.getLoc(), even, odd);
      // Create vector<2xi32> from two i32 values and then bitcast to
      // vector<8xi8> to match the dst type.
      Value combined = LLVM::UndefOp::create(
          rewriter, op.getLoc(), VectorType::get(2, rewriter.getI32Type()));
      combined = LLVM::InsertElementOp::create(rewriter, op.getLoc(), combined,
                                               firstHalf, zero)
                     ->getResult(0);
      combined = LLVM::InsertElementOp::create(rewriter, op.getLoc(), combined,
                                               secondHalf, one)
                     ->getResult(0);
      Value result =
          LLVM::BitcastOp::create(rewriter, op.getLoc(), vecDstTy, combined);
      rewriter.replaceOp(op, result);
      return success();
    }

    // Handle the case where dst type is fp8.
    // BF16 type needs some preprocessing before conversion,
    // First extended to F32 and then truncated to F16.
    if (srcEtype == TruncfSrcElemTypes::BF16) {
      // Step 1: Extend to F32
      // Use float16 __builtin_IB_bftof_16(short16)
      src = LLVM::BitcastOp::create(
          rewriter, op.getLoc(),
          VectorType::get(vecSrcTy.getShape(), rewriter.getI16Type()), src);
      std::string fnName = "__builtin_IB_bftof_16";
      SmallVector<Type> argTypes{src.getType()};
      SmallVector<Value> args{src};
      Type resTy = VectorType::get(vecSrcTy.getShape(), rewriter.getF32Type());
      src = createDeviceFunctionCall(rewriter, fnName, resTy, argTypes, args,
                                     {}, funcAttrs, op.getOperation())
                ->getResult(0);
      // Step 2: Truncf to F16
      // Use half16 convert_half16(float16)
      std::string truncFnName = "convert_half16";
      SmallVector<Type> truncArgTypes{src.getType()};
      SmallVector<Value> truncArgs{src};
      truncFnName = mangle(truncFnName, truncArgTypes);
      resTy = VectorType::get(vecSrcTy.getShape(), rewriter.getF16Type());
      src =
          createDeviceFunctionCall(rewriter, truncFnName, resTy, truncArgTypes,
                                   truncArgs, {}, funcAttrs, op.getOperation())
              ->getResult(0);
    }
    if (dstEtype == TruncfDstElemTypes::BF8) { // Float8E5M2Type
      // Use char16 __builtin_IB_hftobf8_16(half16)
      std::string fnName = "__builtin_IB_hftobf8_16";
      SmallVector<Type> argTypes{src.getType()};
      SmallVector<Value> args{src};
      Value result =
          createDeviceFunctionCall(rewriter, fnName, vecDstTy, argTypes, args,
                                   {}, funcAttrs, op.getOperation())
              ->getResult(0);

      rewriter.replaceOp(op, result);
    } else if (dstEtype == TruncfDstElemTypes::F8) { // Float8E4M3FNType
      // Use char16 __builtin_IB_hftohf8_16(half16)
      std::string fnName = "__builtin_IB_hftohf8_16";
      SmallVector<Type> argTypes{src.getType()};
      SmallVector<Value> args{src};
      Value result =
          createDeviceFunctionCall(rewriter, fnName, vecDstTy, argTypes, args,
                                   {}, funcAttrs, op.getOperation())
              ->getResult(0);

      rewriter.replaceOp(op, result);
    } else {
      return rewriter.notifyMatchFailure(
          op, "Unsupported src, dst element type pair.");
    }
    return success();
  }
};
1298
/// Lower `xevm.mma_mx` (scaled matrix multiply-accumulate) to the Intel
/// `__builtin_IB_sub_group16_bdpas_*` OpenCL builtin.
class MMAMxToOCLPattern : public OpConversionPattern<MMAMxOp> {
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(MMAMxOp op, MMAMxOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    // The OCL builtin has no accumulator-less form.
    if (!op.getC()) {
      return rewriter.notifyMatchFailure(op, "OCL requires C operand");
    }
    auto precisionC = op.getTypes().getC();
    auto precisionD = op.getTypes().getD();
    if (precisionC != precisionD) {
      return rewriter.notifyMatchFailure(op, "type of C and D need to match");
    }

    // Except for TF32, operand A is repacked to 16-bit elements and operand
    // B to 32-bit elements (same total bit size).
    constexpr uint32_t bitWidthPackedA{16};
    constexpr uint32_t bitWidthPackedB{32};
    auto loc = op.getLoc();

    // Bitcast `val` to a vector of `packedType` preserving total bit size,
    // unless it already has that type.
    auto castIfNeeded = [&](Value val, Type packedType) -> Value {
      VectorType origTy = cast<VectorType>(val.getType());
      const uint32_t vecBitSize =
          origTy.getNumElements() *
          origTy.getElementType().getIntOrFloatBitWidth();
      VectorType newTy = VectorType::get(
          vecBitSize / packedType.getIntOrFloatBitWidth(), packedType);
      if (origTy != newTy)
        val = LLVM::BitcastOp::create(rewriter, loc, newTy, val);
      return val;
    };

    Value a = op.getA();
    Type packedAType = (op.getTypes().getA() == xevm::ElemType::TF32)
                           ? cast<Type>(rewriter.getF32Type())
                           : rewriter.getIntegerType(bitWidthPackedA);
    a = castIfNeeded(a, packedAType);

    Value b = op.getB();
    Type packedBType = (op.getTypes().getB() == xevm::ElemType::TF32)
                           ? cast<Type>(rewriter.getF32Type())
                           : rewriter.getIntegerType(bitWidthPackedB);
    b = castIfNeeded(b, packedBType);

    Value c = op.getC();
    VectorType cOrigTy = cast<VectorType>(c.getType());
    VectorType resOrigTy = cast<VectorType>(op->getResultTypes()[0]);
    assert(cOrigTy == resOrigTy && "Accumulator and result type mismatch");
    // OCL builtins encode bfloat16 as int16
    VectorType cTy =
        cOrigTy.getElementType().isBF16()
            ? VectorType::get(cOrigTy.getShape(), rewriter.getIntegerType(16))
            : cOrigTy;
    VectorType resTy = cTy;
    if (cOrigTy != cTy)
      c = LLVM::BitcastOp::create(rewriter, loc, cTy, c);

    // Builtin name encodes the D/C/A/B element types plus a fixed _8_8
    // suffix.
    std::string fnName =
        llvm::formatv("__builtin_IB_sub_group16_bdpas_{0}_{1}_{2}_{3}_8_8",
                      builtinElemType(op.getTypes().getD()),
                      builtinElemType(op.getTypes().getC()),
                      builtinElemType(op.getTypes().getA()),
                      builtinElemType(op.getTypes().getB()))
            .str();
    auto scaleA = op.getScaleA();
    auto scaleB = op.getScaleB();
    // Argument order: accumulator, A, B, then the two scale operands.
    SmallVector<Type> argTypes{cTy, a.getType(), b.getType(), scaleA.getType(),
                               scaleB.getType()};
    SmallVector<Value> args{c, a, b, scaleA, scaleB};

    // The builtin is pure: no memory effects.
    auto memAttr = rewriter.getAttr<LLVM::MemoryEffectsAttr>(
        /*other=*/LLVM::ModRefInfo::NoModRef,
        /*argMem=*/LLVM::ModRefInfo::NoModRef,
        /*inaccessibleMem=*/LLVM::ModRefInfo::NoModRef,
        /*errnoMem=*/LLVM::ModRefInfo::NoModRef,
        /*targetMem0=*/LLVM::ModRefInfo::NoModRef,
        /*targetMem1=*/LLVM::ModRefInfo::NoModRef);
    auto funcAttrs = convergentNoUnwindWillReturnAttrs;
    funcAttrs.memEffectsAttr = memAttr;
    Value result =
        createDeviceFunctionCall(rewriter, fnName, resTy, argTypes, args, {},
                                 funcAttrs, op.getOperation())
            ->getResult(0);

    // Bitcast back to the original result type (bf16 accumulator case).
    if (resOrigTy != resTy)
      result = LLVM::BitcastOp::create(rewriter, loc, resOrigTy, result);

    rewriter.replaceOp(op, result);
    return success();
  }
};
1388
/// Rewrite `llvm.alloca` in address space 3 (OpenCL Workgroup) into an
/// internal `llvm.mlir.global` plus `llvm.mlir.addressof`, since the SPIRV
/// backend does not handle such allocas properly (see the legality lambda in
/// populateXeVMToLLVMConversionPatterns).
class AllocaToGlobalPattern : public OpConversionPattern<LLVM::AllocaOp> {
  using OpConversionPattern::OpConversionPattern;
  LogicalResult
  matchAndRewrite(LLVM::AllocaOp op, LLVM::AllocaOp::Adaptor adaptor,
                  ConversionPatternRewriter &rewriter) const override {
    auto ptrType = cast<LLVM::LLVMPointerType>(op.getType());
    auto addrSpace = ptrType.getAddressSpace();
    // Only workgroup (addrspace 3) allocas are rewritten.
    if (addrSpace != 3)
      return failure();
    // The global must live in the closest enclosing symbol table, which is
    // expected to be a builtin module or a gpu module.
    auto symTable = op->getParentWithTrait<OpTrait::SymbolTable>();
    if (!symTable)
      return failure();
    Block *moduleBody;
    if (ModuleOp mod = dyn_cast<ModuleOp>(*symTable)) {
      moduleBody = mod.getBody();
    } else if (gpu::GPUModuleOp gpuMod =
                   dyn_cast<gpu::GPUModuleOp>(*symTable)) {
      moduleBody = gpuMod.getBody();
    } else {
      return failure();
    }
    // A global needs a static size: only constant array sizes qualify.
    auto val = op.getArraySize();
    APInt cst;
    if (!matchPattern(val, m_ConstantInt(&cst)))
      return failure();
    auto loc = op.getLoc();
    auto globalType = LLVM::LLVMArrayType::get(
        rewriter.getContext(), op.getElemType(), cst.getZExtValue());
    LLVM::GlobalOp globalVar;
    {
      // Create the global at the start of the module body, outside the
      // current function; the guard restores the insertion point after.
      OpBuilder::InsertionGuard guard(rewriter);
      rewriter.setInsertionPointToStart(moduleBody);
      auto alignment = op.getAlignment();
      globalVar = LLVM::GlobalOp::create(
          rewriter, loc, globalType, /*isConstant=*/false,
          /*linkage=*/LLVM::Linkage::Internal,
          /*name=*/std::string("__global_alloca_") +
              std::to_string(getNextGlobalIdx()),
          /*value=*/Attribute(),
          /*alignment=*/alignment ? *alignment : 0, /*addrSpace=*/addrSpace);
    }
    rewriter.replaceOpWithNewOp<LLVM::AddressOfOp>(op, globalVar);
    return success();
  }

private:
  // Monotonic counter used to give each generated global a unique name.
  // NOTE(review): the function-local static is shared across all pattern
  // instances and is not synchronized — confirm this never runs on multiple
  // modules concurrently, or make the counter a std::atomic.
  static unsigned getNextGlobalIdx() {
    static unsigned globalIdx = 0;
    return globalIdx++;
  }
};
1440
1441// Checks if shufflevector is used as a way to extract a contiguous slice
1442// from a vector.
1443// - source vector V1 and V2 are the same vector.
1444// - mask size is not greater than the source vector size
1445// - mask values represent a sequence of consecutive increasing numbers
1446// that stay in bounds of the source vector when used for indexing.
1447static bool isExtractingContiguousSlice(LLVM::ShuffleVectorOp op) {
1448 if (op.getV1() != op.getV2())
1449 return false;
1450 auto maskAttr = op.getMask();
1451 int64_t maskSize = static_cast<int64_t>(maskAttr.size());
1452 int64_t sourceSize = op.getV1().getType().getNumElements();
1453 if (maskSize > sourceSize)
1454 return false;
1455 int64_t firstIndex = maskAttr[0];
1456 for (int64_t i = 1; i < maskSize; ++i) {
1457 int64_t index = maskAttr[i];
1458 if (index != firstIndex + i)
1459 return false;
1460 if (index >= sourceSize)
1461 return false;
1462 }
1463 return true;
1464}
1465
1466// Input vector of a shuffle vector op extracting a contiguous slice is an
1467// illegal vector in SPIRV kernel if the vector size is > 16 elements.
1468// To legalize this case, keep applying the following transformations until no
1469// more match:
1470// 1. keep hoisting the shuffle vector op past unary element-wise operations
1471// start with fpext, fptrunc and bitcast for now.
1472// 2. merge with another shuffle vector op
1473// 3. merge with load as a smaller load
class HandleVectorExtractPattern
    : public OpRewritePattern<LLVM::ShuffleVectorOp> {
  using OpRewritePattern<LLVM::ShuffleVectorOp>::OpRewritePattern;

  // This pattern re-creates shuffle vector ops it can match again; declaring
  // bounded recursion keeps the greedy driver from flagging it as a loop.
  void initialize() { setHasBoundedRewriteRecursion(); }

  LogicalResult matchAndRewrite(LLVM::ShuffleVectorOp op,
                                PatternRewriter &rewriter) const override {

    if (!isExtractingContiguousSlice(op))
      return failure();

    auto mask = op.getMask();
    auto loc = op.getLoc();
    auto ty = op.getType();
    // Check source operand to determine rewrite pattern.
    auto src = op.getV1();
    // 1. Hoist past unary element-wise operations
    if (auto srcOp = src.getDefiningOp()) {
      if (isa<LLVM::FPExtOp>(srcOp) || isa<LLVM::FPTruncOp>(srcOp)) {
        Value srcInput = srcOp->getOperand(0);
        // Create new shuffle vector op with unary input as source.
        // NOTE(review): the fpext/fptrunc operand is assumed to be a vector;
        // the dyn_cast result is dereferenced unchecked — confirm scalar
        // inputs cannot reach here.
        auto srcVecTy = dyn_cast<VectorType>(srcInput.getType());
        auto newShuffleVecTy =
            VectorType::get(mask.size(), srcVecTy.getElementType());
        auto newShuffle = LLVM::ShuffleVectorOp::create(
            rewriter, loc, newShuffleVecTy, srcInput, srcInput, mask);
        // Create new unary op with new shuffle as input.
        Value newUnaryOp;
        if (isa<LLVM::FPExtOp>(srcOp)) {
          newUnaryOp = LLVM::FPExtOp::create(rewriter, loc, ty, newShuffle);
        } else {
          newUnaryOp = LLVM::FPTruncOp::create(rewriter, loc, ty, newShuffle);
        }
        rewriter.replaceOp(op, newUnaryOp);
      } else if (isa<LLVM::BitcastOp>(srcOp)) {
        Value srcInput = srcOp->getOperand(0);
        // Create new shuffle vector op with the bitcast's input as source.
        auto srcInputVecTy = dyn_cast<VectorType>(srcInput.getType());
        auto srcInputSize = srcInputVecTy.getNumElements();
        auto srcResVecTy = dyn_cast<VectorType>(srcOp->getResult(0).getType());
        auto srcResSize = srcResVecTy.getNumElements();
        auto maskSize = static_cast<int32_t>(mask.size());
        // Only handle bitcasts that widen lanes (input has fewer, wider
        // elements) with an integral element ratio.
        if (srcInputSize > srcResSize) {
          return failure();
        }
        if (srcResSize % srcInputSize != 0) {
          return failure();
        }
        auto maskScale = srcResSize / srcInputSize;
        if (maskScale != 1) {
          // The slice start must fall on an input-element boundary.
          if (mask[0] % maskScale != 0) {
            return failure();
          }
          // Create a new mask that maps to the source vector
          SmallVector<int32_t> newMask;
          int32_t newMaskSize = maskSize / maskScale;
          int32_t maskStart = mask[0] / maskScale;
          for (int32_t i = 0; i < newMaskSize; ++i) {
            newMask.push_back(maskStart + i);
          }
          mask = newMask;
        }
        // NOTE(review): the new shuffle's result type has srcInputSize
        // elements while the rescaled mask may be shorter — confirm this
        // combination is what ShuffleVectorOp expects before the bitcast to
        // `ty` below.
        auto newShuffleVecTy =
            VectorType::get(srcInputSize, srcInputVecTy.getElementType());
        auto newShuffle = LLVM::ShuffleVectorOp::create(
            rewriter, loc, newShuffleVecTy, srcInput, srcInput, mask);
        // Create new unary op with new shuffle as input.
        auto newBitcast =
            LLVM::BitcastOp::create(rewriter, loc, ty, newShuffle);
        rewriter.replaceOp(op, newBitcast);
      } else if (isa<LLVM::ShuffleVectorOp>(srcOp)) {
        // 2. Merge with source shuffle vector op if, the source op is
        // also extracting a contigous slice and create a new
        // shuffle vector op directly from the source of
        // the first shuffle.
        auto srcShuffle = cast<LLVM::ShuffleVectorOp>(srcOp);
        if (!isExtractingContiguousSlice(srcShuffle))
          return failure();
        auto srcMask = srcShuffle.getMask();
        // Compose the two masks: this op's indices select into the source
        // shuffle's mask.
        SmallVector<int32_t> combinedMask;
        for (auto index : mask) {
          combinedMask.push_back(srcMask[index]);
        }
        auto newShuffle = LLVM::ShuffleVectorOp::create(
            rewriter, loc, ty, srcShuffle.getV1(), srcShuffle.getV1(),
            DenseI32ArrayAttr::get(rewriter.getContext(), combinedMask));
        rewriter.replaceOp(op, newShuffle);
      } else if (isa<LLVM::LoadOp>(srcOp)) {
        // 3. Merge with load as a smaller load
        auto loadOp = cast<LLVM::LoadOp>(srcOp);
        auto loadPtr = loadOp.getAddr();
        auto loadAddrSpace = loadPtr.getType().getAddressSpace();
        // Only loads from the default address space are narrowed.
        if (loadAddrSpace != 0)
          return failure();
        auto loadTy = dyn_cast<VectorType>(loadOp.getType());
        auto elemTy = loadTy.getElementType();
        auto firstIndex = mask[0];
        auto newVecTy = VectorType::get(mask.size(), elemTy);
        // GEPOp is needed if first index is not zero
        if (firstIndex) {
          auto newPtr = LLVM::GEPOp::create(
              rewriter, loc,
              LLVM::LLVMPointerType::get(rewriter.getContext(), loadAddrSpace),
              elemTy, loadPtr, ArrayRef<LLVM::GEPArg>{firstIndex});
          auto newLoad = LLVM::LoadOp::create(rewriter, loc, newVecTy, newPtr);
          rewriter.replaceOp(op, newLoad);
        } else {
          auto newLoad = LLVM::LoadOp::create(rewriter, loc, newVecTy, loadPtr);
          rewriter.replaceOp(op, newLoad);
        }
      } else {
        return failure();
      }
    } else {
      // No defining op (e.g. function argument): nothing to hoist/merge.
      return failure();
    }
    return success();
  }
};
1595
1596//===----------------------------------------------------------------------===//
1597// Pass Definition
1598//===----------------------------------------------------------------------===//
1599
1600struct ConvertXeVMToLLVMPass
1601 : public impl::ConvertXeVMToLLVMPassBase<ConvertXeVMToLLVMPass> {
1602 using Base::Base;
1603
1604 void getDependentDialects(DialectRegistry &registry) const override {
1605 registry.insert<LLVM::LLVMDialect, XeVMDialect>();
1606 }
1607
1608 void runOnOperation() override {
1609 ConversionTarget target(getContext());
1610 RewritePatternSet patterns(&getContext());
1612 if (failed(applyPartialConversion(getOperation(), target,
1613 std::move(patterns))))
1614 signalPassFailure();
1615
1616 // Apply in-dialect lowerings to handle illegal vectors
1617 {
1618 RewritePatternSet vectorPatterns(&getContext());
1619 vectorPatterns.add<HandleVectorExtractPattern>(&getContext());
1620 GreedyRewriteConfig config{};
1621 // folding can remove ops with temporary attributes used to
1622 // represent LLVM metadata, so disable it here.
1623 // Effectively just this single pattern is applied without any
1624 // op folding patterns from dialects.
1625 config.enableFolding(false);
1626 // config.setMaxIterations(GreedyRewriteConfig::kNoLimit);
1627 // config.setMaxNumRewrites(GreedyRewriteConfig::kNoLimit);
1628 (void)applyPatternsGreedily(getOperation(), std::move(vectorPatterns),
1629 config);
1630 }
1631 }
1632};
1633} // namespace
1634
1635//===----------------------------------------------------------------------===//
1636// Pattern Population
1637//===----------------------------------------------------------------------===//
1638
1639void ::mlir::populateXeVMToLLVMConversionPatterns(ConversionTarget &target,
1640 RewritePatternSet &patterns) {
1641 // some LLVM operations need to be converted.
1642 target.addDynamicallyLegalDialect<LLVM::LLVMDialect>([](Operation *op) {
1643 // llvm alloca op with addrspace 3 for OpenCL (Workgroup) is not handled
1644 // properly by SPIRV backend. It needs to be rewritten as a sequence with
1645 // llvm global.
1646 if (isa<LLVM::AllocaOp>(op)) {
1647 LLVM::AllocaOp aOp = cast<LLVM::AllocaOp>(op);
1648 LLVM::LLVMPointerType pTy = cast<LLVM::LLVMPointerType>(aOp.getType());
1649 auto addrSpace = pTy.getAddressSpace();
1650 return addrSpace != 3;
1651 }
1652 // cache_control attribute should be converted.
1653 return !op->hasAttr("cache_control");
1654 });
1655 target.addIllegalDialect<XeVMDialect>();
1656 patterns.add<LoadStorePrefetchToOCLPattern<BlockLoad2dOp>,
1657 LoadStorePrefetchToOCLPattern<BlockStore2dOp>,
1658 LoadStorePrefetchToOCLPattern<BlockPrefetch2dOp>,
1659 MMAToOCLPattern, MemfenceToOCLPattern, PrefetchToOCLPattern,
1660 LLVMLoadStoreToOCLPattern<LLVM::LoadOp>,
1661 LLVMLoadStoreToOCLPattern<LLVM::StoreOp>,
1662 BlockLoadStore1DToOCLPattern<BlockLoadOp>,
1663 BlockLoadStore1DToOCLPattern<BlockStoreOp>,
1664 LaunchConfigOpToOCLPattern<WorkitemIdXOp>,
1665 LaunchConfigOpToOCLPattern<WorkitemIdYOp>,
1666 LaunchConfigOpToOCLPattern<WorkitemIdZOp>,
1667 LaunchConfigOpToOCLPattern<WorkgroupDimXOp>,
1668 LaunchConfigOpToOCLPattern<WorkgroupDimYOp>,
1669 LaunchConfigOpToOCLPattern<WorkgroupDimZOp>,
1670 LaunchConfigOpToOCLPattern<WorkgroupIdXOp>,
1671 LaunchConfigOpToOCLPattern<WorkgroupIdYOp>,
1672 LaunchConfigOpToOCLPattern<WorkgroupIdZOp>,
1673 LaunchConfigOpToOCLPattern<GridDimXOp>,
1674 LaunchConfigOpToOCLPattern<GridDimYOp>,
1675 LaunchConfigOpToOCLPattern<GridDimZOp>,
1676 SubgroupOpWorkitemOpToOCLPattern<LaneIdOp>,
1677 SubgroupOpWorkitemOpToOCLPattern<SubgroupIdOp>,
1678 SubgroupOpWorkitemOpToOCLPattern<SubgroupSizeOp>,
1679 TruncfToOCLPattern, MMAMxToOCLPattern, AllocaToGlobalPattern>(
1680 patterns.getContext());
1681}
return success()
LogicalResult initialize(unsigned origNumLoops, ArrayRef< ReassociationIndices > foldedIterationDims)
b
Return true if permutation is a valid permutation of the outer_dims_perm (case OuterOrInnerPerm::Oute...
ArrayAttr()
b getContext())
Attributes are known-constant values of operations.
Definition Attributes.h:25
MLIRContext * getContext() const
Definition Builders.h:56
This class defines the main interface for locations in MLIR and acts as a non-nullable wrapper around...
Definition Location.h:76
RAII guard to reset the insertion point of the builder when destroyed.
Definition Builders.h:350
A trait used to provide symbol table functionalities to a region operation.
Operation is the basic unit of execution within MLIR.
Definition Operation.h:88
Region & getRegion(unsigned index)
Returns the region held by this operation at position 'index'.
Definition Operation.h:712
Operation * getParentWithTrait()
Returns the closest surrounding parent operation with trait Trait.
Definition Operation.h:274
Location getLoc()
The source location the operation was defined or derived from.
Definition Operation.h:241
Block & front()
Definition Region.h:65
MLIRContext * getContext() const
RewritePatternSet & add(ConstructorArg &&arg, ConstructorArgs &&...args)
Add an instance of each of the pattern types 'Ts' to the pattern list with the given arguments.
virtual void replaceOp(Operation *op, ValueRange newValues)
Replace the results of the given (original) operation with the specified list of values (replacements...
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition Types.h:74
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition Value.h:96
Type getType() const
Return the type of this value.
Definition Value.h:105
Operation * getDefiningOp() const
If this value is the result of an operation, return the operation that defines it.
Definition Value.cpp:18
static DenseArrayAttrImpl get(MLIRContext *context, ArrayRef< int32_t > content)
FailureOr< LLVM::LLVMFuncOp > lookupOrCreateFn(OpBuilder &b, Operation *moduleOp, StringRef name, ArrayRef< Type > paramTypes={}, Type resultType={}, bool isVarArg=false, bool isReserved=false, SymbolTableCollection *symbolTables=nullptr)
Create a FuncOp with signature resultType(paramTypes) and name name`.
detail::InFlightRemark failed(Location loc, RemarkOpts opts)
Report an optimization remark that failed.
Definition Remarks.h:717
Include the generated interface declarations.
bool matchPattern(Value value, const Pattern &pattern)
Entry point for matching a pattern over a Value.
Definition Matchers.h:490
detail::constant_int_value_binder m_ConstantInt(IntegerAttr::ValueType *bind_value)
Matches a constant holding a scalar/vector/tensor integer (splat) and writes the integer value to bin...
Definition Matchers.h:527
LogicalResult applyPatternsGreedily(Region &region, const FrozenRewritePatternSet &patterns, GreedyRewriteConfig config=GreedyRewriteConfig(), bool *changed=nullptr)
Rewrite ops in the given region, which must be isolated from above, by repeatedly applying the highes...
void populateXeVMToLLVMConversionPatterns(ConversionTarget &target, RewritePatternSet &patterns)
llvm::TypeSwitch< T, ResultT > TypeSwitch
Definition LLVM.h:139
OpRewritePattern is a wrapper around RewritePattern that allows for matching and rewriting against an...