//===- NVVMDialect.cpp - NVVM IR Ops and Dialect registration -------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the types and operation details for the NVVM IR dialect in
// MLIR, and the LLVM IR dialect. It also registers the dialect.
//
// The NVVM dialect only contains GPU-specific additions on top of the general
// LLVM dialect.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/LLVMIR/NVVMDialect.h"

#include "mlir/IR/Builders.h"
#include "mlir/IR/Diagnostics.h"
#include "mlir/IR/MLIRContext.h"
#include "mlir/IR/Operation.h"
#include "mlir/IR/Types.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/TypeSwitch.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/NVVMIntrinsicUtils.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/FormatVariadic.h"
#include "llvm/Support/NVPTXAddrSpace.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <optional>
#include <string>

using namespace mlir;
using namespace NVVM;

#include "mlir/Dialect/LLVMIR/NVVMOpsDialect.cpp.inc"
#include "mlir/Dialect/LLVMIR/NVVMOpsEnums.cpp.inc"

static constexpr unsigned notIntrinsic = llvm::Intrinsic::not_intrinsic;

//===----------------------------------------------------------------------===//
// Helper/Utility methods
//===----------------------------------------------------------------------===//

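// The NVVM address spaces used below follow the PTX numbering: generic is
// address-space 0, shared::cta is address-space 3, and shared::cluster is
// address-space 7 (see the verifier diagnostics further down).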
static bool isPtrInAddrSpace(mlir::Value ptr, NVVMMemorySpace targetAS) {
  auto ptrTy = llvm::cast<LLVM::LLVMPointerType>(ptr.getType());
  return ptrTy.getAddressSpace() == static_cast<unsigned>(targetAS);
}

static bool isPtrInGenericSpace(mlir::Value ptr) {
  return isPtrInAddrSpace(ptr, NVVMMemorySpace::Generic);
}

static bool isPtrInSharedCTASpace(mlir::Value ptr) {
  return isPtrInAddrSpace(ptr, NVVMMemorySpace::Shared);
}

static bool isPtrInSharedClusterSpace(mlir::Value ptr) {
  return isPtrInAddrSpace(ptr, NVVMMemorySpace::SharedCluster);
}

static llvm::Value *castPtrToAddrSpace(llvm::IRBuilderBase &builder,
                                       llvm::Value *ptr,
                                       NVVMMemorySpace targetAS) {
  unsigned AS = static_cast<unsigned>(targetAS);
  return builder.CreateAddrSpaceCast(
      ptr, llvm::PointerType::get(builder.getContext(), AS));
}

// Helper method to convert CtaGroupKind in the NVVM dialect to CtaGroupKind
// in LLVM.
static llvm::nvvm::CTAGroupKind
getNVVMCtaGroupKind(NVVM::CTAGroupKind ctaGroup) {
  switch (ctaGroup) {
  case NVVM::CTAGroupKind::CTA_1:
    return llvm::nvvm::CTAGroupKind::CG_1;
  case NVVM::CTAGroupKind::CTA_2:
    return llvm::nvvm::CTAGroupKind::CG_2;
  }
  llvm_unreachable("unsupported cta_group value");
}

//===----------------------------------------------------------------------===//
// Verifier methods
//===----------------------------------------------------------------------===//

// This verifier is shared among the following Ops:
// CpAsyncBulkTensorSharedCTAToGlobalOp (TMA Store)
// CpAsyncBulkTensorReduceOp (TMA Store-Reduce)
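// For example, an im2col copy over a 4-d tensor that supplies im2col offsets
// must supply exactly 2 of them (tensorDims - 2).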
static LogicalResult cpAsyncBulkTensorCommonVerifier(size_t tensorDims,
                                                     bool isIm2Col,
                                                     size_t numIm2ColOffsets,
                                                     Location loc) {
  if (tensorDims < 1 || tensorDims > 5)
    return emitError(loc, "expects between 1 and 5 coordinate dimensions");

  // For Im2Col mode, there are two constraints:
  if (isIm2Col) {
    // 1. The tensor must be at least 3-dimensional.
    if (tensorDims < 3)
      return emitError(
          loc,
          "to use im2col mode, the tensor has to be at least 3-dimensional");
    // 2. When Im2ColOffsets are present, there must be exactly (Dims - 2) of
    //    them.
    if (numIm2ColOffsets && (tensorDims != (numIm2ColOffsets + 2)))
      return emitError(
          loc, "im2col offsets must be 2 less than number of coordinates");
  }
  return success();
}

LogicalResult CpAsyncBulkTensorSharedCTAToGlobalOp::verify() {
  TMAStoreMode mode = getMode();
  // We lower through inline-ptx when getPredicate() is true. In that case:
  // a) Only TILE mode is supported.
  // b) Cache-hint is not supported.
  if (getPredicate()) {
    if (mode != TMAStoreMode::TILE)
      return emitError("Inline-ptx lowering supported only for Tile mode.");
    if (getL2CacheHint())
      return emitError("Inline-ptx lowering unsupported with L2 cache-hint.");
  }

  size_t dims = getCoordinates().size();
  switch (mode) {
  case TMAStoreMode::TILE:
    return cpAsyncBulkTensorCommonVerifier(dims, false, 0, getLoc());
  case TMAStoreMode::IM2COL:
    return cpAsyncBulkTensorCommonVerifier(dims, true, 0, getLoc());
  case TMAStoreMode::TILE_SCATTER4:
    if (dims != 5)
      return emitError("Scatter4 mode expects 5 coordinates");
  }
  return success();
}

LogicalResult CpAsyncOp::verify() {
  if (getModifier() != LoadCacheModifierKind::CG &&
      getModifier() != LoadCacheModifierKind::CA)
    return emitError("Only CG and CA cache modifiers are supported.");
  if (getSize() != 4 && getSize() != 8 && getSize() != 16)
    return emitError("expected byte size to be either 4, 8 or 16.");
  if (getModifier() == LoadCacheModifierKind::CG && getSize() != 16)
    return emitError("CG cache modifier is only supported for 16-byte copies.");
  return success();
}

// These checks are shared across the TMA Load and Prefetch Ops.
static LogicalResult verifyTMALoadParams(size_t tensorDims, size_t numIm2colOff,
                                         TMALoadMode mode, Location loc) {
  if (tensorDims < 1 || tensorDims > 5)
    return emitError(loc, "expects between 1 and 5 coordinate dimensions");

  auto checkTMALoadParams = [&](TMALoadMode mode, bool isIm2col,
                                size_t expectedIm2colOff) -> LogicalResult {
    if (isIm2col && (tensorDims < 3))
      return emitError(loc)
             << "to use " << mode
             << " mode, the tensor has to be at least 3-dimensional";

    if (numIm2colOff != expectedIm2colOff)
      return emitError(loc) << "im2col offsets expected " << expectedIm2colOff
                            << " (provided " << numIm2colOff << ")";

    return success();
  };

  switch (mode) {
  case TMALoadMode::TILE:
    return checkTMALoadParams(mode, false, 0);
  case TMALoadMode::IM2COL:
    return checkTMALoadParams(mode, true, tensorDims - 2);
  case TMALoadMode::IM2COL_W:
  case TMALoadMode::IM2COL_W_128:
    return checkTMALoadParams(mode, true, 2);
  case TMALoadMode::TILE_GATHER4:
    return (tensorDims == 5)
               ? checkTMALoadParams(mode, false, 0)
               : emitError(loc, "Gather4 mode expects 5 coordinates");
  }
  return success();
}

LogicalResult CpAsyncBulkTensorPrefetchOp::verify() {
  return verifyTMALoadParams(getCoordinates().size(), getIm2colOffsets().size(),
                             getMode(), getLoc());
}

LogicalResult CpAsyncBulkTensorGlobalToSharedClusterOp::verify() {
  TMALoadMode mode = getMode();
  bool isCTAOnly = getIsCTAOnly();
  if (getPredicate()) { // Inline-asm based lowering
    if (isCTAOnly)
      return emitError("Predicate is supported only for shared::cluster mode.");
    if (mode != TMALoadMode::TILE && mode != TMALoadMode::IM2COL)
      return emitError(
          "Predicate is supported only for Tile and Im2col modes.");
  } else { // Intrinsics-based lowering
    NVVMMemorySpace expectedAS =
        isCTAOnly ? NVVMMemorySpace::Shared : NVVMMemorySpace::SharedCluster;
    unsigned AS = llvm::cast<LLVM::LLVMPointerType>(getDstMem().getType())
                      .getAddressSpace();
    if (AS != static_cast<unsigned>(expectedAS))
      return emitError()
             << (isCTAOnly
                     ? "Shared::cta destination requires address-space 3."
                     : "Shared::cluster destination requires address-space 7.");
    // Checks specific to shared::cta mode.
    if (isCTAOnly) {
      if (getMulticastMask())
        return emitError("Multicast is not supported with shared::cta mode.");
      if (getGroup())
        return emitError("CTAGroup is not supported with shared::cta mode.");
    }
  }

  return verifyTMALoadParams(getCoordinates().size(), getIm2colOffsets().size(),
                             getMode(), getLoc());
}

LogicalResult CpAsyncBulkTensorReduceOp::verify() {
  TMAStoreMode mode = getMode();
  size_t dims = getCoordinates().size();
  switch (mode) {
  case TMAStoreMode::TILE:
    return cpAsyncBulkTensorCommonVerifier(dims, false, 0, getLoc());
  case TMAStoreMode::IM2COL:
    return cpAsyncBulkTensorCommonVerifier(dims, true, 0, getLoc());
  case TMAStoreMode::TILE_SCATTER4:
    return emitError("Scatter mode unsupported for CpAsyncBulkTensorReduceOp");
  }
  return success();
}

LogicalResult CpAsyncBulkGlobalToSharedClusterOp::verify() {
  bool isSharedCTA = isPtrInSharedCTASpace(getDstMem());
  if (isSharedCTA && getMulticastMask())
    return emitError("Multicast is not supported with shared::cta mode.");

  return success();
}

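/// Shared checks for the mbarrier arrive-like family of ops: the scope must
/// be CTA or cluster, and an mbarrier in shared::cluster space must not
/// produce a result.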
static LogicalResult verifyMBarrierArriveLikeOp(Operation *op, Value addr,
                                                NVVM::MemScopeKind scope,
                                                Value retVal = nullptr) {
  if (scope != NVVM::MemScopeKind::CTA && scope != NVVM::MemScopeKind::CLUSTER)
    return op->emitError("mbarrier scope must be either CTA or Cluster");

  bool isSharedCluster = isPtrInSharedClusterSpace(addr);
  bool hasRetValue = static_cast<bool>(retVal);
  if (isSharedCluster && hasRetValue)
    return op->emitError(
        "mbarrier in shared_cluster space cannot return any value");

  return success();
}

LogicalResult MBarrierArriveOp::verify() {
  return verifyMBarrierArriveLikeOp(getOperation(), getAddr(), getScope(),
                                    getRes());
}

LogicalResult MBarrierArriveDropOp::verify() {
  return verifyMBarrierArriveLikeOp(getOperation(), getAddr(), getScope(),
                                    getRes());
}

LogicalResult MBarrierArriveExpectTxOp::verify() {
  // The inline-ptx version of this Op does not support all features.
  // With a predicate, this Op lowers to inline-ptx, so verify and
  // error-out if any unsupported feature is requested.
  if (getPredicate()) {
    if (getScope() != NVVM::MemScopeKind::CTA)
      return emitError("mbarrier scope must be CTA when using predicate");

    if (isPtrInSharedClusterSpace(getAddr()))
      return emitError("mbarrier in shared_cluster space is not supported when "
                       "using predicate");

    if (getRes())
      return emitError("return-value is not supported when using predicate");

    if (getRelaxed())
      return emitError("mbarrier with relaxed semantics is not supported when "
                       "using predicate");
  }
  return verifyMBarrierArriveLikeOp(getOperation(), getAddr(), getScope(),
                                    getRes());
}

LogicalResult MBarrierArriveDropExpectTxOp::verify() {
  return verifyMBarrierArriveLikeOp(getOperation(), getAddr(), getScope(),
                                    getRes());
}

//===----------------------------------------------------------------------===//
// inferReturnTypes for mbarrier arrive-like ops
//===----------------------------------------------------------------------===//

/// Only shared_cluster (ptr<7>) produces zero results; all other address
/// spaces (including generic) return i64.
static LogicalResult
inferMBarrierArriveResultTypes(MLIRContext *context, Value addr,
                               SmallVectorImpl<Type> &inferredReturnTypes) {
  if (!isPtrInSharedClusterSpace(addr))
    inferredReturnTypes.push_back(IntegerType::get(context, 64));
  return success();
}

LogicalResult
MBarrierArriveOp::inferReturnTypes(MLIRContext *context,
                                   std::optional<Location> location,
                                   MBarrierArriveOp::Adaptor adaptor,
                                   SmallVectorImpl<Type> &inferredReturnTypes) {
  return inferMBarrierArriveResultTypes(context, adaptor.getAddr(),
                                        inferredReturnTypes);
}

LogicalResult MBarrierArriveDropOp::inferReturnTypes(
    MLIRContext *context, std::optional<Location> location,
    MBarrierArriveDropOp::Adaptor adaptor,
    SmallVectorImpl<Type> &inferredReturnTypes) {
  return inferMBarrierArriveResultTypes(context, adaptor.getAddr(),
                                        inferredReturnTypes);
}

LogicalResult MBarrierArriveExpectTxOp::inferReturnTypes(
    MLIRContext *context, std::optional<Location> location,
    MBarrierArriveExpectTxOp::Adaptor adaptor,
    SmallVectorImpl<Type> &inferredReturnTypes) {
  // A predicate forces no return value (the inline PTX path).
  // Note: predicate + shared_cluster is rejected by the verifier separately.
  if (adaptor.getPredicate())
    return success();
  return inferMBarrierArriveResultTypes(context, adaptor.getAddr(),
                                        inferredReturnTypes);
}

LogicalResult MBarrierArriveDropExpectTxOp::inferReturnTypes(
    MLIRContext *context, std::optional<Location> location,
    MBarrierArriveDropExpectTxOp::Adaptor adaptor,
    SmallVectorImpl<Type> &inferredReturnTypes) {
  return inferMBarrierArriveResultTypes(context, adaptor.getAddr(),
                                        inferredReturnTypes);
}

/// For ops with optional results, allow the user to omit the result even when
/// inference would produce one. This preserves backward compatibility: the
/// result can be silently discarded (e.g., for fire-and-forget arrive ops).
static bool isCompatibleArriveReturnTypes(TypeRange inferred,
                                          TypeRange actual) {
  if (actual.empty())
    return true;
  return inferred == actual;
}

bool MBarrierArriveOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
  return isCompatibleArriveReturnTypes(l, r);
}
bool MBarrierArriveDropOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
  return isCompatibleArriveReturnTypes(l, r);
}
bool MBarrierArriveExpectTxOp::isCompatibleReturnTypes(TypeRange l,
                                                       TypeRange r) {
  return isCompatibleArriveReturnTypes(l, r);
}
bool MBarrierArriveDropExpectTxOp::isCompatibleReturnTypes(TypeRange l,
                                                           TypeRange r) {
  return isCompatibleArriveReturnTypes(l, r);
}

LogicalResult MBarrierExpectTxOp::verify() {
  return verifyMBarrierArriveLikeOp(getOperation(), getAddr(), getScope());
}

LogicalResult MBarrierCompleteTxOp::verify() {
  return verifyMBarrierArriveLikeOp(getOperation(), getAddr(), getScope());
}

LogicalResult MBarrierTestWaitOp::verify() {
  return verifyMBarrierArriveLikeOp(getOperation(), getAddr(), getScope());
}

LogicalResult MBarrierTryWaitOp::verify() {
  return verifyMBarrierArriveLikeOp(getOperation(), getAddr(), getScope());
}

LogicalResult ConvertFloatToTF32Op::verify() {
  using RndMode = NVVM::FPRoundingMode;
  switch (getRnd()) {
  case RndMode::RNA:
    if (getRelu())
      return emitError("Relu not supported with rna rounding mode.");
    break;
  case RndMode::RN:
  case RndMode::RZ:
    break;
  default:
    return emitError(
        "Only {rn,rz,rna} rounding modes supported for ConvertFloatToTF32Op.");
  }
  return success();
}

LogicalResult ConvertF32x2ToF6x2Op::verify() {
  mlir::MLIRContext *ctx = getContext();

  if (!llvm::isa<mlir::Float6E2M3FNType, mlir::Float6E3M2FNType>(getDstTy())) {
    return emitOpError("Only ")
           << mlir::Float6E2M3FNType::get(ctx) << " and "
           << mlir::Float6E3M2FNType::get(ctx)
           << " types are supported for conversions from f32x2 to f6x2.";
  }
  return success();
}

LogicalResult ConvertF32x2ToF8x2Op::verify() {
  using RndMode = NVVM::FPRoundingMode;
  using SatMode = NVVM::SaturationMode;

  bool isRoundingModeRN = getRnd() == RndMode::RN;
  bool isRoundingModeRZ = getRnd() == RndMode::RZ;
  bool isRoundingModeRP = getRnd() == RndMode::RP;
  bool isSatFinite = getSat() == SatMode::SATFINITE;

  bool hasRelu = getRelu();

  mlir::MLIRContext *ctx = getContext();

  return llvm::TypeSwitch<mlir::Type, LogicalResult>(getDstTy())
      .Case<mlir::Float8E4M3FNType, mlir::Float8E5M2Type>(
          [&](mlir::Type) -> LogicalResult {
            if (!isRoundingModeRN) {
              return emitOpError("Only RN rounding mode is supported for "
                                 "conversions from f32x2 to ")
                     << mlir::Float8E4M3FNType::get(ctx) << " and "
                     << mlir::Float8E5M2Type::get(ctx) << " types";
            }
            if (!isSatFinite) {
              return emitOpError("Only SATFINITE saturation mode is supported "
                                 "for conversions from f32x2 to ")
                     << mlir::Float8E4M3FNType::get(ctx) << " and "
                     << mlir::Float8E5M2Type::get(ctx) << " types";
            }
            return success();
          })
      .Case<mlir::Float8E8M0FNUType>([&](mlir::Type) -> LogicalResult {
        if (!(isRoundingModeRZ || isRoundingModeRP)) {
          return emitOpError("Only RZ and RP rounding modes are supported for "
                             "conversions from f32x2 to ")
                 << mlir::Float8E8M0FNUType::get(ctx) << " type";
        }
        if (hasRelu) {
          return emitOpError("relu not supported for conversions to ")
                 << mlir::Float8E8M0FNUType::get(ctx) << " type";
        }
        return success();
      })
      .Default([&](mlir::Type) {
        return emitOpError("Only ")
               << mlir::Float8E4M3FNType::get(ctx) << ", "
               << mlir::Float8E5M2Type::get(ctx) << ", and "
               << mlir::Float8E8M0FNUType::get(ctx)
               << " types are supported for conversions from f32x2 to f8x2";
      });
}

LogicalResult ConvertF16x2ToF8x2Op::verify() {
  mlir::MLIRContext *ctx = getContext();

  if (!llvm::isa<mlir::Float8E4M3FNType, mlir::Float8E5M2Type>(getDstTy())) {
    return emitOpError("Only ")
           << mlir::Float8E4M3FNType::get(ctx) << " and "
           << mlir::Float8E5M2Type::get(ctx)
           << " types are supported for conversions from f16x2 to f8x2.";
  }
  return success();
}

LogicalResult ConvertBF16x2ToF8x2Op::verify() {
  using RndMode = NVVM::FPRoundingMode;
  using SatMode = NVVM::SaturationMode;

  bool isRoundingModeRN = getRnd() == RndMode::RN;
  bool isRoundingModeRZ = getRnd() == RndMode::RZ;
  bool isRoundingModeRP = getRnd() == RndMode::RP;
  bool isSatFinite = getSat() == SatMode::SATFINITE;
  bool hasRelu = getRelu();

  mlir::MLIRContext *ctx = getContext();

  return llvm::TypeSwitch<mlir::Type, LogicalResult>(getDstTy())
      .Case<mlir::Float8E4M3FNType, mlir::Float8E5M2Type>(
          [&](mlir::Type) -> LogicalResult {
            if (!isRoundingModeRN)
              return emitOpError("Only RN rounding mode is supported for "
                                 "conversions from bf16x2 to ")
                     << mlir::Float8E4M3FNType::get(ctx) << " and "
                     << mlir::Float8E5M2Type::get(ctx) << " types";
            if (!isSatFinite)
              return emitOpError("Only SATFINITE saturation mode is supported "
                                 "for conversions from bf16x2 to ")
                     << mlir::Float8E4M3FNType::get(ctx) << " and "
                     << mlir::Float8E5M2Type::get(ctx) << " types";
            return success();
          })
      .Case<mlir::Float8E8M0FNUType>([&](mlir::Type) -> LogicalResult {
        if (!(isRoundingModeRZ || isRoundingModeRP))
          return emitOpError("Only RZ and RP rounding modes are supported for "
                             "conversions from bf16x2 to ")
                 << mlir::Float8E8M0FNUType::get(ctx) << " type";
        if (hasRelu)
          return emitOpError("relu not supported for conversions to ")
                 << mlir::Float8E8M0FNUType::get(ctx) << " type";
        return success();
      })
      .Default([&](mlir::Type) -> LogicalResult {
        llvm_unreachable("Invalid conversion in ConvertBF16x2ToF8x2Op");
        return failure();
      });
}

LogicalResult ConvertF32x2ToF4x2Op::verify() {
  mlir::MLIRContext *ctx = getContext();

  if (!llvm::isa<mlir::Float4E2M1FNType>(getDstTy()))
    return emitOpError("Only ")
           << mlir::Float4E2M1FNType::get(ctx)
           << " type is supported for conversions from f32x2 to f4x2.";

  return success();
}

LogicalResult ConvertF8x2ToF16x2Op::verify() {
  mlir::MLIRContext *ctx = getContext();

  if (!llvm::isa<Float8E4M3FNType, Float8E5M2Type>(getSrcType()))
    return emitOpError("Only ")
           << mlir::Float8E4M3FNType::get(ctx) << " and "
           << mlir::Float8E5M2Type::get(ctx)
           << " types are supported for conversions from f8x2 to f16x2.";

  return success();
}

LogicalResult ConvertF8x2ToBF16x2Op::verify() {
  mlir::MLIRContext *ctx = getContext();
  if (!llvm::isa<Float8E8M0FNUType>(getSrcType()))
    return emitOpError("Only ")
           << mlir::Float8E8M0FNUType::get(ctx)
           << " type is supported for conversions from f8x2 to bf16x2.";

  return success();
}

LogicalResult ConvertF6x2ToF16x2Op::verify() {
  mlir::MLIRContext *ctx = getContext();

  if (!llvm::isa<Float6E2M3FNType, Float6E3M2FNType>(getSrcType()))
    return emitOpError("Only ")
           << mlir::Float6E2M3FNType::get(ctx) << " and "
           << mlir::Float6E3M2FNType::get(ctx)
           << " types are supported for conversions from f6x2 to f16x2.";

  return success();
}

LogicalResult ConvertF4x2ToF16x2Op::verify() {
  mlir::MLIRContext *ctx = getContext();

  if (!llvm::isa<Float4E2M1FNType>(getSrcType()))
    return emitOpError("Only ")
           << mlir::Float4E2M1FNType::get(ctx)
           << " type is supported for conversions from f4x2 to f16x2.";

  return success();
}

LogicalResult PermuteOp::verify() {
  using Mode = NVVM::PermuteMode;
  bool hasHi = static_cast<bool>(getHi());

  switch (getMode()) {
  case Mode::DEFAULT:
  case Mode::F4E:
  case Mode::B4E:
    if (!hasHi)
      return emitError("mode '") << getMode() << "' requires 'hi' operand.";
    break;
  case Mode::RC8:
  case Mode::ECL:
  case Mode::ECR:
  case Mode::RC16:
    if (hasHi)
      return emitError("mode '")
             << getMode() << "' does not accept 'hi' operand.";
    break;
  }

  return success();
}

//===----------------------------------------------------------------------===//
// Stochastic Rounding Conversion Ops
//===----------------------------------------------------------------------===//

static LogicalResult verifyConvertF32x2ToFP16x2Op(Twine dstType,
                                                  FPRoundingMode rnd,
                                                  bool hasRandomBits,
                                                  Operation *op) {
  static constexpr FPRoundingMode validRndModes[] = {
      FPRoundingMode::RN, FPRoundingMode::RZ, FPRoundingMode::RS};

  if (!llvm::is_contained(validRndModes, rnd)) {
    return op->emitOpError("Only RN, RZ, and RS rounding modes are supported "
                           "for conversions from f32x2 to ")
           << dstType << ".";
  }

  if (rnd == FPRoundingMode::RS) {
    if (!hasRandomBits) {
      return op->emitOpError("random_bits is required for RS rounding mode.");
    }
  } else {
    if (hasRandomBits) {
      return op->emitOpError(
          "random_bits not supported for RN and RZ rounding modes.");
    }
  }

  return success();
}

LogicalResult ConvertF32x2ToF16x2Op::verify() {
  return verifyConvertF32x2ToFP16x2Op("f16x2", getRnd(),
                                      static_cast<bool>(getRandomBits()),
                                      *this);
}

LogicalResult ConvertF32x2ToBF16x2Op::verify() {
  return verifyConvertF32x2ToFP16x2Op("bf16x2", getRnd(),
                                      static_cast<bool>(getRandomBits()),
                                      *this);
}

LogicalResult ConvertF32x4ToF8x4Op::verify() {
  mlir::MLIRContext *ctx = getContext();

  if (!llvm::isa<mlir::Float8E4M3FNType, mlir::Float8E5M2Type>(getDstTy()))
    return emitOpError("Only ")
           << mlir::Float8E4M3FNType::get(ctx) << " and "
           << mlir::Float8E5M2Type::get(ctx)
           << " types are supported for conversions from f32x4 to f8x4.";

  return success();
}

LogicalResult ConvertF32x4ToF6x4Op::verify() {
  mlir::MLIRContext *ctx = getContext();

  if (!llvm::isa<mlir::Float6E2M3FNType, mlir::Float6E3M2FNType>(getDstTy()))
    return emitOpError("Only ")
           << mlir::Float6E2M3FNType::get(ctx) << " and "
           << mlir::Float6E3M2FNType::get(ctx)
           << " types are supported for conversions from f32x4 to f6x4.";

  return success();
}

LogicalResult ConvertF32x4ToF4x4Op::verify() {
  mlir::MLIRContext *ctx = getContext();

  if (!llvm::isa<mlir::Float4E2M1FNType>(getDstTy()))
    return emitOpError("Only ") << mlir::Float4E2M1FNType::get(ctx)
                                << " type is supported for conversions from "
                                   "f32x4 to f4x4.";

  return success();
}

LogicalResult BulkStoreOp::verify() {
  if (getInitVal() != 0)
    return emitOpError("only 0 is supported for initVal, got ") << getInitVal();
  return success();
}

LogicalResult PMEventOp::verify() {
  auto eventId = getEventId();
  auto maskedEventId = getMaskedEventId();
  if (!maskedEventId && !eventId) {
    return emitOpError() << "either `id` or `mask` must be set";
  }

  if (maskedEventId && eventId) {
    return emitOpError() << "`id` and `mask` cannot be set at the same time";
  }

  if (eventId) {
    if (eventId < 0 || eventId > 15) {
      return emitOpError() << "`id` must be between 0 and 15";
    }
  }

  return success();
}

// Given the element type of an operand and whether or not it is an
// accumulator, this function returns the PTX type (`NVVM::MMATypes`) that
// corresponds to the operand's element type.
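// For example, f16 and vector<2xf16> both map to f16, while f32 maps to f32
// when the operand is an accumulator and to tf32 when it is a multiplicand.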
std::optional<mlir::NVVM::MMATypes>
MmaOp::inferOperandMMAType(Type operandElType, bool isAccumulator) {
  auto half2Type =
      VectorType::get(2, Float16Type::get(operandElType.getContext()));
  if (operandElType.isF64())
    return NVVM::MMATypes::f64;
  if (operandElType.isF16() || operandElType == half2Type)
    return NVVM::MMATypes::f16;
  if (operandElType.isF32() && isAccumulator)
    return NVVM::MMATypes::f32;
  if (operandElType.isF32() && !isAccumulator)
    return NVVM::MMATypes::tf32;
  if (llvm::isa<IntegerType>(operandElType)) {
    if (isAccumulator)
      return NVVM::MMATypes::s32;
    return std::nullopt;
  }

  if (auto structType = llvm::dyn_cast<LLVM::LLVMStructType>(operandElType)) {
    if (structType.getBody().empty())
      return std::nullopt;
    return inferOperandMMAType(structType.getBody()[0], isAccumulator);
  }

  return std::nullopt;
}

static bool isInt4PtxType(MMATypes type) {
  return (type == MMATypes::u4 || type == MMATypes::s4);
}

static bool isInt8PtxType(MMATypes type) {
  return (type == MMATypes::u8 || type == MMATypes::s8);
}

static bool isIntegerPtxType(MMATypes type) {
  return isInt4PtxType(type) || isInt8PtxType(type) || type == MMATypes::b1 ||
         type == MMATypes::s32;
}

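/// Returns the PTX type of the accumulator (C) operand, inferred from the
/// element type of its first register.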
MMATypes MmaOp::accumPtxType() {
  std::optional<mlir::NVVM::MMATypes> val = inferOperandMMAType(
      getODSOperands(2).getTypes().front(), /*isAccumulator=*/true);
  assert(val.has_value() && "accumulator PTX type should always be inferrable");
  return val.value();
}

MMATypes MmaOp::resultPtxType() {
  std::optional<mlir::NVVM::MMATypes> val =
      inferOperandMMAType(getResult().getType(), /*isAccumulator=*/true);
  assert(val.has_value() && "result PTX type should always be inferrable");
  return val.value();
}

void MmaOp::print(OpAsmPrinter &p) {
  SmallVector<Type, 4> regTypes;
  struct MMAOperandFragment {
    StringRef operandName;
    StringRef ptxTypeAttr;
    SmallVector<Value, 4> regs;
    explicit MMAOperandFragment(StringRef name, StringRef ptxTypeName)
        : operandName(name), ptxTypeAttr(ptxTypeName) {}
  };

  std::array<MMAOperandFragment, 3> frags{
      MMAOperandFragment("A", getMultiplicandAPtxTypeAttrName()),
      MMAOperandFragment("B", getMultiplicandBPtxTypeAttrName()),
      MMAOperandFragment("C", "")};
  SmallVector<StringRef, 4> ignoreAttrNames{
      mlir::NVVM::MmaOp::getOperandSegmentSizeAttr()};

  for (unsigned fragIdx = 0; fragIdx < frags.size(); fragIdx++) {
    auto &frag = frags[fragIdx];
    auto varOperandSpec = getODSOperandIndexAndLength(fragIdx);
    for (auto operandIdx = varOperandSpec.first;
         operandIdx < varOperandSpec.first + varOperandSpec.second;
         operandIdx++) {
      frag.regs.push_back(this->getOperand(operandIdx));
      if (operandIdx == varOperandSpec.first) {
        regTypes.push_back(this->getOperand(operandIdx).getType());
      }
    }
    std::optional<MMATypes> inferredType = MmaOp::inferOperandMMAType(
        regTypes.back(), /*isAccumulator=*/fragIdx >= 2);
    if (inferredType)
      ignoreAttrNames.push_back(frag.ptxTypeAttr);
  }

  auto printMmaOperand = [&](const MMAOperandFragment &frag) -> void {
    p << " " << frag.operandName;
    p << "[";
    p.printOperands(frag.regs);
    p << "] ";
  };

  for (const auto &frag : frags) {
    printMmaOperand(frag);
  }

  p.printOptionalAttrDict(this->getOperation()->getAttrs(), ignoreAttrNames);

  // Print the types of the operands and result.
  p << " : "
    << "(";
  llvm::interleaveComma(SmallVector<Type, 3>{frags[0].regs[0].getType(),
                                             frags[1].regs[0].getType(),
                                             frags[2].regs[0].getType()},
                        p);
  p << ")";
  p.printArrowTypeList(TypeRange{this->getRes().getType()});
}

void MmaOp::build(OpBuilder &builder, OperationState &result, Type resultType,
                  ValueRange operandA, ValueRange operandB, ValueRange operandC,
                  ArrayRef<int64_t> shape, std::optional<MMAB1Op> b1Op,
                  std::optional<MMAIntOverflow> intOverflow,
                  std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes,
                  std::optional<std::array<MMALayout, 2>> multiplicandLayouts) {

  assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)");
  MLIRContext *ctx = builder.getContext();
  result.addAttribute(
      "shape", builder.getAttr<MMAShapeAttr>(shape[0], shape[1], shape[2]));

  result.addOperands(operandA);
  result.addOperands(operandB);
  result.addOperands(operandC);

  if (multiplicandPtxTypes) {
    result.addAttribute("multiplicandAPtxType",
                        MMATypesAttr::get(ctx, (*multiplicandPtxTypes)[0]));
    result.addAttribute("multiplicandBPtxType",
                        MMATypesAttr::get(ctx, (*multiplicandPtxTypes)[1]));
  } else {
    if (auto res = inferOperandMMAType(operandA[0].getType(), false))
      result.addAttribute("multiplicandAPtxType", MMATypesAttr::get(ctx, *res));
    if (auto res = inferOperandMMAType(operandB[0].getType(), false))
      result.addAttribute("multiplicandBPtxType", MMATypesAttr::get(ctx, *res));
  }

  if (multiplicandLayouts) {
    result.addAttribute("layoutA",
                        MMALayoutAttr::get(ctx, (*multiplicandLayouts)[0]));
    result.addAttribute("layoutB",
                        MMALayoutAttr::get(ctx, (*multiplicandLayouts)[1]));
  } else {
    result.addAttribute("layoutA", MMALayoutAttr::get(ctx, MMALayout::row));
    result.addAttribute("layoutB", MMALayoutAttr::get(ctx, MMALayout::col));
  }

  if (intOverflow.has_value())
    result.addAttribute("intOverflowBehavior",
                        MMAIntOverflowAttr::get(ctx, *intOverflow));
  if (b1Op.has_value())
    result.addAttribute("b1Op", MMAB1OpAttr::get(ctx, *b1Op));

  result.addTypes(resultType);
  result.addAttribute(
      MmaOp::getOperandSegmentSizeAttr(),
      builder.getDenseI32ArrayAttr({static_cast<int32_t>(operandA.size()),
                                    static_cast<int32_t>(operandB.size()),
                                    static_cast<int32_t>(operandC.size())}));
}

// <operation> :=
//   A `[` $operandA `]` B `[` $operandB `]` C `[` $operandC `]`
//   attr-dict : (type($operandA[0]), type($operandB[0]), type($operandC[0]))
//   `->` type($res)
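//
// Illustrative example of this form, for an m16n8k16 f16 MMA:
//   %d = nvvm.mma.sync A[%a0, %a1, %a2, %a3] B[%b0, %b1] C[%c0, %c1]
//     {layoutA = #nvvm.mma_layout<row>, layoutB = #nvvm.mma_layout<col>,
//      shape = #nvvm.shape<m = 16, n = 8, k = 16>}
//     : (vector<2xf16>, vector<2xf16>, vector<2xf16>)
//     -> !llvm.struct<(vector<2xf16>, vector<2xf16>)>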
ParseResult MmaOp::parse(OpAsmParser &parser, OperationState &result) {
  struct MMAOperandFragment {
    std::optional<MMATypes> elemtype;
    SmallVector<OpAsmParser::UnresolvedOperand, 4> regs;
    SmallVector<Type> regTypes;
  };

  Builder &builder = parser.getBuilder();
  std::array<MMAOperandFragment, 4> frags;

  NamedAttrList namedAttributes;

  // A helper to parse the operand segments.
  auto parseMmaOperand = [&](StringRef operandName,
                             MMAOperandFragment &frag) -> LogicalResult {
    if (parser.parseKeyword(operandName).failed())
      return failure();
    if (parser
            .parseOperandList(frag.regs, OpAsmParser::Delimiter::OptionalSquare)
            .failed())
      return failure();
    return success();
  };

  // Parse the operand segments.
  if (parseMmaOperand("A", frags[0]).failed())
    return failure();
  if (parseMmaOperand("B", frags[1]).failed())
    return failure();
  if (parseMmaOperand("C", frags[2]).failed())
    return failure();

  if (parser.parseOptionalAttrDict(namedAttributes).failed())
    return failure();

  // Parse the type specification and resolve operands.
  SmallVector<Type, 3> operandTypes;
  if (failed(parser.parseColon()))
    return failure();
  if (failed(parser.parseLParen()))
    return failure();
  if (failed(parser.parseTypeList(operandTypes)))
    return failure();
  if (failed(parser.parseRParen()))
    return failure();
  if (operandTypes.size() != 3)
    return parser.emitError(
        parser.getNameLoc(),
        "expected one type for each operand segment but got " +
            Twine(operandTypes.size()) + " types");
  for (const auto &iter : llvm::enumerate(operandTypes)) {
    auto &frag = frags[iter.index()];
    frag.regTypes.resize(frag.regs.size(), iter.value());
    if (failed(parser.resolveOperands(frag.regs, frag.regTypes,
                                      parser.getNameLoc(), result.operands)))
      return failure();
    frag.elemtype = inferOperandMMAType(frag.regTypes[0],
                                        /*isAccumulator=*/iter.index() >= 2);
  }

  Type resultType;
  if (parser.parseArrow() || parser.parseType(resultType))
    return failure();
  frags[3].elemtype = inferOperandMMAType(resultType, /*isAccumulator=*/true);

  std::array<StringRef, 2> names{"multiplicandAPtxType",
                                 "multiplicandBPtxType"};
  for (unsigned idx = 0; idx < names.size(); idx++) {
    const auto &frag = frags[idx];
    std::optional<NamedAttribute> attr = namedAttributes.getNamed(names[idx]);
    if (!frag.elemtype.has_value() && !attr.has_value()) {
      return parser.emitError(
          parser.getNameLoc(),
          "attribute " + names[idx] +
              " is not provided explicitly and cannot be inferred");
    }
    if (!attr.has_value())
      result.addAttribute(
          names[idx], MMATypesAttr::get(parser.getContext(), *frag.elemtype));
  }

  result.addTypes(resultType);
  if (!namedAttributes.empty())
    result.addAttributes(namedAttributes);
  result.addAttribute(MmaOp::getOperandSegmentSizeAttr(),
                      builder.getDenseI32ArrayAttr({
                          static_cast<int32_t>(frags[0].regs.size()),
                          static_cast<int32_t>(frags[1].regs.size()),
                          static_cast<int32_t>(frags[2].regs.size()),
                      }));
  return success();
}

LogicalResult MmaOp::verify() {
  MLIRContext *context = getContext();
  auto f16Ty = Float16Type::get(context);
  auto i32Ty = IntegerType::get(context, 32);
  auto f16x2Ty = VectorType::get(2, f16Ty);
  auto f32Ty = Float32Type::get(context);
  auto f16x2x4StructTy = LLVM::LLVMStructType::getLiteral(
      context, {f16x2Ty, f16x2Ty, f16x2Ty, f16x2Ty});

  auto s32x4StructTy =
      LLVM::LLVMStructType::getLiteral(context, {i32Ty, i32Ty, i32Ty, i32Ty});
  auto f32x8StructTy =
      LLVM::LLVMStructType::getLiteral(context, SmallVector<Type>(8, f32Ty));
  auto f16x2x2StructTy =
      LLVM::LLVMStructType::getLiteral(context, {f16x2Ty, f16x2Ty});
  auto f32x4StructTy =
      LLVM::LLVMStructType::getLiteral(context, {f32Ty, f32Ty, f32Ty, f32Ty});
  auto s32x2StructTy =
      LLVM::LLVMStructType::getLiteral(context, {i32Ty, i32Ty});

  std::array<int64_t, 3> mmaShape{getShapeAttr().getM(), getShapeAttr().getN(),
                                  getShapeAttr().getK()};

  // These variables define the set of allowed data types for matrices A, B, C,
  // and result.
  using AllowedShapes = SmallVector<std::array<int64_t, 3>, 2>;
  using AllowedTypes = SmallVector<SmallVector<Type, 4>, 2>;
  AllowedShapes allowedShapes;
  AllowedTypes expectedA;
  AllowedTypes expectedB;
  AllowedTypes expectedC;
  SmallVector<Type> expectedResult;

  // When M = 16, we just need to calculate the number of 8xk tiles, where
  // k is a factor that depends on the data type.
  if (mmaShape[0] == 16) {
    int64_t kFactor;
    Type multiplicandFragType;
    switch (*getMultiplicandAPtxType()) {
    case MMATypes::tf32:
      kFactor = 4;
      multiplicandFragType = i32Ty;
      expectedResult.push_back(LLVM::LLVMStructType::getLiteral(
          context, {f32Ty, f32Ty, f32Ty, f32Ty}));
      break;
    case MMATypes::bf16:
      kFactor = 8;
      multiplicandFragType = i32Ty;
      expectedResult.push_back(LLVM::LLVMStructType::getLiteral(
          context, {f32Ty, f32Ty, f32Ty, f32Ty}));
      break;
    case MMATypes::f16:
      kFactor = 8;
      multiplicandFragType = f16x2Ty;
      expectedResult.push_back(f16x2x2StructTy);
      expectedResult.push_back(f32x4StructTy);
      break;
    case MMATypes::s4:
    case MMATypes::u4:
      kFactor = 32;
      break;
    case MMATypes::b1:
      kFactor = 128;
      break;
    case MMATypes::s8:
    case MMATypes::u8:
      kFactor = 16;
      break;
    default:
      return emitError("invalid shape or multiplicand type: ")
             << getMultiplicandAPtxType().value();
    }

    if (isIntegerPtxType(getMultiplicandAPtxType().value())) {
      expectedResult.push_back(s32x4StructTy);
      expectedC.emplace_back(4, i32Ty);
      multiplicandFragType = i32Ty;
    } else {
      expectedC.emplace_back(2, f16x2Ty);
      expectedC.emplace_back(4, f32Ty);
    }

    int64_t unitA = (mmaShape[0] / 8) * (mmaShape[2] / kFactor);
    int64_t unitB = (mmaShape[1] / 8) * (mmaShape[2] / kFactor);
    expectedA.emplace_back(unitA, multiplicandFragType);
    expectedB.emplace_back(unitB, multiplicandFragType);
    allowedShapes.push_back({16, 8, kFactor});
    allowedShapes.push_back({16, 8, kFactor * 2});

    if (resultPtxType() != accumPtxType())
      return emitOpError("ctype does not match dtype");
  }

  // In the M=8 case, there is only 1 possible case per data type.
  if (mmaShape[0] == 8) {
    if (*getMultiplicandAPtxType() == MMATypes::f16) {
      expectedA.emplace_back(2, f16x2Ty);
      expectedB.emplace_back(2, f16x2Ty);
      expectedResult.push_back(f16x2x4StructTy);
      expectedResult.push_back(f32x8StructTy);
      expectedC.emplace_back(4, f16x2Ty);
      expectedC.emplace_back(8, f32Ty);
      allowedShapes.push_back({8, 8, 4});
    }
    if (*getMultiplicandAPtxType() == MMATypes::f64) {
      Type f64Ty = Float64Type::get(context);
      expectedA.emplace_back(1, f64Ty);
      expectedB.emplace_back(1, f64Ty);
      expectedC.emplace_back(2, f64Ty);
      expectedResult.emplace_back(LLVM::LLVMStructType::getLiteral(
          context, SmallVector<Type>(2, f64Ty)));
      allowedShapes.push_back({8, 8, 4});
    }
    if (isIntegerPtxType(getMultiplicandAPtxType().value())) {
      expectedA.push_back({i32Ty});
      expectedB.push_back({i32Ty});
      expectedC.push_back({i32Ty, i32Ty});
      expectedResult.push_back(s32x2StructTy);
      if (isInt4PtxType(getMultiplicandAPtxType().value()))
        allowedShapes.push_back({8, 8, 32});
      if (isInt8PtxType(getMultiplicandAPtxType().value()))
        allowedShapes.push_back({8, 8, 16});
      if (getMultiplicandAPtxType().value() == MMATypes::b1)
        allowedShapes.push_back({8, 8, 128});
    }
  }

  std::string errorMessage;
  llvm::raw_string_ostream errorStream(errorMessage);

  // Check that we matched an existing shape/dtype combination.
  if (expectedA.empty() || expectedB.empty() || expectedC.empty() ||
      !llvm::is_contained(allowedShapes, mmaShape)) {
    errorStream << "unimplemented variant for MMA shape <";
    llvm::interleaveComma(mmaShape, errorStream);
    errorStream << ">";
    return emitOpError(errorMessage);
  }

  // Verify the operand types for segments of A, B, and C operands.
  std::array<StringRef, 3> operandNames{"A", "B", "C"};
  for (const auto &iter : llvm::enumerate(
           SmallVector<AllowedTypes, 3>{expectedA, expectedB, expectedC})) {
    auto spec = this->getODSOperandIndexAndLength(iter.index());
    SmallVector<Type, 4> operandTySeg(operand_type_begin() + spec.first,
                                      operand_type_begin() + spec.first +
                                          spec.second);
    bool match = llvm::is_contained(iter.value(), operandTySeg);

    if (!match) {
      errorStream << "Could not match types for the "
                  << operandNames[iter.index()]
                  << " operands; expected one of ";
      for (const auto &x : iter.value()) {
        errorStream << x.size() << "x" << x[0] << " ";
      }
      errorStream << "but got ";
      llvm::interleaveComma(operandTySeg, errorStream);
      return emitOpError(errorMessage);
    }
  }

  // Check the result type.
  if (!llvm::any_of(expectedResult, [&](Type expectedResultType) {
        return expectedResultType == getResult().getType();
      })) {
    errorStream
        << "Could not match allowed types for the result; expected one of ";
    llvm::interleaveComma(expectedResult, errorStream);
    errorStream << " but got " << getResult().getType();
    return emitOpError(errorMessage);
  }

  // Ensure that binary MMA variants have a b1 MMA operation defined.
  if (getMultiplicandAPtxType() == MMATypes::b1 && !getB1Op()) {
    return emitOpError("op requires " + getB1OpAttrName().strref() +
                       " attribute");
  }

  // Ensure int4/int8 MMA variants specify the accum overflow behavior
  // attribute.
  if (isInt4PtxType(*getMultiplicandAPtxType()) ||
      isInt8PtxType(*getMultiplicandAPtxType())) {
    if (!getIntOverflowBehavior())
      return emitOpError("op requires " +
                         getIntOverflowBehaviorAttrName().strref() +
                         " attribute");
  }

  // Validate layout combinations. According to the operation description, most
  // MMA operations require layoutA=row and layoutB=col. Only m8n8k4 with f16
  // can use other layout combinations.
  bool isM8N8K4_F16 =
      (mmaShape[0] == 8 && mmaShape[1] == 8 && mmaShape[2] == 4 &&
       getMultiplicandAPtxType() == MMATypes::f16);

  if (!isM8N8K4_F16) {
    // For all other shapes/types, layoutA must be row and layoutB must be col.
    if (getLayoutA() != MMALayout::row || getLayoutB() != MMALayout::col) {
      return emitOpError("requires layoutA = #nvvm.mma_layout<row> and "
                         "layoutB = #nvvm.mma_layout<col> for shape <")
             << mmaShape[0] << ", " << mmaShape[1] << ", " << mmaShape[2]
             << "> with element types " << *getMultiplicandAPtxType() << " and "
             << *getMultiplicandBPtxType()
             << ". Only m8n8k4 with f16 supports other layouts.";
    }
  }

  return success();
}

MMATypes MmaSpOp::accumPtxType() {
  std::optional<mlir::NVVM::MMATypes> val = MmaOp::inferOperandMMAType(
      getODSOperands(2).getTypes().front(), /*isAccumulator=*/true);
  assert(val.has_value() && "accumulator PTX type should always be inferrable");
  return val.value();
}

MMATypes MmaSpOp::resultPtxType() {
  std::optional<mlir::NVVM::MMATypes> val =
      MmaOp::inferOperandMMAType(getResult().getType(), /*isAccumulator=*/true);
  assert(val.has_value() && "result PTX type should always be inferrable");
  return val.value();
}

mlir::NVVM::IDArgPair
MmaSpOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
                               llvm::IRBuilderBase &builder) {
  auto thisOp = cast<NVVM::MmaSpOp>(op);

  // Gather the translated LLVM values of all operands.
  llvm::SmallVector<llvm::Value *> args;
  for (mlir::Value v : thisOp.getOperands())
    args.push_back(mt.lookupValue(v));

  // Get the intrinsic ID using the existing getIntrinsicID method.
  auto intId = MmaSpOp::getIntrinsicID(
      thisOp.getShape().getM(), thisOp.getShape().getN(),
      thisOp.getShape().getK(), thisOp.getIntOverflowBehavior(),
      thisOp.getOrderedMetadata(), thisOp.getKind(),
      *thisOp.getMultiplicandAPtxType(), *thisOp.getMultiplicandBPtxType(),
      thisOp.accumPtxType(), thisOp.resultPtxType());

  return {intId, args};
}

void MmaSpOp::print(OpAsmPrinter &p) {
  SmallVector<Type, 4> regTypes;
  struct MMAOperandFragment {
    StringRef operandName;
    StringRef ptxTypeAttr;
    SmallVector<Value, 4> regs;
    explicit MMAOperandFragment(StringRef name, StringRef ptxTypeName)
        : operandName(name), ptxTypeAttr(ptxTypeName) {}
  };

  std::array<MMAOperandFragment, 5> frags{
      MMAOperandFragment("A", getMultiplicandAPtxTypeAttrName()),
      MMAOperandFragment("B", getMultiplicandBPtxTypeAttrName()),
      MMAOperandFragment("C", ""), MMAOperandFragment("sparseMetadata", ""),
      MMAOperandFragment("selector", "")};
  SmallVector<StringRef, 4> ignoreAttrNames{
      mlir::NVVM::MmaSpOp::getOperandSegmentSizeAttr()};

  // Handle the variadic operand segments A, B, and C.
  for (unsigned fragIdx = 0; fragIdx < 3; fragIdx++) {
    auto &frag = frags[fragIdx];
    auto varOperandSpec = getODSOperandIndexAndLength(fragIdx);
    for (auto operandIdx = varOperandSpec.first;
         operandIdx < varOperandSpec.first + varOperandSpec.second;
         operandIdx++) {
      frag.regs.push_back(this->getOperand(operandIdx));
      if (operandIdx == varOperandSpec.first) {
        regTypes.push_back(this->getOperand(operandIdx).getType());
      }
    }
    std::optional<MMATypes> inferredType = MmaOp::inferOperandMMAType(
        regTypes.back(), /*isAccumulator=*/fragIdx >= 2);
    if (inferredType)
      ignoreAttrNames.push_back(frag.ptxTypeAttr);
  }

  // Handle sparse metadata and selector (single operands).
  frags[3].regs.push_back(getSparseMetadata());
  frags[4].regs.push_back(getSparsitySelector());

  auto printMmaSpOperand = [&](const MMAOperandFragment &frag) -> void {
    p << " " << frag.operandName;
    p << "[";
    p.printOperands(frag.regs);
    p << "]";
  };

  for (const auto &frag : frags)
    printMmaSpOperand(frag);

  p.printOptionalAttrDict((*this)->getAttrs(), ignoreAttrNames);
  p << " : ";
  p << "(";
  for (int i = 0; i < 3; ++i) {
    p << regTypes[i];
    if (i < 2)
      p << ", ";
  }
  p << ") -> " << getResult().getType();
}

void MmaSpOp::build(
    OpBuilder &builder, OperationState &result, Type resultType,
    ValueRange operandA, ValueRange operandB, ValueRange operandC,
    Value sparseMetadata, Value sparsitySelector, ArrayRef<int64_t> shape,
    std::optional<MMAIntOverflow> intOverflow,
    std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes) {

  assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)");
  MLIRContext *ctx = builder.getContext();
  result.addAttribute(
      "shape", builder.getAttr<MMAShapeAttr>(shape[0], shape[1], shape[2]));

  result.addOperands(operandA);
  result.addOperands(operandB);
  result.addOperands(operandC);
  result.addOperands(sparseMetadata);
  result.addOperands(sparsitySelector);

  if (multiplicandPtxTypes) {
    result.addAttribute("multiplicandAPtxType",
                        MMATypesAttr::get(ctx, (*multiplicandPtxTypes)[0]));
    result.addAttribute("multiplicandBPtxType",
                        MMATypesAttr::get(ctx, (*multiplicandPtxTypes)[1]));
  } else {
    if (auto res = MmaOp::inferOperandMMAType(operandA[0].getType(), false))
      result.addAttribute("multiplicandAPtxType", MMATypesAttr::get(ctx, *res));
    if (auto res = MmaOp::inferOperandMMAType(operandB[0].getType(), false))
      result.addAttribute("multiplicandBPtxType", MMATypesAttr::get(ctx, *res));
  }

  if (intOverflow.has_value())
    result.addAttribute("intOverflowBehavior",
                        MMAIntOverflowAttr::get(ctx, *intOverflow));

  result.addTypes(resultType);
  result.addAttribute(
      MmaSpOp::getOperandSegmentSizeAttr(),
      builder.getDenseI32ArrayAttr({static_cast<int32_t>(operandA.size()),
                                    static_cast<int32_t>(operandB.size()),
                                    static_cast<int32_t>(operandC.size()),
                                    /*sparseMetadata=*/1,
                                    /*sparsitySelector=*/1}));
}

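// The textual form mirrors MmaOp, with two extra single-operand segments
// (derived from the printer/parser below):
//   A `[` $operandA `]` B `[` $operandB `]` C `[` $operandC `]`
//   sparseMetadata `[` $sparseMetadata `]` selector `[` $sparsitySelector `]`
//   attr-dict : (type($operandA[0]), type($operandB[0]), type($operandC[0]))
//   `->` type($res)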
ParseResult MmaSpOp::parse(OpAsmParser &parser, OperationState &result) {
  struct MMAOperandFragment {
    std::optional<MMATypes> elemtype;
    SmallVector<OpAsmParser::UnresolvedOperand, 4> regs;
    SmallVector<Type> regTypes;
  };

  Builder &builder = parser.getBuilder();
  // A, B, C, sparseMetadata, selector, and the result fragment.
  std::array<MMAOperandFragment, 6> frags;

  NamedAttrList namedAttributes;

  // A helper to parse the operand segments.
  auto parseMmaSpOperand = [&](StringRef operandName,
                               MMAOperandFragment &frag) -> LogicalResult {
    if (parser.parseKeyword(operandName).failed())
      return failure();
    if (parser
            .parseOperandList(frag.regs, OpAsmParser::Delimiter::OptionalSquare)
            .failed())
      return failure();
    return success();
  };

  // Parse the operand segments.
  if (parseMmaSpOperand("A", frags[0]).failed())
    return failure();
  if (parseMmaSpOperand("B", frags[1]).failed())
    return failure();
  if (parseMmaSpOperand("C", frags[2]).failed())
    return failure();
  if (parseMmaSpOperand("sparseMetadata", frags[3]).failed())
    return failure();
  if (parseMmaSpOperand("selector", frags[4]).failed())
    return failure();

  if (parser.parseOptionalAttrDict(namedAttributes).failed())
    return failure();

  // Parse the type specification and resolve operands.
  SmallVector<Type, 3> operandTypes;
  if (failed(parser.parseColon()))
    return failure();
  if (failed(parser.parseLParen()))
    return failure();
  if (failed(parser.parseTypeList(operandTypes)))
    return failure();
  if (failed(parser.parseRParen()))
    return failure();
  if (operandTypes.size() != 3)
    return parser.emitError(
        parser.getNameLoc(),
        "expected one type for each operand segment but got " +
            Twine(operandTypes.size()) + " types");
  for (const auto &iter : llvm::enumerate(operandTypes)) {
    auto &frag = frags[iter.index()];
    frag.regTypes.resize(frag.regs.size(), iter.value());
    if (failed(parser.resolveOperands(frag.regs, frag.regTypes,
                                      parser.getNameLoc(), result.operands)))
      return failure();
    frag.elemtype =
        MmaOp::inferOperandMMAType(frag.regTypes[0],
                                   /*isAccumulator=*/iter.index() >= 2);
  }

  Type resultType;
  if (parser.parseArrow() || parser.parseType(resultType))
    return failure();
  frags[5].elemtype =
      MmaOp::inferOperandMMAType(resultType, /*isAccumulator=*/true);

  // Resolve sparse metadata and selector (assume i32 type).
  Type i32Type = builder.getIntegerType(32);
  if (parser
          .resolveOperands(frags[3].regs, i32Type, parser.getCurrentLocation(),
                           result.operands)
          .failed())
    return failure();
  if (parser
          .resolveOperands(frags[4].regs, i32Type, parser.getCurrentLocation(),
                           result.operands)
          .failed())
    return failure();

  std::array<StringRef, 2> names{"multiplicandAPtxType",
                                 "multiplicandBPtxType"};
  for (unsigned idx = 0; idx < names.size(); idx++) {
    const auto &frag = frags[idx];
    std::optional<NamedAttribute> attr = namedAttributes.getNamed(names[idx]);
    if (!frag.elemtype.has_value() && !attr.has_value()) {
      return parser.emitError(
          parser.getNameLoc(),
          "attribute " + names[idx] +
              " is not provided explicitly and cannot be inferred");
    }
    if (!attr.has_value())
      result.addAttribute(
          names[idx], MMATypesAttr::get(parser.getContext(), *frag.elemtype));
  }

  result.addTypes(resultType);
  if (!namedAttributes.empty())
    result.addAttributes(namedAttributes);
  result.addAttribute(MmaSpOp::getOperandSegmentSizeAttr(),
                      builder.getDenseI32ArrayAttr({
                          static_cast<int32_t>(frags[0].regs.size()),
                          static_cast<int32_t>(frags[1].regs.size()),
                          static_cast<int32_t>(frags[2].regs.size()),
                          1, // sparseMetadata
                          1  // sparsitySelector
                      }));
  return success();
}

LogicalResult MmaSpOp::verify() {
  MLIRContext *context = getContext();
  auto f16Ty = Float16Type::get(context);
  auto i32Ty = IntegerType::get(context, 32);
  auto f16x2Ty = VectorType::get(2, f16Ty);
  auto f32Ty = Float32Type::get(context);
  auto f16x2x4StructTy = LLVM::LLVMStructType::getLiteral(
      context, {f16x2Ty, f16x2Ty, f16x2Ty, f16x2Ty});

  auto s32x4StructTy =
      LLVM::LLVMStructType::getLiteral(context, {i32Ty, i32Ty, i32Ty, i32Ty});
  auto f32x8StructTy =
      LLVM::LLVMStructType::getLiteral(context, SmallVector<Type>(8, f32Ty));
  auto f16x2x2StructTy =
      LLVM::LLVMStructType::getLiteral(context, {f16x2Ty, f16x2Ty});
  auto f32x4StructTy =
      LLVM::LLVMStructType::getLiteral(context, {f32Ty, f32Ty, f32Ty, f32Ty});
  auto s32x2StructTy =
      LLVM::LLVMStructType::getLiteral(context, {i32Ty, i32Ty});

  std::array<int64_t, 3> mmaShape{getShapeAttr().getM(), getShapeAttr().getN(),
                                  getShapeAttr().getK()};

  // These variables define the set of allowed data types for matrices A, B, C,
  // and result.
  using AllowedShapes = SmallVector<std::array<int64_t, 3>, 2>;
  using AllowedTypes = SmallVector<SmallVector<Type, 4>, 2>;
  AllowedShapes allowedShapes;
  AllowedTypes expectedA;
  AllowedTypes expectedB;
  AllowedTypes expectedC;
  SmallVector<Type> expectedResult;

  // When M = 16, we just need to calculate the number of 8xk tiles, where
  // k is a factor that depends on the data type.
  if (mmaShape[0] == 16) {
    int64_t kFactor;
    Type multiplicandFragType;
    switch (*getMultiplicandAPtxType()) {
    case MMATypes::tf32:
      kFactor = 4;
      multiplicandFragType = i32Ty;
      expectedResult.push_back(LLVM::LLVMStructType::getLiteral(
          context, {f32Ty, f32Ty, f32Ty, f32Ty}));
      // Sparse MMA supports m16n8k8 and m16n8k16 for tf32.
      allowedShapes.push_back({16, 8, 8});
      allowedShapes.push_back({16, 8, 16});
      break;
    case MMATypes::bf16:
      kFactor = 8;
      multiplicandFragType = i32Ty;
      expectedResult.push_back(LLVM::LLVMStructType::getLiteral(
          context, {f32Ty, f32Ty, f32Ty, f32Ty}));
      // Sparse MMA supports m16n8k16 and m16n8k32 for bf16.
      allowedShapes.push_back({16, 8, 16});
      allowedShapes.push_back({16, 8, 32});
      break;
    case MMATypes::f16:
      kFactor = 8;
      multiplicandFragType = f16x2Ty;
      expectedResult.push_back(f16x2x2StructTy);
      expectedResult.push_back(f32x4StructTy);
      // Sparse MMA supports m16n8k16 and m16n8k32 for f16.
      allowedShapes.push_back({16, 8, 16});
      allowedShapes.push_back({16, 8, 32});
      break;
    case MMATypes::s4:
    case MMATypes::u4:
      kFactor = 32;
      // Sparse MMA supports m16n8k64 and m16n8k128 for s4/u4.
      allowedShapes.push_back({16, 8, 64});
      allowedShapes.push_back({16, 8, 128});
      break;
    case MMATypes::s8:
    case MMATypes::u8:
      kFactor = 16;
      // Sparse MMA supports m16n8k32 and m16n8k64 for s8/u8.
      allowedShapes.push_back({16, 8, 32});
      allowedShapes.push_back({16, 8, 64});
      break;
    case MMATypes::e4m3:
    case MMATypes::e5m2:
    case MMATypes::e3m2:
    case MMATypes::e2m3:
    case MMATypes::e2m1:
      kFactor = 16;
      multiplicandFragType = i32Ty;
      expectedResult.push_back(f16x2x2StructTy);
      expectedResult.push_back(f32x4StructTy);
      // Sparse MMA supports m16n8k64 for FP8 types.
      allowedShapes.push_back({16, 8, 64});
      break;
    default:
      return emitError("invalid shape or multiplicand type: ")
             << getMultiplicandAPtxType().value();
    }

    if (isIntegerPtxType(getMultiplicandAPtxType().value())) {
      expectedResult.push_back(s32x4StructTy);
      expectedC.emplace_back(4, i32Ty);
      multiplicandFragType = i32Ty;
    } else if (*getMultiplicandAPtxType() >= MMATypes::e4m3 &&
               *getMultiplicandAPtxType() <= MMATypes::e2m1) {
      // FP8 types.
      expectedC.emplace_back(2, f16x2Ty);
      expectedC.emplace_back(4, f32Ty);
    } else {
      expectedC.emplace_back(2, f16x2Ty);
      expectedC.emplace_back(4, f32Ty);
    }

    // For sparse MMA, the A operand is compressed (2:4 sparsity means half
    // the elements).
    int64_t unitA = (mmaShape[0] / 8) * (mmaShape[2] / kFactor) / 2;
    int64_t unitB = (mmaShape[1] / 8) * (mmaShape[2] / kFactor);
    expectedA.emplace_back(unitA, multiplicandFragType);
    expectedB.emplace_back(unitB, multiplicandFragType);

    if (resultPtxType() != accumPtxType())
      return emitOpError("ctype does not match dtype");
  }

  // In the M=8 case, there is only 1 possible case per data type.
  if (mmaShape[0] == 8) {
    if (*getMultiplicandAPtxType() == MMATypes::f16) {
      expectedA.emplace_back(2, f16x2Ty);
      expectedB.emplace_back(2, f16x2Ty);
      expectedResult.push_back(f16x2x4StructTy);
      expectedResult.push_back(f32x8StructTy);
      expectedC.emplace_back(4, f16x2Ty);
      expectedC.emplace_back(8, f32Ty);
      allowedShapes.push_back({8, 8, 4});
    }
    if (*getMultiplicandAPtxType() == MMATypes::f64) {
      Type f64Ty = Float64Type::get(context);
      expectedA.emplace_back(1, f64Ty);
      expectedB.emplace_back(1, f64Ty);
      expectedC.emplace_back(2, f64Ty);
      expectedResult.emplace_back(LLVM::LLVMStructType::getLiteral(
          context, SmallVector<Type>(2, f64Ty)));
      allowedShapes.push_back({8, 8, 4});
    }
    if (isIntegerPtxType(getMultiplicandAPtxType().value())) {
      expectedA.push_back({i32Ty});
      expectedB.push_back({i32Ty});
      expectedC.push_back({i32Ty, i32Ty});
      expectedResult.push_back(s32x2StructTy);
      if (isInt4PtxType(getMultiplicandAPtxType().value()))
        allowedShapes.push_back({8, 8, 32});
      if (isInt8PtxType(getMultiplicandAPtxType().value()))
        allowedShapes.push_back({8, 8, 16});
    }
  }

  std::string errorMessage;
  llvm::raw_string_ostream errorStream(errorMessage);

  // Check that we matched an existing shape/dtype combination.
  if (expectedA.empty() || expectedB.empty() || expectedC.empty() ||
      !llvm::is_contained(allowedShapes, mmaShape)) {
    errorStream << "unimplemented variant for MMA shape <";
    llvm::interleaveComma(mmaShape, errorStream);
    errorStream << ">";
    return emitOpError(errorMessage);
  }

  // Verify the operand types for segments of A, B, and C operands.
  std::array<StringRef, 3> operandNames{"A", "B", "C"};
  for (const auto &iter : llvm::enumerate(
           SmallVector<AllowedTypes, 3>{expectedA, expectedB, expectedC})) {
    auto spec = this->getODSOperandIndexAndLength(iter.index());
    SmallVector<Type, 4> operandTySeg(operand_type_begin() + spec.first,
                                      operand_type_begin() + spec.first +
                                          spec.second);
    bool match = llvm::is_contained(iter.value(), operandTySeg);

    if (!match) {
      errorStream << "Could not match types for the "
                  << operandNames[iter.index()]
                  << " operands; expected one of ";
      for (const auto &x : iter.value()) {
        errorStream << x.size() << "x" << x[0] << " ";
      }
      errorStream << "but got ";
      llvm::interleaveComma(operandTySeg, errorStream);
      return emitOpError(errorMessage);
    }
  }

  // Check the result type.
  if (!llvm::any_of(expectedResult, [&](Type expectedResultType) {
        return expectedResultType == getResult().getType();
      })) {
    errorStream
        << "Could not match allowed types for the result; expected one of ";
    llvm::interleaveComma(expectedResult, errorStream);
    errorStream << " but got " << getResult().getType();
    return emitOpError(errorMessage);
  }

  // Ensure int4/int8 MMA variants specify the accum overflow behavior
  // attribute.
  if (isInt4PtxType(*getMultiplicandAPtxType()) ||
      isInt8PtxType(*getMultiplicandAPtxType())) {
    if (!getIntOverflowBehavior())
      return emitOpError("op requires " +
                         getIntOverflowBehaviorAttrName().strref() +
                         " attribute");
  }

  // Validate that the sparse metadata is i32.
  if (!getSparseMetadata().getType().isInteger(32)) {
    return emitOpError() << "sparse metadata must be i32 type";
  }

  // Validate that the sparsity selector is i32.
  if (!getSparsitySelector().getType().isInteger(32)) {
    return emitOpError() << "sparsity selector must be i32 type";
  }

  return success();
}

1667//===----------------------------------------------------------------------===//
1668// MMA Block Scale Operations - Shared Helpers
1669//===----------------------------------------------------------------------===//
1670
1671namespace {
1672// Shared structure for MMA operand fragments (A, B, C)
1673struct MMAOperandFragment {
1674 StringRef operandName;
1675 StringRef ptxTypeAttr;
1676 SmallVector<Value, 4> regs;
1677 explicit MMAOperandFragment(StringRef name, StringRef ptxTypeName)
1678 : operandName(name), ptxTypeAttr(ptxTypeName) {}
1679};
1680} // namespace
1681
1682// Helper to print operand list in the format: name[operands]
1683static void printOperandList(OpAsmPrinter &p, StringRef name,
1684 ArrayRef<Value> operands) {
1685 p << " " << name << "[";
1686 p.printOperands(operands);
1687 p << "]";
1688}
1689
1690// Helper to parse operand list in the format: name[operands]
1691static LogicalResult
1692 parseMmaOperand(OpAsmParser &parser, StringRef operandName,
1693                 SmallVectorImpl<OpAsmParser::UnresolvedOperand> &regs) {
1694  if (parser.parseKeyword(operandName).failed())
1695  return failure();
1696  if (parser.parseOperandList(regs, OpAsmParser::Delimiter::Square)
1697  .failed())
1698 return failure();
1699 return success();
1700}
1701
1702// Helper to process operand fragments and determine which attributes can be
1703// inferred
1704template <typename Op>
1705static void
1706processOperandFragments(Op &op, std::array<MMAOperandFragment, 3> &frags,
1707 SmallVectorImpl<Type> &regTypes,
1708 SmallVectorImpl<StringRef> &ignoreAttrNames) {
1709 for (unsigned fragIdx = 0; fragIdx < frags.size(); fragIdx++) {
1710 auto &frag = frags[fragIdx];
1711 auto varOperandSpec = op.getODSOperandIndexAndLength(fragIdx);
1712 for (auto operandIdx = varOperandSpec.first;
1713 operandIdx < varOperandSpec.first + varOperandSpec.second;
1714 operandIdx++) {
1715 frag.regs.push_back(op.getOperand(operandIdx));
1716 if (fragIdx == 0 && operandIdx == varOperandSpec.first) {
1717 regTypes.push_back(op.getOperand(operandIdx).getType());
1718 }
1719 }
1720 if (fragIdx < 2) {
1721 regTypes.push_back(frag.regs[0].getType());
1722 }
1723 std::optional<MMATypes> inferredType =
1724 MmaOp::inferOperandMMAType(regTypes.back(),
1725 /*isAccumulator=*/fragIdx >= 2);
1726 if (inferredType)
1727 ignoreAttrNames.push_back(frag.ptxTypeAttr);
1728 }
1729}
1730
1731// Helper to parse type signature: (A_type, B_type, C_type)
1732static LogicalResult
1733 parseMmaTypeSignature(OpAsmParser &parser,
1734                       SmallVectorImpl<Type> &operandTypes) {
1735 if (parser.parseColon().failed() || parser.parseLParen().failed())
1736 return failure();
1737
1738 auto typeParser = [&]() {
1739 Type ty;
1740 if (parser.parseType(ty).failed())
1741 return failure();
1742 operandTypes.push_back(ty);
1743 return success();
1744 };
1745 if (parser.parseCommaSeparatedList(typeParser))
1746 return failure();
1747
1748 if (operandTypes.size() != 3)
1749 return parser.emitError(parser.getCurrentLocation(),
1750 "expected exactly 3 types");
1751
1752 return parser.parseRParen();
1753}
1754
1755// Helper to infer and set multiplicand PTX type attributes
1756static void
1757 inferAndSetMultiplicandTypes(MLIRContext *ctx, NamedAttrList &attrs,
1758                              const SmallVectorImpl<Type> &operandTypes) {
1759 if (!attrs.get("multiplicandAPtxType")) {
1760 if (auto inferredType =
1761 MmaOp::inferOperandMMAType(operandTypes[0], false)) {
1762 attrs.set("multiplicandAPtxType", MMATypesAttr::get(ctx, *inferredType));
1763 }
1764 }
1765 if (!attrs.get("multiplicandBPtxType")) {
1766 if (auto inferredType =
1767 MmaOp::inferOperandMMAType(operandTypes[1], false)) {
1768 attrs.set("multiplicandBPtxType", MMATypesAttr::get(ctx, *inferredType));
1769 }
1770 }
1771}
1772
1773// Helper to add common block scale properties
1774template <typename OpType>
1775 static void addBlockScaleProperties(OpBuilder &builder, OperationState &result,
1776                                     ArrayRef<int64_t> shape,
1777                                     ScaleVecSize scaleVecSize,
1778 BlockScaleFormat blockScaleFormat,
1779 MMABlockScaleKind kind) {
1780 MLIRContext *ctx = builder.getContext();
1781 auto &properties = result.getOrAddProperties<typename OpType::Properties>();
1782 properties.setShape(
1783 builder.getAttr<MMAShapeAttr>(shape[0], shape[1], shape[2]));
1784 properties.setScaleVecSize(ScaleVecSizeAttr::get(ctx, scaleVecSize));
1785 properties.setBlockScaleFormat(
1786 BlockScaleFormatAttr::get(ctx, blockScaleFormat));
1787 properties.setKind(MMABlockScaleKindAttr::get(ctx, kind));
1788}
1789
1790// Helper to infer and add multiplicand PTX types to builder
1791 static void addInferredMultiplicandTypes(
1792     MLIRContext *ctx, OperationState &result, ValueRange operandA,
1793     ValueRange operandB,
1794 std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes) {
1795 if (multiplicandPtxTypes) {
1796 result.addAttribute("multiplicandAPtxType",
1797 MMATypesAttr::get(ctx, (*multiplicandPtxTypes)[0]));
1798 result.addAttribute("multiplicandBPtxType",
1799 MMATypesAttr::get(ctx, (*multiplicandPtxTypes)[1]));
1800 } else {
1801 if (auto res = MmaOp::inferOperandMMAType(operandA[0].getType(), false))
1802 result.addAttribute("multiplicandAPtxType", MMATypesAttr::get(ctx, *res));
1803 if (auto res = MmaOp::inferOperandMMAType(operandB[0].getType(), false))
1804 result.addAttribute("multiplicandBPtxType", MMATypesAttr::get(ctx, *res));
1805 }
1806}
1807
1808// Template helper for common accumPtxType/resultPtxType implementation
1809template <typename OpTy>
1810static MMATypes inferPtxTypeFromResult(OpTy op) {
1811 return *MmaOp::inferOperandMMAType(
1812 cast<LLVM::LLVMStructType>(op.getRes().getType()).getBody()[0],
1813 /*isAccumulator=*/true);
1814}
1815
1816//===----------------------------------------------------------------------===//
1817// MmaBlockScaleOp
1818//===----------------------------------------------------------------------===//
1819
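// The custom assembly handled by print()/parse() below has roughly this
// shape (operand names, attributes, and types are illustrative only):
//   A[%a0, %a1] B[%b0, %b1] C[%c0, %c1]
//     scaleA[%sa, %byteA, %tidA] scaleB[%sb, %byteB, %tidB]
//     {...} : (i32, i32, f32) -> !llvm.struct<(f32, f32, f32, f32)>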
1820void MmaBlockScaleOp::print(OpAsmPrinter &p) {
1821 SmallVector<Type, 4> regTypes;
1822 std::array<MMAOperandFragment, 3> frags{
1823 MMAOperandFragment("A", getMultiplicandAPtxTypeAttrName()),
1824 MMAOperandFragment("B", getMultiplicandBPtxTypeAttrName()),
1825 MMAOperandFragment("C", "")};
1826 SmallVector<StringRef, 4> ignoreAttrNames{
1827 mlir::NVVM::MmaBlockScaleOp::getOperandSegmentSizeAttr()};
1828
1829 processOperandFragments(*this, frags, regTypes, ignoreAttrNames);
1830
1831 // Print A, B, C operands
1832 for (const auto &frag : frags)
1833 printOperandList(p, frag.operandName, frag.regs);
1834
1835 // Print scale operands
1836 printOperandList(p, "scaleA",
1837 {getScaleAData(), getByteIdA(), getThreadIdA()});
1838 printOperandList(p, "scaleB",
1839 {getScaleBData(), getByteIdB(), getThreadIdB()});
1840
1841 p.printOptionalAttrDict(this->getOperation()->getAttrs(), ignoreAttrNames);
1842
1843 // Print type signature
1844 p << " : (";
1845 llvm::interleaveComma(SmallVector<Type, 3>{frags[0].regs[0].getType(),
1846 frags[1].regs[0].getType(),
1847 frags[2].regs[0].getType()},
1848 p);
1849 p << ")";
1850 p.printArrowTypeList(TypeRange{this->getRes().getType()});
1851}
1852
1853ParseResult MmaBlockScaleOp::parse(OpAsmParser &parser,
1854                                    OperationState &result) {
1855  struct LocalOperandFragment {
1856 std::optional<MMATypes> elemtype;
1857 SmallVector<OpAsmParser::UnresolvedOperand, 4> regs;
1858 };
1859
1860 Builder &builder = parser.getBuilder();
1861 std::array<LocalOperandFragment, 3> frags;
1862 NamedAttrList namedAttributes;
1863
1864 // Parse A[...] B[...] C[...]
1865 if (parseMmaOperand(parser, "A", frags[0].regs).failed() ||
1866 parseMmaOperand(parser, "B", frags[1].regs).failed() ||
1867 parseMmaOperand(parser, "C", frags[2].regs).failed())
1868 return failure();
1869
1870 // Parse scale operands: scaleA[...] scaleB[...]
1871 SmallVector<OpAsmParser::UnresolvedOperand, 3> scaleAOperands, scaleBOperands;
1872 if (parseMmaOperand(parser, "scaleA", scaleAOperands).failed() ||
1873 parseMmaOperand(parser, "scaleB", scaleBOperands).failed())
1874 return failure();
1875
1876 if (parser.parseOptionalAttrDict(namedAttributes).failed())
1877 return failure();
1878
1879 // Parse type signature
1880 SmallVector<Type, 3> operandTypes;
1881 if (parseMmaTypeSignature(parser, operandTypes).failed())
1882 return failure();
1883
1884 // Parse result type
1885 SmallVector<Type, 1> resultTypes;
1886 if (parser.parseArrowTypeList(resultTypes).failed())
1887 return failure();
1888
1889 // Infer element types and resolve operands
1890 for (const auto &[idx, frag] : llvm::enumerate(frags)) {
1891 frag.elemtype = MmaOp::inferOperandMMAType(operandTypes[idx],
1892 /*isAccumulator=*/idx >= 2);
1893 if (parser
1894 .resolveOperands(frag.regs, operandTypes[idx], parser.getNameLoc(),
1895 result.operands)
1896 .failed())
1897 return failure();
1898 }
1899
1900 // Resolve scale operands
1901 SmallVector<Type, 3> scaleTypes = {builder.getI32Type(), builder.getI16Type(),
1902 builder.getI16Type()};
1903 if (parser
1904 .resolveOperands(scaleAOperands, scaleTypes, parser.getNameLoc(),
1905 result.operands)
1906 .failed() ||
1907 parser
1908 .resolveOperands(scaleBOperands, scaleTypes, parser.getNameLoc(),
1909 result.operands)
1910 .failed())
1911 return failure();
1912
1913 // Add attributes
1914 result.addAttributes(namedAttributes);
1915 inferAndSetMultiplicandTypes(parser.getContext(), result.attributes,
1916 operandTypes);
1917
1918 result.addTypes(resultTypes);
1919 result.addAttribute(MmaBlockScaleOp::getOperandSegmentSizeAttr(),
1920 builder.getDenseI32ArrayAttr({
1921 static_cast<int32_t>(frags[0].regs.size()),
1922 static_cast<int32_t>(frags[1].regs.size()),
1923 static_cast<int32_t>(frags[2].regs.size()),
1924 1, // scaleAData
1925 1, // byteIdA
1926 1, // threadIdA
1927 1, // scaleBData
1928 1, // byteIdB
1929 1 // threadIdB
1930 }));
1931 return success();
1932}
1933
1934void MmaBlockScaleOp::build(
1935 OpBuilder &builder, OperationState &result, Type resultType,
1936 ValueRange operandA, ValueRange operandB, ValueRange operandC,
1937 Value scaleAData, Value byteIdA, Value threadIdA, Value scaleBData,
1938 Value byteIdB, Value threadIdB, ArrayRef<int64_t> shape,
1939 std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes,
1940 ScaleVecSize scaleVecSize, BlockScaleFormat blockScaleFormat,
1941 MMABlockScaleKind kind) {
1942 assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)");
1943
1944  addBlockScaleProperties<MmaBlockScaleOp>(builder, result, shape, scaleVecSize,
1945                                           blockScaleFormat, kind);
1946
1947 result.addOperands(operandA);
1948 result.addOperands(operandB);
1949 result.addOperands(operandC);
1950 result.addOperands(
1951 {scaleAData, byteIdA, threadIdA, scaleBData, byteIdB, threadIdB});
1952
1953 addInferredMultiplicandTypes(builder.getContext(), result, operandA, operandB,
1954 multiplicandPtxTypes);
1955
1956 result.addTypes(resultType);
1957 result.addAttribute(MmaBlockScaleOp::getOperandSegmentSizeAttr(),
1958 builder.getDenseI32ArrayAttr({
1959 static_cast<int32_t>(operandA.size()),
1960 static_cast<int32_t>(operandB.size()),
1961 static_cast<int32_t>(operandC.size()),
1962 1, // scaleAData
1963 1, // byteIdA
1964 1, // threadIdA
1965 1, // scaleBData
1966 1, // byteIdB
1967 1 // threadIdB
1968 }));
1969}
1970
1971NVVM::IDArgPair MmaBlockScaleOp::getIntrinsicIDAndArgs(
1972 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
1973 auto curOp = cast<NVVM::MmaBlockScaleOp>(op);
1974
1974 
1975  llvm::SmallVector<llvm::Value *> args;
1976  // Add A, B, C operands
1977 for (Value operand : curOp.getOperandA())
1978 args.push_back(mt.lookupValue(operand));
1979 for (Value operand : curOp.getOperandB())
1980 args.push_back(mt.lookupValue(operand));
1981 for (Value operand : curOp.getOperandC())
1982 args.push_back(mt.lookupValue(operand));
1983
1984 // Add scale operands
1985 args.push_back(mt.lookupValue(curOp.getScaleAData()));
1986 args.push_back(mt.lookupValue(curOp.getByteIdA()));
1987 args.push_back(mt.lookupValue(curOp.getThreadIdA()));
1988 args.push_back(mt.lookupValue(curOp.getScaleBData()));
1989 args.push_back(mt.lookupValue(curOp.getByteIdB()));
1990 args.push_back(mt.lookupValue(curOp.getThreadIdB()));
1991
1992 unsigned intId = MmaBlockScaleOp::getIntrinsicID(
1993 curOp.getShape().getM(), curOp.getShape().getN(), curOp.getShape().getK(),
1994 *curOp.getMultiplicandAPtxType(), *curOp.getMultiplicandBPtxType(),
1995 inferPtxTypeFromResult(curOp), curOp.getScaleVecSize(),
1996 curOp.getBlockScaleFormat(), curOp.getKind());
1997
1998 return {intId, args};
1999}
2000
2001LogicalResult MmaBlockScaleOp::verify() {
2002 LogicalResult result = success();
2003 int m = getShape().getM();
2004 int n = getShape().getN();
2005 int k = getShape().getK();
2006
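// The checks below encode the supported combinations:
//   m16n8k64: A/B must be e2m1;
//     kind = mxf4     -> scale_vec_size x2 with format ue8m0
//     kind = mxf4nvf4 -> x2 with ue8m0, or x4 with ue4m3/ue8m0
//   m16n8k32: kind = mxf8f6f4 with x1 and ue8m0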
2007 if (m == 16 && n == 8 && k == 64) {
2008 if (getMultiplicandAPtxType() != NVVM::MMATypes::e2m1 ||
2009 getMultiplicandBPtxType() != NVVM::MMATypes::e2m1)
2010      result = emitOpError(
2011          "unsupported MMATypes attribute for mma.m16n8k64.(mxf4nvf4|mxf4)");
2012 if (getKind() == NVVM::MMABlockScaleKind::MXF4) {
2013 if (getScaleVecSize() != NVVM::ScaleVecSize::X2)
2014        result = emitOpError(
2015            "unsupported ScaleVecSize attribute for mma.m16n8k64.mxf4");
2016 if (getBlockScaleFormat() != NVVM::BlockScaleFormat::UE8M0)
2017        result = emitOpError(
2018            "unsupported BlockScaleFormat attribute for mma.m16n8k64.mxf4");
2019 } else if (getKind() == NVVM::MMABlockScaleKind::MXF4NVF4) {
2020 if (!((getScaleVecSize() == NVVM::ScaleVecSize::X2 &&
2021 getBlockScaleFormat() == NVVM::BlockScaleFormat::UE8M0) ||
2022 (getScaleVecSize() == NVVM::ScaleVecSize::X4 &&
2023 (getBlockScaleFormat() == NVVM::BlockScaleFormat::UE4M3 ||
2024 getBlockScaleFormat() == NVVM::BlockScaleFormat::UE8M0))))
2025 result = emitOpError("unsupported ScaleVecSize and BlockScaleFormat "
2026 "attributes for mma.m16n8k64.mxf4nvf4");
2027 } else {
2028 result = emitOpError("unsupported Kind attribute for mma.m16n8k64");
2029 }
2030 } else if (m == 16 && n == 8 && k == 32) {
2031 if (!(getKind() == NVVM::MMABlockScaleKind::MXF8F6F4 &&
2032 getScaleVecSize() == NVVM::ScaleVecSize::X1 &&
2033 getBlockScaleFormat() == NVVM::BlockScaleFormat::UE8M0))
2034 result =
2035 emitOpError("unsupported Kind, ScaleVecSize and BlockScaleFormat "
2036 "attributes for mma.m16n8k32");
2037 } else {
2038 result = emitOpError("unsupported Geom for mma with block scaling");
2039 }
2040 return result;
2041}
2042
2043//===----------------------------------------------------------------------===//
2044// MmaSpBlockScaleOp
2045//===----------------------------------------------------------------------===//
2046
2047void MmaSpBlockScaleOp::print(OpAsmPrinter &p) {
2048 SmallVector<Type, 4> regTypes;
2049 std::array<MMAOperandFragment, 3> frags{
2050 MMAOperandFragment("A", getMultiplicandAPtxTypeAttrName()),
2051 MMAOperandFragment("B", getMultiplicandBPtxTypeAttrName()),
2052 MMAOperandFragment("C", "")};
2053 SmallVector<StringRef, 4> ignoreAttrNames{
2054 mlir::NVVM::MmaSpBlockScaleOp::getOperandSegmentSizeAttr()};
2055
2056 processOperandFragments(*this, frags, regTypes, ignoreAttrNames);
2057
2058 // Print A, B, C operands
2059 for (const auto &frag : frags)
2060 printOperandList(p, frag.operandName, frag.regs);
2061
2062 // Print sparse-specific operands
2063 printOperandList(p, "sparseMetadata", {getSparseMetadata()});
2064 printOperandList(p, "selector", {getSparsitySelector()});
2065
2066 // Print scale operands
2067 printOperandList(p, "scaleA",
2068 {getScaleAData(), getByteIdA(), getThreadIdA()});
2069 printOperandList(p, "scaleB",
2070 {getScaleBData(), getByteIdB(), getThreadIdB()});
2071
2072 p.printOptionalAttrDict(this->getOperation()->getAttrs(), ignoreAttrNames);
2073
2074 // Print type signature
2075 p << " : (";
2076 llvm::interleaveComma(SmallVector<Type, 3>{frags[0].regs[0].getType(),
2077 frags[1].regs[0].getType(),
2078 frags[2].regs[0].getType()},
2079 p);
2080 p << ")";
2081 p.printArrowTypeList(TypeRange{this->getRes().getType()});
2082}
2083
2084ParseResult MmaSpBlockScaleOp::parse(OpAsmParser &parser,
2085                                      OperationState &result) {
2086  struct LocalOperandFragment {
2087 std::optional<MMATypes> elemtype;
2088 SmallVector<OpAsmParser::UnresolvedOperand, 4> regs;
2089 };
2090
2091 Builder &builder = parser.getBuilder();
2092 std::array<LocalOperandFragment, 3> frags;
2093 NamedAttrList namedAttributes;
2094
2095 // Parse A[...] B[...] C[...]
2096 if (parseMmaOperand(parser, "A", frags[0].regs).failed() ||
2097 parseMmaOperand(parser, "B", frags[1].regs).failed() ||
2098 parseMmaOperand(parser, "C", frags[2].regs).failed())
2099 return failure();
2100
2101 // Parse sparse-specific operands
2102  SmallVector<OpAsmParser::UnresolvedOperand, 1> metadataOperands,
2103      selectorOperands;
2104 if (parseMmaOperand(parser, "sparseMetadata", metadataOperands).failed() ||
2105 parseMmaOperand(parser, "selector", selectorOperands).failed())
2106 return failure();
2107
2108 // Parse scale operands
2109 SmallVector<OpAsmParser::UnresolvedOperand, 3> scaleAOperands, scaleBOperands;
2110 if (parseMmaOperand(parser, "scaleA", scaleAOperands).failed() ||
2111 parseMmaOperand(parser, "scaleB", scaleBOperands).failed())
2112 return failure();
2113
2114 if (parser.parseOptionalAttrDict(namedAttributes).failed())
2115 return failure();
2116
2117 // Parse type signature
2118 SmallVector<Type, 3> operandTypes;
2119 if (parseMmaTypeSignature(parser, operandTypes).failed())
2120 return failure();
2121
2122 // Parse result type
2123 SmallVector<Type, 1> resultTypes;
2124 if (parser.parseArrowTypeList(resultTypes).failed())
2125 return failure();
2126
2127 // Infer element types and resolve operands
2128 for (const auto &[idx, frag] : llvm::enumerate(frags)) {
2129 frag.elemtype = MmaOp::inferOperandMMAType(operandTypes[idx],
2130 /*isAccumulator=*/idx >= 2);
2131 if (parser
2132 .resolveOperands(frag.regs, operandTypes[idx], parser.getNameLoc(),
2133 result.operands)
2134 .failed())
2135 return failure();
2136 }
2137
2138 // Resolve sparse metadata and selector
2139 Type i32Type = builder.getI32Type();
2140 if (parser
2141 .resolveOperands(metadataOperands, i32Type, parser.getNameLoc(),
2142 result.operands)
2143 .failed() ||
2144 parser
2145 .resolveOperands(selectorOperands, i32Type, parser.getNameLoc(),
2146 result.operands)
2147 .failed())
2148 return failure();
2149
2150 // Resolve scale operands
2151 SmallVector<Type, 3> scaleTypes = {i32Type, builder.getI16Type(),
2152 builder.getI16Type()};
2153 if (parser
2154 .resolveOperands(scaleAOperands, scaleTypes, parser.getNameLoc(),
2155 result.operands)
2156 .failed() ||
2157 parser
2158 .resolveOperands(scaleBOperands, scaleTypes, parser.getNameLoc(),
2159 result.operands)
2160 .failed())
2161 return failure();
2162
2163 // Add attributes
2164 result.addAttributes(namedAttributes);
2165 inferAndSetMultiplicandTypes(parser.getContext(), result.attributes,
2166 operandTypes);
2167
2168  // orderedMetadata is mandatory; add it implicitly when absent.
2169 if (!result.attributes.get("orderedMetadata"))
2170 result.addAttribute("orderedMetadata", builder.getUnitAttr());
2171
2172 result.addTypes(resultTypes);
2173 result.addAttribute(MmaSpBlockScaleOp::getOperandSegmentSizeAttr(),
2174 builder.getDenseI32ArrayAttr({
2175 static_cast<int32_t>(frags[0].regs.size()),
2176 static_cast<int32_t>(frags[1].regs.size()),
2177 static_cast<int32_t>(frags[2].regs.size()),
2178 1, // sparseMetadata
2179 1, // sparsitySelector
2180 1, // scaleAData
2181 1, // byteIdA
2182 1, // threadIdA
2183 1, // scaleBData
2184 1, // byteIdB
2185 1 // threadIdB
2186 }));
2187 return success();
2188}
2189
2190void MmaSpBlockScaleOp::build(
2191 OpBuilder &builder, OperationState &result, Type resultType,
2192 ValueRange operandA, ValueRange operandB, ValueRange operandC,
2193 Value sparseMetadata, Value sparsitySelector, Value scaleAData,
2194 Value byteIdA, Value threadIdA, Value scaleBData, Value byteIdB,
2195 Value threadIdB, ArrayRef<int64_t> shape,
2196 std::optional<std::array<MMATypes, 2>> multiplicandPtxTypes,
2197 ScaleVecSize scaleVecSize, BlockScaleFormat blockScaleFormat,
2198 MMABlockScaleKind kind) {
2199 assert(shape.size() == 3 && "expected shape to have size 3 (m, n, k)");
2200
2201  addBlockScaleProperties<MmaSpBlockScaleOp>(
2202      builder, result, shape, scaleVecSize, blockScaleFormat, kind);
2203 result.addAttribute("orderedMetadata", builder.getUnitAttr());
2204
2205 result.addOperands(operandA);
2206 result.addOperands(operandB);
2207 result.addOperands(operandC);
2208 result.addOperands({sparseMetadata, sparsitySelector, scaleAData, byteIdA,
2209 threadIdA, scaleBData, byteIdB, threadIdB});
2210
2211 addInferredMultiplicandTypes(builder.getContext(), result, operandA, operandB,
2212 multiplicandPtxTypes);
2213
2214 result.addTypes(resultType);
2215 result.addAttribute(MmaSpBlockScaleOp::getOperandSegmentSizeAttr(),
2216 builder.getDenseI32ArrayAttr({
2217 static_cast<int32_t>(operandA.size()),
2218 static_cast<int32_t>(operandB.size()),
2219 static_cast<int32_t>(operandC.size()),
2220 1, // sparseMetadata
2221 1, // sparsitySelector
2222 1, // scaleAData
2223 1, // byteIdA
2224 1, // threadIdA
2225 1, // scaleBData
2226 1, // byteIdB
2227 1 // threadIdB
2228 }));
2229}
2230
2231NVVM::IDArgPair MmaSpBlockScaleOp::getIntrinsicIDAndArgs(
2232 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
2233 auto curOp = cast<NVVM::MmaSpBlockScaleOp>(op);
2234
2235  llvm::SmallVector<llvm::Value *> args;
2236  // Add A, B, C operands
2237 for (Value operand : curOp.getOperandA())
2238 args.push_back(mt.lookupValue(operand));
2239 for (Value operand : curOp.getOperandB())
2240 args.push_back(mt.lookupValue(operand));
2241 for (Value operand : curOp.getOperandC())
2242 args.push_back(mt.lookupValue(operand));
2243
2244 // Add sparse metadata and selector
2245 args.push_back(mt.lookupValue(curOp.getSparseMetadata()));
2246 args.push_back(mt.lookupValue(curOp.getSparsitySelector()));
2247
2248 // Add scale operands
2249 args.push_back(mt.lookupValue(curOp.getScaleAData()));
2250 args.push_back(mt.lookupValue(curOp.getByteIdA()));
2251 args.push_back(mt.lookupValue(curOp.getThreadIdA()));
2252 args.push_back(mt.lookupValue(curOp.getScaleBData()));
2253 args.push_back(mt.lookupValue(curOp.getByteIdB()));
2254 args.push_back(mt.lookupValue(curOp.getThreadIdB()));
2255
2256 unsigned intId = MmaSpBlockScaleOp::getIntrinsicID(
2257 curOp.getShape().getM(), curOp.getShape().getN(), curOp.getShape().getK(),
2258 *curOp.getMultiplicandAPtxType(), *curOp.getMultiplicandBPtxType(),
2259 inferPtxTypeFromResult(curOp), curOp.getScaleVecSize(),
2260 curOp.getBlockScaleFormat(), curOp.getKind());
2261
2262 return {intId, args};
2263}
2264
2265LogicalResult MmaSpBlockScaleOp::verify() {
2266 // Check that orderedMetadata is present
2267 if (!getOrderedMetadata()) {
2268 return emitOpError("'orderedMetadata' attribute is mandatory");
2269 }
2270
2271 LogicalResult result = success();
2272 int m = getShape().getM();
2273 int n = getShape().getN();
2274 int k = getShape().getK();
2275
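// The checks below mirror the dense MmaBlockScaleOp verifier with doubled K:
//   m16n8k128: A/B must be e2m1;
//     kind = mxf4     -> scale_vec_size x2 with format ue8m0
//     kind = mxf4nvf4 -> x2 with ue8m0, or x4 with ue4m3/ue8m0
//   m16n8k64:  kind = mxf8f6f4 with x1 and ue8m0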
2276 if (m == 16 && n == 8 && k == 128) {
2277 if (getMultiplicandAPtxType() != NVVM::MMATypes::e2m1 ||
2278 getMultiplicandBPtxType() != NVVM::MMATypes::e2m1)
2279      result = emitOpError(
2280          "unsupported MMATypes attribute for mma.m16n8k128.(mxf4nvf4|mxf4)");
2281 if (getKind() == NVVM::MMABlockScaleKind::MXF4) {
2282 if (getScaleVecSize() != NVVM::ScaleVecSize::X2)
2283        result = emitOpError(
2284            "unsupported ScaleVecSize attribute for mma.m16n8k128.mxf4");
2285 if (getBlockScaleFormat() != NVVM::BlockScaleFormat::UE8M0)
2286        result = emitOpError(
2287            "unsupported BlockScaleFormat attribute for mma.m16n8k128.mxf4");
2288 } else if (getKind() == NVVM::MMABlockScaleKind::MXF4NVF4) {
2289 if (!((getScaleVecSize() == NVVM::ScaleVecSize::X2 &&
2290 getBlockScaleFormat() == NVVM::BlockScaleFormat::UE8M0) ||
2291 (getScaleVecSize() == NVVM::ScaleVecSize::X4 &&
2292 (getBlockScaleFormat() == NVVM::BlockScaleFormat::UE4M3 ||
2293 getBlockScaleFormat() == NVVM::BlockScaleFormat::UE8M0))))
2294 result = emitOpError("unsupported ScaleVecSize and BlockScaleFormat "
2295 "attributes for mma.m16n8k128.mxf4nvf4");
2296 } else {
2297 result = emitOpError("unsupported Kind attribute for mma.m16n8k128");
2298 }
2299 } else if (m == 16 && n == 8 && k == 64) {
2300 if (!(getKind() == NVVM::MMABlockScaleKind::MXF8F6F4 &&
2301 getScaleVecSize() == NVVM::ScaleVecSize::X1 &&
2302 getBlockScaleFormat() == NVVM::BlockScaleFormat::UE8M0))
2303 result =
2304 emitOpError("unsupported Kind, ScaleVecSize and BlockScaleFormat "
2305 "attributes for mma.m16n8k64");
2306 } else {
2307 result = emitOpError("unsupported Geom for sparse mma with block scaling");
2308 }
2309 return result;
2310}
2311
2312LogicalResult ShflOp::verify() {
2313 auto returnStructType = llvm::dyn_cast<LLVM::LLVMStructType>(getType());
2314
2315 auto verifyTypeError = [&](Twine desc, Type expectedType,
2316 Type actualType) -> LogicalResult {
2317 return emitOpError("expected " + desc + " to be of type ")
2318 << expectedType << " but got " << actualType << " instead";
2319 };
2320
2321 if (returnStructType) {
2322 if (!getReturnValueAndIsValid())
2323 return emitOpError("\"return_value_and_is_valid\" attribute must be "
2324 "specified when the return type is a struct type");
2325
2326 if (returnStructType.getBody().size() != 2)
2327 return emitOpError("expected return type to be a two-element struct");
2328
2329 llvm::ArrayRef<Type> returnStruct = returnStructType.getBody();
2330 auto resultType = returnStruct[0];
2331 if (resultType != getVal().getType())
2332 return verifyTypeError("first element in the returned struct",
2333 getVal().getType(), resultType);
2334
2335 auto predicateType = returnStruct[1];
2336 if (!predicateType.isInteger(1))
2337 return verifyTypeError("second element in the returned struct",
2338 mlir::IntegerType::get(getContext(), 1),
2339 predicateType);
2340 } else {
2341 if (getReturnValueAndIsValid())
2342 return emitOpError("expected return type to be a two-element struct");
2343
2344 if (getType() != getVal().getType())
2345 return verifyTypeError("return type", getVal().getType(), getType());
2346 }
2347 return success();
2348}
2349
2350LogicalResult
2351ShflOp::inferReturnTypes(MLIRContext *context, std::optional<Location> location,
2352 ShflOp::Adaptor adaptor,
2353 SmallVectorImpl<Type> &inferredReturnTypes) {
2354 Type valType = adaptor.getVal().getType();
2355 if (adaptor.getReturnValueAndIsValid())
2356 inferredReturnTypes.push_back(LLVM::LLVMStructType::getLiteral(
2357 context, {valType, IntegerType::get(context, 1)}));
2358 else
2359 inferredReturnTypes.push_back(valType);
2360 return success();
2361}
2362
2363std::pair<mlir::Type, unsigned> NVVM::inferMMAType(NVVM::MMATypes type,
2364 NVVM::MMAFrag frag, int nRow,
2365 int nCol,
2366 MLIRContext *context) {
2367 unsigned numberElements = 0;
2368 Type elementType;
2369 OpBuilder builder(context);
2370 Type f16x2 = VectorType::get(2, builder.getF16Type());
2371 if (type == NVVM::MMATypes::f16) {
2372 elementType = f16x2;
2373 if (frag == NVVM::MMAFrag::a || frag == NVVM::MMAFrag::b)
2374 numberElements = 8;
2375 else
2376 numberElements = 4;
2377 } else if (type == NVVM::MMATypes::f32) {
2378 elementType = builder.getF32Type();
2379 numberElements = 8;
2380 } else if (type == NVVM::MMATypes::f64) {
2381 elementType = builder.getF64Type();
2382 if (frag == NVVM::MMAFrag::a || frag == NVVM::MMAFrag::b)
2383 numberElements = 1;
2384 else
2385 numberElements = 2;
2386 } else if (type == NVVM::MMATypes::tf32) {
2387 elementType = builder.getI32Type();
2388 numberElements = 4;
2389 } else if (type == NVVM::MMATypes::s8 || type == NVVM::MMATypes::u8) {
2390 elementType = builder.getI32Type();
2391 int parallelSize = 0;
2392 if (frag == NVVM::MMAFrag::a)
2393 parallelSize = nRow;
2394 if (frag == NVVM::MMAFrag::b)
2395 parallelSize = nCol;
2396
2397 // m == 16 && n == 16 && k == 16
2398 if (parallelSize == 16)
2399 numberElements = 2;
2400 // m == 8 && n == 32 && k == 16 or m == 32 && n == 8 && k == 16
2401 else if (parallelSize == 8)
2402 numberElements = 1;
2403 else if (parallelSize == 32)
2404 numberElements = 4;
2405 } else if (type == NVVM::MMATypes::s32) {
2406 elementType = builder.getI32Type();
2407 numberElements = 8;
2408 }
2409 assert(numberElements != 0 && elementType != nullptr);
2410 return std::make_pair(elementType, numberElements);
2411}
2412
2413static std::pair<mlir::Type, unsigned>
2414inferMMATypeFromMNK(NVVM::MMATypes type, NVVM::MMAFrag frag, int m, int n,
2415 int k, MLIRContext *context) {
2416 int nRow, nCol;
2417 if (frag == NVVM::MMAFrag::a) {
2418 nRow = m;
2419 nCol = k;
2420 } else if (frag == NVVM::MMAFrag::b) {
2421 nRow = k;
2422 nCol = n;
2423 } else {
2424 nRow = m;
2425 nCol = n;
2426 }
2427 assert(nRow && nCol);
2428 return inferMMAType(type, frag, nRow, nCol, context);
2429}
2430
2431LogicalResult NVVM::WMMALoadOp::verify() {
2432 unsigned addressSpace =
2433 llvm::cast<LLVM::LLVMPointerType>(getPtr().getType()).getAddressSpace();
2434 if (addressSpace != 0 && addressSpace != NVVMMemorySpace::Global &&
2435 addressSpace != NVVMMemorySpace::Shared)
2436 return emitOpError("expected source pointer in memory "
2437                       "space 0, 1, or 3");
2438
2439 if (NVVM::WMMALoadOp::getIntrinsicID(getM(), getN(), getK(), getLayout(),
2440 getEltype(), getFrag()) == 0)
2441 return emitOpError() << "invalid attribute combination";
2442 std::pair<Type, unsigned> typeInfo = inferMMATypeFromMNK(
2443 getEltype(), getFrag(), getM(), getN(), getK(), getContext());
2444 // Special case for f64 fragments
2445 Type f64Ty = Float64Type::get(getContext());
2446 if (typeInfo.first == f64Ty && typeInfo.second == 1) {
2447 if (getType() != f64Ty)
2448 return emitOpError("expected destination type to be f64");
2449 return success();
2450 }
2451 // Everything else is a struct
2452 Type dstType = LLVM::LLVMStructType::getLiteral(
2453 getContext(), SmallVector<Type, 8>(typeInfo.second, typeInfo.first));
2454 if (getType() != dstType)
2455    return emitOpError("expected destination type to be a structure of ")
2456 << typeInfo.second << " elements of type " << typeInfo.first;
2457 return success();
2458}
2459
2460LogicalResult NVVM::WMMAStoreOp::verify() {
2461 unsigned addressSpace =
2462 llvm::cast<LLVM::LLVMPointerType>(getPtr().getType()).getAddressSpace();
2463 if (addressSpace != 0 && addressSpace != NVVMMemorySpace::Global &&
2464 addressSpace != NVVMMemorySpace::Shared)
2465    return emitOpError("expected destination pointer in memory "
2466                       "space 0, 1, or 3");
2467
2468 if (NVVM::WMMAStoreOp::getIntrinsicID(getM(), getN(), getK(), getLayout(),
2469 getEltype()) == 0)
2470 return emitOpError() << "invalid attribute combination";
2471 std::pair<Type, unsigned> typeInfo = inferMMATypeFromMNK(
2472 getEltype(), NVVM::MMAFrag::c, getM(), getN(), getK(), getContext());
2473 if (getArgs().size() != typeInfo.second)
2474 return emitOpError() << "expected " << typeInfo.second << " data operands";
2475 if (llvm::any_of(getArgs(), [&typeInfo](Value operands) {
2476 return operands.getType() != typeInfo.first;
2477 }))
2478 return emitOpError() << "expected data operands of type " << typeInfo.first;
2479 return success();
2480}
2481
2482LogicalResult NVVM::WMMAMmaOp::verify() {
2483 if (NVVM::WMMAMmaOp::getIntrinsicID(getM(), getN(), getK(), getLayoutA(),
2484 getLayoutB(), getEltypeA(),
2485 getEltypeB()) == 0)
2486 return emitOpError() << "invalid attribute combination";
2487 std::pair<Type, unsigned> typeInfoA = inferMMATypeFromMNK(
2488 getEltypeA(), NVVM::MMAFrag::a, getM(), getN(), getK(), getContext());
2489 std::pair<Type, unsigned> typeInfoB = inferMMATypeFromMNK(
2490 getEltypeA(), NVVM::MMAFrag::b, getM(), getN(), getK(), getContext());
2491 std::pair<Type, unsigned> typeInfoC = inferMMATypeFromMNK(
2492 getEltypeB(), NVVM::MMAFrag::c, getM(), getN(), getK(), getContext());
2493 SmallVector<Type, 32> arguments;
2494 arguments.append(typeInfoA.second, typeInfoA.first);
2495 arguments.append(typeInfoB.second, typeInfoB.first);
2496 arguments.append(typeInfoC.second, typeInfoC.first);
2497 unsigned numArgs = arguments.size();
2498 if (getArgs().size() != numArgs)
2499 return emitOpError() << "expected " << numArgs << " arguments";
2500 for (unsigned i = 0; i < numArgs; i++) {
2501 if (getArgs()[i].getType() != arguments[i])
2502 return emitOpError() << "expected argument " << i << " to be of type "
2503 << arguments[i];
2504 }
2505 Type dstType = LLVM::LLVMStructType::getLiteral(
2506 getContext(), SmallVector<Type, 8>(typeInfoC.second, typeInfoC.first));
2507 if (getType() != dstType)
2508    return emitOpError("expected destination type to be a structure of ")
2509 << typeInfoC.second << " elements of type " << typeInfoC.first;
2510 return success();
2511}
2512
2513LogicalResult NVVM::LdMatrixOp::verify() {
2514 uint32_t num = getNum(), m = getShape().getM(), n = getShape().getN();
2515 if (m == 8 && n == 8) {
2516 if (num != 1 && num != 2 && num != 4) {
2517 return emitOpError("expected num attribute to be 1, 2 or 4 for 8x8 "
2518 "matrix");
2519 }
2520 if (getEltType() != LdStMatrixEltType::B16) {
2521 return emitOpError("expected element type to be b16 for 8x8 matrix");
2522 }
2523 } else if (m == 8 && n == 16) {
2524 if (num != 1 && num != 2 && num != 4) {
2525 return emitOpError("expected num attribute to be 1, 2 or 4 for 8x16 "
2526 "matrix");
2527 }
2528 if (getLayout() != MMALayout::row) {
2529 return emitOpError("expected layout to be row for 8x16 matrix");
2530 }
2531 if (getEltType() != LdStMatrixEltType::B8X16_B4X16_P64 &&
2532 getEltType() != LdStMatrixEltType::B8X16_B6X16_P32) {
2533 return emitOpError("expected element type to be b8x16.b4x16_p64 or "
2534 "b8x16.b6x16_p32 for 8x16 matrix");
2535 }
2536 } else if (m == 16 && n == 16) {
2537 if (num != 1 && num != 2) {
2538 return emitOpError("expected num attribute to be 1 or 2 for 16x16 "
2539 "matrix");
2540 }
2541 if (getLayout() != MMALayout::col) {
2542 return emitOpError("expected layout to be col for 16x16 matrix");
2543 }
2544 if (getEltType() != LdStMatrixEltType::B8 &&
2545 getEltType() != LdStMatrixEltType::B8X16_B4X16_P64 &&
2546 getEltType() != LdStMatrixEltType::B8X16_B6X16_P32) {
2547 return emitOpError("expected element type to be b8, b8x16.b4x16_p64 or "
2548 "b8x16.b6x16_p32 for 16x16 matrix");
2549 }
2550 } else {
2551 return emitOpError("expected shape to be 8x8, 8x16 or 16x16");
2552 }
2553
2554 Type i32 = IntegerType::get(getContext(), 32);
2555 uint32_t numElements = (m == 16 && n == 16 ? num * 2 : num);
2556 if (numElements == 1 && getType() != i32)
2557    return emitOpError("expected destination type to be i32");
2558 if (numElements == 2 || numElements == 4) {
2559 Type dstType = LLVM::LLVMStructType::getLiteral(
2560 getContext(), SmallVector<Type>(numElements, i32));
2561 if (getType() != dstType)
2562      return emitOpError("expected destination type to be a structure of ")
2563 << numElements << " elements of type i32";
2564 }
2565
2566 return success();
2567}
2568
2569LogicalResult LdMatrixOp::inferReturnTypes(
2570 MLIRContext *context, std::optional<Location> location,
2571 LdMatrixOp::Adaptor adaptor, SmallVectorImpl<Type> &inferredReturnTypes) {
2572 uint32_t num = adaptor.getNum();
2573 uint32_t m = adaptor.getShape().getM();
2574 uint32_t n = adaptor.getShape().getN();
2575 uint32_t numElements = (m == 16 && n == 16) ? num * 2 : num;
2576
2577 Type i32 = IntegerType::get(context, 32);
2578 if (numElements == 1)
2579 inferredReturnTypes.push_back(i32);
2580 else
2581 inferredReturnTypes.push_back(LLVM::LLVMStructType::getLiteral(
2582 context, SmallVector<Type>(numElements, i32)));
2583 return success();
2584}
2585
2586LogicalResult NVVM::StMatrixOp::verify() {
2587 int numMatrix = getSources().size();
2588 if (numMatrix != 1 && numMatrix != 2 && numMatrix != 4)
2589 return emitOpError("expected num attribute to be 1, 2 or 4");
2590
2591 int m = getShape().getM(), n = getShape().getN();
2592 if (m == 8 && n == 8) {
2593 if (getEltType() != NVVM::LdStMatrixEltType::B16) {
2594 return emitOpError("expected element type to be B16 for 8x8 matrix");
2595 }
2596 } else if (m == 16 && n == 8) {
2597 if (getEltType() != NVVM::LdStMatrixEltType::B8) {
2598 return emitOpError("expected element type to be B8 for 16x8 matrix");
2599 }
2600 if (getLayout() != NVVM::MMALayout::col) {
2601 return emitOpError("expected layout to be col for 16x8 matrix");
2602 }
2603 } else {
2604 return emitOpError("expected shape to be 8x8 or 16x8");
2605 }
2606
2607 return success();
2608}
2609
2610LogicalResult NVVM::MovMatrixOp::verify() {
2611 int m = getShape().getM(), n = getShape().getN();
2612 if (m != 8 || n != 8)
2613 return emitOpError("expected shape to be 8x8");
2614 if (getLayout() != NVVM::MMALayout::col)
2615 return emitOpError("expected layout to be col");
2616 if (getEltType() != NVVM::LdStMatrixEltType::B16)
2617 return emitOpError("expected element type to be b16");
2618 return success();
2619}
2620
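// The allowed K is 256 divided by the bit width of the input element type
// (tf32 -> 8, f16/bf16 -> 16, 8-bit integer and FP8 types -> 32, b1 -> 256).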
2621static FailureOr<int> getAllowedSizeK(NVVM::WGMMATypes typeA) {
2622 if (typeA == NVVM::WGMMATypes::tf32)
2623 return 8;
2624 if (typeA == NVVM::WGMMATypes::f16 || typeA == NVVM::WGMMATypes::bf16)
2625 return 16;
2626 if (typeA == NVVM::WGMMATypes::s8 || typeA == NVVM::WGMMATypes::u8)
2627 return 32;
2628 if (typeA == NVVM::WGMMATypes::e4m3 || typeA == NVVM::WGMMATypes::e5m2)
2629 return 32;
2630 if (typeA == NVVM::WGMMATypes::b1)
2631 return 256;
2632 return failure();
2633}
2634
2635static LogicalResult isAllowedWGMMADataType(NVVM::WGMMATypes typeD,
2636 NVVM::WGMMATypes typeA,
2637 NVVM::WGMMATypes typeB) {
2638 switch (typeA) {
2639 case NVVM::WGMMATypes::f16:
2640 if ((typeD == NVVM::WGMMATypes::f32 || typeD == NVVM::WGMMATypes::f16) &&
2641 typeB == NVVM::WGMMATypes::f16)
2642 return success();
2643 break;
2644 case NVVM::WGMMATypes::tf32:
2645 if (typeD == NVVM::WGMMATypes::f32 && typeB == NVVM::WGMMATypes::tf32)
2646 return success();
2647 break;
2648 case NVVM::WGMMATypes::u8:
2649 case NVVM::WGMMATypes::s8:
2650 if (typeD == NVVM::WGMMATypes::s32 &&
2651 (typeB == NVVM::WGMMATypes::u8 || typeB == NVVM::WGMMATypes::s8))
2652 return success();
2653 break;
2654 case NVVM::WGMMATypes::b1:
2655 if (typeD == NVVM::WGMMATypes::s32 && typeB == NVVM::WGMMATypes::b1)
2656 return success();
2657 break;
2658 case NVVM::WGMMATypes::bf16:
2659 if ((typeD == NVVM::WGMMATypes::f32 || typeD == NVVM::WGMMATypes::f16) &&
2660 typeB == NVVM::WGMMATypes::bf16)
2661 return success();
2662 break;
2663 case NVVM::WGMMATypes::e4m3:
2664 case NVVM::WGMMATypes::e5m2:
2665 if ((typeD == NVVM::WGMMATypes::f32 || typeD == NVVM::WGMMATypes::f16) &&
2666 (typeB == NVVM::WGMMATypes::e5m2 || typeB == NVVM::WGMMATypes::e4m3))
2667 return success();
2668 break;
2669 case WGMMATypes::f32:
2670 case WGMMATypes::s32:
2671 llvm_unreachable("unsupported input types");
2672 break;
2673 }
2674 return failure();
2675}
2676
2677static LogicalResult isAllowedSizeN(int sizeN, NVVM::WGMMATypes typeA) {
2678 SmallVector<int> allowedN = {8, 16, 24, 32, 40, 48, 56, 64,
2679 72, 80, 88, 96, 104, 112, 120, 128,
2680 136, 144, 152, 160, 168, 176, 184, 192,
2681 200, 208, 216, 224, 232, 240, 248, 256};
2682 SmallVector<int> allowedNshort = {8, 16, 24, 32, 48, 64,
2683 80, 96, 112, 128, 144, 160,
2684 176, 192, 208, 224, 240, 256};
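// allowedN is every multiple of 8 up to 256; allowedNshort (used for the
// 8-bit integer and 1-bit types) is every multiple of 16 up to 256, plus 8
// and 24.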
2685 switch (typeA) {
2686 case WGMMATypes::f16:
2687 case WGMMATypes::tf32:
2688 case WGMMATypes::bf16:
2689 case WGMMATypes::e4m3:
2690 case WGMMATypes::e5m2:
2691 if (llvm::is_contained(allowedN, sizeN))
2692 return success();
2693 break;
2694 case WGMMATypes::u8:
2695 case WGMMATypes::s8:
2696 case WGMMATypes::b1:
2697 if (llvm::is_contained(allowedNshort, sizeN))
2698 return success();
2699 break;
2700 case WGMMATypes::f32:
2701 case WGMMATypes::s32:
2702 llvm_unreachable("unsupported input types");
2703 break;
2704 }
2705 return failure();
2706}
2707
2708LogicalResult NVVM::WgmmaMmaAsyncOp::verify() {
2709 Value outValue = getResults();
2710 auto stype = dyn_cast<LLVM::LLVMStructType>(outValue.getType());
2711 if (!stype)
2712    return emitOpError() << "expected results to be a struct";
2713 int outputSize = stype.getBody().size();
2714 WGMMATypes typeD = getTypeD();
2715 WGMMATypes typeA = getTypeA();
2716 WGMMATypes typeB = getTypeB();
2717
2718 for (Type t : stype.getBody()) {
2719 if (t != stype.getBody().front())
2720 return emitOpError()
2721           << "all elements in the struct must have the same type, but found " << t;
2722 }
2723
2724 if (typeD != WGMMATypes::f32 && typeD != WGMMATypes::f16 &&
2725 typeD != WGMMATypes::s32) {
2726 return emitOpError() << "does not support the given output type " << typeD;
2727 }
2728 if (typeD == WGMMATypes::s32 &&
2729 (getScaleA() == WGMMAScaleIn::neg || getScaleB() == WGMMAScaleIn::neg)) {
2730 return emitOpError() << "has s32 output, scaleA and scaleB cannot be neg";
2731 }
2732
2733 if (failed(isAllowedWGMMADataType(typeD, typeA, typeB))) {
2734 return emitOpError() << typeD << " += " << typeA << " * " << typeB
2735                         << ", which is not supported";
2736 }
2737
2738 // Check M
2739 if (getShape().getM() != 64)
2740 return emitOpError() << "shape 'm' must be 64";
2741
2742 // Check K
2743 FailureOr<int> allowedK = getAllowedSizeK(typeA);
2744 if (failed(allowedK) || allowedK.value() != getShape().getK())
2745 return emitOpError() << "shape 'k' must be " << allowedK.value()
2746 << " for input type " << typeA;
2747
2748 // Check N
2749 if (failed(isAllowedSizeN(getShape().getN(), typeA))) {
2750    return emitOpError() << "has input type " << typeA << " with n set to "
2751                         << getShape().getN() << ", which is not supported";
2752 }
2753
2754 // Check transpose (only available for f16/bf16)
2755  // Matrix A should be stored row-major and matrix B column-major.
2756 // Only f16/bf16 matrices can be stored in either column-major or row-major
2757 // by setting the transpose value(imm-trans-a,imm-trans-b) in PTX code.
2758 if ((typeA != WGMMATypes::f16 && typeA != WGMMATypes::bf16) &&
2759 (getLayoutA() == mlir::NVVM::MMALayout::col ||
2760 getLayoutB() == mlir::NVVM::MMALayout::row)) {
2761 return emitOpError()
2762 << "given layouts layout_a = " << getLayoutA()
2763 << " and layout_b = " << getLayoutB() << " for input types " << typeA
2764 << " and " << typeB
2765 << " requires transpose. However, this is only supported for: "
2766 << MMATypes::f16 << " and " << MMATypes::bf16;
2767 }
2768
2769 // Check result registers
2770 int expectedOutput = 0;
2771 if (typeD == WGMMATypes::f32 || typeD == WGMMATypes::s32)
2772 expectedOutput = getShape().getN() / 2;
2773 if (typeD == WGMMATypes::f16)
2774 expectedOutput = getShape().getN() / 4;
2775 if (outputSize != expectedOutput) {
2776    return emitOpError() << "expected " << expectedOutput
2777                         << " result elements, however the output struct has " << outputSize
2778 << " elements";
2779 }
2780 // Check satfinite (only available for s32 accumulator)
2781 if (typeD != WGMMATypes::s32 &&
2782 getSatfinite().value_or(NVVM::MMAIntOverflow::wrapped) ==
2783 NVVM::MMAIntOverflow::satfinite) {
2784 return emitOpError()
2785           << " `satfinite` can only be used with an s32 accumulator, however "
2786 "the current accumulator is "
2787 << typeD;
2788 }
2789
2790 return success();
2791}
2792
2793std::string NVVM::WgmmaMmaAsyncOp::getPtx() {
2794
2795 int m = getShape().getM(), n = getShape().getN(), k = getShape().getK();
2796 bool isF16 = getTypeA() == WGMMATypes::f16 || getTypeA() == WGMMATypes::bf16;
2797
2798 StringRef outputTypeName = stringifyWGMMATypes(getTypeD());
2799
2800 int expectedOutputRegisters = 0;
2801 if (getTypeD() == WGMMATypes::f16)
2802 expectedOutputRegisters = getShape().getN() / 4;
2803 else
2804 expectedOutputRegisters = getShape().getN() / 2;
2805
2806 std::string ptx;
2807 llvm::raw_string_ostream ss(ptx);
2808
2809 ss << "{\n"
2810 ".reg .pred p;\n"
2811 "setp.ne.b32 p, $"
2812 << ((expectedOutputRegisters * 2) + 2)
2813 << ", 0;\n"
2814 "wgmma.mma_async.sync.aligned.m"
2815 << m << "n" << n << "k" << k << "." << outputTypeName << "." << getTypeA()
2816 << "." << getTypeB();
2817 if (getSatfinite().value_or(NVVM::MMAIntOverflow::wrapped) ==
2818 NVVM::MMAIntOverflow::satfinite)
2819 ss << ".satfinite";
2820 ss << " {";
2821 int regCnt = 0;
2822 for (; regCnt < expectedOutputRegisters; ++regCnt) {
2823 ss << "$" << regCnt;
2824 if (regCnt != expectedOutputRegisters - 1)
2825 ss << ", ";
2826 }
2827
2828 ss << "},";
2829 // Need to map read/write registers correctly.
2830 regCnt = (regCnt * 2);
2831 ss << " $" << (regCnt) << ","
2832 << " $" << (regCnt + 1) << ","
2833 << " p";
2834 if (getTypeD() != WGMMATypes::s32) {
2835 ss << ", $" << (regCnt + 3) << ", $" << (regCnt + 4);
2836 }
2837 // Don't add transpose parameters unless needed.
2838 if (isF16) {
2839 ss << ", $" << (regCnt + 5) << ", $" << (regCnt + 6);
2840 }
2841 ss << ";\n"
2842 << "}\n";
2843 return ptx;
2844}
2845
2846bool NVVM::WgmmaMmaAsyncOp::getAsmValues(
2847 RewriterBase &rewriter,
2848 llvm::SmallVectorImpl<std::pair<mlir::Value, mlir::NVVM::PTXRegisterMod>>
2849 &asmValues) {
2850 bool isF16 = getTypeA() == WGMMATypes::f16 || getTypeA() == WGMMATypes::bf16;
2851 if (getResults())
2852 asmValues.push_back({getResults(), mlir::NVVM::PTXRegisterMod::Write});
2853 if (getInouts())
2854 asmValues.push_back({getInouts(), mlir::NVVM::PTXRegisterMod::ReadWrite});
2855 asmValues.push_back({getDescriptorA(), mlir::NVVM::PTXRegisterMod::Read});
2856 asmValues.push_back({getDescriptorB(), mlir::NVVM::PTXRegisterMod::Read});
2857 asmValues.push_back({makeConstantI32(rewriter, static_cast<int>(getScaleD())),
2858                       mlir::NVVM::PTXRegisterMod::Read});
2859  if (getTypeD() != WGMMATypes::s32) {
2860 asmValues.push_back(
2861 {makeConstantI32(rewriter,
2862 getScaleA() == NVVM::WGMMAScaleIn::neg ? -1 : 1),
2863         mlir::NVVM::PTXRegisterMod::Read});
2864    asmValues.push_back(
2865 {makeConstantI32(rewriter,
2866 getScaleB() == NVVM::WGMMAScaleIn::neg ? -1 : 1),
2867         mlir::NVVM::PTXRegisterMod::Read});
2868  }
2869 if (isF16) {
2870 asmValues.push_back(
2871 {makeConstantI32(rewriter, static_cast<int>(getLayoutA())),
2872         mlir::NVVM::PTXRegisterMod::Read});
2873    asmValues.push_back(
2874 {makeConstantI32(rewriter, 1 - static_cast<int>(getLayoutB())),
2875         mlir::NVVM::PTXRegisterMod::Read});
2876  }
2877 return true; // Has manual mapping
2878}
2879
2880LogicalResult NVVM::FenceProxyOp::verify() {
2881 if (getKind() == NVVM::ProxyKind::async_shared && !getSpace().has_value()) {
2882 return emitOpError() << "async_shared fence requires space attribute";
2883 }
2884 if (getKind() != NVVM::ProxyKind::async_shared && getSpace().has_value()) {
2885 return emitOpError() << "only async_shared fence can have space attribute";
2886 }
2887 return success();
2888}
2889
2890LogicalResult NVVM::FenceProxyAcquireOp::verify() {
2891 if (getFromProxy() != NVVM::ProxyKind::GENERIC)
2892 return emitOpError("uni-directional proxies only support generic for "
2893 "from_proxy attribute");
2894
2895 if (getToProxy() != NVVM::ProxyKind::TENSORMAP)
2896 return emitOpError("uni-directional proxies only support tensormap "
2897 "for to_proxy attribute");
2898 return success();
2899}
2900
2901LogicalResult NVVM::FenceProxyReleaseOp::verify() {
2902 if (getFromProxy() != NVVM::ProxyKind::GENERIC)
2903 return emitOpError("uni-directional proxies only support generic for "
2904 "from_proxy attribute");
2905
2906 if (getToProxy() != NVVM::ProxyKind::TENSORMAP)
2907 return emitOpError("uni-directional proxies only support tensormap "
2908 "for to_proxy attribute");
2909 return success();
2910}
2911
2912LogicalResult NVVM::FenceProxySyncRestrictOp::verify() {
2913 if (getFromProxy() != NVVM::ProxyKind::GENERIC)
2914    return emitOpError("only generic is supported for from_proxy attribute");
2915
2916 if (getToProxy() != NVVM::ProxyKind::async)
2917 return emitOpError("only async is supported for to_proxy attribute");
2918 return success();
2919}
2920
2921LogicalResult NVVM::SetMaxRegisterOp::verify() {
2922 if (getRegCount() % 8)
2923    return emitOpError("new register size must be a multiple of 8");
2924 if (getRegCount() < 24 || getRegCount() > 256)
2925    return emitOpError("new register size must be between 24 and 256");
2926 return success();
2927}
2928
2929LogicalResult NVVM::BarrierOp::verify() {
2930 if (getNumberOfThreads() && !getBarrierId())
2931 return emitOpError(
2932        "barrier id is missing; it should be set between 0 and 15");
2933
2934 if (getBarrierId() && (getReductionOp() || getReductionPredicate()))
2935    return emitOpError("reductions are only available when the barrier id is 0");
2936
2937 if ((getReductionOp() && !getReductionPredicate()) ||
2938 (!getReductionOp() && getReductionPredicate()))
2939 return emitOpError("reduction predicate and reduction operation must be "
2940 "specified together");
2941
2942 return success();
2943}
2944
2945LogicalResult BarrierOp::inferReturnTypes(
2946 MLIRContext *context, std::optional<Location> location,
2947 BarrierOp::Adaptor adaptor, SmallVectorImpl<Type> &inferredReturnTypes) {
2948 if (adaptor.getReductionOp())
2949 inferredReturnTypes.push_back(IntegerType::get(context, 32));
2950 return success();
2951}
2952
2953bool BarrierOp::isCompatibleReturnTypes(TypeRange l, TypeRange r) {
2955}
2956
2957LogicalResult NVVM::Tcgen05CpOp::verify() {
2958 auto mc = getMulticast();
2959
2960 using SH = Tcgen05CpShape;
2961 using MC = Tcgen05CpMulticast;
2962 switch (getShape()) {
2963 case SH::SHAPE_128x256b:
2964 case SH::SHAPE_128x128b:
2965 case SH::SHAPE_4x256b:
2966 if (mc != MC::NONE)
2967 return emitError("Invalid multicast type for tcgen05.cp Op");
2968 break;
2969 case SH::SHAPE_64x128b:
2970 if (mc != MC::WARPX2_01_23 && mc != MC::WARPX2_02_13)
2971 return emitError("Shape 64x128b requires multicast warpx2_01_23 or "
2972 "warpx2_02_13 for tcgen05.cp Op");
2973 break;
2974 case SH::SHAPE_32x128b:
2975 if (mc != MC::WARPX4)
2976 return emitError(
2977 "Shape 32x128b requires multicast warpx4 for tcgen05.cp Op");
2978 break;
2979 }
2980 return success();
2981}
2982
2983LogicalResult NVVM::MatchSyncOp::verify() {
2984 if (getKind() == NVVM::MatchSyncKind::all) {
2985 auto type = llvm::dyn_cast<LLVM::LLVMStructType>(getType());
2986 if (!type || type.getBody().size() != 2 ||
2987 !type.getBody()[0].isInteger(32) || !type.getBody()[1].isInteger(1)) {
2988 return emitOpError("match.sync 'all' returns a two element struct with "
2989 "first element as i32 and second element as i1");
2990 }
2991 } else {
2992 if (!getType().isInteger(32)) {
2993 return emitOpError("match.sync 'any' returns an i32");
2994 }
2995 }
2996 return success();
2997}
2998
2999LogicalResult MatchSyncOp::inferReturnTypes(
3000 MLIRContext *context, std::optional<Location> location,
3001 MatchSyncOp::Adaptor adaptor, SmallVectorImpl<Type> &inferredReturnTypes) {
3002 if (adaptor.getKind() == NVVM::MatchSyncKind::all)
3003 inferredReturnTypes.push_back(LLVM::LLVMStructType::getLiteral(
3004 context,
3005 {IntegerType::get(context, 32), IntegerType::get(context, 1)}));
3006 else
3007 inferredReturnTypes.push_back(IntegerType::get(context, 32));
3008 return success();
3009}
3010
3011LogicalResult NVVM::VoteSyncOp::verify() {
3012 if (getKind() == NVVM::VoteSyncKind::ballot) {
3013 if (!getType().isInteger(32)) {
3014 return emitOpError("vote.sync 'ballot' returns an i32");
3015 }
3016 } else {
3017 if (!getType().isInteger(1)) {
3018 return emitOpError("vote.sync 'any', 'all' and 'uni' returns an i1");
3019 }
3020 }
3021 return success();
3022}
3023
3024LogicalResult VoteSyncOp::inferReturnTypes(
3025 MLIRContext *context, std::optional<Location> location,
3026 VoteSyncOp::Adaptor adaptor, SmallVectorImpl<Type> &inferredReturnTypes) {
3027 unsigned width = adaptor.getKind() == NVVM::VoteSyncKind::ballot ? 32 : 1;
3028 inferredReturnTypes.push_back(IntegerType::get(context, width));
3029 return success();
3030}
3031
3032LogicalResult NVVM::PrefetchOp::verify() {
3033 using MemSpace = NVVM::NVVMMemorySpace;
3034 using CacheLevel = NVVM::PrefetchCacheLevel;
3035
3036 unsigned addressSpace =
3037 llvm::cast<LLVM::LLVMPointerType>(getAddr().getType()).getAddressSpace();
3038 std::optional<NVVM::CacheEvictionPriority> evictPriority = getEvictPriority();
3039 std::optional<NVVM::PrefetchCacheLevel> cacheLevel = getCacheLevel();
3040
3041 if (getTensormap() && cacheLevel)
3042 return emitOpError("cannot specify both tensormap and cache level");
3043
3044 if (getTensormap()) {
3045 if (addressSpace != MemSpace::Generic &&
3046 addressSpace != MemSpace::Constant) {
3047 return emitOpError(
3048 "prefetch tensormap requires a generic or constant pointer");
3049 }
3050
3051 if (evictPriority) {
3052 return emitOpError(
3053 "prefetch tensormap does not support eviction priority");
3054 }
3055
3056 if (getInParamSpace() && addressSpace != MemSpace::Generic) {
3057 return emitOpError(
3058 "in_param_space can only be specified for a generic pointer");
3059 }
3060
3061 } else if (cacheLevel) {
3062 if (addressSpace != MemSpace::Generic && addressSpace != MemSpace::Global &&
3063 addressSpace != MemSpace::Local) {
3064 return emitOpError("prefetch to cache level requires a generic, global, "
3065 "or local pointer");
3066 }
3067
3068 if (getUniform()) {
3069 if (*cacheLevel != CacheLevel::L1) {
3070 return emitOpError(
3071 "unsupported cache level, the only supported uniform "
3072 "cache level is L1");
3073 }
3074
3075 if (addressSpace != MemSpace::Generic) {
3076 return emitOpError(
3077 "prefetch to uniform cache requires a generic pointer");
3078 }
3079 }
3080
3081 if (evictPriority) {
3082 if (*cacheLevel != CacheLevel::L2)
3083 return emitOpError(
3084 "cache eviction priority supported only for cache level L2");
3085
3086 if (addressSpace != MemSpace::Global)
3087 return emitOpError("cache eviction priority requires a global pointer");
3088
3089 if (*evictPriority != NVVM::CacheEvictionPriority::EvictNormal &&
3090 *evictPriority != NVVM::CacheEvictionPriority::EvictLast)
3091 return emitOpError(
3092 "unsupported cache eviction priority, only evict_last and "
3093 "evict_normal are supported");
3094 }
3095
3096 if (getPredicate())
3097 return emitOpError("predicate supported only on prefetch tensormap");
3098
3099 } else {
3100 return emitOpError(
3101 "requires specification of either cache level or tensormap");
3102 }
3103
3104 return success();
3105}
3106
3107LogicalResult NVVM::ClusterLaunchControlQueryCancelOp::verify() {
3108 switch (getQueryType()) {
3109 case NVVM::ClusterLaunchControlQueryType::IS_CANCELED:
3110 if (!getType().isInteger(1))
3111 return emitOpError("is_canceled query type returns an i1");
3112 break;
3113 case NVVM::ClusterLaunchControlQueryType::GET_FIRST_CTA_ID_X:
3114 case NVVM::ClusterLaunchControlQueryType::GET_FIRST_CTA_ID_Y:
3115 case NVVM::ClusterLaunchControlQueryType::GET_FIRST_CTA_ID_Z:
3116 if (!getType().isInteger(32)) {
3117 return emitOpError("get_first_cta_id_x, get_first_cta_id_y, "
3118 "get_first_cta_id_z query types return an i32");
3119 }
3120 break;
3121 }
3122 return success();
3123}
3124
3125LogicalResult ClusterLaunchControlQueryCancelOp::inferReturnTypes(
3126 MLIRContext *context, std::optional<Location> location,
3127 ClusterLaunchControlQueryCancelOp::Adaptor adaptor,
3128 SmallVectorImpl<Type> &inferredReturnTypes) {
3129 unsigned width =
3130 adaptor.getQueryType() == NVVM::ClusterLaunchControlQueryType::IS_CANCELED
3131 ? 1
3132 : 32;
3133 inferredReturnTypes.push_back(IntegerType::get(context, width));
3134 return success();
3135}
3136
3137LogicalResult NVVM::ReduxOp::verify() {
3138 mlir::Type reduxType = getType();
3139
3140 if (!reduxType.isF32()) {
3141 if (getAbs())
3142 return emitOpError("abs attribute is supported only for f32 type");
3143 if (getNan())
3144 return emitOpError("nan attribute is supported only for f32 type");
3145 }
3146
3147 NVVM::ReductionKind kind = getKind();
3148 switch (kind) {
3149 case NVVM::ReductionKind::ADD:
3150 case NVVM::ReductionKind::AND:
3151 case NVVM::ReductionKind::OR:
3152 case NVVM::ReductionKind::XOR:
3153 case NVVM::ReductionKind::MAX:
3154 case NVVM::ReductionKind::MIN:
3155 case NVVM::ReductionKind::UMAX:
3156 case NVVM::ReductionKind::UMIN:
3157 if (!reduxType.isInteger(32))
3158 return emitOpError("'")
3159 << kind << "' reduction kind unsupported with " << reduxType
3160 << " type. Only supported type is 'i32'.";
3161 break;
3162 case NVVM::ReductionKind::FMIN:
3163 case NVVM::ReductionKind::FMAX:
3164 if (!reduxType.isF32())
3165 return emitOpError("'")
3166 << kind << "' reduction kind unsupported with " << reduxType
3167 << " type. Only supported type is 'f32'.";
3168 break;
3169 }
3170
3171 return success();
3172}
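// For example, a redux with kind = add must produce an i32 result, while
// kind = fmin/fmax must produce f32 (optionally with the abs/nan modifiers).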
3173
3174LogicalResult NVVM::TensormapReplaceOp::verify() {
3175 auto ord = getOrd();
3176 Value newVal = getNewValue();
3177 auto newValAttr = getNewValueAttr();
3178 auto fieldName = stringifyEnum(getField());
3179
3180 if (ord && !llvm::is_contained({NVVM::TensormapField::BOX_DIM,
3181 NVVM::TensormapField::GLOBAL_DIM,
3182 NVVM::TensormapField::GLOBAL_STRIDE,
3183 NVVM::TensormapField::ELEMENT_STRIDE},
3184 getField()))
3185 return emitOpError("ordinal is not supported for ")
3186 << fieldName << " field";
3187
3188 auto invalidNewVal = [&](llvm::Twine type) -> std::string {
3189 return llvm::Twine("new_value must be specified and must be an " + type +
3190 " for " + llvm::Twine(fieldName) + " field")
3191 .str();
3192 };
3193
3194 auto invalidNewValAttr = [&]() -> std::string {
3195 return (llvm::Twine(
3196 "new_value_attr must be specified and must be a valid ") +
3197 llvm::Twine(fieldName) + " attribute for " + fieldName + " field")
3198 .str();
3199 };
3200
3201 switch (getField()) {
3202 case NVVM::TensormapField::GLOBAL_ADDRESS:
3203 if (!(newVal && newVal.getType().isInteger(64)))
3204 return emitOpError(invalidNewVal("i64"));
3205 break;
3206 case NVVM::TensormapField::RANK:
3207 if (!(newVal && newVal.getType().isInteger(32)))
3208 return emitOpError(invalidNewVal("i32"));
3209 break;
3210 case NVVM::TensormapField::GLOBAL_STRIDE:
3211 if (!ord)
3212 return emitOpError("ordinal is required for global_stride field");
3213 if (!(newVal && newVal.getType().isInteger(64)))
3214 return emitOpError(invalidNewVal("i64"));
3215 break;
3216 case NVVM::TensormapField::BOX_DIM:
3217 case NVVM::TensormapField::GLOBAL_DIM:
3218 case NVVM::TensormapField::ELEMENT_STRIDE:
3219 if (!ord)
3220 return emitOpError("ordinal is required for ")
3221 << stringifyEnum(getField()) << " field";
3222 if (!(newVal && newVal.getType().isInteger(32)))
3223 return emitOpError(invalidNewVal("i32"));
3224 break;
3225 case NVVM::TensormapField::ELEMTYPE:
3226 if (!(newValAttr && llvm::isa<TensormapElemtypeAttr>(*newValAttr)))
3227 return emitOpError(invalidNewValAttr());
3228 break;
3229 case NVVM::TensormapField::INTERLEAVE_LAYOUT:
3230 if (!(newValAttr && llvm::isa<TensormapInterleaveLayoutAttr>(*newValAttr)))
3231 return emitOpError(invalidNewValAttr());
3232 break;
3233 case NVVM::TensormapField::SWIZZLE_MODE:
3234 if (!(newValAttr && llvm::isa<TensormapSwizzleModeAttr>(*newValAttr)))
3235 return emitOpError(invalidNewValAttr());
3236 break;
3237 case NVVM::TensormapField::SWIZZLE_ATOMICITY:
3238 if (!(newValAttr && llvm::isa<TensormapSwizzleAtomicityAttr>(*newValAttr)))
3239 return emitOpError(invalidNewValAttr());
3240 break;
3241 case NVVM::TensormapField::FILL_MODE:
3242 if (!(newValAttr && llvm::isa<TensormapFillModeAttr>(*newValAttr)))
3243 return emitOpError(invalidNewValAttr());
3244 break;
3245 }
3246
3247 return success();
3248}
3249
3250template <typename OpType>
3251static LogicalResult verifyAddSubFOp(OpType op) {
3252 mlir::NVVM::FPRoundingMode rndMode = op.getRnd();
3253 mlir::NVVM::SaturationMode satMode = op.getSat();
3254 bool isFTZ = op.getFtz();
3255
3256 mlir::Type opType = op.getRes().getType();
3257 mlir::Type opBaseType = isa<VectorType>(opType)
3258 ? cast<VectorType>(opType).getElementType()
3259 : opType;
3260
3261 if (opBaseType.isF64() && (satMode != NVVM::SaturationMode::NONE || isFTZ))
3262 return op.emitOpError("FTZ and saturation are not supported for "
3263 "additions/subtractions involving f64 type");
3264
3265 if (opBaseType.isF16() && !(rndMode == NVVM::FPRoundingMode::RN ||
3266 rndMode == NVVM::FPRoundingMode::NONE))
3267 return op.emitOpError("only RN rounding mode is supported for f16 and "
3268 "vector<2xf16> additions/subtractions");
3269
3270 if (opBaseType.isBF16()) {
3271 if (rndMode != NVVM::FPRoundingMode::RN &&
3272 rndMode != NVVM::FPRoundingMode::NONE)
3273 return op.emitOpError("only RN rounding mode is supported for bf16 and "
3274 "vector<2xbf16> additions/subtractions");
3275 if (satMode != NVVM::SaturationMode::NONE || isFTZ)
3276 return op.emitOpError("FTZ and saturation are not supported for bf16 and "
3277 "vector<2xbf16> additions/subtractions");
3278 }
3279
3280 // FIXME: This is a temporary check disallowing lowering to add.rn.ftz.f16(x2)
3281 // PTX instructions since the corresponding LLVM intrinsic is missing. This
3282 // should be removed once the intrinsics for f16 addition (with FTZ only) are
3283 // available.
3284 if (opBaseType.isF16() && isFTZ && satMode == NVVM::SaturationMode::NONE)
3285 return op.emitOpError("FTZ with no saturation is not supported for f16 and "
3286 "vector<2xf16> additions/subtractions");
3287
3288 return success();
3289}
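// In summary: f32 accepts any combination here; f64 rejects FTZ and
// saturation; f16 and bf16 require RN (or unspecified) rounding, with bf16
// additionally rejecting FTZ and saturation.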
3290
3291LogicalResult NVVM::AddFOp::verify() { return verifyAddSubFOp<AddFOp>(*this); }
3292
3293LogicalResult NVVM::SubFOp::verify() { return verifyAddSubFOp<SubFOp>(*this); }
3294
3295LogicalResult NVVM::FmaOp::verify() {
3296 auto opType = getRes().getType();
3297 mlir::NVVM::FPRoundingMode rndMode = getRnd();
3298 mlir::NVVM::SaturationMode satMode = getSat();
3299 bool isFTZ = getFtz();
3300 bool isRelu = getRelu();
3301 bool hasOOB = getOob();
3302
3303 auto getBaseFType = [](Type type) -> Type {
3304 if (isa<VectorType>(type))
3305 return cast<VectorType>(type).getElementType();
3306 return type;
3307 };
3308
3309 auto opBaseType = getBaseFType(opType);
3310
3311 if (rndMode == NVVM::FPRoundingMode::NONE)
3312 return emitOpError("rounding mode must be specified");
3313
3314 if (isRelu && satMode == NVVM::SaturationMode::SAT)
3315 return emitOpError("relu and saturation are not supported together");
3316
3317 if (hasOOB && (satMode == NVVM::SaturationMode::SAT || isFTZ))
3318 return emitOpError("oob is not supported with saturation or FTZ");
3319
3320 if (!(opBaseType.isF16() || opBaseType.isBF16()) && (isRelu || hasOOB))
3321 return emitOpError("relu and oob are only supported for f16 and bf16");
3322
3323 if (opBaseType.isF64() && (satMode != NVVM::SaturationMode::NONE || isFTZ))
3324 return emitOpError("FTZ and saturation are not supported for f64 type");
3325
3326 if (opBaseType.isF16() && rndMode != NVVM::FPRoundingMode::RN)
3327 return emitOpError(
3328 "only RN rounding mode is supported for f16 and vector<2xf16>");
3329
3330 if (opBaseType.isBF16()) {
3331 if (rndMode != NVVM::FPRoundingMode::RN)
3332 return emitOpError(
3333 "only RN rounding mode is supported for bf16 and vector<2xbf16>");
3334 if (satMode != NVVM::SaturationMode::NONE || isFTZ)
3335 return emitOpError(
3336 "FTZ and saturation are not supported for bf16 and vector<2xbf16>");
3337 }
3338
3339 return success();
3340}
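// In summary: an explicit rounding mode is mandatory; relu and oob apply only
// to f16/bf16, with relu excluded under saturation and oob excluded under
// saturation or FTZ; f64 rejects FTZ and saturation; f16/bf16 require RN.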
3341
3342/// Packs the given `field` into the `result`.
3343/// The `result` is 64-bits and each `field` can be 32-bits or narrower.
3344static llvm::Value *
3345packValInto64Bits(llvm::IRBuilderBase &builder,
3346 llvm::Value *result, // the `result` (unset bits are zero)
3347 llvm::Value *field, // `field` to pack into `result`
3348 unsigned sizeInBits, // Size of `field` in bits
3349 unsigned start) { // Starting bit within `result`
3350 field = builder.CreateZExtOrBitCast(field, builder.getInt32Ty());
3351
3352 unsigned mask = (sizeInBits < 32 ? ((1u << sizeInBits) - 1) : 0xffffffffu);
3353 if (mask != 0xffffffffu)
3354 field = builder.CreateAnd(field, builder.getInt32(mask));
3355
3356 field = builder.CreateZExtOrBitCast(field, builder.getInt64Ty());
3357 field = builder.CreateShl(field, start);
3358
3359 return builder.CreateOr(result, field);
3360}
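// A constant-folded sketch of the same packing, for illustration only
// (assumes plain integer inputs rather than IR values):
//   uint64_t packConst(uint64_t result, uint32_t field,
//                      unsigned sizeInBits, unsigned start) {
//     uint32_t mask = sizeInBits < 32 ? ((1u << sizeInBits) - 1) : 0xffffffffu;
//     return result | (uint64_t(field & mask) << start);
//   }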
3361
3362void Tcgen05MmaSmemDescOp::createSmemDescriptor(Operation &op,
3363 LLVM::ModuleTranslation &mt,
3364 llvm::IRBuilderBase &builder) {
3365 auto thisOp = cast<NVVM::Tcgen05MmaSmemDescOp>(op);
3366 llvm::Value *smemDesc = builder.getInt64(0);
3367
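  // Descriptor bit layout, as assembled by the pack calls below:
  //   [0..13]  start address      [16..29] leading-dim offset
  //   [32..45] stride-dim offset  [46..48] constant 0b001
  //   [49..51] base offset        [52]     leading-dim mode
  //   [61..63] swizzle mode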
3368 smemDesc = packValInto64Bits(builder, smemDesc,
3369 mt.lookupValue(thisOp.getStartAddr()), 14, 0);
3370 smemDesc = packValInto64Bits(
3371 builder, smemDesc, mt.lookupValue(thisOp.getLeadingDimOffset()), 14, 16);
3372 smemDesc = packValInto64Bits(
3373 builder, smemDesc, mt.lookupValue(thisOp.getStrideDimOffset()), 14, 32);
3374
3375 smemDesc = packValInto64Bits(builder, smemDesc, builder.getInt32(1), 3, 46);
3376 smemDesc = packValInto64Bits(builder, smemDesc,
3377 mt.lookupValue(thisOp.getBaseOffset()), 3, 49);
3378 smemDesc = packValInto64Bits(
3379 builder, smemDesc, mt.lookupValue(thisOp.getLeadingDimMode()), 1, 52);
3380 smemDesc = packValInto64Bits(builder, smemDesc,
3381 mt.lookupValue(thisOp.getSwizzleMode()), 3, 61);
3382
3383 mt.mapValue(thisOp.getRes()) = smemDesc;
3384}
3385
3386//===----------------------------------------------------------------------===//
3387// getPtx methods
3388//===----------------------------------------------------------------------===//
3389
3390std::string NVVM::MBarrierInitOp::getPtx() {
3391 bool isShared = isPtrInSharedCTASpace(getAddr());
3392 return isShared ? std::string("mbarrier.init.shared.b64 [%0], %1;")
3393 : std::string("mbarrier.init.b64 [%0], %1;");
3394}
3395
3396std::string NVVM::MBarrierArriveExpectTxOp::getPtx() {
3397 bool isShared = isPtrInSharedCTASpace(getAddr());
3398 return isShared
3399 ? std::string("mbarrier.arrive.expect_tx.shared.b64 _, [%0], %1;")
3400 : std::string("mbarrier.arrive.expect_tx.b64 _, [%0], %1;");
3401}
3402
3403std::string NVVM::MBarrierTryWaitParityOp::getPtx() {
3404 bool isShared = isPtrInSharedCTASpace(getAddr());
3405 llvm::StringRef space = isShared ? ".shared" : "";
3406
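  // The PTX below spins until the barrier phase completes: predicate P1 is
  // set by mbarrier.try_wait.parity, and the loop branches back to LAB_WAIT
  // until P1 becomes true, at which point it exits via DONE.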
3407 return llvm::formatv("{\n\t"
3408 ".reg .pred P1; \n\t"
3409 "LAB_WAIT: \n\t"
3410 "mbarrier.try_wait.parity{0}.b64 P1, [%0], %1, %2; \n\t"
3411 "@P1 bra.uni DONE; \n\t"
3412 "bra.uni LAB_WAIT; \n\t"
3413 "DONE: \n\t"
3414 "}",
3415 space);
3416}
3417
3418//===----------------------------------------------------------------------===//
3419// Canonicalization patterns
3420//===----------------------------------------------------------------------===//
3421
3422 struct ConvertFsubToFnegFadd : public OpRewritePattern<SubFOp> {
3423   using OpRewritePattern<SubFOp>::OpRewritePattern;
3424
3425 LogicalResult matchAndRewrite(SubFOp op,
3426 PatternRewriter &rewriter) const override {
3427 Location loc = op.getLoc();
3428 Value negRhs =
3429 LLVM::FNegOp::create(rewriter, loc, op.getRhs().getType(), op.getRhs());
3430
3431 rewriter.replaceOpWithNewOp<AddFOp>(op, op.getType(), op.getLhs(), negRhs,
3432 op.getRnd(), op.getSat(), op.getFtz());
3433 return success();
3434 }
3435};
3436
3437void SubFOp::getCanonicalizationPatterns(RewritePatternSet &patterns,
3438 MLIRContext *context) {
3439 patterns.add<ConvertFsubToFnegFadd>(context);
3440}
3441
3442//===----------------------------------------------------------------------===//
3443// getIntrinsicID/getIntrinsicIDAndArgs methods
3444//===----------------------------------------------------------------------===//
3445
3446mlir::NVVM::IDArgPair NVVM::BarrierOp::getIntrinsicIDAndArgs(
3447 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3448 auto thisOp = cast<NVVM::BarrierOp>(op);
3449 llvm::Value *barrierId = thisOp.getBarrierId()
3450 ? mt.lookupValue(thisOp.getBarrierId())
3451 : builder.getInt32(0);
3452 llvm::Intrinsic::ID id;
3453 llvm::SmallVector<llvm::Value *> args = {barrierId};
3454 if (thisOp.getNumberOfThreads()) {
3455 id = llvm::Intrinsic::nvvm_barrier_cta_sync_aligned_count;
3456 args.push_back(mt.lookupValue(thisOp.getNumberOfThreads()));
3457 } else if (thisOp.getReductionOp()) {
3458 switch (*thisOp.getReductionOp()) {
3459 case NVVM::BarrierReduction::AND:
3460 id = llvm::Intrinsic::nvvm_barrier_cta_red_and_aligned_all;
3461 break;
3462 case NVVM::BarrierReduction::OR:
3463 id = llvm::Intrinsic::nvvm_barrier_cta_red_or_aligned_all;
3464 break;
3465 case NVVM::BarrierReduction::POPC:
3466 id = llvm::Intrinsic::nvvm_barrier_cta_red_popc_aligned_all;
3467 break;
3468 }
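    // The op models the reduction predicate as an i32, but the intrinsics
    // expect an i1, so lower it as (predicate != 0).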
3469 args.push_back(builder.CreateICmpNE(
3470 mt.lookupValue(thisOp.getReductionPredicate()), builder.getInt32(0)));
3471 } else {
3472 id = llvm::Intrinsic::nvvm_barrier_cta_sync_aligned_all;
3473 }
3474
3475 return {id, std::move(args)};
3476}
3477
3478 mlir::NVVM::IDArgPair
3479 CosOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
3480 llvm::IRBuilderBase &builder) {
3481 auto thisOp = cast<NVVM::CosOp>(op);
3482 llvm::Intrinsic::ID id = thisOp.getFtz()
3483 ? llvm::Intrinsic::nvvm_cos_approx_ftz_f
3484 : llvm::Intrinsic::nvvm_cos_approx_f;
3485 return {id, {mt.lookupValue(thisOp.getSrc())}};
3486}
3487
3488 mlir::NVVM::IDArgPair
3489 SinOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
3490 llvm::IRBuilderBase &builder) {
3491 auto thisOp = cast<NVVM::SinOp>(op);
3492 llvm::Intrinsic::ID id = thisOp.getFtz()
3493 ? llvm::Intrinsic::nvvm_sin_approx_ftz_f
3494 : llvm::Intrinsic::nvvm_sin_approx_f;
3495 return {id, {mt.lookupValue(thisOp.getSrc())}};
3496}
3497
3498 mlir::NVVM::IDArgPair
3499 Log2Op::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
3500 llvm::IRBuilderBase &builder) {
3501 auto thisOp = cast<NVVM::Log2Op>(op);
3502 llvm::Intrinsic::ID id = thisOp.getFtz()
3503 ? llvm::Intrinsic::nvvm_lg2_approx_ftz_f
3504 : llvm::Intrinsic::nvvm_lg2_approx_f;
3505 return {id, {mt.lookupValue(thisOp.getSrc())}};
3506}
3507
3508 mlir::NVVM::IDArgPair
3509 Ex2Op::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
3510 llvm::IRBuilderBase &builder) {
3511 auto thisOp = cast<NVVM::Ex2Op>(op);
3512 llvm::Intrinsic::ID id = thisOp.getFtz()
3513 ? llvm::Intrinsic::nvvm_ex2_approx_ftz
3514 : llvm::Intrinsic::nvvm_ex2_approx;
3515 return {id, {mt.lookupValue(thisOp.getSrc())}};
3516}
3517
3518 mlir::NVVM::IDArgPair
3519 PMEventOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
3520 llvm::IRBuilderBase &builder) {
3521 auto thisOp = cast<NVVM::PMEventOp>(op);
3522 llvm::Type *i16Ty = llvm::Type::getInt16Ty(mt.getLLVMContext());
3523
3524 // With event-id, mask is generated as (1 << event-id)
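  // e.g. event-id 3 yields the mask 0b1000 (only bit 3 set).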
3525 llvm::Value *maskVal;
3526 if (auto eventAttr = thisOp.getEventIdAttr()) {
3527 uint16_t mask = static_cast<uint16_t>(1u << eventAttr.getInt());
3528 maskVal = llvm::ConstantInt::get(i16Ty, mask);
3529 } else {
3530 maskVal =
3531 llvm::ConstantInt::get(i16Ty, thisOp.getMaskedEventIdAttr().getValue());
3532 }
3533
3534 return {llvm::Intrinsic::nvvm_pm_event_mask, {maskVal}};
3535}
3536
3537mlir::NVVM::IDArgPair MBarrierInitOp::getIntrinsicIDAndArgs(
3538 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3539 auto thisOp = cast<NVVM::MBarrierInitOp>(op);
3540 bool isShared = isPtrInSharedCTASpace(thisOp.getAddr());
3541 llvm::Intrinsic::ID id = isShared ? llvm::Intrinsic::nvvm_mbarrier_init_shared
3542 : llvm::Intrinsic::nvvm_mbarrier_init;
3543
3544 // Fill the Intrinsic Args
3545 llvm::SmallVector<llvm::Value *> args;
3546 args.push_back(mt.lookupValue(thisOp.getAddr()));
3547 args.push_back(mt.lookupValue(thisOp.getCount()));
3548
3549 return {id, std::move(args)};
3550}
3551
3552mlir::NVVM::IDArgPair MBarrierInvalOp::getIntrinsicIDAndArgs(
3553 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3554 auto thisOp = cast<NVVM::MBarrierInvalOp>(op);
3555 bool isShared = isPtrInSharedCTASpace(thisOp.getAddr());
3556 llvm::Intrinsic::ID id = isShared
3557 ? llvm::Intrinsic::nvvm_mbarrier_inval_shared
3558 : llvm::Intrinsic::nvvm_mbarrier_inval;
3559
3560 return {id, {mt.lookupValue(thisOp.getAddr())}};
3561}
3562
3563mlir::NVVM::IDArgPair MBarrierExpectTxOp::getIntrinsicIDAndArgs(
3564 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3565 auto thisOp = cast<NVVM::MBarrierExpectTxOp>(op);
3566
3567 bool isClusterSpace = isPtrInSharedClusterSpace(thisOp.getAddr());
3568 bool isClusterScope = thisOp.getScope() == NVVM::MemScopeKind::CLUSTER;
3569 // bit-0: Space
3570 // bit-1: Scope
3571 size_t index = ((isClusterScope ? 1 : 0) << 1) | (isClusterSpace ? 1 : 0);
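  // Resulting index: 0 = scope_cta/space_cta, 1 = scope_cta/space_cluster,
  // 2 = scope_cluster/space_cta, 3 = scope_cluster/space_cluster.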
3572
3573 static constexpr llvm::Intrinsic::ID IDs[] = {
3574 llvm::Intrinsic::nvvm_mbarrier_expect_tx_scope_cta_space_cta,
3575 llvm::Intrinsic::nvvm_mbarrier_expect_tx_scope_cta_space_cluster,
3576 llvm::Intrinsic::nvvm_mbarrier_expect_tx_scope_cluster_space_cta,
3577 llvm::Intrinsic::nvvm_mbarrier_expect_tx_scope_cluster_space_cluster};
3578
3579 // Fill the Intrinsic Args
3580 llvm::SmallVector<llvm::Value *> args;
3581 args.push_back(mt.lookupValue(thisOp.getAddr()));
3582 args.push_back(mt.lookupValue(thisOp.getTxcount()));
3583
3584 return {IDs[index], std::move(args)};
3585}
3586
3587mlir::NVVM::IDArgPair MBarrierCompleteTxOp::getIntrinsicIDAndArgs(
3588 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3589 auto thisOp = cast<NVVM::MBarrierCompleteTxOp>(op);
3590
3591 bool isClusterSpace = isPtrInSharedClusterSpace(thisOp.getAddr());
3592 bool isClusterScope = thisOp.getScope() == NVVM::MemScopeKind::CLUSTER;
3593 // bit-0: Space
3594 // bit-1: Scope
3595 size_t index = ((isClusterScope ? 1 : 0) << 1) | (isClusterSpace ? 1 : 0);
3596
3597 static constexpr llvm::Intrinsic::ID IDs[] = {
3598 llvm::Intrinsic::nvvm_mbarrier_complete_tx_scope_cta_space_cta,
3599 llvm::Intrinsic::nvvm_mbarrier_complete_tx_scope_cta_space_cluster,
3600 llvm::Intrinsic::nvvm_mbarrier_complete_tx_scope_cluster_space_cta,
3601 llvm::Intrinsic::nvvm_mbarrier_complete_tx_scope_cluster_space_cluster};
3602
3603 // Fill the Intrinsic Args
3604 llvm::SmallVector<llvm::Value *> args;
3605 args.push_back(mt.lookupValue(thisOp.getAddr()));
3606 args.push_back(mt.lookupValue(thisOp.getTxcount()));
3607
3608 return {IDs[index], std::move(args)};
3609}
3610
3611mlir::NVVM::IDArgPair MBarrierArriveOp::getIntrinsicIDAndArgs(
3612 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3613 auto thisOp = cast<NVVM::MBarrierArriveOp>(op);
3614
3615 bool isClusterSpace = isPtrInSharedClusterSpace(thisOp.getAddr());
3616 bool isClusterScope = thisOp.getScope() == NVVM::MemScopeKind::CLUSTER;
3617 // bit-0: Space
3618 // bit-1: Scope
3619 size_t index = ((isClusterScope ? 1 : 0) << 1) | (isClusterSpace ? 1 : 0);
3620
3621 static constexpr llvm::Intrinsic::ID IDs[] = {
3622 llvm::Intrinsic::nvvm_mbarrier_arrive_scope_cta_space_cta,
3623 llvm::Intrinsic::nvvm_mbarrier_arrive_scope_cta_space_cluster,
3624 llvm::Intrinsic::nvvm_mbarrier_arrive_scope_cluster_space_cta,
3625 llvm::Intrinsic::nvvm_mbarrier_arrive_scope_cluster_space_cluster};
3626 static constexpr llvm::Intrinsic::ID relaxedIDs[] = {
3627 llvm::Intrinsic::nvvm_mbarrier_arrive_relaxed_scope_cta_space_cta,
3628 llvm::Intrinsic::nvvm_mbarrier_arrive_relaxed_scope_cta_space_cluster,
3629 llvm::Intrinsic::nvvm_mbarrier_arrive_relaxed_scope_cluster_space_cta,
3630 llvm::Intrinsic::
3631 nvvm_mbarrier_arrive_relaxed_scope_cluster_space_cluster};
3632 auto id = thisOp.getRelaxed() ? relaxedIDs[index] : IDs[index];
3633
3634 // Tidy-up the Intrinsic Args
3635 bool needCast = isPtrInGenericSpace(thisOp.getAddr());
3636 llvm::Value *mbar = mt.lookupValue(thisOp.getAddr());
3637 if (needCast)
3638 mbar = castPtrToAddrSpace(builder, mbar, NVVMMemorySpace::Shared);
3639
3640 // The most basic mbarrier.arrive is available from sm_80 onwards. It
3641 // supports space=cta, scope=cta, non-relaxed semantics, and no explicit
3642 // count. Only for that combination do we use the legacy intrinsic.
3643 bool hasCount = static_cast<bool>(thisOp.getCount());
3644 if (!hasCount &&
3645 (id == llvm::Intrinsic::nvvm_mbarrier_arrive_scope_cta_space_cta))
3646 return {llvm::Intrinsic::nvvm_mbarrier_arrive_shared, {mbar}};
3647
3648 // When count is not explicitly specified, the default is 1.
3649 llvm::LLVMContext &ctx = mt.getLLVMContext();
3650 llvm::Value *count =
3651 hasCount ? mt.lookupValue(thisOp.getCount())
3652 : llvm::ConstantInt::get(llvm::Type::getInt32Ty(ctx), 1);
3653 return {id, {mbar, count}};
3654}
3655
3656mlir::NVVM::IDArgPair MBarrierArriveDropOp::getIntrinsicIDAndArgs(
3657 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3658 auto thisOp = cast<NVVM::MBarrierArriveDropOp>(op);
3659
3660 bool isClusterSpace = isPtrInSharedClusterSpace(thisOp.getAddr());
3661 bool isClusterScope = thisOp.getScope() == NVVM::MemScopeKind::CLUSTER;
3662 // bit-0: Space
3663 // bit-1: Scope
3664 size_t index = ((isClusterScope ? 1 : 0) << 1) | (isClusterSpace ? 1 : 0);
3665
3666 static constexpr llvm::Intrinsic::ID IDs[] = {
3667 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_scope_cta_space_cta,
3668 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_scope_cta_space_cluster,
3669 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_scope_cluster_space_cta,
3670 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_scope_cluster_space_cluster};
3671 static constexpr llvm::Intrinsic::ID relaxedIDs[] = {
3672 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_relaxed_scope_cta_space_cta,
3673 llvm::Intrinsic::
3674 nvvm_mbarrier_arrive_drop_relaxed_scope_cta_space_cluster,
3675 llvm::Intrinsic::
3676 nvvm_mbarrier_arrive_drop_relaxed_scope_cluster_space_cta,
3677 llvm::Intrinsic::
3678 nvvm_mbarrier_arrive_drop_relaxed_scope_cluster_space_cluster};
3679 auto id = thisOp.getRelaxed() ? relaxedIDs[index] : IDs[index];
3680
3681 // Tidy-up the Intrinsic Args
3682 bool needCast = isPtrInGenericSpace(thisOp.getAddr());
3683 llvm::Value *mbar = mt.lookupValue(thisOp.getAddr());
3684 if (needCast)
3685 mbar = castPtrToAddrSpace(builder, mbar, NVVMMemorySpace::Shared);
3686
3687 // When count is not explicitly specified, the default is 1.
3688 llvm::LLVMContext &ctx = mt.getLLVMContext();
3689 bool hasCount = static_cast<bool>(thisOp.getCount());
3690 llvm::Value *count =
3691 hasCount ? mt.lookupValue(thisOp.getCount())
3692 : llvm::ConstantInt::get(llvm::Type::getInt32Ty(ctx), 1);
3693
3694 return {id, {mbar, count}};
3695}
3696
3697bool MBarrierArriveExpectTxOp::getAsmValues(
3698 RewriterBase &rewriter,
3699 llvm::SmallVectorImpl<std::pair<mlir::Value, mlir::NVVM::PTXRegisterMod>>
3700 &asmValues) {
3701 // Add all the operands, but not the attributes, to the asmValues list.
3702 // The attributes are only used to select the right intrinsic variants
3703 // during lowering, so they are ignored when generating inline PTX.
3704 for (auto val : getOperands())
3705 asmValues.push_back({val, mlir::NVVM::PTXRegisterMod::Read});
3706
3707 return false;
3708}
3709
3710mlir::NVVM::IDArgPair MBarrierArriveExpectTxOp::getIntrinsicIDAndArgs(
3711 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3712 auto thisOp = cast<NVVM::MBarrierArriveExpectTxOp>(op);
3713
3714 bool isClusterSpace = isPtrInSharedClusterSpace(thisOp.getAddr());
3715 bool isClusterScope = thisOp.getScope() == NVVM::MemScopeKind::CLUSTER;
3716 // bit-0: Space
3717 // bit-1: Scope
3718 size_t index = ((isClusterScope ? 1 : 0) << 1) | (isClusterSpace ? 1 : 0);
3719
3720 // clang-format off
3721 static constexpr llvm::Intrinsic::ID IDs[] = {
3722 llvm::Intrinsic::nvvm_mbarrier_arrive_expect_tx_scope_cta_space_cta,
3723 llvm::Intrinsic::nvvm_mbarrier_arrive_expect_tx_scope_cta_space_cluster,
3724 llvm::Intrinsic::nvvm_mbarrier_arrive_expect_tx_scope_cluster_space_cta,
3725 llvm::Intrinsic::nvvm_mbarrier_arrive_expect_tx_scope_cluster_space_cluster};
3726 static constexpr llvm::Intrinsic::ID relaxedIDs[] = {
3727 llvm::Intrinsic::nvvm_mbarrier_arrive_expect_tx_relaxed_scope_cta_space_cta,
3728 llvm::Intrinsic::nvvm_mbarrier_arrive_expect_tx_relaxed_scope_cta_space_cluster,
3729 llvm::Intrinsic::nvvm_mbarrier_arrive_expect_tx_relaxed_scope_cluster_space_cta,
3730 llvm::Intrinsic::nvvm_mbarrier_arrive_expect_tx_relaxed_scope_cluster_space_cluster};
3731 // clang-format on
3732 auto id = thisOp.getRelaxed() ? relaxedIDs[index] : IDs[index];
3733
3734 // Tidy-up the Intrinsic Args
3735 llvm::Value *txcount = mt.lookupValue(thisOp.getTxcount());
3736 llvm::Value *mbar = mt.lookupValue(thisOp.getAddr());
3737 bool needCast = isPtrInGenericSpace(thisOp.getAddr());
3738 if (needCast)
3739 mbar = castPtrToAddrSpace(builder, mbar, NVVMMemorySpace::Shared);
3740
3741 return {id, {mbar, txcount}};
3742}
3743
3744mlir::NVVM::IDArgPair MBarrierArriveDropExpectTxOp::getIntrinsicIDAndArgs(
3745 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3746 auto thisOp = cast<NVVM::MBarrierArriveDropExpectTxOp>(op);
3747
3748 bool isClusterSpace = isPtrInSharedClusterSpace(thisOp.getAddr());
3749 bool isClusterScope = thisOp.getScope() == NVVM::MemScopeKind::CLUSTER;
3750 // bit-0: Space
3751 // bit-1: Scope
3752 size_t index = ((isClusterScope ? 1 : 0) << 1) | (isClusterSpace ? 1 : 0);
3753
3754 // clang-format off
3755 static constexpr llvm::Intrinsic::ID IDs[] = {
3756 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_expect_tx_scope_cta_space_cta,
3757 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_expect_tx_scope_cta_space_cluster,
3758 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_expect_tx_scope_cluster_space_cta,
3759 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_expect_tx_scope_cluster_space_cluster};
3760 static constexpr llvm::Intrinsic::ID relaxedIDs[] = {
3761 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_expect_tx_relaxed_scope_cta_space_cta,
3762 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_expect_tx_relaxed_scope_cta_space_cluster,
3763 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_expect_tx_relaxed_scope_cluster_space_cta,
3764 llvm::Intrinsic::nvvm_mbarrier_arrive_drop_expect_tx_relaxed_scope_cluster_space_cluster};
3765 // clang-format on
3766 auto id = thisOp.getRelaxed() ? relaxedIDs[index] : IDs[index];
3767
3768 // Tidy-up the Intrinsic Args
3769 llvm::Value *txcount = mt.lookupValue(thisOp.getTxcount());
3770 llvm::Value *mbar = mt.lookupValue(thisOp.getAddr());
3771 bool needCast = isPtrInGenericSpace(thisOp.getAddr());
3772 if (needCast)
3773 mbar = castPtrToAddrSpace(builder, mbar, NVVMMemorySpace::Shared);
3774
3775 return {id, {mbar, txcount}};
3776}
3777
3778mlir::NVVM::IDArgPair MBarrierArriveNocompleteOp::getIntrinsicIDAndArgs(
3779 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3780 auto thisOp = cast<NVVM::MBarrierArriveNocompleteOp>(op);
3781 bool isShared = isPtrInSharedCTASpace(thisOp.getAddr());
3782 llvm::Intrinsic::ID id =
3783 isShared ? llvm::Intrinsic::nvvm_mbarrier_arrive_noComplete_shared
3784 : llvm::Intrinsic::nvvm_mbarrier_arrive_noComplete;
3785 // Fill the Intrinsic Args
3786 llvm::SmallVector<llvm::Value *> args;
3787 args.push_back(mt.lookupValue(thisOp.getAddr()));
3788 args.push_back(mt.lookupValue(thisOp.getCount()));
3789
3790 return {id, std::move(args)};
3791}
3792
3793mlir::NVVM::IDArgPair MBarrierArriveDropNocompleteOp::getIntrinsicIDAndArgs(
3794 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3795 auto thisOp = cast<NVVM::MBarrierArriveDropNocompleteOp>(op);
3796 bool isShared = isPtrInSharedCTASpace(thisOp.getAddr());
3797 llvm::Intrinsic::ID id =
3798 isShared ? llvm::Intrinsic::nvvm_mbarrier_arrive_drop_noComplete_shared
3799 : llvm::Intrinsic::nvvm_mbarrier_arrive_drop_noComplete;
3800 // Fill the Intrinsic Args
3801 llvm::SmallVector<llvm::Value *> args;
3802 args.push_back(mt.lookupValue(thisOp.getAddr()));
3803 args.push_back(mt.lookupValue(thisOp.getCount()));
3804
3805 return {id, std::move(args)};
3806}
3807
3808mlir::NVVM::IDArgPair MBarrierTestWaitOp::getIntrinsicIDAndArgs(
3809 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3810 auto thisOp = cast<NVVM::MBarrierTestWaitOp>(op);
3811 bool isPhaseParity = thisOp.getStateOrPhase().getType().isInteger(32);
3812 bool isClusterScope = thisOp.getScope() == NVVM::MemScopeKind::CLUSTER;
3813 // bit-0: isPhaseParity
3814 // bit-1: Scope
3815 size_t index = ((isClusterScope ? 1 : 0) << 1) | (isPhaseParity ? 1 : 0);
3816
3817 // clang-format off
3818 static constexpr llvm::Intrinsic::ID IDs[] = {
3819 llvm::Intrinsic::nvvm_mbarrier_test_wait_scope_cta_space_cta,
3820 llvm::Intrinsic::nvvm_mbarrier_test_wait_parity_scope_cta_space_cta,
3821 llvm::Intrinsic::nvvm_mbarrier_test_wait_scope_cluster_space_cta,
3822 llvm::Intrinsic::nvvm_mbarrier_test_wait_parity_scope_cluster_space_cta};
3823 static constexpr llvm::Intrinsic::ID relaxedIDs[] = {
3824 llvm::Intrinsic::nvvm_mbarrier_test_wait_relaxed_scope_cta_space_cta,
3825 llvm::Intrinsic::nvvm_mbarrier_test_wait_parity_relaxed_scope_cta_space_cta,
3826 llvm::Intrinsic::nvvm_mbarrier_test_wait_relaxed_scope_cluster_space_cta,
3827 llvm::Intrinsic::nvvm_mbarrier_test_wait_parity_relaxed_scope_cluster_space_cta};
3828 // clang-format on
3829 auto id = thisOp.getRelaxed() ? relaxedIDs[index] : IDs[index];
3830
3831 // Tidy-up the Intrinsic Args
3832 llvm::Value *mbar = mt.lookupValue(thisOp.getAddr());
3833 llvm::Value *input = mt.lookupValue(thisOp.getStateOrPhase());
3834 bool needCast = isPtrInGenericSpace(thisOp.getAddr());
3835 if (needCast)
3836 mbar = castPtrToAddrSpace(builder, mbar, NVVMMemorySpace::Shared);
3837
3838 return {id, {mbar, input}};
3839}
3840
3841mlir::NVVM::IDArgPair MBarrierTryWaitOp::getIntrinsicIDAndArgs(
3842 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3843 auto thisOp = cast<NVVM::MBarrierTryWaitOp>(op);
3844 bool isPhaseParity = thisOp.getStateOrPhase().getType().isInteger(32);
3845 bool isClusterScope = thisOp.getScope() == NVVM::MemScopeKind::CLUSTER;
3846 bool hasTicks = static_cast<bool>(thisOp.getTicks());
3847 // bit-0: isPhaseParity
3848 // bit-1: Scope
3849 // bit-2: hasTicks
3850 size_t index = ((hasTicks ? 1 : 0) << 2) | ((isClusterScope ? 1 : 0) << 1) |
3851 (isPhaseParity ? 1 : 0);
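  // Entries 0-3 follow the same parity/scope encoding as above; entries 4-7
  // are the time-limited (_tl) variants, which take an extra ticks operand.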
3852
3853 // clang-format off
3854 static constexpr llvm::Intrinsic::ID IDs[] = {
3855 llvm::Intrinsic::nvvm_mbarrier_try_wait_scope_cta_space_cta,
3856 llvm::Intrinsic::nvvm_mbarrier_try_wait_parity_scope_cta_space_cta,
3857 llvm::Intrinsic::nvvm_mbarrier_try_wait_scope_cluster_space_cta,
3858 llvm::Intrinsic::nvvm_mbarrier_try_wait_parity_scope_cluster_space_cta,
3859 llvm::Intrinsic::nvvm_mbarrier_try_wait_tl_scope_cta_space_cta,
3860 llvm::Intrinsic::nvvm_mbarrier_try_wait_parity_tl_scope_cta_space_cta,
3861 llvm::Intrinsic::nvvm_mbarrier_try_wait_tl_scope_cluster_space_cta,
3862 llvm::Intrinsic::nvvm_mbarrier_try_wait_parity_tl_scope_cluster_space_cta};
3863 static constexpr llvm::Intrinsic::ID relaxedIDs[] = {
3864 llvm::Intrinsic::nvvm_mbarrier_try_wait_relaxed_scope_cta_space_cta,
3865 llvm::Intrinsic::nvvm_mbarrier_try_wait_parity_relaxed_scope_cta_space_cta,
3866 llvm::Intrinsic::nvvm_mbarrier_try_wait_relaxed_scope_cluster_space_cta,
3867 llvm::Intrinsic::nvvm_mbarrier_try_wait_parity_relaxed_scope_cluster_space_cta,
3868 llvm::Intrinsic::nvvm_mbarrier_try_wait_tl_relaxed_scope_cta_space_cta,
3869 llvm::Intrinsic::nvvm_mbarrier_try_wait_parity_tl_relaxed_scope_cta_space_cta,
3870 llvm::Intrinsic::nvvm_mbarrier_try_wait_tl_relaxed_scope_cluster_space_cta,
3871 llvm::Intrinsic::nvvm_mbarrier_try_wait_parity_tl_relaxed_scope_cluster_space_cta};
3872 // clang-format on
3873 auto id = thisOp.getRelaxed() ? relaxedIDs[index] : IDs[index];
3874
3875 // Tidy-up the mbarrier pointer
3876 llvm::Value *mbar = mt.lookupValue(thisOp.getAddr());
3877 bool needCast = isPtrInGenericSpace(thisOp.getAddr());
3878 if (needCast)
3879 mbar = castPtrToAddrSpace(builder, mbar, NVVMMemorySpace::Shared);
3880
3881 // Fill the Intrinsic Args
3882 llvm::SmallVector<llvm::Value *> args;
3883 args.push_back(mbar);
3884 args.push_back(mt.lookupValue(thisOp.getStateOrPhase()));
3885 if (hasTicks)
3886 args.push_back(mt.lookupValue(thisOp.getTicks()));
3887
3888 return {id, std::move(args)};
3889}
3890
3891mlir::NVVM::IDArgPair CpAsyncMBarrierArriveOp::getIntrinsicIDAndArgs(
3892 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3893 auto thisOp = cast<NVVM::CpAsyncMBarrierArriveOp>(op);
3894 bool isShared = isPtrInSharedCTASpace(thisOp.getAddr());
3895
3896 llvm::Intrinsic::ID id;
3897 if (thisOp.getNoinc()) {
3898 id = isShared ? llvm::Intrinsic::nvvm_cp_async_mbarrier_arrive_noinc_shared
3899 : llvm::Intrinsic::nvvm_cp_async_mbarrier_arrive_noinc;
3900 } else {
3901 id = isShared ? llvm::Intrinsic::nvvm_cp_async_mbarrier_arrive_shared
3902 : llvm::Intrinsic::nvvm_cp_async_mbarrier_arrive;
3903 }
3904
3905 return {id, {mt.lookupValue(thisOp.getAddr())}};
3906}
3907
3908 mlir::NVVM::IDArgPair
3909 MovMatrixOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
3910 llvm::IRBuilderBase &builder) {
3911 auto thisOp = cast<NVVM::MovMatrixOp>(op);
3912 return {llvm::Intrinsic::nvvm_movmatrix_sync_aligned_m8n8_trans_b16,
3913 {mt.lookupValue(thisOp.getSrc())}};
3914}
3915
3916#define CP_ASYNC_ID_IMPL(mod, size, suffix) \
3917 llvm::Intrinsic::nvvm_cp_async_##mod##_shared_global_##size##suffix
3918
3919#define GET_CP_ASYNC_ID(mod, size, has_cpsize) \
3920 has_cpsize ? CP_ASYNC_ID_IMPL(mod, size, _s) : CP_ASYNC_ID_IMPL(mod, size, )
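// For example, GET_CP_ASYNC_ID(ca, 4, true) expands to
// llvm::Intrinsic::nvvm_cp_async_ca_shared_global_4_s.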
3921
3922llvm::Intrinsic::ID
3923CpAsyncOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
3924 llvm::SmallVector<llvm::Value *> &args) {
3925 llvm::Intrinsic::ID id;
3926
3927 auto cpAsyncOp = cast<NVVM::CpAsyncOp>(op);
3928 bool hasCpSize = static_cast<bool>(cpAsyncOp.getCpSize());
3929 switch (cpAsyncOp.getSize()) {
3930 case 4:
3931 id = GET_CP_ASYNC_ID(ca, 4, hasCpSize);
3932 break;
3933 case 8:
3934 id = GET_CP_ASYNC_ID(ca, 8, hasCpSize);
3935 break;
3936 case 16:
3937 id = (cpAsyncOp.getModifier() == NVVM::LoadCacheModifierKind::CG)
3938 ? GET_CP_ASYNC_ID(cg, 16, hasCpSize)
3939 : GET_CP_ASYNC_ID(ca, 16, hasCpSize);
3940 break;
3941 default:
3942 llvm_unreachable("Invalid copy size in CpAsyncOp.");
3943 }
3944
3945 // Fill the Intrinsic Args
3946 args.push_back(mt.lookupValue(cpAsyncOp.getDst()));
3947 args.push_back(mt.lookupValue(cpAsyncOp.getSrc()));
3948 if (hasCpSize)
3949 args.push_back(mt.lookupValue(cpAsyncOp.getCpSize()));
3950
3951 return id;
3952}
3953
3954mlir::NVVM::IDArgPair CpAsyncBulkPrefetchOp::getIntrinsicIDAndArgs(
3955 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3956 auto thisOp = cast<NVVM::CpAsyncBulkPrefetchOp>(op);
3957 llvm::SmallVector<llvm::Value *> args;
3958 llvm::Intrinsic::ID id = llvm::Intrinsic::nvvm_cp_async_bulk_prefetch_L2;
3959
3960 // Fill the Intrinsic Args
3961 args.push_back(mt.lookupValue(thisOp.getSrcMem()));
3962 args.push_back(mt.lookupValue(thisOp.getSize()));
3963
3964 mlir::Value cacheHint = thisOp.getL2CacheHint();
3965 const bool hasCacheHint = static_cast<bool>(cacheHint);
3966 llvm::Value *i64Unused =
3967 llvm::ConstantInt::get(llvm::Type::getInt64Ty(mt.getLLVMContext()), 0);
3968 args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Unused);
3969 args.push_back(builder.getInt1(hasCacheHint));
3970
3971 return {id, std::move(args)};
3972}
3973
3974mlir::NVVM::IDArgPair CpAsyncBulkGlobalToSharedClusterOp::getIntrinsicIDAndArgs(
3975 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
3976 auto thisOp = cast<NVVM::CpAsyncBulkGlobalToSharedClusterOp>(op);
3977 llvm::SmallVector<llvm::Value *> args;
3978
3979 // Fill the Intrinsic Args: dst, mbar, src, size.
3980 args.push_back(mt.lookupValue(thisOp.getDstMem()));
3981 args.push_back(mt.lookupValue(thisOp.getMbar()));
3982 args.push_back(mt.lookupValue(thisOp.getSrcMem()));
3983 args.push_back(mt.lookupValue(thisOp.getSize()));
3984
3985 // Multicast mask for shared::cluster only, if available.
3986 mlir::Value multicastMask = thisOp.getMulticastMask();
3987 const bool hasMulticastMask = static_cast<bool>(multicastMask);
3988 const bool isSharedCTA = isPtrInSharedCTASpace(thisOp.getDstMem());
3989 if (!isSharedCTA) {
3990 llvm::Value *i16Unused = llvm::ConstantInt::get(builder.getInt16Ty(), 0);
3991 args.push_back(hasMulticastMask ? mt.lookupValue(multicastMask)
3992 : i16Unused);
3993 }
3994
3995 // Cache hint, if available.
3996 mlir::Value cacheHint = thisOp.getL2CacheHint();
3997 const bool hasCacheHint = static_cast<bool>(cacheHint);
3998 llvm::Value *i64Unused = llvm::ConstantInt::get(builder.getInt64Ty(), 0);
3999 args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Unused);
4000
4001 // Flag arguments for multicast and cachehint.
4002 if (!isSharedCTA)
4003 args.push_back(builder.getInt1(hasMulticastMask));
4004 args.push_back(builder.getInt1(hasCacheHint));
4005
4006 llvm::Intrinsic::ID id =
4007 isSharedCTA
4008 ? llvm::Intrinsic::nvvm_cp_async_bulk_global_to_shared_cta
4009 : llvm::Intrinsic::nvvm_cp_async_bulk_global_to_shared_cluster;
4010
4011 return {id, std::move(args)};
4012}
4013
4014mlir::NVVM::IDArgPair CpAsyncBulkSharedCTAToGlobalOp::getIntrinsicIDAndArgs(
4015 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4016 auto thisOp = cast<NVVM::CpAsyncBulkSharedCTAToGlobalOp>(op);
4017 llvm::SmallVector<llvm::Value *> args;
4018 llvm::Intrinsic::ID id =
4019 llvm::Intrinsic::nvvm_cp_async_bulk_shared_cta_to_global;
4020
4021 // Fill the Intrinsic Args
4022 args.push_back(mt.lookupValue(thisOp.getDstMem()));
4023 args.push_back(mt.lookupValue(thisOp.getSrcMem()));
4024 args.push_back(mt.lookupValue(thisOp.getSize()));
4025
4026 mlir::Value cacheHint = thisOp.getL2CacheHint();
4027 const bool hasCacheHint = static_cast<bool>(cacheHint);
4028 llvm::Value *i64Unused =
4029 llvm::ConstantInt::get(llvm::Type::getInt64Ty(mt.getLLVMContext()), 0);
4030 args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Unused);
4031 args.push_back(builder.getInt1(hasCacheHint));
4032
4033 // Choose the bytemask variant
4034 if (mlir::Value byteMask = thisOp.getByteMask()) {
4035 args.push_back(mt.lookupValue(byteMask));
4036 id = llvm::Intrinsic::nvvm_cp_async_bulk_shared_cta_to_global_bytemask;
4037 }
4038
4039 return {id, std::move(args)};
4040}
4041
4042bool CpAsyncBulkTensorGlobalToSharedClusterOp::getAsmValues(
4043 RewriterBase &rewriter,
4044 llvm::SmallVectorImpl<std::pair<mlir::Value, mlir::NVVM::PTXRegisterMod>>
4045 &asmValues) {
4046 // Add all the operands, but not the attributes, to the asmValues list.
4047 // The attributes are only used to select the right intrinsic variants
4048 // during lowering, so they are ignored when generating inline PTX.
4049 for (auto val : getOperands())
4050 asmValues.push_back({val, mlir::NVVM::PTXRegisterMod::Read});
4051
4052 return false;
4053}
4054
4055 mlir::NVVM::IDArgPair
4056 CpAsyncBulkTensorGlobalToSharedClusterOp::getIntrinsicIDAndArgs(
4057 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4058 auto thisOp = cast<NVVM::CpAsyncBulkTensorGlobalToSharedClusterOp>(op);
4059 const bool isCTAOnly = thisOp.getIsCTAOnly();
4060 llvm::SmallVector<llvm::Value *> args;
4061
4062 // Fill the Intrinsic Args
4063 args.push_back(mt.lookupValue(thisOp.getDstMem()));
4064 args.push_back(mt.lookupValue(thisOp.getMbar()));
4065 args.push_back(mt.lookupValue(thisOp.getTmaDescriptor()));
4066
4067 // Coordinates and im2col-offsets
4068 for (mlir::Value v : thisOp.getCoordinates())
4069 args.push_back(mt.lookupValue(v));
4070 for (mlir::Value v : thisOp.getIm2colOffsets())
4071 args.push_back(mt.lookupValue(v));
4072
4073 // MulticastMask, if available
4074 mlir::Value mcMask = thisOp.getMulticastMask();
4075 const bool hasMC = static_cast<bool>(mcMask);
4076 llvm::Value *i16Zero =
4077 llvm::ConstantInt::get(llvm::Type::getInt16Ty(mt.getLLVMContext()), 0);
4078
4079 // CacheHint, if available
4080 mlir::Value cacheHint = thisOp.getL2CacheHint();
4081 const bool hasCacheHint = static_cast<bool>(cacheHint);
4082 llvm::Value *i64Zero =
4083 llvm::ConstantInt::get(llvm::Type::getInt64Ty(mt.getLLVMContext()), 0);
4084
4085 // Flag argument CTAGroup
4086 // CTA_1/2 is mapped to values 1 and 2 for the intrinsics.
4087 // Hence, the +1 to getGroup().
4088 const int32_t val =
4089 thisOp.getGroup() ? (static_cast<int32_t>(*thisOp.getGroup()) + 1) : 0;
4090 llvm::Value *cg =
4091 llvm::ConstantInt::get(llvm::Type::getInt32Ty(mt.getLLVMContext()), val);
4092
4093 if (!isCTAOnly) {
4094 // For shared::cluster, all the arguments that we build are applicable.
4095 args.push_back(hasMC ? mt.lookupValue(mcMask) : i16Zero);
4096 args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Zero);
4097 args.push_back(builder.getInt1(hasMC));
4098 args.push_back(builder.getInt1(hasCacheHint));
4099 args.push_back(cg);
4100 } else {
4101 // For shared::cta, only cache-hint is applicable.
4102 args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Zero);
4103 args.push_back(builder.getInt1(hasCacheHint));
4104 }
4105
4106 constexpr size_t numDims = 5; // 1D to 5D
4107 constexpr size_t numModes = 5; // Tile, Im2col, w, w_128, gather4
4108 using rowTy = std::array<llvm::Intrinsic::ID, numDims + 1>;
4109 using TableTy = std::array<rowTy, numModes>;
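  // Both tables are indexed as table[mode][dim]: rows follow the TMALoadMode
  // enum order (tile, im2col, im2col_w, im2col_w_128, gather4) and columns
  // are the number of coordinates (column 0 is unused).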
4110 static constexpr TableTy IDTable{
4111 {{notIntrinsic, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_1d,
4112 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_2d,
4113 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_3d,
4114 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_4d,
4115 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_5d},
4116 {notIntrinsic, notIntrinsic, notIntrinsic,
4117 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_3d,
4118 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_4d,
4119 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_5d},
4120 {notIntrinsic, notIntrinsic, notIntrinsic,
4121 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_3d,
4122 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_4d,
4123 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_5d},
4124 {notIntrinsic, notIntrinsic, notIntrinsic,
4125 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_128_3d,
4126 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_128_4d,
4127 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_im2col_w_128_5d},
4128 {notIntrinsic, notIntrinsic, notIntrinsic, notIntrinsic, notIntrinsic,
4129 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_tile_gather4_2d}}};
4130
4131 static constexpr TableTy IDTableCTA{
4132 {{notIntrinsic,
4133 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_1d,
4134 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_2d,
4135 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_3d,
4136 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_4d,
4137 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_5d},
4138 {notIntrinsic, notIntrinsic, notIntrinsic,
4139 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_3d,
4140 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_4d,
4141 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_5d},
4142 {notIntrinsic, notIntrinsic, notIntrinsic,
4143 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_3d,
4144 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_4d,
4145 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_5d},
4146 {notIntrinsic, notIntrinsic, notIntrinsic,
4147 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_128_3d,
4148 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_128_4d,
4149 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_im2col_w_128_5d},
4150 {notIntrinsic, notIntrinsic, notIntrinsic, notIntrinsic, notIntrinsic,
4151 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_g2s_cta_tile_gather4_2d}}};
4152
4153 static_assert(
4154 (getMaxEnumValForTMALoadMode() == std::size(IDTable) - 1) &&
4155 (getMaxEnumValForTMALoadMode() == std::size(IDTableCTA) - 1),
4156 "TMALoadModes must match number of rows in IDTable and IDTableCTA");
4157 size_t mode = static_cast<size_t>(thisOp.getMode());
4158 size_t dim = thisOp.getCoordinates().size();
4159 auto id = isCTAOnly ? IDTableCTA[mode][dim] : IDTable[mode][dim];
4160 assert(id != notIntrinsic &&
4161 "Invalid intrinsic for CpAsyncBulkTensorGlobalToSharedClusterOp.");
4162
4163 return {id, std::move(args)};
4164}
4165
4166mlir::NVVM::IDArgPair CpAsyncBulkTensorPrefetchOp::getIntrinsicIDAndArgs(
4167 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4168 auto thisOp = cast<NVVM::CpAsyncBulkTensorPrefetchOp>(op);
4169 llvm::SmallVector<llvm::Value *> args;
4170
4171 // Fill the Intrinsic Args
4172 args.push_back(mt.lookupValue(thisOp.getTmaDescriptor()));
4173
4174 for (auto v : thisOp.getCoordinates())
4175 args.push_back(mt.lookupValue(v));
4176 for (auto v : thisOp.getIm2colOffsets())
4177 args.push_back(mt.lookupValue(v));
4178
4179 mlir::Value cacheHint = thisOp.getL2CacheHint();
4180 const bool hasCacheHint = static_cast<bool>(cacheHint);
4181 llvm::Value *i64Unused =
4182 llvm::ConstantInt::get(llvm::Type::getInt64Ty(mt.getLLVMContext()), 0);
4183 args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Unused);
4184 args.push_back(builder.getInt1(hasCacheHint));
4185
4186 const unsigned NI = llvm::Intrinsic::not_intrinsic;
4187 static constexpr llvm::Intrinsic::ID IDTable[][6] = {
4188 {NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_tile_1d,
4189 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_tile_2d,
4190 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_tile_3d,
4191 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_tile_4d,
4192 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_tile_5d},
4193 {NI, NI, NI,
4194 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_im2col_3d,
4195 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_im2col_4d,
4196 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_im2col_5d},
4197 {NI, NI, NI,
4198 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_im2col_w_3d,
4199 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_im2col_w_4d,
4200 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_im2col_w_5d},
4201 {NI, NI, NI,
4202 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_im2col_w_128_3d,
4203 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_im2col_w_128_4d,
4204 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_im2col_w_128_5d},
4205 {NI, NI, NI, NI, NI,
4206 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_prefetch_tile_gather4_2d}};
4207
4208 static_assert(getMaxEnumValForTMALoadMode() == std::size(IDTable) - 1,
4209 "TMALoadModes must match number of rows in IDTable");
4210 size_t mode = static_cast<size_t>(thisOp.getMode());
4211 size_t dim = thisOp.getCoordinates().size();
4212 llvm::Intrinsic::ID id = IDTable[mode][dim];
4213 if (id == llvm::Intrinsic::not_intrinsic)
4214 llvm_unreachable("Invalid intrinsic for CpAsyncBulkTensorPrefetchOp.");
4215
4216 return {id, std::move(args)};
4217}
4218
4219 mlir::NVVM::IDArgPair
4220 CpAsyncBulkTensorSharedCTAToGlobalOp::getIntrinsicIDAndArgs(
4221 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4222 auto thisOp = cast<NVVM::CpAsyncBulkTensorSharedCTAToGlobalOp>(op);
4223 llvm::SmallVector<llvm::Value *> args;
4224
4225 // Fill the Intrinsic Args
4226 args.push_back(mt.lookupValue(thisOp.getSrcMem()));
4227 args.push_back(mt.lookupValue(thisOp.getTmaDescriptor()));
4228
4229 for (auto v : thisOp.getCoordinates())
4230 args.push_back(mt.lookupValue(v));
4231
4232 mlir::Value cacheHint = thisOp.getL2CacheHint();
4233 const bool hasCacheHint = static_cast<bool>(cacheHint);
4234 llvm::Value *i64Unused =
4235 llvm::ConstantInt::get(llvm::Type::getInt64Ty(mt.getLLVMContext()), 0);
4236 args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64Unused);
4237 args.push_back(builder.getInt1(hasCacheHint));
4238
4239 const unsigned NI = llvm::Intrinsic::not_intrinsic;
4240 static constexpr llvm::Intrinsic::ID IDTable[][6] = {
4241 {NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_1d,
4242 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_2d,
4243 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_3d,
4244 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_4d,
4245 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_5d},
4246 {NI, NI, NI, llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_im2col_3d,
4247 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_im2col_4d,
4248 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_im2col_5d},
4249 {NI, NI, NI, NI, NI,
4250 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_s2g_tile_scatter4_2d}};
4251
4252 static_assert(getMaxEnumValForTMAStoreMode() == std::size(IDTable) - 1,
4253 "TMAStoreModes must match number of rows in IDTable");
4254 size_t mode = static_cast<size_t>(thisOp.getMode());
4255 size_t dim = thisOp.getCoordinates().size();
4256 llvm::Intrinsic::ID id = IDTable[mode][dim];
4257 if (id == llvm::Intrinsic::not_intrinsic)
4258 llvm_unreachable(
4259 "Invalid intrinsic for CpAsyncBulkTensorSharedCTAToGlobalOp.");
4260
4261 return {id, std::move(args)};
4262}
4263
4264NVVM::IDArgPair CpAsyncBulkTensorReduceOp::getIntrinsicIDAndArgs(
4265 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4266 auto thisOp = cast<NVVM::CpAsyncBulkTensorReduceOp>(op);
4267 llvm::LLVMContext &ctx = mt.getLLVMContext();
4268
4269 llvm::SmallVector<llvm::Value *> args;
4270
4271 // Arguments to the intrinsic:
4272 // shared_mem_ptr, tmaDesc, tensorDims
4273 // cache_hint(if applicable) and flag(boolean)
4274 args.push_back(mt.lookupValue(thisOp.getSrcMem()));
4275 args.push_back(mt.lookupValue(thisOp.getTmaDescriptor()));
4276
4277 for (Value v : thisOp.getCoordinates())
4278 args.push_back(mt.lookupValue(v));
4279
4280 mlir::Value cacheHint = thisOp.getL2CacheHint();
4281 const bool hasCacheHint = static_cast<bool>(cacheHint);
4282 llvm::Value *i64ZeroValue =
4283 llvm::ConstantInt::get(llvm::Type::getInt64Ty(ctx), 0);
4284 args.push_back(hasCacheHint ? mt.lookupValue(cacheHint) : i64ZeroValue);
4285 args.push_back(builder.getInt1(hasCacheHint));
4286
4287 const llvm::Intrinsic::ID notIntrinsic = llvm::Intrinsic::not_intrinsic;
4288
4289 constexpr unsigned numRedKinds = 8; // ADD, MIN, MAX, INC, DEC, AND, OR, XOR
4290 constexpr unsigned numLayouts = 2; // TILE, IM2COL
4291 constexpr unsigned maxDim = 5; // 1D to 5D
4292 using row = std::array<llvm::Intrinsic::ID, maxDim + 1>;
4293 using layoutTable = std::array<row, numLayouts>;
4294 using fullTable = std::array<layoutTable, numRedKinds>;
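  // Indexed as IDTable[redKind][layout][dim]; the assertions below check
  // each index against the corresponding table bound.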
4295 static constexpr fullTable IDTable{
4296 {// RedTy::ADD
4297 {{{{notIntrinsic,
4298 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_1d,
4299 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_2d,
4300 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_3d,
4301 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_4d,
4302 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_tile_5d}},
4303 {{notIntrinsic, notIntrinsic, notIntrinsic,
4304 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_im2col_3d,
4305 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_im2col_4d,
4306 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_add_im2col_5d}}}},
4307 // RedTy::MIN
4308 {{{{notIntrinsic,
4309 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_1d,
4310 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_2d,
4311 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_3d,
4312 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_4d,
4313 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_tile_5d}},
4314 {{notIntrinsic, notIntrinsic, notIntrinsic,
4315 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_im2col_3d,
4316 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_im2col_4d,
4317 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_min_im2col_5d}}}},
4318 // RedTy::MAX
4319 {{{{notIntrinsic,
4320 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_1d,
4321 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_2d,
4322 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_3d,
4323 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_4d,
4324 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_tile_5d}},
4325 {{notIntrinsic, notIntrinsic, notIntrinsic,
4326 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_im2col_3d,
4327 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_im2col_4d,
4328 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_max_im2col_5d}}}},
4329 // RedTy::INC
4330 {{{{notIntrinsic,
4331 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_1d,
4332 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_2d,
4333 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_3d,
4334 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_4d,
4335 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_tile_5d}},
4336 {{notIntrinsic, notIntrinsic, notIntrinsic,
4337 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_im2col_3d,
4338 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_im2col_4d,
4339 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_inc_im2col_5d}}}},
4340 // RedTy::DEC
4341 {{{{notIntrinsic,
4342 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_1d,
4343 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_2d,
4344 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_3d,
4345 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_4d,
4346 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_tile_5d}},
4347 {{notIntrinsic, notIntrinsic, notIntrinsic,
4348 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_im2col_3d,
4349 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_im2col_4d,
4350 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_dec_im2col_5d}}}},
4351 // RedTy::AND
4352 {{{{notIntrinsic,
4353 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_1d,
4354 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_2d,
4355 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_3d,
4356 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_4d,
4357 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_tile_5d}},
4358 {{notIntrinsic, notIntrinsic, notIntrinsic,
4359 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_im2col_3d,
4360 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_im2col_4d,
4361 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_and_im2col_5d}}}},
4362 // RedTy::OR
4363 {{{{notIntrinsic,
4364 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_1d,
4365 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_2d,
4366 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_3d,
4367 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_4d,
4368 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_tile_5d}},
4369 {{notIntrinsic, notIntrinsic, notIntrinsic,
4370 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_im2col_3d,
4371 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_im2col_4d,
4372 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_or_im2col_5d}}}},
4373 // RedTy::XOR
4374 {{{{notIntrinsic,
4375 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_1d,
4376 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_2d,
4377 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_3d,
4378 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_4d,
4379 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_tile_5d}},
4380 {{notIntrinsic, notIntrinsic, notIntrinsic,
4381 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_im2col_3d,
4382 llvm::Intrinsic::nvvm_cp_async_bulk_tensor_reduce_xor_im2col_4d,
4383 llvm::Intrinsic::
4384 nvvm_cp_async_bulk_tensor_reduce_xor_im2col_5d}}}}}};
4385
4386 static_assert(getMaxEnumValForTMAReduxKind() == std::size(IDTable) - 1,
4387 "TMAReduxKinds must match number of rows in IDTable");
4388
4389 size_t redKind = static_cast<size_t>(thisOp.getRedKind());
4390 size_t mode = static_cast<size_t>(thisOp.getMode());
4391 size_t dim = thisOp.getCoordinates().size();
4392
4393 assert(redKind < IDTable.size() &&
4394 "Invalid redKind for CpAsyncBulkTensorReduceOp");
4395 assert(mode < IDTable[redKind].size() &&
4396 "Invalid mode for CpAsyncBulkTensorReduceOp");
4397 assert(dim < IDTable[redKind][mode].size() &&
4398 "Invalid dim for CpAsyncBulkTensorReduceOp");
4399
4400 llvm::Intrinsic::ID intrinsicID = IDTable[redKind][mode][dim];
4401
4402 assert(intrinsicID != notIntrinsic &&
4403 "Invalid intrinsic for CpAsyncBulkTensorReduceOp.");
4404
4405 return {intrinsicID, std::move(args)};
4406}
4407
4408#define _none
4409
4410#define CVT_F2TF32_ID_IMPL(rnd, relu, sf) \
4411 hasRelu ? llvm::Intrinsic::nvvm_f2tf32_##rnd##relu##sf \
4412 : llvm::Intrinsic::nvvm_f2tf32_##rnd##sf
4413
4414#define GET_CVT_F2TF32_ID(rnd, relu, sf) \
4415 hasSatFinite ? CVT_F2TF32_ID_IMPL(rnd, relu, sf) \
4416 : CVT_F2TF32_ID_IMPL(rnd, relu, )
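// For example, with rnd = RN, hasRelu = true, and hasSatFinite = false,
// this selects llvm::Intrinsic::nvvm_f2tf32_rn_relu.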
4417
4418llvm::Intrinsic::ID
4419ConvertFloatToTF32Op::getIntrinsicID(NVVM::FPRoundingMode rnd,
4420 NVVM::SaturationMode sat, bool hasRelu) {
4421 using RndMode = NVVM::FPRoundingMode;
4422 bool hasSatFinite = (sat == NVVM::SaturationMode::SATFINITE);
4423 switch (rnd) {
4424 case RndMode::RN:
4425 return GET_CVT_F2TF32_ID(rn, _relu, _satfinite);
4426 case RndMode::RZ:
4427 return GET_CVT_F2TF32_ID(rz, _relu, _satfinite);
4428 case RndMode::RNA:
4429 return GET_CVT_F2TF32_ID(rna, _none, _satfinite);
4430 default:
4431 llvm_unreachable("Invalid RoundingMode for CvtFloatToTF32Op");
4432 }
4433}
4434
4435 mlir::NVVM::IDArgPair
4436 ConvertF32x2ToF4x2Op::getIntrinsicIDAndArgs(NVVM::ConvertF32x2ToF4x2Op op,
4437 LLVM::ModuleTranslation &mt,
4438 llvm::IRBuilderBase &builder) {
4439 llvm::SmallVector<llvm::Value *> args;
4440 args.push_back(mt.lookupValue(op.getA()));
4441 args.push_back(mt.lookupValue(op.getB()));
4442
4443 bool hasRelu = op.getRelu();
4444
4445 llvm::Intrinsic::ID intId =
4446 hasRelu ? llvm::Intrinsic::nvvm_ff_to_e2m1x2_rn_relu_satfinite
4447 : llvm::Intrinsic::nvvm_ff_to_e2m1x2_rn_satfinite;
4448
4449 return {intId, std::move(args)};
4450}
4451
4452#define GET_F32x2_TO_F6x2_ID(type, has_relu) \
4453 has_relu ? llvm::Intrinsic::nvvm_ff_to_##type##_rn_relu_satfinite \
4454 : llvm::Intrinsic::nvvm_ff_to_##type##_rn_satfinite
4455
4456llvm::Intrinsic::ID ConvertF32x2ToF6x2Op::getIntrinsicID(mlir::Type dstTy,
4457 bool hasRelu) {
4458 return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy)
4459 .Case([&](mlir::Float6E2M3FNType) {
4460 return GET_F32x2_TO_F6x2_ID(e2m3x2, hasRelu);
4461 })
4462 .Case([&](mlir::Float6E3M2FNType) {
4463 return GET_F32x2_TO_F6x2_ID(e3m2x2, hasRelu);
4464 })
4465 .Default([](mlir::Type) {
4466 llvm_unreachable("Invalid conversion in ConvertF32x2ToF6x2Op");
4467 return llvm::Intrinsic::not_intrinsic;
4468 });
4469}
4470
4471NVVM::IDArgPair
4472ConvertF16x2ToF4x2Op::getIntrinsicIDAndArgs(NVVM::ConvertF16x2ToF4x2Op &op,
4473 LLVM::ModuleTranslation &mt,
4474 llvm::IRBuilderBase &builder) {
4475 mlir::Type dstTy = op.getDstTy();
4476 bool hasRelu = op.getRelu();
4477
4478 llvm::Intrinsic::ID intId = llvm::Intrinsic::not_intrinsic;
4479
4480 if (llvm::isa<mlir::Float4E2M1FNType>(dstTy))
4481 intId = hasRelu ? llvm::Intrinsic::nvvm_f16x2_to_e2m1x2_rn_relu_satfinite
4482 : llvm::Intrinsic::nvvm_f16x2_to_e2m1x2_rn_satfinite;
4483
4484 llvm::SmallVector<llvm::Value *> args;
4485 args.push_back(mt.lookupValue(op.getSrc()));
4486
4487 return {intId, std::move(args)};
4488}
4489
4490NVVM::IDArgPair
4491ConvertBF16x2ToF4x2Op::getIntrinsicIDAndArgs(NVVM::ConvertBF16x2ToF4x2Op &op,
4492 LLVM::ModuleTranslation &mt,
4493 llvm::IRBuilderBase &builder) {
4494 mlir::Type dstTy = op.getDstTy();
4495 bool hasRelu = op.getRelu();
4496
4497 llvm::Intrinsic::ID intId = llvm::Intrinsic::not_intrinsic;
4498
4499 if (llvm::isa<mlir::Float4E2M1FNType>(dstTy))
4500 intId = hasRelu ? llvm::Intrinsic::nvvm_bf16x2_to_e2m1x2_rn_relu_satfinite
4501 : llvm::Intrinsic::nvvm_bf16x2_to_e2m1x2_rn_satfinite;
4502
4504 args.push_back(mt.lookupValue(op.getSrc()));
4505
4506 return {intId, std::move(args)};
4507}
4508
4509llvm::Intrinsic::ID ConvertF16x2ToF6x2Op::getIntrinsicID(mlir::Type dstTy,
4510 bool hasRelu) {
4511 return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy)
4512 .Case<mlir::Float6E2M3FNType>([&](mlir::Float6E2M3FNType) {
4513 return hasRelu ? llvm::Intrinsic::nvvm_f16x2_to_e2m3x2_rn_relu_satfinite
4514 : llvm::Intrinsic::nvvm_f16x2_to_e2m3x2_rn_satfinite;
4515 })
4516 .Case<mlir::Float6E3M2FNType>([&](mlir::Float6E3M2FNType) {
4517 return hasRelu ? llvm::Intrinsic::nvvm_f16x2_to_e3m2x2_rn_relu_satfinite
4518 : llvm::Intrinsic::nvvm_f16x2_to_e3m2x2_rn_satfinite;
4519 })
4520 .Default([](mlir::Type) {
4521 llvm_unreachable("Invalid conversion in ConvertF16x2ToF6x2Op");
4522 return llvm::Intrinsic::not_intrinsic;
4523 });
4524}
4525
4526llvm::Intrinsic::ID ConvertBF16x2ToF6x2Op::getIntrinsicID(mlir::Type dstTy,
4527 bool hasRelu) {
4528 return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy)
4529 .Case<mlir::Float6E2M3FNType>([&](mlir::Float6E2M3FNType) {
4530 return hasRelu
4531 ? llvm::Intrinsic::nvvm_bf16x2_to_e2m3x2_rn_relu_satfinite
4532 : llvm::Intrinsic::nvvm_bf16x2_to_e2m3x2_rn_satfinite;
4533 })
4534 .Case<mlir::Float6E3M2FNType>([&](mlir::Float6E3M2FNType) {
4535 return hasRelu
4536 ? llvm::Intrinsic::nvvm_bf16x2_to_e3m2x2_rn_relu_satfinite
4537 : llvm::Intrinsic::nvvm_bf16x2_to_e3m2x2_rn_satfinite;
4538 })
4539 .Default([](mlir::Type) {
4540 llvm_unreachable("Invalid conversion in ConvertBF16x2ToF6x2Op");
4541 return llvm::Intrinsic::not_intrinsic;
4542 });
4543}
4544
4545#define GET_F32x2_TO_F8X2_US_ID(rnd, has_satf) \
4546 has_satf ? llvm::Intrinsic::nvvm_ff_to_ue8m0x2_##rnd##_satfinite \
4547 : llvm::Intrinsic::nvvm_ff_to_ue8m0x2_##rnd
4548
4549#define GET_F32x2_TO_F8X2_S_ID(type, has_relu) \
4550 has_relu ? llvm::Intrinsic::nvvm_ff_to_##type##_rn_relu \
4551 : llvm::Intrinsic::nvvm_ff_to_##type##_rn
4552
4553llvm::Intrinsic::ID
4554ConvertF32x2ToF8x2Op::getIntrinsicID(mlir::Type dstTy, NVVM::FPRoundingMode rnd,
4555 NVVM::SaturationMode sat, bool hasRelu) {
4556 bool hasSatFinite = (sat == NVVM::SaturationMode::SATFINITE);
4557 bool hasRoundingModeRZ = (rnd == NVVM::FPRoundingMode::RZ);
4558 bool hasRoundingModeRP = (rnd == NVVM::FPRoundingMode::RP);
4559
4560 return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy)
4561 .Case([&](mlir::Float8E4M3FNType) {
4562 return GET_F32x2_TO_F8X2_S_ID(e4m3x2, hasRelu);
4563 })
4564 .Case([&](mlir::Float8E5M2Type) {
4565 return GET_F32x2_TO_F8X2_S_ID(e5m2x2, hasRelu);
4566 })
4567 .Case([&](mlir::Float8E8M0FNUType) {
4568 if (hasRoundingModeRZ)
4569 return GET_F32x2_TO_F8X2_US_ID(rz, hasSatFinite);
4570 else if (hasRoundingModeRP)
4571 return GET_F32x2_TO_F8X2_US_ID(rp, hasSatFinite);
4572
4573 llvm_unreachable("Invalid conversion in ConvertF32x2ToF8x2Op");
4574 })
4575 .Default([](mlir::Type) {
4576 llvm_unreachable("Invalid conversion in ConvertF32x2ToF8x2Op");
4577 return llvm::Intrinsic::not_intrinsic;
4578 });
4579}
4580
4581#define GET_F16x2_TO_F8X2_ID(type, has_relu) \
4582 has_relu ? llvm::Intrinsic::nvvm_f16x2_to_##type##_rn_relu \
4583 : llvm::Intrinsic::nvvm_f16x2_to_##type##_rn
4584
4585llvm::Intrinsic::ID ConvertF16x2ToF8x2Op::getIntrinsicID(mlir::Type dstTy,
4586 bool hasRelu) {
4587 return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy)
4588 .Case([&](mlir::Float8E4M3FNType) {
4589 return GET_F16x2_TO_F8X2_ID(e4m3x2, hasRelu);
4590 })
4591 .Case([&](mlir::Float8E5M2Type) {
4592 return GET_F16x2_TO_F8X2_ID(e5m2x2, hasRelu);
4593 })
4594 .Default([](mlir::Type) {
4595 llvm_unreachable("Invalid conversion in ConvertF16x2ToF8x2Op");
4596 return llvm::Intrinsic::not_intrinsic;
4597 });
4598}
4599
4600llvm::Intrinsic::ID
4601ConvertBF16x2ToF8x2Op::getIntrinsicID(mlir::Type dstTy,
4602 NVVM::FPRoundingMode rnd,
4603 NVVM::SaturationMode sat, bool hasRelu) {
4604 bool hasSatFinite = (sat == NVVM::SaturationMode::SATFINITE);
4605
4606 static constexpr llvm::Intrinsic::ID ue8m0x2IDs[] = {
4607 llvm::Intrinsic::nvvm_bf16x2_to_ue8m0x2_rz,
4608 llvm::Intrinsic::nvvm_bf16x2_to_ue8m0x2_rp,
4609 llvm::Intrinsic::nvvm_bf16x2_to_ue8m0x2_rz_satfinite,
4610 llvm::Intrinsic::nvvm_bf16x2_to_ue8m0x2_rp_satfinite,
4611 };
4612
4613 return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy)
4614 .Case<mlir::Float8E4M3FNType>([&](mlir::Float8E4M3FNType) {
4615 return hasRelu
4616 ? llvm::Intrinsic::nvvm_bf16x2_to_e4m3x2_rn_relu_satfinite
4617 : llvm::Intrinsic::nvvm_bf16x2_to_e4m3x2_rn_satfinite;
4618 })
4619 .Case<mlir::Float8E5M2Type>([&](mlir::Float8E5M2Type) {
4620 return hasRelu
4621 ? llvm::Intrinsic::nvvm_bf16x2_to_e5m2x2_rn_relu_satfinite
4622 : llvm::Intrinsic::nvvm_bf16x2_to_e5m2x2_rn_satfinite;
4623 })
4624 .Case<mlir::Float8E8M0FNUType>([&](mlir::Float8E8M0FNUType) {
4625 bool hasRoundingModeRP = (rnd == NVVM::FPRoundingMode::RP);
4626 unsigned index = (hasSatFinite << 1) | hasRoundingModeRP;
4627 return ue8m0x2IDs[index];
4628 })
4629 .Default([](mlir::Type) {
4630 llvm_unreachable("Invalid conversion in ConvertBF16x2ToF8x2Op");
4631 return llvm::Intrinsic::not_intrinsic;
4632 });
4633}
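// For the ue8m0x2 path above, the two mode bits form an index whose layout
// matches the order of ue8m0x2IDs:
//   index = (hasSatFinite << 1) | hasRoundingModeRP
//   0b00 -> nvvm_bf16x2_to_ue8m0x2_rz            0b01 -> ..._rp
//   0b10 -> nvvm_bf16x2_to_ue8m0x2_rz_satfinite  0b11 -> ..._rp_satfinite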
4634
4635NVVM::IDArgPair ConvertF8x2ToF16x2Op::getIntrinsicIDAndArgs(
4636 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4637 auto curOp = cast<NVVM::ConvertF8x2ToF16x2Op>(op);
4638
4639 bool hasRelu = curOp.getRelu();
4640
4641 llvm::Intrinsic::ID intId =
4642 llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(curOp.getSrcTy())
4643 .Case([&](Float8E4M3FNType type) {
4644 return hasRelu ? llvm::Intrinsic::nvvm_e4m3x2_to_f16x2_rn_relu
4645 : llvm::Intrinsic::nvvm_e4m3x2_to_f16x2_rn;
4646 })
4647 .Case([&](Float8E5M2Type type) {
4648 return hasRelu ? llvm::Intrinsic::nvvm_e5m2x2_to_f16x2_rn_relu
4649 : llvm::Intrinsic::nvvm_e5m2x2_to_f16x2_rn;
4650 })
4651 .Default([](mlir::Type type) {
4652 llvm_unreachable("Invalid type for ConvertF8x2ToF16x2Op");
4653 return llvm::Intrinsic::not_intrinsic;
4654 });
4655
4656 llvm::Value *packedI16 =
4657 builder.CreateBitCast(mt.lookupValue(curOp.getSrc()),
4658 llvm::Type::getInt16Ty(builder.getContext()));
4659
4660 return {intId, {packedI16}};
4661}
4662
4663NVVM::IDArgPair ConvertF8x2ToBF16x2Op::getIntrinsicIDAndArgs(
4664 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4665 auto curOp = cast<NVVM::ConvertF8x2ToBF16x2Op>(op);
4666
4667 llvm::Intrinsic::ID intId = llvm::Intrinsic::nvvm_ue8m0x2_to_bf16x2;
4668 llvm::Value *packedI16 =
4669 builder.CreateBitCast(mt.lookupValue(curOp.getSrc()),
4670 llvm::Type::getInt16Ty(builder.getContext()));
4671
4672 return {intId, {packedI16}};
4673}
4674
4675NVVM::IDArgPair ConvertF6x2ToF16x2Op::getIntrinsicIDAndArgs(
4676 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4677 auto curOp = cast<NVVM::ConvertF6x2ToF16x2Op>(op);
4678
4679 bool hasRelu = curOp.getRelu();
4680
4681 llvm::Intrinsic::ID intId =
4682 llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(curOp.getSrcTy())
4683 .Case([&](Float6E2M3FNType type) {
4684 return hasRelu ? llvm::Intrinsic::nvvm_e2m3x2_to_f16x2_rn_relu
4685 : llvm::Intrinsic::nvvm_e2m3x2_to_f16x2_rn;
4686 })
4687 .Case([&](Float6E3M2FNType type) {
4688 return hasRelu ? llvm::Intrinsic::nvvm_e3m2x2_to_f16x2_rn_relu
4689 : llvm::Intrinsic::nvvm_e3m2x2_to_f16x2_rn;
4690 })
4691 .Default([](mlir::Type type) {
4692 llvm_unreachable("Invalid type for ConvertF6x2ToF16x2Op");
4693 return llvm::Intrinsic::not_intrinsic;
4694 });
4695
4696 llvm::Value *packedI16 =
4697 builder.CreateBitCast(mt.lookupValue(curOp.getSrc()),
4698 llvm::Type::getInt16Ty(builder.getContext()));
4699
4700 return {intId, {packedI16}};
4701}
4702
4703NVVM::IDArgPair ConvertF4x2ToF16x2Op::getIntrinsicIDAndArgs(
4704 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4705 auto curOp = cast<NVVM::ConvertF4x2ToF16x2Op>(op);
4706
4707 bool hasRelu = curOp.getRelu();
4708
4709 llvm::Intrinsic::ID intId =
4710 llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(curOp.getSrcTy())
4711 .Case([&](Float4E2M1FNType type) {
4712 return hasRelu ? llvm::Intrinsic::nvvm_e2m1x2_to_f16x2_rn_relu
4713 : llvm::Intrinsic::nvvm_e2m1x2_to_f16x2_rn;
4714 })
4715 .Default([](mlir::Type type) {
4716 llvm_unreachable("Invalid type for ConvertF4x2ToF16x2Op");
4717 return llvm::Intrinsic::not_intrinsic;
4718 });
4719
4720 llvm::Value *extendedI16 =
4721 builder.CreateZExt(mt.lookupValue(curOp.getSrc()),
4722 llvm::Type::getInt16Ty(builder.getContext()));
4723
4724 return {intId, {extendedI16}};
4725}
4726
4727NVVM::IDArgPair ConvertF32x2ToS2F6x2Op::getIntrinsicIDAndArgs(
4728 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4729 auto thisOp = cast<NVVM::ConvertF32x2ToS2F6x2Op>(op);
4730 bool hasRelu = thisOp.getRelu();
4731 bool hasScale = static_cast<bool>(thisOp.getScaleFactor());
4732
4733 llvm::Intrinsic::ID id =
4734 hasRelu
4735 ? llvm::Intrinsic::nvvm_ff_to_s2f6x2_rn_relu_satfinite_scale_n2_ue8m0
4736 : llvm::Intrinsic::nvvm_ff_to_s2f6x2_rn_satfinite_scale_n2_ue8m0;
4737
4738 // Fill the Intrinsic Args
4739 llvm::SmallVector<llvm::Value *> args;
4740 args.push_back(mt.lookupValue(thisOp.getA()));
4741 args.push_back(mt.lookupValue(thisOp.getB()));
4742 args.push_back(hasScale ? mt.lookupValue(thisOp.getScaleFactor())
4743 : builder.getInt16(0x7f7f));
4744 return {id, std::move(args)};
4745}
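// A note on the 0x7f7f default above: the scale operand packs two ue8m0
// values, one per result element. Assuming the usual ue8m0 encoding (an 8-bit
// biased exponent with value 2^(stored - 127)), a byte of 0x7f is exponent 127
// and therefore the neutral scale 2^0 = 1.0, so 0x7f7f means "no scaling" when
// the scale_factor operand is omitted.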
4746
4747NVVM::IDArgPair ConvertBF16x2ToS2F6x2Op::getIntrinsicIDAndArgs(
4748 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4749 auto thisOp = cast<NVVM::ConvertBF16x2ToS2F6x2Op>(op);
4750 bool hasRelu = thisOp.getRelu();
4751 bool hasScale = static_cast<bool>(thisOp.getScaleFactor());
4752
4753 llvm::Intrinsic::ID id =
4754 hasRelu
4755 ? llvm::Intrinsic::
4756 nvvm_bf16x2_to_s2f6x2_rn_relu_satfinite_scale_n2_ue8m0
4757 : llvm::Intrinsic::nvvm_bf16x2_to_s2f6x2_rn_satfinite_scale_n2_ue8m0;
4758
4759 // Fill the Intrinsic Args
4760 llvm::SmallVector<llvm::Value *> args;
4761 args.push_back(mt.lookupValue(thisOp.getSrc()));
4762 args.push_back(hasScale ? mt.lookupValue(thisOp.getScaleFactor())
4763 : builder.getInt16(0x7f7f));
4764 return {id, std::move(args)};
4765}
4766
4767NVVM::IDArgPair ConvertS2F6x2ToBF16x2Op::getIntrinsicIDAndArgs(
4768 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
4769 auto thisOp = cast<NVVM::ConvertS2F6x2ToBF16x2Op>(op);
4770 bool hasRelu = thisOp.getRelu();
4771 bool hasScale = static_cast<bool>(thisOp.getScaleFactor());
4772 bool hasSat = thisOp.getSat() == NVVM::SaturationMode::SATFINITE;
4773
4774 static constexpr llvm::Intrinsic::ID ids[] = {
4775 llvm::Intrinsic::nvvm_s2f6x2_to_bf16x2_rn_scale_n2_ue8m0,
4776 llvm::Intrinsic::nvvm_s2f6x2_to_bf16x2_rn_relu_scale_n2_ue8m0,
4777 llvm::Intrinsic::nvvm_s2f6x2_to_bf16x2_rn_satfinite_scale_n2_ue8m0,
4778 llvm::Intrinsic::nvvm_s2f6x2_to_bf16x2_rn_relu_satfinite_scale_n2_ue8m0,
4779 };
4780
4781 unsigned idx = (hasSat << 1) | hasRelu;
4782
4783 // Fill the Intrinsic Args
4784 llvm::SmallVector<llvm::Value *> args;
4785 llvm::Value *packedI16 =
4786 builder.CreateBitCast(mt.lookupValue(thisOp.getSrc()),
4787 llvm::Type::getInt16Ty(builder.getContext()));
4788 args.push_back(packedI16);
4789 args.push_back(hasScale ? mt.lookupValue(thisOp.getScaleFactor())
4790 : builder.getInt16(0x7f7f));
4791
4792 return {ids[idx], std::move(args)};
4793}
4794
4795llvm::Intrinsic::ID
4796Tcgen05AllocOp::getIntrinsicIDAndArgs(Operation &op,
4797 LLVM::ModuleTranslation &mt,
4798 llvm::SmallVector<llvm::Value *> &args) {
4799 auto curOp = cast<NVVM::Tcgen05AllocOp>(op);
4800 unsigned as = llvm::cast<LLVM::LLVMPointerType>(curOp.getAddr().getType())
4801 .getAddressSpace();
4802 bool isShared = as == NVVMMemorySpace::Shared;
4803 bool is2CTAMode = curOp.getGroup() == CTAGroupKind::CTA_2;
4804
4805 llvm::Intrinsic::ID id;
4806 if (isShared) {
4807 id = is2CTAMode ? llvm::Intrinsic::nvvm_tcgen05_alloc_shared_cg2
4808 : llvm::Intrinsic::nvvm_tcgen05_alloc_shared_cg1;
4809 } else {
4810 id = is2CTAMode ? llvm::Intrinsic::nvvm_tcgen05_alloc_cg2
4811 : llvm::Intrinsic::nvvm_tcgen05_alloc_cg1;
4812 }
4813
4814 // Fill the Intrinsic Args
4815 args.push_back(mt.lookupValue(curOp.getAddr()));
4816 args.push_back(mt.lookupValue(curOp.getNCols()));
4817
4818 return id;
4819}
4820
4821llvm::Intrinsic::ID Tcgen05DeallocOp::getIntrinsicIDAndArgs(
4822 Operation &op, LLVM::ModuleTranslation &mt,
4823 llvm::SmallVector<llvm::Value *> &args) {
4824 auto curOp = cast<NVVM::Tcgen05DeallocOp>(op);
4825 auto id = (curOp.getGroup() == CTAGroupKind::CTA_1)
4826 ? llvm::Intrinsic::nvvm_tcgen05_dealloc_cg1
4827 : llvm::Intrinsic::nvvm_tcgen05_dealloc_cg2;
4828
4829 // Fill the Intrinsic Args
4830 args.push_back(mt.lookupValue(curOp.getTaddr()));
4831 args.push_back(mt.lookupValue(curOp.getNCols()));
4832
4833 return id;
4834}
4835
4836#define TCGEN05_COMMIT_IMPL(cg, is_shared, mc) \
4837 is_shared ? llvm::Intrinsic::nvvm_tcgen05_commit##mc##_shared##_##cg \
4838 : llvm::Intrinsic::nvvm_tcgen05_commit##mc##_##cg
4839
4840#define GET_TCGEN05_COMMIT_ID(cta_group, is_shared, has_mc) \
4841 has_mc ? TCGEN05_COMMIT_IMPL(cta_group, is_shared, _mc) \
4842 : TCGEN05_COMMIT_IMPL(cta_group, is_shared, )
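// Example expansion: GET_TCGEN05_COMMIT_ID(cg2, /*is_shared=*/true,
// /*has_mc=*/true) pastes together
// llvm::Intrinsic::nvvm_tcgen05_commit_mc_shared_cg2, while the same call with
// has_mc=false yields nvvm_tcgen05_commit_shared_cg2.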
4843
4844llvm::Intrinsic::ID
4845Tcgen05CommitOp::getIntrinsicIDAndArgs(Operation &op,
4846 LLVM::ModuleTranslation &mt,
4847 llvm::SmallVector<llvm::Value *> &args) {
4848 auto curOp = cast<NVVM::Tcgen05CommitOp>(op);
4849 unsigned as = llvm::cast<LLVM::LLVMPointerType>(curOp.getAddr().getType())
4850 .getAddressSpace();
4851 bool isShared = as == NVVMMemorySpace::Shared;
4852 bool hasMulticast = static_cast<bool>(curOp.getMulticastMask());
4853 bool is2CTAMode = curOp.getGroup() == CTAGroupKind::CTA_2;
4854
4855 llvm::Intrinsic::ID id =
4856 is2CTAMode ? GET_TCGEN05_COMMIT_ID(cg2, isShared, hasMulticast)
4857 : GET_TCGEN05_COMMIT_ID(cg1, isShared, hasMulticast);
4858
4859 // Fill the Intrinsic Args
4860 args.push_back(mt.lookupValue(curOp.getAddr()));
4861 if (hasMulticast)
4862 args.push_back(mt.lookupValue(curOp.getMulticastMask()));
4863
4864 return id;
4865}
4866
4867#define TCGEN05_CP_IMPL(shape_mc, src_fmt, cg) \
4868 llvm::Intrinsic::nvvm_tcgen05_cp##shape_mc##src_fmt##cg
4869
4870#define TCGEN05_CP_2CTA(shape_mc, src_fmt, is_2cta) \
4871 is_2cta ? TCGEN05_CP_IMPL(shape_mc, src_fmt, _cg2) \
4872 : TCGEN05_CP_IMPL(shape_mc, src_fmt, _cg1)
4873
4874#define GET_TCGEN05_CP_ID(shape_mc, src_fmt, is_2cta) \
4875 [&]() -> auto { \
4876 if ((src_fmt) == Tcgen05CpSrcFormat::B6x16_P32) \
4877 return TCGEN05_CP_2CTA(shape_mc, _b6x16_p32, is_2cta); \
4878 if ((src_fmt) == Tcgen05CpSrcFormat::B4x16_P64) \
4879 return TCGEN05_CP_2CTA(shape_mc, _b4x16_p64, is_2cta); \
4880 return TCGEN05_CP_2CTA(shape_mc, , is_2cta); \
4881 }()
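// GET_TCGEN05_CP_ID wraps the token pasting in an immediately-invoked lambda
// so the source format can be inspected at runtime. For example,
// GET_TCGEN05_CP_ID(_128x256b, srcFmt, true) with srcFmt == B6x16_P32
// evaluates to llvm::Intrinsic::nvvm_tcgen05_cp_128x256b_b6x16_p32_cg2; with
// the default source format it is nvvm_tcgen05_cp_128x256b_cg2.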
4882
4883NVVM::IDArgPair
4884ConvertF32x2ToF16x2Op::getIntrinsicIDAndArgs(NVVM::ConvertF32x2ToF16x2Op &op,
4885 LLVM::ModuleTranslation &mt,
4886 llvm::IRBuilderBase &builder) {
4887 static constexpr llvm::Intrinsic::ID rndRNIds[] = {
4888 llvm::Intrinsic::nvvm_ff2f16x2_rn,
4889 llvm::Intrinsic::nvvm_ff2f16x2_rn_relu,
4890 llvm::Intrinsic::nvvm_ff2f16x2_rn_satfinite,
4891 llvm::Intrinsic::nvvm_ff2f16x2_rn_relu_satfinite,
4892 };
4893 static constexpr llvm::Intrinsic::ID rndRZIds[] = {
4894 llvm::Intrinsic::nvvm_ff2f16x2_rz,
4895 llvm::Intrinsic::nvvm_ff2f16x2_rz_relu,
4896 llvm::Intrinsic::nvvm_ff2f16x2_rz_satfinite,
4897 llvm::Intrinsic::nvvm_ff2f16x2_rz_relu_satfinite,
4898 };
4899 static constexpr llvm::Intrinsic::ID rndRSIds[] = {
4900 llvm::Intrinsic::nvvm_ff2f16x2_rs,
4901 llvm::Intrinsic::nvvm_ff2f16x2_rs_relu,
4902 llvm::Intrinsic::nvvm_ff2f16x2_rs_satfinite,
4903 llvm::Intrinsic::nvvm_ff2f16x2_rs_relu_satfinite,
4904 };
4905
4906 unsigned hasRelu = op.getRelu() ? 1 : 0;
4907 unsigned hasSatFinite =
4908 (op.getSat() == NVVM::SaturationMode::SATFINITE) ? 1 : 0;
4909 // idx: bit-0 - relu
4910 // bit-1 - satfinite
4911 unsigned idx = (hasSatFinite << 1) | hasRelu;
4912
4913 llvm::SmallVector<llvm::Value *> args;
4914 args.push_back(mt.lookupValue(op.getSrcHi()));
4915 args.push_back(mt.lookupValue(op.getSrcLo()));
4916 if (op.getRandomBits())
4917 args.push_back(mt.lookupValue(op.getRandomBits()));
4918
4919 switch (op.getRnd()) {
4920 case FPRoundingMode::RN:
4921 return {rndRNIds[idx], std::move(args)};
4922 case FPRoundingMode::RZ:
4923 return {rndRZIds[idx], std::move(args)};
4924 case FPRoundingMode::RS:
4925 return {rndRSIds[idx], std::move(args)};
4926 default:
4927 llvm_unreachable("Invalid rounding mode for ConvertF32x2ToF16x2Op");
4928 }
4929}
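// The idx computed above indexes each table in a fixed layout:
//   0 -> base, 1 -> relu, 2 -> satfinite, 3 -> relu_satfinite
// e.g. rnd=RZ, relu=true, sat=SATFINITE selects rndRZIds[3] =
// nvvm_ff2f16x2_rz_relu_satfinite. The optional random-bits operand is
// appended only when present; it is what the RS (stochastic rounding)
// variants consume. The BF16x2 conversion below reuses the same scheme.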
4930
4931NVVM::IDArgPair
4932ConvertF32x2ToBF16x2Op::getIntrinsicIDAndArgs(NVVM::ConvertF32x2ToBF16x2Op &op,
4933 LLVM::ModuleTranslation &mt,
4934 llvm::IRBuilderBase &builder) {
4935 static constexpr llvm::Intrinsic::ID rndRNIds[] = {
4936 llvm::Intrinsic::nvvm_ff2bf16x2_rn,
4937 llvm::Intrinsic::nvvm_ff2bf16x2_rn_relu,
4938 llvm::Intrinsic::nvvm_ff2bf16x2_rn_satfinite,
4939 llvm::Intrinsic::nvvm_ff2bf16x2_rn_relu_satfinite,
4940 };
4941 static constexpr llvm::Intrinsic::ID rndRZIds[] = {
4942 llvm::Intrinsic::nvvm_ff2bf16x2_rz,
4943 llvm::Intrinsic::nvvm_ff2bf16x2_rz_relu,
4944 llvm::Intrinsic::nvvm_ff2bf16x2_rz_satfinite,
4945 llvm::Intrinsic::nvvm_ff2bf16x2_rz_relu_satfinite,
4946 };
4947 static constexpr llvm::Intrinsic::ID rndRSIds[] = {
4948 llvm::Intrinsic::nvvm_ff2bf16x2_rs,
4949 llvm::Intrinsic::nvvm_ff2bf16x2_rs_relu,
4950 llvm::Intrinsic::nvvm_ff2bf16x2_rs_satfinite,
4951 llvm::Intrinsic::nvvm_ff2bf16x2_rs_relu_satfinite,
4952 };
4953
4954 unsigned hasRelu = op.getRelu() ? 1 : 0;
4955 unsigned hasSatFinite =
4956 (op.getSat() == NVVM::SaturationMode::SATFINITE) ? 1 : 0;
4957 // idx: bit-0 - relu
4958 // bit-1 - satfinite
4959 unsigned idx = (hasSatFinite << 1) | hasRelu;
4960
4961 llvm::SmallVector<llvm::Value *> args;
4962 args.push_back(mt.lookupValue(op.getSrcHi()));
4963 args.push_back(mt.lookupValue(op.getSrcLo()));
4964 if (op.getRandomBits())
4965 args.push_back(mt.lookupValue(op.getRandomBits()));
4966
4967 switch (op.getRnd()) {
4968 case FPRoundingMode::RN:
4969 return {rndRNIds[idx], std::move(args)};
4970 case FPRoundingMode::RZ:
4971 return {rndRZIds[idx], std::move(args)};
4972 case FPRoundingMode::RS:
4973 return {rndRSIds[idx], std::move(args)};
4974 default:
4975 llvm_unreachable("Invalid rounding mode for ConvertF32x2ToBF16x2Op");
4976 }
4977}
4978
4979llvm::Intrinsic::ID ConvertF32x4ToF8x4Op::getIntrinsicID() {
4980 mlir::Type dstTy = getDstTy();
4981 bool hasRelu = getRelu();
4982
4983 return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy)
4984 .Case([&](mlir::Float8E4M3FNType) {
4985 return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e4m3x4_rs_relu_satfinite
4986 : llvm::Intrinsic::nvvm_f32x4_to_e4m3x4_rs_satfinite;
4987 })
4988 .Case([&](mlir::Float8E5M2Type) {
4989 return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e5m2x4_rs_relu_satfinite
4990 : llvm::Intrinsic::nvvm_f32x4_to_e5m2x4_rs_satfinite;
4991 })
4992 .Default([](mlir::Type) {
4993 llvm_unreachable("Invalid F8 type in ConvertF32x4ToF8x4Op");
4994 return llvm::Intrinsic::not_intrinsic;
4995 });
4996}
4997
4998llvm::Intrinsic::ID ConvertF32x4ToF6x4Op::getIntrinsicID() {
4999 mlir::Type dstTy = getDstTy();
5000 bool hasRelu = getRelu();
5001
5002 return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy)
5003 .Case([&](mlir::Float6E2M3FNType) {
5004 return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e2m3x4_rs_relu_satfinite
5005 : llvm::Intrinsic::nvvm_f32x4_to_e2m3x4_rs_satfinite;
5006 })
5007 .Case([&](mlir::Float6E3M2FNType) {
5008 return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e3m2x4_rs_relu_satfinite
5009 : llvm::Intrinsic::nvvm_f32x4_to_e3m2x4_rs_satfinite;
5010 })
5011 .Default([](mlir::Type) {
5012 llvm_unreachable("Invalid F6 type in ConvertF32x4ToF6x4Op");
5013 return llvm::Intrinsic::not_intrinsic;
5014 });
5015}
5016
5017llvm::Intrinsic::ID ConvertF32x4ToF4x4Op::getIntrinsicID() {
5018 mlir::Type dstTy = getDstTy();
5019 bool hasRelu = getRelu();
5020
5021 return llvm::TypeSwitch<mlir::Type, llvm::Intrinsic::ID>(dstTy)
5022 .Case([&](mlir::Float4E2M1FNType) {
5023 return hasRelu ? llvm::Intrinsic::nvvm_f32x4_to_e2m1x4_rs_relu_satfinite
5024 : llvm::Intrinsic::nvvm_f32x4_to_e2m1x4_rs_satfinite;
5025 })
5026 .Default([](mlir::Type) {
5027 llvm_unreachable("Invalid F4 type in ConvertF32x4ToF4x4Op");
5028 return llvm::Intrinsic::not_intrinsic;
5029 });
5030}
5031
5032llvm::Intrinsic::ID Tcgen05CpOp::getIntrinsicID(Operation &op) {
5033 auto curOp = cast<NVVM::Tcgen05CpOp>(op);
5034 bool is2CTA = curOp.getGroup() == CTAGroupKind::CTA_2;
5035 auto srcFmt = curOp.getSrcFormat();
5036 auto mc = curOp.getMulticast();
5037
5038 switch (curOp.getShape()) {
5039 case Tcgen05CpShape::SHAPE_128x256b:
5040 return GET_TCGEN05_CP_ID(_128x256b, srcFmt, is2CTA);
5041 case Tcgen05CpShape::SHAPE_128x128b:
5042 return GET_TCGEN05_CP_ID(_128x128b, srcFmt, is2CTA);
5043 case Tcgen05CpShape::SHAPE_4x256b:
5044 return GET_TCGEN05_CP_ID(_4x256b, srcFmt, is2CTA);
5045 case Tcgen05CpShape::SHAPE_32x128b:
5046 return GET_TCGEN05_CP_ID(_32x128b_warpx4, srcFmt, is2CTA);
5047 case Tcgen05CpShape::SHAPE_64x128b:
5048 return (mc == Tcgen05CpMulticast::WARPX2_01_23)
5049 ? GET_TCGEN05_CP_ID(_64x128b_warpx2_01_23, srcFmt, is2CTA)
5050 : GET_TCGEN05_CP_ID(_64x128b_warpx2_02_13, srcFmt, is2CTA);
5051 }
5052 llvm_unreachable("Invalid shape in tcgen05 cp Op");
5053}
5054
5055// Returns true if the given vector length is valid for the given shape.
5056// The function models the table in the tcgen05.{ld, st} Op descriptions.
5057static bool isValidVectorLength(NVVM::Tcgen05LdStShape shape,
5058 unsigned vecLen) {
5059 if (shape == NVVM::Tcgen05LdStShape::SHAPE_16X128B)
5060 return vecLen >= 2;
5061 if (shape == NVVM::Tcgen05LdStShape::SHAPE_16X256B)
5062 return vecLen >= 4;
5063 return true;
5064}
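// The table this models reduces to two constraints: shape 16x128b only
// accepts vector lengths >= 2 and 16x256b only lengths >= 4; every other
// shape accepts any supported length. For example, a tcgen05.ld with shape
// 16x256b and a vector<2xi32> result is rejected by the verifiers below.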
5065
5066LogicalResult Tcgen05LdOp::verify() {
5067 LogicalResult result = success();
5068 if (getShape() == NVVM::Tcgen05LdStShape::SHAPE_16X32BX2 && !getOffset())
5069 result = emitError("shape 16x32bx2 requires offset argument");
5070
5071 if (getShape() != NVVM::Tcgen05LdStShape::SHAPE_16X32BX2 && getOffset())
5072 result = emitError("offset argument is only supported for shape 16x32bx2");
5073
5074 auto resTy = getRes().getType();
5075 unsigned resLen = isa<VectorType>(resTy)
5076 ? llvm::cast<VectorType>(resTy).getNumElements()
5077 : 1;
5078 if (!isValidVectorLength(getShape(), resLen))
5079 result = emitError(llvm::formatv("invalid result type length {0} for shape "
5080 "{1} in tcgen05.ld Op",
5081 resLen, stringifyEnum(getShape())));
5082
5083 return result;
5084}
5085
5086LogicalResult Tcgen05StOp::verify() {
5087 LogicalResult result = success();
5088 if (getShape() == NVVM::Tcgen05LdStShape::SHAPE_16X32BX2 && !getOffset())
5089 result = emitError("shape 16x32bx2 requires offset argument");
5090
5091 auto valTy = getVal().getType();
5092 unsigned valLen = isa<VectorType>(valTy)
5093 ? llvm::cast<VectorType>(valTy).getNumElements()
5094 : 1;
5095 if (!isValidVectorLength(getShape(), valLen))
5096 result = emitError(llvm::formatv("invalid input length {0} for shape "
5097 "{1} in tcgen05.st Op",
5098 valLen, stringifyEnum(getShape())));
5099
5100 return result;
5101}
5102
5103/// Infer the result ranges for the NVVM SpecialRangeableRegisterOp that might
5104/// have ConstantRangeAttr.
5105static void nvvmInferResultRanges(Operation *op, Value result,
5106 ArrayRef<::mlir::ConstantIntRanges> argRanges,
5107 SetIntRangeFn setResultRanges) {
5108 if (auto rangeAttr = op->getAttrOfType<LLVM::ConstantRangeAttr>("range")) {
5109 setResultRanges(result, {rangeAttr.getLower(), rangeAttr.getUpper(),
5110 rangeAttr.getLower(), rangeAttr.getUpper()});
5111 } else {
5112 setResultRanges(result, IntegerValueRange::getMaxRange(result).getValue());
5113 }
5114}
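// For example, a laneid register op whose "range" attribute carries the
// bounds 0 and 32 hands exactly those bounds to the integer range analysis,
// which can then fold away comparisons against the lane index; without the
// attribute the result is assumed to span the register's full range.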
5115
5116/// Verify the range attribute satisfies LLVM ConstantRange constructor
5117/// requirements for NVVM SpecialRangeableRegisterOp.
5118static LogicalResult
5119verifyConstantRangeAttr(Operation *op,
5120 std::optional<LLVM::ConstantRangeAttr> rangeAttr) {
5121 if (!rangeAttr)
5122 return success();
5123
5124 const llvm::APInt &lower = rangeAttr->getLower();
5125 const llvm::APInt &upper = rangeAttr->getUpper();
5126
5127 // Check LLVM ConstantRange constructor condition
5128 if (lower == upper && !lower.isMaxValue() && !lower.isMinValue()) {
5129 unsigned bitWidth = lower.getBitWidth();
5130 llvm::APInt minVal = llvm::APInt::getMinValue(bitWidth);
5131 llvm::APInt maxVal = llvm::APInt::getMaxValue(bitWidth);
5132 return op->emitOpError(
5133 "invalid range attribute: lower == upper is only allowed when "
5134 "both equal the min (")
5135 << llvm::toString(minVal, 10, false) << ") or max ("
5136 << llvm::toString(maxVal, 10, false) << ") value";
5137 }
5138
5139 return success();
5140}
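// Background for the check above: in LLVM's ConstantRange, Lower == Upper is
// reserved for the two degenerate sets (both min encodes the empty set, both
// max encodes the full set); any other equal pair asserts inside the
// ConstantRange constructor, so it is rejected here with a proper diagnostic.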
5141
5142static llvm::Value *getAsPackedI32(llvm::Value *arg,
5143 llvm::IRBuilderBase &builder) {
5144 return builder.CreateBitCast(arg,
5145 llvm::Type::getInt32Ty(builder.getContext()));
5146}
5147
5148NVVM::IDArgPair DotAccumulate4WayOp::getIntrinsicIDAndArgs(
5149 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5150 auto curOp = cast<NVVM::DotAccumulate4WayOp>(op);
5151
5152 llvm::SmallVector<llvm::Value *> args;
5153 args.push_back(getAsPackedI32(mt.lookupValue(curOp.getA()), builder));
5154 args.push_back(getAsPackedI32(mt.lookupValue(curOp.getB()), builder));
5155 args.push_back(mt.lookupValue(curOp.getC()));
5156
5157 bool isASigned = curOp.getAType() == NVVM::DotAccumulateType::SIGNED;
5158 bool isBSigned = curOp.getBType() == NVVM::DotAccumulateType::SIGNED;
5159 unsigned type = (isASigned << 1) | isBSigned;
5160 const llvm::Intrinsic::ID ids[] = {
5161 llvm::Intrinsic::nvvm_idp4a_u_u,
5162 llvm::Intrinsic::nvvm_idp4a_u_s,
5163 llvm::Intrinsic::nvvm_idp4a_s_u,
5164 llvm::Intrinsic::nvvm_idp4a_s_s,
5165 };
5166 return {ids[type], args};
5167}
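// The two signedness bits select the intrinsic, with the a-operand in the
// high bit so the key matches the order of ids[]:
//   (isASigned << 1) | isBSigned : 0 -> u_u, 1 -> u_s, 2 -> s_u, 3 -> s_s
// e.g. a signed A with an unsigned B yields 0b10 -> nvvm_idp4a_s_u.
// DotAccumulate2WayOp below uses the same keying for the idp2a intrinsics.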
5168
5169NVVM::IDArgPair DotAccumulate2WayOp::getIntrinsicIDAndArgs(
5170 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5171 auto curOp = cast<NVVM::DotAccumulate2WayOp>(op);
5172
5173 llvm::SmallVector<llvm::Value *> args;
5174 args.push_back(getAsPackedI32(mt.lookupValue(curOp.getA()), builder));
5175 args.push_back(getAsPackedI32(mt.lookupValue(curOp.getB()), builder));
5176 args.push_back(builder.getInt1(curOp.getBHi()));
5177 args.push_back(mt.lookupValue(curOp.getC()));
5178
5179 bool isASigned = curOp.getAType() == NVVM::DotAccumulateType::SIGNED;
5180 bool isBSigned = curOp.getBType() == NVVM::DotAccumulateType::SIGNED;
5181 unsigned type = (isASigned << 1) | isBSigned;
5182 const llvm::Intrinsic::ID ids[] = {
5183 llvm::Intrinsic::nvvm_idp2a_u_u,
5184 llvm::Intrinsic::nvvm_idp2a_u_s,
5185 llvm::Intrinsic::nvvm_idp2a_s_u,
5186 llvm::Intrinsic::nvvm_idp2a_s_s,
5187 };
5188 return {ids[type], args};
5189}
5190
5191static llvm::Value *getParamCastedAddr(llvm::Value *addr,
5192 llvm::IRBuilderBase &builder) {
5193 return builder.CreateAddrSpaceCast(
5194 addr, builder.getPtrTy(llvm::NVPTXAS::ADDRESS_SPACE_ENTRY_PARAM));
5195}
5196
5197NVVM::IDArgPair
5198PrefetchOp::getIntrinsicIDAndArgs(NVVM::PrefetchOp &op,
5199 LLVM::ModuleTranslation &mt,
5200 llvm::IRBuilderBase &builder) {
5201 using MemSpace = NVVM::NVVMMemorySpace;
5202 using CacheLevel = NVVM::PrefetchCacheLevel;
5203
5204 std::optional<NVVM::PrefetchCacheLevel> cacheLevel = op.getCacheLevel();
5205 std::optional<NVVM::CacheEvictionPriority> evictPriority =
5206 op.getEvictPriority();
5207 unsigned addressSpace =
5208 llvm::cast<LLVM::LLVMPointerType>(op.getAddr().getType())
5209 .getAddressSpace();
5210
5211 llvm::SmallVector<llvm::Value *> args;
5212 llvm::Value *addr = mt.lookupValue(op.getAddr());
5213 args.push_back(op.getInParamSpace() ? getParamCastedAddr(addr, builder)
5214 : addr);
5215
5216 if (op.getTensormap())
5217 return {llvm::Intrinsic::nvvm_prefetch_tensormap, args};
5218
5219 assert(cacheLevel && "expected cache level for non-tensormap prefetch");
5220
5221 if (op.getUniform() && *cacheLevel == CacheLevel::L1)
5222 return {llvm::Intrinsic::nvvm_prefetchu_L1, args};
5223
5224 if (evictPriority && *cacheLevel == CacheLevel::L2) {
5225 switch (*evictPriority) {
5226 case NVVM::CacheEvictionPriority::EvictLast:
5227 return {llvm::Intrinsic::nvvm_prefetch_global_L2_evict_last, args};
5228 case NVVM::CacheEvictionPriority::EvictNormal:
5229 return {llvm::Intrinsic::nvvm_prefetch_global_L2_evict_normal, args};
5230 default:
5231 llvm_unreachable("Invalid cache eviction priority");
5232 }
5233 }
5234
5235 switch (static_cast<MemSpace>(addressSpace)) {
5236 case MemSpace::Generic:
5237 return *cacheLevel == CacheLevel::L1
5238 ? NVVM::IDArgPair({llvm::Intrinsic::nvvm_prefetch_L1, args})
5239 : NVVM::IDArgPair({llvm::Intrinsic::nvvm_prefetch_L2, args});
5240 case MemSpace::Global:
5241 return *cacheLevel == CacheLevel::L1
5242 ? NVVM::IDArgPair(
5243 {llvm::Intrinsic::nvvm_prefetch_global_L1, args})
5244 : NVVM::IDArgPair(
5245 {llvm::Intrinsic::nvvm_prefetch_global_L2, args});
5246 case MemSpace::Local:
5247 return *cacheLevel == CacheLevel::L1
5248 ? NVVM::IDArgPair(
5249 {llvm::Intrinsic::nvvm_prefetch_local_L1, args})
5250 : NVVM::IDArgPair(
5251 {llvm::Intrinsic::nvvm_prefetch_local_L2, args});
5252 default:
5253 llvm_unreachable("Invalid pointer address space");
5254 }
5255}
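// Dispatch summary for the cases above: tensormap prefetches always use
// nvvm_prefetch_tensormap; uniform L1 prefetches map to nvvm_prefetchu_L1; an
// L2 prefetch with an eviction priority picks one of the global_L2_evict_*
// intrinsics; everything else is chosen by the (address space, cache level)
// pair.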
5256
5257bool NVVM::InlinePtxOp::getAsmValues(
5258 RewriterBase &rewriter,
5259 llvm::SmallVectorImpl<std::pair<mlir::Value, mlir::NVVM::PTXRegisterMod>>
5260 &asmValues) {
5261 for (auto arg : getReadWriteArgs())
5262 asmValues.push_back({arg, mlir::NVVM::PTXRegisterMod::ReadWrite});
5263 for (auto arg : getResults())
5264 asmValues.push_back({arg, mlir::NVVM::PTXRegisterMod::Write});
5265 for (auto arg : getReadOnlyArgs())
5266 asmValues.push_back({arg, mlir::NVVM::PTXRegisterMod::Read});
5267 if (getPredicate())
5268 asmValues.push_back({getPredicate(), mlir::NVVM::PTXRegisterMod::Read});
5269 return false; // No manual mapping needed
5270}
5271
5272NVVM::IDArgPair ClusterLaunchControlTryCancelOp::getIntrinsicIDAndArgs(
5273 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5274 auto curOp = cast<NVVM::ClusterLaunchControlTryCancelOp>(op);
5275 llvm::SmallVector<llvm::Value *> args;
5276 args.push_back(mt.lookupValue(curOp.getSmemAddress()));
5277 args.push_back(mt.lookupValue(curOp.getMbarrier()));
5278
5279 llvm::Intrinsic::ID intrinsicID =
5280 curOp.getMulticast()
5281 ? llvm::Intrinsic::
5282 nvvm_clusterlaunchcontrol_try_cancel_async_multicast_shared
5283 : llvm::Intrinsic::nvvm_clusterlaunchcontrol_try_cancel_async_shared;
5284
5285 return {intrinsicID, args};
5286}
5287
5288NVVM::IDArgPair ClusterLaunchControlQueryCancelOp::getIntrinsicIDAndArgs(
5289 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5290 auto curOp = cast<NVVM::ClusterLaunchControlQueryCancelOp>(op);
5291 llvm::SmallVector<llvm::Value *> args;
5292 args.push_back(mt.lookupValue(curOp.getTryCancelResponse()));
5293
5294 llvm::Intrinsic::ID intrinsicID;
5295
5296 switch (curOp.getQueryType()) {
5297 case NVVM::ClusterLaunchControlQueryType::IS_CANCELED:
5298 intrinsicID =
5299 llvm::Intrinsic::nvvm_clusterlaunchcontrol_query_cancel_is_canceled;
5300 break;
5301 case NVVM::ClusterLaunchControlQueryType::GET_FIRST_CTA_ID_X:
5302 intrinsicID = llvm::Intrinsic::
5303 nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_x;
5304 break;
5305 case NVVM::ClusterLaunchControlQueryType::GET_FIRST_CTA_ID_Y:
5306 intrinsicID = llvm::Intrinsic::
5307 nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_y;
5308 break;
5309 case NVVM::ClusterLaunchControlQueryType::GET_FIRST_CTA_ID_Z:
5310 intrinsicID = llvm::Intrinsic::
5311 nvvm_clusterlaunchcontrol_query_cancel_get_first_ctaid_z;
5312 break;
5313 }
5314 return {intrinsicID, args};
5315}
5316
5317NVVM::IDArgPair
5318PermuteOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
5319 llvm::IRBuilderBase &builder) {
5320 auto thisOp = cast<NVVM::PermuteOp>(op);
5321 NVVM::PermuteMode mode = thisOp.getMode();
5322
5323 static constexpr llvm::Intrinsic::ID IDs[] = {
5324 llvm::Intrinsic::nvvm_prmt, llvm::Intrinsic::nvvm_prmt_f4e,
5325 llvm::Intrinsic::nvvm_prmt_b4e, llvm::Intrinsic::nvvm_prmt_rc8,
5326 llvm::Intrinsic::nvvm_prmt_ecl, llvm::Intrinsic::nvvm_prmt_ecr,
5327 llvm::Intrinsic::nvvm_prmt_rc16};
5328
5329 unsigned modeIndex = static_cast<unsigned>(mode);
5330 llvm::SmallVector<llvm::Value *> args;
5331 args.push_back(mt.lookupValue(thisOp.getLo()));
5332
5333 // Only first 3 modes (Default, f4e, b4e) need the hi operand.
5334 if (modeIndex < 3)
5335 args.push_back(mt.lookupValue(thisOp.getHi()));
5336
5337 args.push_back(mt.lookupValue(thisOp.getSelector()));
5338
5339 return {IDs[modeIndex], args};
5340}
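// The IDs array is kept in the same order as the PermuteMode enum (default,
// f4e, b4e, rc8, ecl, ecr, rc16). Only the first three modes read from the
// full 8-byte {lo, hi} pool, so e.g. mode b4e (index 2) lowers to
// nvvm_prmt_b4e with (lo, hi, selector) operands, while rc8 and friends take
// just (lo, selector).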
5341
5342mlir::NVVM::IDArgPair TensormapReplaceOp::getIntrinsicIDAndArgs(
5343 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5344 auto thisOp = cast<NVVM::TensormapReplaceOp>(op);
5345 llvm::SmallVector<llvm::Value *> args;
5345
5347 args.push_back(mt.lookupValue(thisOp.getAddr()));
5348 if (thisOp.getOrd())
5349 args.push_back(builder.getInt32(thisOp.getOrd().value()));
5350 if (thisOp.getNewValue())
5351 args.push_back(mt.lookupValue(thisOp.getNewValue()));
5352 if (auto attr = thisOp.getNewValueAttr()) {
5353 auto val =
5354 llvm::TypeSwitch<mlir::Attribute, unsigned>(attr)
5355 .Case<TensormapElemtypeAttr, TensormapInterleaveLayoutAttr,
5356 TensormapSwizzleModeAttr, TensormapSwizzleAtomicityAttr,
5357 TensormapFillModeAttr>([](auto attr) {
5358 return static_cast<unsigned>(attr.getValue());
5359 })
5360 .Default([](auto attr) {
5361 llvm_unreachable("Invalid attribute type");
5362 return 0;
5363 });
5364 args.push_back(builder.getInt32(val));
5365 }
5366
5367 static constexpr llvm::Intrinsic::ID IDs[] = {
5368 llvm::Intrinsic::nvvm_tensormap_replace_global_address,
5369 llvm::Intrinsic::nvvm_tensormap_replace_rank,
5370 llvm::Intrinsic::nvvm_tensormap_replace_box_dim,
5371 llvm::Intrinsic::nvvm_tensormap_replace_global_dim,
5372 llvm::Intrinsic::nvvm_tensormap_replace_global_stride,
5373 llvm::Intrinsic::nvvm_tensormap_replace_element_stride,
5374 llvm::Intrinsic::nvvm_tensormap_replace_elemtype,
5375 llvm::Intrinsic::nvvm_tensormap_replace_interleave_layout,
5376 llvm::Intrinsic::nvvm_tensormap_replace_swizzle_mode,
5377 llvm::Intrinsic::nvvm_tensormap_replace_swizzle_atomicity,
5378 llvm::Intrinsic::nvvm_tensormap_replace_fill_mode,
5379 };
5380
5381 unsigned fieldIndex = static_cast<unsigned>(thisOp.getField());
5382
5383 return {IDs[fieldIndex], args};
5384}
5385
5386//===----------------------------------------------------------------------===//
5387// NVVM tcgen05.mma functions
5388//===----------------------------------------------------------------------===//
5389
5391Tcgen05MMAOp::getIntrinsicIDAndArgs(Operation &op, LLVM::ModuleTranslation &mt,
5392 llvm::IRBuilderBase &builder) {
5393
5394 auto thisOp = cast<NVVM::Tcgen05MMAOp>(op);
5395 llvm::SmallVector<llvm::Value *> args;
5396
5397 args.push_back(mt.lookupValue(thisOp.getMatrixD()));
5398
5399 llvm::Value *A = mt.lookupValue(thisOp.getMatrixA());
5400 const bool isATensor = isa<llvm::PointerType>(A->getType());
5401 args.push_back(A);
5402
5403 args.push_back(mt.lookupValue(thisOp.getMatrixB()));
5404 args.push_back(mt.lookupValue(thisOp.getIdesc()));
5405 args.push_back(mt.lookupValue(thisOp.getEnableInputD()));
5406
5407 using EnableAShiftArray = std::array<llvm::Intrinsic::ID, 2>;
5408 using CtaGroupArray = std::array<EnableAShiftArray, 2>;
5409 using IsATensorArray = std::array<CtaGroupArray, 2>;
5410 using HasScaleInputDArray = std::array<IsATensorArray, 2>;
5411 using HasDisableOutputLaneArray = std::array<HasScaleInputDArray, 2>;
5412
5413 // [hasDisableOutputLane][hasScaleInputD][isATensor][CtaGroup][EnableAShift]
5414 static constexpr HasDisableOutputLaneArray tcgen05MMAIDs = {
5415 { // without disable output lane
5416 {{// without scale input D
5417 {{
5418 // shared
5419 {{// cg1
5420 {llvm::Intrinsic::nvvm_tcgen05_mma_shared, notIntrinsic},
5421 // cg2
5422 {llvm::Intrinsic::nvvm_tcgen05_mma_shared, notIntrinsic}}},
5423 {{// tensor
5424 {
5425 // cg1
5426 llvm::Intrinsic::nvvm_tcgen05_mma_tensor,
5427 llvm::Intrinsic::nvvm_tcgen05_mma_tensor_ashift,
5428 },
5429 {
5430 // cg2
5431 llvm::Intrinsic::nvvm_tcgen05_mma_tensor,
5432 llvm::Intrinsic::nvvm_tcgen05_mma_tensor_ashift,
5433 }}},
5434 }},
5435 // with scale input D
5436 {{ // shared
5437 {{// cg1
5438 {llvm::Intrinsic::nvvm_tcgen05_mma_shared_scale_d, notIntrinsic},
5439 // cg2
5440 {llvm::Intrinsic::nvvm_tcgen05_mma_shared_scale_d, notIntrinsic}}},
5441 {{// tensor
5442 {
5443 // cg1
5444 llvm::Intrinsic::nvvm_tcgen05_mma_tensor_scale_d,
5445 llvm::Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_ashift,
5446 },
5447 {
5448 // cg2
5449 llvm::Intrinsic::nvvm_tcgen05_mma_tensor_scale_d,
5450 llvm::Intrinsic::nvvm_tcgen05_mma_tensor_scale_d_ashift,
5451 }}}}}}},
5452 // with disable output lane
5453 {{ // without scale input D
5454 {{ // shared
5455 {{// cg1
5456 {llvm::Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg1,
5457 notIntrinsic},
5458 // cg2
5459 {llvm::Intrinsic::nvvm_tcgen05_mma_shared_disable_output_lane_cg2,
5460 notIntrinsic}}},
5461 {{// cg1
5462 {
5463 llvm::Intrinsic::
5464 nvvm_tcgen05_mma_tensor_disable_output_lane_cg1,
5465 llvm::Intrinsic::
5466 nvvm_tcgen05_mma_tensor_disable_output_lane_cg1_ashift,
5467 },
5468 // cg2
5469 {
5470 llvm::Intrinsic::
5471 nvvm_tcgen05_mma_tensor_disable_output_lane_cg2,
5472 llvm::Intrinsic::
5473 nvvm_tcgen05_mma_tensor_disable_output_lane_cg2_ashift,
5474 }}}}},
5475 // with scale input D
5476 {{ // shared
5477 {{// cg1
5478 {llvm::Intrinsic::
5479 nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg1,
5480 notIntrinsic},
5481 // cg2
5482 {llvm::Intrinsic::
5483 nvvm_tcgen05_mma_shared_scale_d_disable_output_lane_cg2,
5484 notIntrinsic}}},
5485 // tensor
5486 {{// cg1
5487 {llvm::Intrinsic::
5488 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1,
5489 llvm::Intrinsic::
5490 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg1_ashift},
5491 // cg2
5492 {
5493 llvm::Intrinsic::
5494 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2,
5495 llvm::Intrinsic::
5496 nvvm_tcgen05_mma_tensor_scale_d_disable_output_lane_cg2_ashift,
5497 }}}}}}}}};
5498
5499 llvm::Value *ScaleInputD = mt.lookupValue(thisOp.getScaleInputD());
5500 bool hasScaleInputD = ScaleInputD != nullptr;
5501
5502 llvm::Value *DisableOutputLane =
5503 mt.lookupValue(thisOp.getDisableOutputLane());
5504 bool hasDisableOutputLane = DisableOutputLane != nullptr;
5505
5506 const unsigned ctaGroup =
5507 static_cast<unsigned>(getNVVMCtaGroupKind(thisOp.getCtaGroup()));
5508
5509 llvm::Intrinsic::ID ID =
5510 tcgen05MMAIDs[hasDisableOutputLane][hasScaleInputD][isATensor]
5511 [ctaGroup - 1][thisOp.getAShift()];
5512
5513 assert(ID != notIntrinsic && "Invalid intrinsic for Tcgen05MMAOp.");
5514
5515 if (hasScaleInputD)
5516 args.push_back(ScaleInputD);
5517
5518 if (hasDisableOutputLane)
5519 args.push_back(DisableOutputLane);
5520
5521 args.push_back(builder.getInt32(static_cast<unsigned>(thisOp.getKind())));
5522
5523 if (!hasDisableOutputLane)
5524 args.push_back(builder.getInt32(ctaGroup));
5525
5526 args.push_back(
5527 builder.getInt32(static_cast<unsigned>(thisOp.getCollectorOp())));
5528
5529 return {ID, args};
5530}
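// A worked lookup into tcgen05MMAIDs: matrix A in tensor memory, a scale_d
// operand, no disable_output_lane, cta_group = 1, and ashift set indexes
// [0][1][1][0][1] and selects nvvm_tcgen05_mma_tensor_scale_d_ashift. The
// notIntrinsic entries (ashift with A in shared memory) are unreachable
// because the verifier below rejects that combination.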
5531
5532static LogicalResult
5533verifyTcgen05MMAOp(bool isATensor, mlir::Value disableOutputLane,
5534 NVVM::CTAGroupKind ctaGroup, bool hasAShift,
5535 NVVM::Tcgen05MMACollectorOp collectorOp, Location loc) {
5536
5537 if (disableOutputLane) {
5538 mlir::VectorType disableOutputLaneType =
5539 cast<mlir::VectorType>(disableOutputLane.getType());
5540 if ((ctaGroup == NVVM::CTAGroupKind::CTA_1 &&
5541 disableOutputLaneType.getNumElements() != 4) ||
5542 (ctaGroup == NVVM::CTAGroupKind::CTA_2 &&
5543 disableOutputLaneType.getNumElements() != 8))
5544 return emitError(loc) << "Disable Output Lane of length "
5545 << disableOutputLaneType.getNumElements()
5546 << " is incompatible with CtaGroupAttr";
5547 }
5548
5549 if (hasAShift && !isATensor)
5550 return emitError(
5551 loc, "A-shift can be applied only when matrix A is in tensor memory");
5552
5553 if (hasAShift && (collectorOp == Tcgen05MMACollectorOp::FILL ||
5554 collectorOp == Tcgen05MMACollectorOp::USE))
5555 return emitError(
5556 loc, "Cannot use collector buffer operation fill or use with ashift");
5557
5558 return success();
5559}
5560
5561LogicalResult Tcgen05MMAOp::verify() {
5562 return verifyTcgen05MMAOp(isa<LLVM::LLVMPointerType>(getMatrixA().getType()),
5563 getDisableOutputLane(), getCtaGroup(), getAShift(),
5564 getCollectorOp(), getLoc());
5565}
5566
5567//===----------------------------------------------------------------------===//
5568// NVVM tcgen05.mma.sp functions
5569//===----------------------------------------------------------------------===//
5570
5571mlir::NVVM::IDArgPair Tcgen05MMASparseOp::getIntrinsicIDAndArgs(
5572 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5573
5574 auto thisOp = cast<NVVM::Tcgen05MMASparseOp>(op);
5575 llvm::SmallVector<llvm::Value *> args;
5576
5577 args.push_back(mt.lookupValue(thisOp.getMatrixD()));
5578
5579 llvm::Value *A = mt.lookupValue(thisOp.getMatrixA());
5580 bool isATensor = isa<llvm::PointerType>(A->getType());
5581 args.push_back(A);
5582
5583 args.push_back(mt.lookupValue(thisOp.getMatrixB()));
5584 args.push_back(mt.lookupValue(thisOp.getIdesc()));
5585 args.push_back(mt.lookupValue(thisOp.getEnableInputD()));
5586 args.push_back(mt.lookupValue(thisOp.getSparseMetadata()));
5587
5588 using EnableAShiftArray = std::array<llvm::Intrinsic::ID, 2>;
5589 using CtaGroupArray = std::array<EnableAShiftArray, 2>;
5590 using IsATensorArray = std::array<CtaGroupArray, 2>;
5591 using HasScaleInputDArray = std::array<IsATensorArray, 2>;
5592 using HasDisableOutputLaneArray = std::array<HasScaleInputDArray, 2>;
5593
5594 // [hasDisableOutputLane][hasScaleInputD][isATensor][CtaGroup][EnableAShift]
5595 static constexpr HasDisableOutputLaneArray tcgen05MMASparseIDs = {
5596 { // without disable output lane
5597 {{// without scale input D
5598 {{
5599 // shared
5600 {{// cg1
5601 {llvm::Intrinsic::nvvm_tcgen05_mma_sp_shared, notIntrinsic},
5602 // cg2
5603 {llvm::Intrinsic::nvvm_tcgen05_mma_sp_shared, notIntrinsic}}},
5604 {{// tensor
5605 {
5606 // cg1
5607 llvm::Intrinsic::nvvm_tcgen05_mma_sp_tensor,
5608 llvm::Intrinsic::nvvm_tcgen05_mma_sp_tensor_ashift,
5609 },
5610 {
5611 // cg2
5612 llvm::Intrinsic::nvvm_tcgen05_mma_sp_tensor,
5613 llvm::Intrinsic::nvvm_tcgen05_mma_sp_tensor_ashift,
5614 }}},
5615 }},
5616 // with scale input D
5617 {{ // shared
5618 {{// cg1
5619 {llvm::Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d,
5620 notIntrinsic},
5621 // cg2
5622 {llvm::Intrinsic::nvvm_tcgen05_mma_sp_shared_scale_d,
5623 notIntrinsic}}},
5624 {{// tensor
5625 {
5626 // cg1
5627 llvm::Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d,
5628 llvm::Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_ashift,
5629 },
5630 {
5631 // cg2
5632 llvm::Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d,
5633 llvm::Intrinsic::nvvm_tcgen05_mma_sp_tensor_scale_d_ashift,
5634 }}}}}}},
5635 // with disable output lane
5636 {{ // without scale input D
5637 {{ // shared
5638 {{// cg1
5639 {llvm::Intrinsic::
5640 nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg1,
5641 notIntrinsic},
5642 // cg2
5643 {llvm::Intrinsic::
5644 nvvm_tcgen05_mma_sp_shared_disable_output_lane_cg2,
5645 notIntrinsic}}},
5646 {{// cg1
5647 {
5648 llvm::Intrinsic::
5649 nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1,
5650 llvm::Intrinsic::
5651 nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg1_ashift,
5652 },
5653 // cg2
5654 {
5655 llvm::Intrinsic::
5656 nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2,
5657 llvm::Intrinsic::
5658 nvvm_tcgen05_mma_sp_tensor_disable_output_lane_cg2_ashift,
5659 }}}}},
5660 // with scale input D
5661 {{ // shared
5662 {{// cg1
5663 {llvm::Intrinsic::
5664 nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg1,
5665 notIntrinsic},
5666 // cg2
5667 {llvm::Intrinsic::
5668 nvvm_tcgen05_mma_sp_shared_scale_d_disable_output_lane_cg2,
5669 notIntrinsic}}},
5670 // tensor
5671 {{// cg1
5672 {llvm::Intrinsic::
5673 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1,
5674 llvm::Intrinsic::
5675 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg1_ashift},
5676 // cg2
5677 {
5678 llvm::Intrinsic::
5679 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2,
5680 llvm::Intrinsic::
5681 nvvm_tcgen05_mma_sp_tensor_scale_d_disable_output_lane_cg2_ashift,
5682 }}}}}}}}};
5683
5684 llvm::Value *ScaleInputD = mt.lookupValue(thisOp.getScaleInputD());
5685 bool hasScaleInputD = ScaleInputD != nullptr;
5686
5687 llvm::Value *DisableOutputLane =
5688 mt.lookupValue(thisOp.getDisableOutputLane());
5689 bool hasDisableOutputLane = DisableOutputLane != nullptr;
5690
5691 unsigned ctaGroup =
5692 static_cast<unsigned>(getNVVMCtaGroupKind(thisOp.getCtaGroup()));
5693
5694 llvm::Intrinsic::ID ID =
5695 tcgen05MMASparseIDs[hasDisableOutputLane][hasScaleInputD][isATensor]
5696 [ctaGroup - 1][thisOp.getAShift()];
5697
5698 assert(ID != notIntrinsic && "Invalid intrinsic for Tcgen05MMASparseOp.");
5699
5700 if (hasScaleInputD)
5701 args.push_back(ScaleInputD);
5702
5703 if (hasDisableOutputLane)
5704 args.push_back(DisableOutputLane);
5705
5706 args.push_back(builder.getInt32(static_cast<unsigned>(thisOp.getKind())));
5707
5708 if (!hasDisableOutputLane)
5709 args.push_back(builder.getInt32(ctaGroup));
5710
5711 args.push_back(
5712 builder.getInt32(static_cast<unsigned>(thisOp.getCollectorOp())));
5713
5714 return {ID, args};
5715}
5716
5717LogicalResult Tcgen05MMASparseOp::verify() {
5718 return verifyTcgen05MMAOp(isa<LLVM::LLVMPointerType>(getMatrixA().getType()),
5719 getDisableOutputLane(), getCtaGroup(), getAShift(),
5720 getCollectorOp(), getLoc());
5721}
5722
5723//===----------------------------------------------------------------------===//
5724// NVVM tcgen05.mma.block_scale functions
5725//===----------------------------------------------------------------------===//
5726
5727mlir::NVVM::IDArgPair Tcgen05MMABlockScaleOp::getIntrinsicIDAndArgs(
5728 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5729
5730 auto thisOp = cast<NVVM::Tcgen05MMABlockScaleOp>(op);
5731 llvm::SmallVector<llvm::Value *> args;
5732
5733 args.push_back(mt.lookupValue(thisOp.getMatrixD()));
5734
5735 llvm::Value *A = mt.lookupValue(thisOp.getMatrixA());
5736 bool isATensor = isa<llvm::PointerType>(A->getType());
5737 args.push_back(A);
5738
5739 args.push_back(mt.lookupValue(thisOp.getMatrixB()));
5740 args.push_back(mt.lookupValue(thisOp.getIdesc()));
5741 args.push_back(mt.lookupValue(thisOp.getEnableInputD()));
5742 args.push_back(mt.lookupValue(thisOp.getScaleA()));
5743 args.push_back(mt.lookupValue(thisOp.getScaleB()));
5744 args.push_back(builder.getInt32(
5745 static_cast<unsigned>(getNVVMCtaGroupKind(thisOp.getCtaGroup()))));
5746 args.push_back(
5747 builder.getInt32(static_cast<unsigned>(thisOp.getCollectorOp())));
5748
5749 auto kind = thisOp.getKind();
5750 auto blockScale = thisOp.getBlockScale();
5751 llvm::Intrinsic::ID ID = [&]() {
5752 if (kind == NVVM::Tcgen05MMAKind::MXF8F6F4) {
5753 if (blockScale == NVVM::Tcgen05MMABlockScale::DEFAULT) {
5754 return isATensor ? llvm::Intrinsic::
5755 nvvm_tcgen05_mma_tensor_mxf8f6f4_block_scale
5756 : llvm::Intrinsic::
5757 nvvm_tcgen05_mma_shared_mxf8f6f4_block_scale;
5758 } else if (blockScale == NVVM::Tcgen05MMABlockScale::BLOCK32) {
5759 return isATensor
5760 ? llvm::Intrinsic::
5761 nvvm_tcgen05_mma_tensor_mxf8f6f4_block_scale_block32
5762 : llvm::Intrinsic::
5763 nvvm_tcgen05_mma_shared_mxf8f6f4_block_scale_block32;
5764 }
5765 } else if (kind == NVVM::Tcgen05MMAKind::MXF4) {
5766 if (blockScale == NVVM::Tcgen05MMABlockScale::DEFAULT) {
5767 return isATensor
5768 ? llvm::Intrinsic::nvvm_tcgen05_mma_tensor_mxf4_block_scale
5769 : llvm::Intrinsic::nvvm_tcgen05_mma_shared_mxf4_block_scale;
5770 } else if (blockScale == NVVM::Tcgen05MMABlockScale::BLOCK32) {
5771 return isATensor ? llvm::Intrinsic::
5772 nvvm_tcgen05_mma_tensor_mxf4_block_scale_block32
5773 : llvm::Intrinsic::
5774 nvvm_tcgen05_mma_shared_mxf4_block_scale_block32;
5775 }
5776 } else if (kind == NVVM::Tcgen05MMAKind::MXF4NVF4) {
5777 if (blockScale == NVVM::Tcgen05MMABlockScale::BLOCK32) {
5778 return isATensor
5779 ? llvm::Intrinsic::
5780 nvvm_tcgen05_mma_tensor_mxf4nvf4_block_scale_block32
5781 : llvm::Intrinsic::
5782 nvvm_tcgen05_mma_shared_mxf4nvf4_block_scale_block32;
5783
5784 } else if (blockScale == NVVM::Tcgen05MMABlockScale::BLOCK16) {
5785 return isATensor
5786 ? llvm::Intrinsic::
5787 nvvm_tcgen05_mma_tensor_mxf4nvf4_block_scale_block16
5788 : llvm::Intrinsic::
5789 nvvm_tcgen05_mma_shared_mxf4nvf4_block_scale_block16;
5790 }
5791 }
5792 llvm_unreachable("Invalid tcgen05.mma.block_scale attributes");
5793 }();
5794
5795 return {ID, args};
5796}
5797
5798static LogicalResult verifyTcgen05MMABlockScaleOp(
5799 NVVM::Tcgen05MMACollectorOp collectorOp, NVVM::Tcgen05MMAKind kind,
5800 NVVM::Tcgen05MMABlockScale blockScale, Location loc) {
5801 if (blockScale == NVVM::Tcgen05MMABlockScale::DEFAULT &&
5802 kind == NVVM::Tcgen05MMAKind::MXF4NVF4)
5803 return emitError(loc, "mxf4nvf4 requires block scale attribute");
5804
5805 if (blockScale == NVVM::Tcgen05MMABlockScale::BLOCK16 &&
5806 kind != NVVM::Tcgen05MMAKind::MXF4NVF4)
5807 return emitError(loc,
5808 llvm::formatv("{0} kind does not support block16 attribute",
5809 stringifyEnum(kind)));
5810
5811 return success();
5812}
5813
5814LogicalResult Tcgen05MMABlockScaleOp::verify() {
5815 return verifyTcgen05MMABlockScaleOp(getCollectorOp(), getKind(),
5816 getBlockScale(), getLoc());
5817}
5818
5819//===----------------------------------------------------------------------===//
5820// NVVM tcgen05.mma.sp.block_scale functions
5821//===----------------------------------------------------------------------===//
5822
5823mlir::NVVM::IDArgPair Tcgen05MMASparseBlockScaleOp::getIntrinsicIDAndArgs(
5824 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5825
5826 auto thisOp = cast<NVVM::Tcgen05MMASparseBlockScaleOp>(op);
5827 llvm::SmallVector<llvm::Value *> args;
5828
5829 args.push_back(mt.lookupValue(thisOp.getMatrixD()));
5830
5831 llvm::Value *A = mt.lookupValue(thisOp.getMatrixA());
5832 bool isATensor = isa<llvm::PointerType>(A->getType());
5833 args.push_back(A);
5834
5835 args.push_back(mt.lookupValue(thisOp.getMatrixB()));
5836 args.push_back(mt.lookupValue(thisOp.getIdesc()));
5837 args.push_back(mt.lookupValue(thisOp.getEnableInputD()));
5838 args.push_back(mt.lookupValue(thisOp.getSparseMetadata()));
5839 args.push_back(mt.lookupValue(thisOp.getScaleA()));
5840 args.push_back(mt.lookupValue(thisOp.getScaleB()));
5841 args.push_back(builder.getInt32(
5842 static_cast<unsigned>(getNVVMCtaGroupKind(thisOp.getCtaGroup()))));
5843 args.push_back(
5844 builder.getInt32(static_cast<unsigned>(thisOp.getCollectorOp())));
5845
5846 auto kind = thisOp.getKind();
5847 auto blockScale = thisOp.getBlockScale();
5848 llvm::Intrinsic::ID ID = [&]() {
5849 if (kind == NVVM::Tcgen05MMAKind::MXF8F6F4) {
5850 if (blockScale == NVVM::Tcgen05MMABlockScale::DEFAULT) {
5851 return isATensor ? llvm::Intrinsic::
5852 nvvm_tcgen05_mma_sp_tensor_mxf8f6f4_block_scale
5853 : llvm::Intrinsic::
5854 nvvm_tcgen05_mma_sp_shared_mxf8f6f4_block_scale;
5855 } else if (blockScale == NVVM::Tcgen05MMABlockScale::BLOCK32) {
5856 return isATensor
5857 ? llvm::Intrinsic::
5858 nvvm_tcgen05_mma_sp_tensor_mxf8f6f4_block_scale_block32
5859 : llvm::Intrinsic::
5860 nvvm_tcgen05_mma_sp_shared_mxf8f6f4_block_scale_block32;
5861 }
5862 } else if (kind == NVVM::Tcgen05MMAKind::MXF4) {
5863 if (blockScale == NVVM::Tcgen05MMABlockScale::DEFAULT) {
5864 return isATensor ? llvm::Intrinsic::
5865 nvvm_tcgen05_mma_sp_tensor_mxf4_block_scale
5866 : llvm::Intrinsic::
5867 nvvm_tcgen05_mma_sp_shared_mxf4_block_scale;
5868 } else if (blockScale == NVVM::Tcgen05MMABlockScale::BLOCK32) {
5869 return isATensor
5870 ? llvm::Intrinsic::
5871 nvvm_tcgen05_mma_sp_tensor_mxf4_block_scale_block32
5872 : llvm::Intrinsic::
5873 nvvm_tcgen05_mma_sp_shared_mxf4_block_scale_block32;
5874 }
5875 } else if (kind == NVVM::Tcgen05MMAKind::MXF4NVF4) {
5876 if (blockScale == NVVM::Tcgen05MMABlockScale::BLOCK32) {
5877 return isATensor
5878 ? llvm::Intrinsic::
5879 nvvm_tcgen05_mma_sp_tensor_mxf4nvf4_block_scale_block32
5880 : llvm::Intrinsic::
5881 nvvm_tcgen05_mma_sp_shared_mxf4nvf4_block_scale_block32;
5882
5883 } else if (blockScale == NVVM::Tcgen05MMABlockScale::BLOCK16) {
5884 return isATensor
5885 ? llvm::Intrinsic::
5886 nvvm_tcgen05_mma_sp_tensor_mxf4nvf4_block_scale_block16
5887 : llvm::Intrinsic::
5888 nvvm_tcgen05_mma_sp_shared_mxf4nvf4_block_scale_block16;
5889 }
5890 }
5891 llvm_unreachable("Invalid tcgen05.mma.sp.block_scale attributes");
5892 }();
5893
5894 return {ID, args};
5895}
5896
5897LogicalResult Tcgen05MMASparseBlockScaleOp::verify() {
5898 return verifyTcgen05MMABlockScaleOp(getCollectorOp(), getKind(),
5899 getBlockScale(), getLoc());
5900}
5901
5902//===----------------------------------------------------------------------===//
5903// NVVM tcgen05.mma.ws functions
5904//===----------------------------------------------------------------------===//
5905
5906mlir::NVVM::IDArgPair Tcgen05MMAWsOp::getIntrinsicIDAndArgs(
5907 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5908
5909 auto thisOp = cast<NVVM::Tcgen05MMAWsOp>(op);
5910 llvm::SmallVector<llvm::Value *> args;
5911
5912 args.push_back(mt.lookupValue(thisOp.getMatrixD()));
5913
5914 llvm::Value *A = mt.lookupValue(thisOp.getMatrixA());
5915 bool isATensor = isa<llvm::PointerType>(A->getType());
5916 args.push_back(A);
5917
5918 args.push_back(mt.lookupValue(thisOp.getMatrixB()));
5919 args.push_back(mt.lookupValue(thisOp.getIdesc()));
5920 args.push_back(mt.lookupValue(thisOp.getEnableInputD()));
5921
5922 mlir::Value ZeroColMask = thisOp.getZeroColMask();
5923 llvm::Intrinsic::ID ID = notIntrinsic;
5924 if (ZeroColMask) {
5925 args.push_back(mt.lookupValue(ZeroColMask));
5926 ID = isATensor ? llvm::Intrinsic::nvvm_tcgen05_mma_ws_tensor_zero_col_mask
5927 : llvm::Intrinsic::nvvm_tcgen05_mma_ws_shared_zero_col_mask;
5928 } else
5929 ID = isATensor ? llvm::Intrinsic::nvvm_tcgen05_mma_ws_tensor
5930 : llvm::Intrinsic::nvvm_tcgen05_mma_ws_shared;
5931
5932 args.push_back(builder.getInt32(static_cast<unsigned>(thisOp.getKind())));
5933 args.push_back(
5934 builder.getInt32(static_cast<unsigned>(thisOp.getCollectorBBuffer())));
5935 args.push_back(
5936 builder.getInt32(static_cast<unsigned>(thisOp.getCollectorOp())));
5937
5938 return {ID, args};
5939}
5940
5941//===----------------------------------------------------------------------===//
5942// NVVM tcgen05.mma.ws.sp functions
5943//===----------------------------------------------------------------------===//
5944
5945mlir::NVVM::IDArgPair Tcgen05MMAWsSparseOp::getIntrinsicIDAndArgs(
5946 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5947
5948 auto thisOp = cast<NVVM::Tcgen05MMAWsSparseOp>(op);
5949 llvm::SmallVector<llvm::Value *> args;
5950
5951 args.push_back(mt.lookupValue(thisOp.getMatrixD()));
5952
5953 llvm::Value *A = mt.lookupValue(thisOp.getMatrixA());
5954 bool isATensor = isa<llvm::PointerType>(A->getType());
5955 args.push_back(A);
5956
5957 args.push_back(mt.lookupValue(thisOp.getMatrixB()));
5958 args.push_back(mt.lookupValue(thisOp.getIdesc()));
5959 args.push_back(mt.lookupValue(thisOp.getEnableInputD()));
5960 args.push_back(mt.lookupValue(thisOp.getSparseMetadata()));
5961
5962 mlir::Value ZeroColMask = thisOp.getZeroColMask();
5963 llvm::Intrinsic::ID ID = notIntrinsic;
5964 if (ZeroColMask) {
5965 args.push_back(mt.lookupValue(ZeroColMask));
5966 ID = isATensor
5967 ? llvm::Intrinsic::nvvm_tcgen05_mma_ws_sp_tensor_zero_col_mask
5968 : llvm::Intrinsic::nvvm_tcgen05_mma_ws_sp_shared_zero_col_mask;
5969 } else
5970 ID = isATensor ? llvm::Intrinsic::nvvm_tcgen05_mma_ws_sp_tensor
5971 : llvm::Intrinsic::nvvm_tcgen05_mma_ws_sp_shared;
5972
5973 args.push_back(builder.getInt32(static_cast<unsigned>(thisOp.getKind())));
5974 args.push_back(
5975 builder.getInt32(static_cast<unsigned>(thisOp.getCollectorBBuffer())));
5976 args.push_back(
5977 builder.getInt32(static_cast<unsigned>(thisOp.getCollectorOp())));
5978
5979 return {ID, args};
5980}
5981
5982//===----------------------------------------------------------------------===//
5983// NVVM tcgen05.ld.red functions
5984//===----------------------------------------------------------------------===//
5985
5986#define TCGEN05LDRED(SHAPE, NUM, TYPE) \
5987 llvm::Intrinsic::nvvm_tcgen05_ld_red_##SHAPE##_##NUM##_##TYPE
5988
5989mlir::NVVM::IDArgPair NVVM::Tcgen05LdRedOp::getIntrinsicIDAndArgs(
5990 Operation &op, LLVM::ModuleTranslation &mt, llvm::IRBuilderBase &builder) {
5991 auto thisOp = cast<NVVM::Tcgen05LdRedOp>(op);
5992 llvm::SmallVector<llvm::Value *> args;
5993
5994 mlir::VectorType VecResTy =
5995 cast<mlir::VectorType>(thisOp.getData().getType());
5996 unsigned Num = VecResTy.getNumElements();
5997 bool IsFloat = thisOp.getRedVal().getType().isF32();
5998
5999 llvm::Intrinsic::ID Shape32x32b[][2] = {
6000 {TCGEN05LDRED(32x32b, x1, i32), TCGEN05LDRED(32x32b, x1, f32)},
6001 {TCGEN05LDRED(32x32b, x2, i32), TCGEN05LDRED(32x32b, x2, f32)},
6002 {TCGEN05LDRED(32x32b, x4, i32), TCGEN05LDRED(32x32b, x4, f32)},
6003 {TCGEN05LDRED(32x32b, x8, i32), TCGEN05LDRED(32x32b, x8, f32)},
6004 {TCGEN05LDRED(32x32b, x16, i32), TCGEN05LDRED(32x32b, x16, f32)},
6005 {TCGEN05LDRED(32x32b, x32, i32), TCGEN05LDRED(32x32b, x32, f32)},
6006 {TCGEN05LDRED(32x32b, x64, i32), TCGEN05LDRED(32x32b, x64, f32)},
6007 {TCGEN05LDRED(32x32b, x128, i32), TCGEN05LDRED(32x32b, x128, f32)},
6008 };
6009
6010 llvm::Intrinsic::ID Shape16x32bx2[][2] = {
6011 {TCGEN05LDRED(16x32bx2, x1, i32), TCGEN05LDRED(16x32bx2, x1, f32)},
6012 {TCGEN05LDRED(16x32bx2, x2, i32), TCGEN05LDRED(16x32bx2, x2, f32)},
6013 {TCGEN05LDRED(16x32bx2, x4, i32), TCGEN05LDRED(16x32bx2, x4, f32)},
6014 {TCGEN05LDRED(16x32bx2, x8, i32), TCGEN05LDRED(16x32bx2, x8, f32)},
6015 {TCGEN05LDRED(16x32bx2, x16, i32), TCGEN05LDRED(16x32bx2, x16, f32)},
6016 {TCGEN05LDRED(16x32bx2, x32, i32), TCGEN05LDRED(16x32bx2, x32, f32)},
6017 {TCGEN05LDRED(16x32bx2, x64, i32), TCGEN05LDRED(16x32bx2, x64, f32)},
6018 {TCGEN05LDRED(16x32bx2, x128, i32), TCGEN05LDRED(16x32bx2, x128, f32)},
6019 };
6020
6021 NVVM::Tcgen05LdStShape shape = thisOp.getShape();
6022 unsigned ID = [&]() {
6023 // `Num` is the vector length; log2(Num) gives the row index into the
6024 // shape arrays above.
6025 unsigned idx = std::log2(Num);
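    // For example, a vector<8 x i32> result gives Num = 8 and idx = 3,
    // selecting the x8 row, with IsFloat picking the i32/f32 column.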
6026 switch (shape) {
6027 case NVVM::Tcgen05LdStShape::SHAPE_32X32B:
6028 return Shape32x32b[idx][IsFloat];
6029 case NVVM::Tcgen05LdStShape::SHAPE_16X32BX2:
6030 return Shape16x32bx2[idx][IsFloat];
6031 default:
6032 llvm_unreachable("unhandled tcgen05.ld.red lowering");
6033 }
6034 }();
6035
6036 args.push_back(mt.lookupValue(thisOp.getAddr()));
6037
6038 if (shape == NVVM::Tcgen05LdStShape::SHAPE_16X32BX2)
6039 args.push_back(mt.lookupValue(thisOp.getOffset()));
6040
6041 args.push_back(
6042 builder.getInt32(thisOp.getOp() == NVVM::ReductionKind::MIN ? 0 : 1));
6043
6044 if (IsFloat) {
6045 args.push_back(builder.getInt1(static_cast<unsigned>(thisOp.getAbs())));
6046 args.push_back(builder.getInt1(static_cast<unsigned>(thisOp.getNan())));
6047 }
6048 return {ID, args};
6049}
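// The argument list built above is, in order: the tensor-memory address,
// an offset operand (16x32bx2 shape only), the reduction kind as i32
// (0 = min, 1 = max), and, for f32 reductions, the i1 abs and nan flags.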
6050
6051LogicalResult Tcgen05LdRedOp::verify() {
6052 VectorType data = cast<VectorType>(getData().getType());
6053 Type redVal = getRedVal().getType();
6054
6055 if (data.getElementType() != redVal)
6056 return emitError(
6057 "type of reduction value and element type of vector data should match");
6058
6059 if (getOp() != NVVM::ReductionKind::MIN &&
6060 getOp() != NVVM::ReductionKind::MAX)
6061 return emitError("only min and max reduction kinds are supported");
6062
6063 if (redVal.isInteger() && (getAbs() || getNan())) {
6064 return emitError("abs or nan is only applicable for f32 type");
6065 }
6066 return success();
6067}
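// For example, data of type vector<4 x f32> with an f32 reduction value and
// kind `max` passes this verifier, while i32 data with `abs` or `nan` set is
// rejected, since those flags only apply to f32.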
6068
6069//===----------------------------------------------------------------------===//
6070// NVVMDialect initialization, type parsing, and registration.
6071//===----------------------------------------------------------------------===//
6072
6073// TODO: This should be the llvm.nvvm dialect once this is supported.
6074void NVVMDialect::initialize() {
6075 addOperations<
6076#define GET_OP_LIST
6077#include "mlir/Dialect/LLVMIR/NVVMOps.cpp.inc"
6078 >();
6079 addAttributes<
6080#define GET_ATTRDEF_LIST
6081#include "mlir/Dialect/LLVMIR/NVVMOpsAttributes.cpp.inc"
6082 >();
6083
6084 // Support unknown operations because not all NVVM operations are
6085 // registered.
6086 allowUnknownOperations();
6087 declarePromisedInterface<ConvertToLLVMPatternInterface, NVVMDialect>();
6088 declarePromisedInterface<gpu::TargetAttrInterface, NVVMTargetAttr>();
6089}
6090
6091LogicalResult NVVMDialect::verifyOperationAttribute(Operation *op,
6092 NamedAttribute attr) {
6093 StringAttr attrName = attr.getName();
6094 // Kernel function attribute should be attached to functions.
6095 if (attrName == NVVMDialect::getKernelFuncAttrName()) {
6096 if (!isa<LLVM::LLVMFuncOp>(op)) {
6097 return op->emitError() << "'" << NVVMDialect::getKernelFuncAttrName()
6098 << "' attribute attached to unexpected op";
6099 }
6100 }
6101 // If maxntid / reqntid / cluster_dim is present, it must be an integer
6102 // array with at most 3 elements.
6103 if (attrName == NVVMDialect::getMaxntidAttrName() ||
6104 attrName == NVVMDialect::getReqntidAttrName() ||
6105 attrName == NVVMDialect::getClusterDimAttrName()) {
6106 auto values = llvm::dyn_cast<DenseI32ArrayAttr>(attr.getValue());
6107 if (!values || values.empty() || values.size() > 3) {
6108 return op->emitError()
6109 << "'" << attrName
6110 << "' attribute must be integer array with maximum 3 index";
6111 }
6112 }
6113 // If minctasm / maxnreg / cluster_max_blocks is present, it must be an
6114 // integer attribute.
6115 if (attrName == NVVMDialect::getMinctasmAttrName() ||
6116 attrName == NVVMDialect::getMaxnregAttrName() ||
6117 attrName == NVVMDialect::getClusterMaxBlocksAttrName()) {
6118 if (!llvm::dyn_cast<IntegerAttr>(attr.getValue())) {
6119 return op->emitError()
6120 << "'" << attrName << "' attribute must be integer constant";
6121 }
6122 }
6123 // blocksareclusters must be used along with reqntid and cluster_dim
6124 if (attrName == NVVMDialect::getBlocksAreClustersAttrName()) {
6125 if (!op->hasAttr(NVVMDialect::getReqntidAttrName()) ||
6126 !op->hasAttr(NVVMDialect::getClusterDimAttrName())) {
6127 return op->emitError()
6128 << "'" << attrName << "' attribute must be used along with "
6129 << "'" << NVVMDialect::getReqntidAttrName() << "' and "
6130 << "'" << NVVMDialect::getClusterDimAttrName() << "'";
6131 }
6132 }
6133
6134 return success();
6135}
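// Illustrative usage accepted by the verifier above (attribute spellings are
// assumed to follow the getters, e.g. "nvvm.kernel", "nvvm.reqntid",
// "nvvm.cluster_dim"):
//   llvm.func @kern() attributes {nvvm.kernel,
//       nvvm.reqntid = array<i32: 128, 1, 1>,
//       nvvm.cluster_dim = array<i32: 2, 1, 1>,
//       nvvm.blocksareclusters} { ... }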
6136
6137LogicalResult NVVMDialect::verifyRegionArgAttribute(Operation *op,
6138 unsigned regionIndex,
6139 unsigned argIndex,
6140 NamedAttribute argAttr) {
6141 auto funcOp = dyn_cast<FunctionOpInterface>(op);
6142 if (!funcOp)
6143 return success();
6144
6145 bool isKernel = op->hasAttr(NVVMDialect::getKernelFuncAttrName());
6146 StringAttr attrName = argAttr.getName();
6147 if (attrName == NVVM::NVVMDialect::getGridConstantAttrName()) {
6148 if (!isKernel) {
6149 return op->emitError()
6150 << "'" << attrName
6151 << "' attribute must be present only on kernel arguments";
6152 }
6153 if (!isa<UnitAttr>(argAttr.getValue()))
6154 return op->emitError() << "'" << attrName << "' must be a unit attribute";
6155 if (!funcOp.getArgAttr(argIndex, LLVM::LLVMDialect::getByValAttrName())) {
6156 return op->emitError()
6157 << "'" << attrName
6158 << "' attribute requires the argument to also have attribute '"
6159 << LLVM::LLVMDialect::getByValAttrName() << "'";
6160 }
6161 }
6162
6163 return success();
6164}
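// Illustrative usage (argument-attribute syntax assumed): grid_constant is a
// unit attribute, valid only on kernel arguments that also carry llvm.byval:
//   llvm.func @kern(%arg0: !llvm.ptr {llvm.byval = i32, nvvm.grid_constant})
//       attributes {nvvm.kernel} { ... }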
6165
6166//===----------------------------------------------------------------------===//
6167// NVVM Address Space Attr
6168//===----------------------------------------------------------------------===//
6169
6170unsigned NVVMMemorySpaceAttr::getAddressSpace() const {
6171 return static_cast<unsigned>(getValue());
6172}
6173
6174bool NVVMMemorySpaceAttr::isValidLoad(
6175 Type type, ptr::AtomicOrdering ordering, std::optional<int64_t> alignment,
6176 const ::mlir::DataLayout *dataLayout,
6177 function_ref<InFlightDiagnostic()> emitError) const {
6178 return LLVM::detail::isValidLoadStoreImpl(type, ordering, alignment,
6179 dataLayout, emitError);
6180}
6181
6182bool NVVMMemorySpaceAttr::isValidStore(
6183 Type type, ptr::AtomicOrdering ordering, std::optional<int64_t> alignment,
6184 const ::mlir::DataLayout *dataLayout,
6185 function_ref<InFlightDiagnostic()> emitError) const {
6186 return LLVM::detail::isValidLoadStoreImpl(type, ordering, alignment,
6187 dataLayout, emitError);
6188}
6189
6190bool NVVMMemorySpaceAttr::isValidAtomicOp(
6191 ptr::AtomicBinOp op, Type type, ptr::AtomicOrdering ordering,
6192 std::optional<int64_t> alignment, const ::mlir::DataLayout *dataLayout,
6193 function_ref<InFlightDiagnostic()> emitError) const {
6194 // TODO: update this method once `ptr.atomic_rmw` is implemented.
6195 assert(false && "unimplemented, see TODO in the source.");
6196 return false;
6197}
6198
6199bool NVVMMemorySpaceAttr::isValidAtomicXchg(
6200 Type type, ptr::AtomicOrdering successOrdering,
6201 ptr::AtomicOrdering failureOrdering, std::optional<int64_t> alignment,
6202 const ::mlir::DataLayout *dataLayout,
6203 function_ref<InFlightDiagnostic()> emitError) const {
6204 // TODO: update this method once `ptr.atomic_cmpxchg` is implemented.
6205 assert(false && "unimplemented, see TODO in the source.");
6206 return false;
6207}
6208
6209bool NVVMMemorySpaceAttr::isValidAddrSpaceCast(
6210 Type tgt, Type src, function_ref<InFlightDiagnostic()> emitError) const {
6211 // TODO: update this method once the `ptr.addrspace_cast` op is added to the
6212 // dialect.
6213 assert(false && "unimplemented, see TODO in the source.");
6214 return false;
6215}
6216
6217bool NVVMMemorySpaceAttr::isValidPtrIntCast(
6218 Type intLikeTy, Type ptrLikeTy,
6219 function_ref<InFlightDiagnostic()> emitError) const {
6220 // TODO: update this method once the int-cast ops are added to the `ptr`
6221 // dialect.
6222 assert(false && "unimplemented, see TODO in the source.");
6223 return false;
6224}
6225
6226//===----------------------------------------------------------------------===//
6227// NVVM target attribute.
6228//===----------------------------------------------------------------------===//
6229LogicalResult
6230NVVMTargetAttr::verify(function_ref<InFlightDiagnostic()> emitError,
6231 int optLevel, StringRef triple, StringRef chip,
6232 StringRef features, DictionaryAttr flags,
6233 ArrayAttr files, bool verifyTarget) {
6234 if (optLevel < 0 || optLevel > 3) {
6235 emitError() << "The optimization level must be a number between 0 and 3.";
6236 return failure();
6237 }
6238 if (triple.empty()) {
6239 emitError() << "The target triple cannot be empty.";
6240 return failure();
6241 }
6242 if (chip.empty()) {
6243 emitError() << "The target chip cannot be empty.";
6244 return failure();
6245 }
6246 if (files && !llvm::all_of(files, [](::mlir::Attribute attr) {
6247 return mlir::isa_and_nonnull<StringAttr>(attr);
6248 })) {
6249 emitError() << "All the elements in the `link` array must be strings.";
6250 return failure();
6251 }
6252 return success();
6253}
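// A minimal #nvvm.target attribute satisfying the checks above (parameter
// syntax assumed):
//   #nvvm.target<O = 3, triple = "nvptx64-nvidia-cuda", chip = "sm_90">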
6254
6255LogicalResult NVVMTargetAttr::verifyTarget(Operation *gpuModule) {
6256 if (!getVerifyTarget())
6257 return success();
6258
6259 auto gpuModuleOp = llvm::dyn_cast<gpu::GPUModuleOp>(gpuModule);
6260 if (!gpuModuleOp) {
6261 return emitError(gpuModule->getLoc(),
6262 "NVVM target attribute must be attached to a GPU module");
6263 }
6264
6265 const unsigned targetFullSmVersion =
6266 NVVMCheckSMVersion::getTargetFullSmVersionFromStr(getChip());
6267 if (!NVVMCheckSMVersion::isMinimumSMVersion(targetFullSmVersion)) {
6268 return emitError(gpuModule->getLoc(),
6269 "Minimum NVVM target SM version is sm_20");
6270 }
6271
6272 if (gpuModuleOp
6273 ->walk([&](Operation *op) {
6274 if (auto reqOp = llvm::dyn_cast<NVVM::RequiresSMInterface>(op)) {
6275 const NVVMCheckSMVersion requirement =
6276 reqOp.getRequiredMinSMVersion();
6277 if (!requirement.isCompatibleWith(targetFullSmVersion)) {
6278 op->emitOpError() << "is not supported on " << getChip();
6279 return WalkResult::interrupt();
6280 }
6281 }
6282 return WalkResult::advance();
6283 })
6284 .wasInterrupted())
6285 return failure();
6286
6287 return success();
6288}
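// For example, with chip = "sm_80", any op whose RequiresSMInterface reports
// a newer minimum SM version (say sm_90) emits "is not supported on sm_80"
// and interrupts the walk above.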
6289
6290#define GET_OP_CLASSES
6291#include "mlir/Dialect/LLVMIR/NVVMOps.cpp.inc"
6292
6293#define GET_ATTRDEF_CLASSES
6294#include "mlir/Dialect/LLVMIR/NVVMOpsAttributes.cpp.inc"