#include "llvm/Support/Debug.h"

namespace sparse_tensor {
TensorExp::TensorExp(Kind k, unsigned x, unsigned y, Value v, Operation *o)
    : kind(k), val(v), op(o) {
  switch (kind) {
  case kTensor:
    assert(x != -1u && y == -1u && !v && !o);
    tensor = x;
    break;
  case kInvariant:
    assert(x == -1u && y == -1u && v && !o);
    break;
  case kIndex:
    assert(x != -1u && y == -1u && !v && !o);
    index = x;
    break;
  case kAbsF: // ... and the other simple unary kinds
    assert(x != -1u && y == -1u && !v && !o);
    children.e0 = x;
    children.e1 = y;
    break;
  case kTruncF: // ... and the other cast kinds, which carry the target value
    assert(x != -1u && y == -1u && v && !o);
    children.e0 = x;
    children.e1 = y;
    break;
  case kBinaryBranch:
    assert(x != -1u && y == -1u && !v && o);
    children.e0 = x;
    children.e1 = y;
    break;
  case kUnary:
    // No assertion on y can be made, since the branching paths involve both
    // a unary (mapSet) and a binary (takeDisj) pathway.
    assert(x != -1u && !v && o);
    children.e0 = x;
    children.e1 = y;
    break;
  case kMulF: // ... and the other built-in binary kinds
    assert(x != -1u && y != -1u && !v && !o);
    children.e0 = x;
    children.e1 = y;
    break;
  case kBinary:
    assert(x != -1u && y != -1u && !v && o);
    children.e0 = x;
    children.e1 = y;
    break;
  }
}
LatPoint::LatPoint(unsigned n, unsigned e, unsigned b)
    : bits(n, false), simple(), exp(e) {
  bits.set(b);
}
unsigned Merger::addExp(Kind k, unsigned e0, unsigned e1, Value v,
                        Operation *op) {
  unsigned e = tensorExps.size();
  tensorExps.push_back(TensorExp(k, e0, e1, v, op));
  return e;
}
unsigned Merger::addLat(unsigned t, unsigned i, unsigned e) {
  assert(t < numTensors && i < numLoops);
  unsigned p = latPoints.size();
  latPoints.push_back(LatPoint(numLoops * numTensors, e, numTensors * i + t));
  return p;
}
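Each lattice bit encodes a (tensor, loop-index) pair; addLat sizes the bitvector to numLoops * numTensors and sets bit numTensors * i + t. A quick standalone check of that layout in plain C++, assuming the obvious mod/div decoding (the tensor(b) and index(b) helpers used by dumpBits further down recover the components this way):

#include <cassert>

int main() {
  // Toy values: three tensors, bit for loop i = 2 of tensor t = 1.
  unsigned numTensors = 3;
  unsigned t = 1, i = 2;
  unsigned b = numTensors * i + t; // bit 7 in the lattice bitvector
  // Decoding recovers both components.
  assert(b % numTensors == t); // tensor(b)
  assert(b / numTensors == i); // index(b)
  return 0;
}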
unsigned Merger::addSet() {
  unsigned s = latSets.size();
  latSets.emplace_back();
  return s;
}
unsigned Merger::conjLatPoint(Kind kind, unsigned p0, unsigned p1,
                              Operation *op) {
  unsigned p = latPoints.size();
  BitVector nb = BitVector(latPoints[p0].bits);
  nb |= latPoints[p1].bits;
  unsigned e = addExp(kind, latPoints[p0].exp, latPoints[p1].exp, Value(), op);
  latPoints.push_back(LatPoint(nb, e));
  return p;
}
unsigned Merger::takeConj(Kind kind, unsigned s0, unsigned s1, Operation *op) {
  unsigned s = addSet();
  for (unsigned p0 : latSets[s0])
    for (unsigned p1 : latSets[s1])
      latSets[s].push_back(conjLatPoint(kind, p0, p1, op));
  return s;
}
unsigned Merger::takeDisj(Kind kind, unsigned s0, unsigned s1, Operation *op) {
  unsigned s = takeConj(kind, s0, s1, op);
  // Followed by all in s0.
  for (unsigned p : latSets[s0])
    latSets[s].push_back(p);
  // Map binary 0-y to unary -y.
  if (kind == kSubF)
    s1 = mapSet(kNegF, s1);
  else if (kind == kSubC)
    s1 = mapSet(kNegC, s1);
  else if (kind == kSubI)
    s1 = mapSet(kNegI, s1);
  // Followed by all in s1.
  for (unsigned p : latSets[s1])
    latSets[s].push_back(p);
  return s;
}
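To make the two merges concrete, here is a minimal standalone sketch of their semantics with toy stand-ins for the MLIR types (std::vector<bool> for llvm::BitVector, a string for the tensor expression index): a conjunction keeps only the cartesian product of the two sets, while a disjunction additionally keeps every point of each operand.

#include <iostream>
#include <string>
#include <vector>

// One lattice point: the loop-index conjunction plus the expression it computes.
struct Point {
  std::vector<bool> bits;
  std::string exp;
};
using Set = std::vector<Point>;

// Conjunctive merge: cartesian product, OR-ing the index bits of each pair.
Set takeConj(const std::string &op, const Set &s0, const Set &s1) {
  Set s;
  for (const Point &p0 : s0)
    for (const Point &p1 : s1) {
      std::vector<bool> nb = p0.bits;
      for (size_t b = 0; b < nb.size(); b++)
        nb[b] = nb[b] || p1.bits[b];
      s.push_back({nb, "(" + p0.exp + op + p1.exp + ")"});
    }
  return s;
}

// Disjunctive merge: the conjunction, followed by all points of s0 and s1.
Set takeDisj(const std::string &op, const Set &s0, const Set &s1) {
  Set s = takeConj(op, s0, s1);
  s.insert(s.end(), s0.begin(), s0.end());
  s.insert(s.end(), s1.begin(), s1.end());
  return s;
}

int main() {
  // Two sparse vectors a(i) and b(i); bit 0 = "i used by a", bit 1 = "i used by b".
  Set a = {{{true, false}, "a"}};
  Set b = {{{false, true}, "b"}};
  for (const Point &p : takeConj("*", a, b))
    std::cout << p.exp << "\n"; // (a*b): code only runs where both have values
  for (const Point &p : takeDisj("+", a, b))
    std::cout << p.exp << "\n"; // (a+b), a, b: also where only one side does
  return 0;
}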
unsigned Merger::takeCombi(Kind kind, unsigned s0, unsigned s1, Operation *orig,
                           bool includeLeft, Kind ltrans, Operation *opleft,
                           bool includeRight, Kind rtrans, Operation *opright) {
  unsigned s = takeConj(kind, s0, s1, orig);
  // Left region.
  if (includeLeft) {
    if (opleft)
      s0 = mapSet(ltrans, s0, Value(), opleft);
    for (unsigned p : latSets[s0])
      latSets[s].push_back(p);
  }
  // Right region.
  if (includeRight) {
    if (opright)
      s1 = mapSet(rtrans, s1, Value(), opright);
    for (unsigned p : latSets[s1])
      latSets[s].push_back(p);
  }
  return s;
}
unsigned Merger::mapSet(Kind kind, unsigned s0, Value v, Operation *op) {
  unsigned s = addSet();
  for (unsigned p : latSets[s0]) {
    unsigned e = addExp(kind, latPoints[p].exp, v, op);
    latPoints.push_back(LatPoint(latPoints[p].bits, e));
    latSets[s].push_back(latPoints.size() - 1);
  }
  return s;
}
unsigned Merger::optimizeSet(unsigned s0) {
  unsigned s = addSet();
  assert(!latSets[s0].empty());
  unsigned p0 = latSets[s0][0];
  for (unsigned p1 : latSets[s0]) {
    bool add = true;
    if (p0 != p1) {
      // Check whether this is a straightforward copy.
      unsigned e = latPoints[p1].exp;
      if (tensorExps[e].kind == kTensor && tensorExps[e].tensor == outTensor)
        continue;
      // Check whether this conjunction is already covered.
      for (unsigned p2 : latSets[s]) {
        assert(!latGT(p1, p2)); // Lj => Li would be bad
        if (onlyDenseDiff(p2, p1)) {
          add = false;
          break;
        }
      }
      assert(!add || latGT(p0, p1));
    }
    if (add)
      latSets[s].push_back(p1);
  }
  for (unsigned p : latSets[s])
    latPoints[p].simple = simplifyCond(s, p);
  return s;
}
BitVector Merger::simplifyCond(unsigned s0, unsigned p0) {
  // First determine if this lattice point is a *singleton*, i.e.,
  // the last point in a lattice, with no other point less than it.
  bool isSingleton = true;
  for (unsigned p1 : latSets[s0]) {
    if (p0 != p1 && latGT(p0, p1)) {
      isSingleton = false;
      break;
    }
  }
  // Now apply the two basic rules.
  BitVector simple = latPoints[p0].bits;
  bool reset = isSingleton && hasAnyDimOf(simple, kSparse);
  for (unsigned b = 0, be = simple.size(); b < be; b++) {
    if (simple[b] && !isDim(b, kSparse)) {
      if (reset)
        simple.reset(b);
      reset = true;
    }
  }
  return simple;
}
bool Merger::latGT(unsigned i, unsigned j) const {
  const BitVector &bitsi = latPoints[i].bits;
  const BitVector &bitsj = latPoints[j].bits;
  assert(bitsi.size() == bitsj.size());
  if (bitsi.count() > bitsj.count()) {
    for (unsigned b = 0, be = bitsj.size(); b < be; b++)
      if (bitsj[b] && !bitsi[b])
        return false;
    return true;
  }
  return false;
}
bool Merger::onlyDenseDiff(unsigned i, unsigned j) {
  BitVector tmp = latPoints[j].bits;
  tmp ^= latPoints[i].bits;
  return !hasAnyDimOf(tmp, kSparse);
}
bool Merger::hasAnyDimOf(const BitVector &bits, Dim d) const {
  for (unsigned b = 0, be = bits.size(); b < be; b++)
    if (bits[b] && isDim(b, d))
      return true;
  return false;
}
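latGT is simply a strict-superset test on these bitvectors: point i dominates point j when it iterates over strictly more loop indices and includes all of j's. A standalone sketch of just that logic, again with a toy bitvector type:

#include <cassert>
#include <vector>

using Bits = std::vector<bool>;

// Li > Lj iff bits(j) is a strict subset of bits(i).
bool latGT(const Bits &bi, const Bits &bj) {
  assert(bi.size() == bj.size());
  unsigned ci = 0, cj = 0;
  for (size_t b = 0; b < bi.size(); b++) {
    ci += bi[b];
    cj += bj[b];
  }
  if (ci <= cj)
    return false;
  for (size_t b = 0; b < bj.size(); b++)
    if (bj[b] && !bi[b])
      return false;
  return true;
}

int main() {
  Bits ab = {true, true};  // conjunction iterating both a and b
  Bits a = {true, false};  // point iterating only a
  Bits b = {false, true};  // point iterating only b
  assert(latGT(ab, a) && latGT(ab, b)); // the conjunction dominates both
  assert(!latGT(a, b) && !latGT(b, a)); // disjoint points are incomparable
  return 0;
}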
bool Merger::isSingleCondition(unsigned t, unsigned e) const {
  switch (tensorExps[e].kind) {
  case kTensor:
    return tensorExps[e].tensor == t;
  case kAbsF: // ... and the other unary kinds: forward the query
    return isSingleCondition(t, tensorExps[e].children.e0);
  case kDivF: // ... and the other division kinds
    assert(!maybeZero(tensorExps[e].children.e1));
    return isSingleCondition(t, tensorExps[e].children.e0);
  case kShrS: // ... and the other shift kinds
    assert(isInvariant(tensorExps[e].children.e1));
    return isSingleCondition(t, tensorExps[e].children.e0);
  case kMulF: // ... and kMulC, kMulI
    if (isSingleCondition(t, tensorExps[e].children.e0))
      return isSingleCondition(t, tensorExps[e].children.e1) ||
             isInvariant(tensorExps[e].children.e1);
    if (isSingleCondition(t, tensorExps[e].children.e1))
      return isInvariant(tensorExps[e].children.e0);
    return false;
  case kAndI:
    return isSingleCondition(t, tensorExps[e].children.e0) &&
           isSingleCondition(t, tensorExps[e].children.e1);
  case kAddF: // ... and all remaining kinds
    return false;
  }
  llvm_unreachable("unexpected kind");
}
static const char *kindToOpSymbol(Kind kind) {
  switch (kind) {
  // ...
  case kBinaryBranch:
    return "binary_branch";
  // ...
  }
  llvm_unreachable("unexpected kind for symbol");
}
void Merger::dumpExp(unsigned e) const {
  switch (tensorExps[e].kind) {
  case kTensor:
    if (tensorExps[e].tensor == syntheticTensor)
      llvm::dbgs() << "synthetic_";
    else if (tensorExps[e].tensor == outTensor)
      llvm::dbgs() << "output_";
    llvm::dbgs() << "tensor_" << tensorExps[e].tensor;
    break;
  case kInvariant:
    llvm::dbgs() << "invariant";
    break;
  case kIndex:
    llvm::dbgs() << "index_" << tensorExps[e].index;
    break;
  case kAbsF: // ... and the other unary kinds
    llvm::dbgs() << kindToOpSymbol(tensorExps[e].kind) << " ";
    dumpExp(tensorExps[e].children.e0);
    break;
  case kMulF: // ... and the other binary kinds
    llvm::dbgs() << "(";
    dumpExp(tensorExps[e].children.e0);
    llvm::dbgs() << " " << kindToOpSymbol(tensorExps[e].kind) << " ";
    dumpExp(tensorExps[e].children.e1);
    llvm::dbgs() << ")";
  }
}
void Merger::dumpLat(unsigned p) const {
  llvm::dbgs() << "lat(";
  dumpBits(latPoints[p].bits);
  llvm::dbgs() << " :";
  dumpBits(latPoints[p].simple);
  llvm::dbgs() << " : ";
  dumpExp(latPoints[p].exp);
  llvm::dbgs() << " )\n";
}
void Merger::dumpSet(unsigned s) const {
  llvm::dbgs() << "{ #" << latSets[s].size() << "\n";
  for (unsigned p : latSets[s]) {
    llvm::dbgs() << "  ";
    dumpLat(p);
  }
  llvm::dbgs() << "}\n";
}
void Merger::dumpBits(const BitVector &bits) const {
  for (unsigned b = 0, be = bits.size(); b < be; b++) {
    if (bits[b]) {
      unsigned t = tensor(b);
      unsigned i = index(b);
      llvm::dbgs() << " i_" << t << "_" << i << "_";
      switch (dims[t][i]) {
      // ... print a one-letter code for the dimension level type ...
      }
    }
  }
}
unsigned Merger::buildLattices(unsigned e, unsigned i) {
  Kind kind = tensorExps[e].kind;
  switch (kind) {
  case kTensor:
  case kInvariant:
  case kIndex: {
    // A leaf (tensor, invariant, or index) forms a single lattice point.
    // When the loop index is not really used by this expression (or the
    // output must be accessed by insertion), the synthetic tensor is used
    // instead, so that the iteration space is not skipped.
    unsigned s = addSet();
    unsigned t = syntheticTensor;
    if (kind == kTensor) {
      t = tensorExps[e].tensor;
      if (hasSparseOut && t == outTensor)
        t = syntheticTensor;
    }
    latSets[s].push_back(addLat(t, i, e));
    return s;
  }
  case kAbsF: // ... through kBitCast: the built-in unary kinds
    // A zero-preserving operation maps the lattice set of the operand
    // through the operator into a new set.
    return mapSet(kind, buildLattices(tensorExps[e].children.e0, i),
                  tensorExps[e].val);
  case kBinaryBranch:
    // The left or right half of a binary operation that has already been
    // split into separate operations for each region.
    return mapSet(kind, buildLattices(tensorExps[e].children.e0, i), Value(),
                  tensorExps[e].op);
  case kUnary: {
    // A custom unary operation.
    unsigned child0 = buildLattices(tensorExps[e].children.e0, i);
    UnaryOp unop = cast<UnaryOp>(tensorExps[e].op);
    Region &absentRegion = unop.getAbsentRegion();
    if (absentRegion.empty()) {
      // Simple mapping over existing values.
      return mapSet(kind, child0, Value(), unop);
    }
    // Use a disjunction, with the value yielded by the absent region
    // treated as an invariant on the right-hand side.
    Block &absentBlock = absentRegion.front();
    YieldOp absentYield = cast<YieldOp>(absentBlock.getTerminator());
    Value absentVal = absentYield.getResult();
    unsigned rhs = addExp(kInvariant, absentVal);
    return takeDisj(kind, child0, buildLattices(rhs, i), unop);
  }
  case kMulF: // ... and the other multiplicative kinds
    // A multiplicative operation only needs to be performed
    // for the conjunction of sparse iteration spaces.
    return takeConj(kind,
                    buildLattices(tensorExps[e].children.e0, i),
                    buildLattices(tensorExps[e].children.e1, i));
  case kDivF: // ... and the other division kinds
    // A division is only accepted when the divisor is known to be nonzero.
    assert(!maybeZero(tensorExps[e].children.e1));
    return takeConj(kind,
                    buildLattices(tensorExps[e].children.e0, i),
                    buildLattices(tensorExps[e].children.e1, i));
  case kAddF: // ... and the other additive kinds
    // An additive operation must be performed
    // for the disjunction of sparse iteration spaces.
    return takeDisj(kind,
                    buildLattices(tensorExps[e].children.e0, i),
                    buildLattices(tensorExps[e].children.e1, i));
  case kShrS: // ... and the other shift kinds
    // A shift is only accepted with an invariant shift amount.
    assert(isInvariant(tensorExps[e].children.e1));
    return takeConj(kind,
                    buildLattices(tensorExps[e].children.e0, i),
                    buildLattices(tensorExps[e].children.e1, i));
  case kBinary: {
    // A custom binary operation.
    unsigned child0 = buildLattices(tensorExps[e].children.e0, i);
    unsigned child1 = buildLattices(tensorExps[e].children.e1, i);
    BinaryOp binop = cast<BinaryOp>(tensorExps[e].op);
    Region &leftRegion = binop.getLeftRegion();
    Region &rightRegion = binop.getRightRegion();
    // Extract the yield, if any, of each region.
    Operation *leftYield = nullptr;
    if (!leftRegion.empty()) {
      Block &left = leftRegion.front();
      leftYield = left.getTerminator();
    }
    Operation *rightYield = nullptr;
    if (!rightRegion.empty()) {
      Block &right = rightRegion.front();
      rightYield = right.getTerminator();
    }
    bool includeLeft = binop.getLeftIdentity() || !leftRegion.empty();
    bool includeRight = binop.getRightIdentity() || !rightRegion.empty();
    return takeCombi(kBinary, child0, child1, binop, includeLeft,
                     kBinaryBranch, leftYield, includeRight, kBinaryBranch,
                     rightYield);
  }
  }
  llvm_unreachable("unexpected expression kind");
}
Optional<unsigned> Merger::buildTensorExpFromLinalg(linalg::GenericOp op) {
  Operation *yield = op.region().front().getTerminator();
  assert(isa<linalg::YieldOp>(yield));
  return buildTensorExp(op, yield->getOperand(0));
}
/// Only returns false if we are certain this is a nonzero.
bool Merger::maybeZero(unsigned e) const {
  if (tensorExps[e].kind == kInvariant) {
    if (auto c = tensorExps[e].val.getDefiningOp<complex::ConstantOp>()) {
      ArrayAttr arrayAttr = c.getValue();
      return arrayAttr[0].cast<FloatAttr>().getValue().isZero() &&
             arrayAttr[1].cast<FloatAttr>().getValue().isZero();
    }
    if (auto c = tensorExps[e].val.getDefiningOp<arith::ConstantIntOp>())
      return c.value() == 0;
    if (auto c = tensorExps[e].val.getDefiningOp<arith::ConstantFloatOp>())
      return c.value().isZero();
  }
  return true;
}
bool Merger::isInvariant(unsigned e) const {
  return tensorExps[e].kind == kInvariant;
}
Type Merger::inferType(unsigned e, Value src) {
  // Obtain the destination type from the cast node.
  Type dtp = tensorExps[e].val.getType();
  // For a vector source, the destination is vectorized the same way.
  if (auto vtp = src.getType().dyn_cast<VectorType>())
    return VectorType::get(vtp.getNumElements(), dtp, vtp.getNumScalableDims());
  return dtp;
}
/// Ensures that sparse compiler can generate code for expression.
static bool isAdmissableBranchExp(Operation *op, Block *block, Value v) {
  // Arguments are always admissable.
  if (v.isa<BlockArgument>())
    return true;
  // Accept index computations anywhere.
  Operation *def = v.getDefiningOp();
  if (isa<linalg::IndexOp>(def))
    return true;
  // Operations defined within the branch are accepted as long as all
  // their operands are admissable as well.
  if (def->getBlock() == block)
    for (unsigned n = 0, numOps = def->getNumOperands(); n < numOps; n++)
      if (!isAdmissableBranchExp(op, block, def->getOperand(n)))
        return false;
  return true;
}

/// Ensures that sparse compiler can generate code for branch.
static bool isAdmissableBranch(Operation *op, Region &region) {
  if (region.empty())
    return true;
  // Build the semi-ring branch semantics backward from yield.
  Operation *yield = region.front().getTerminator();
  assert(isa<YieldOp>(yield));
  return isAdmissableBranchExp(op, &region.front(), yield->getOperand(0));
}
Optional<unsigned> Merger::buildTensorExp(linalg::GenericOp op, Value v) {
  if (auto arg = v.dyn_cast<BlockArgument>()) {
    unsigned argN = arg.getArgNumber();
    // Any argument of the generic op that is not marked as a scalar
    // argument is considered a tensor.
    if (arg.getOwner()->getParentOp() == op) {
      OpOperand *t = op.getInputAndOutputOperands()[argN];
      if (!op.isScalar(t))
        return addExp(kTensor, argN);
      v = t->get(); // get scalar value
    }
    // Any other argument (marked as scalar argument for the generic op
    // or belonging to an enveloping op) is considered invariant.
    return addExp(kInvariant, v);
  }
  // Something defined outside is invariant.
  Operation *def = v.getDefiningOp();
  if (def->getBlock() != &op.region().front())
    return addExp(kInvariant, v);
  // Construct index operations.
  if (auto indexOp = dyn_cast<linalg::IndexOp>(def))
    return addExp(kIndex, indexOp.dim());
  // Construct unary operations if subexpression can be built.
  if (def->getNumOperands() == 1) {
    auto x = buildTensorExp(op, def->getOperand(0));
    if (x.hasValue()) {
      unsigned e = x.getValue();
      if (isa<math::AbsOp>(def))
        return addExp(kAbsF, e);
      if (isa<complex::AbsOp>(def))
        return addExp(kAbsC, e);
      if (isa<math::CeilOp>(def))
        return addExp(kCeilF, e);
      if (isa<math::FloorOp>(def))
        return addExp(kFloorF, e);
      if (isa<math::SqrtOp>(def))
        return addExp(kSqrtF, e);
      if (isa<complex::SqrtOp>(def))
        return addExp(kSqrtC, e);
      if (isa<math::ExpM1Op>(def))
        return addExp(kExpm1F, e);
      if (isa<complex::Expm1Op>(def))
        return addExp(kExpm1C, e);
      if (isa<math::Log1pOp>(def))
        return addExp(kLog1pF, e);
      if (isa<complex::Log1pOp>(def))
        return addExp(kLog1pC, e);
      if (isa<math::SinOp>(def))
        return addExp(kSinF, e);
      if (isa<complex::SinOp>(def))
        return addExp(kSinC, e);
      if (isa<math::TanhOp>(def))
        return addExp(kTanhF, e);
      if (isa<complex::TanhOp>(def))
        return addExp(kTanhC, e);
      if (isa<arith::NegFOp>(def))
        return addExp(kNegF, e);
      if (isa<complex::NegOp>(def))
        return addExp(kNegC, e);
      if (isa<arith::TruncFOp>(def))
        return addExp(kTruncF, e, v);
      if (isa<arith::ExtFOp>(def))
        return addExp(kExtF, e, v);
      if (isa<arith::FPToSIOp>(def))
        return addExp(kCastFS, e, v);
      if (isa<arith::FPToUIOp>(def))
        return addExp(kCastFU, e, v);
      if (isa<arith::SIToFPOp>(def))
        return addExp(kCastSF, e, v);
      if (isa<arith::UIToFPOp>(def))
        return addExp(kCastUF, e, v);
      if (isa<arith::ExtSIOp>(def))
        return addExp(kCastS, e, v);
      if (isa<arith::ExtUIOp>(def))
        return addExp(kCastU, e, v);
      if (isa<arith::IndexCastOp>(def))
        return addExp(kCastIdx, e, v);
      if (isa<arith::TruncIOp>(def))
        return addExp(kTruncI, e, v);
      if (isa<complex::ImOp>(def))
        return addExp(kCIm, e);
      if (isa<complex::ReOp>(def))
        return addExp(kCRe, e);
      if (isa<arith::BitcastOp>(def))
        return addExp(kBitCast, e, v);
      if (auto unop = dyn_cast<sparse_tensor::UnaryOp>(def)) {
        if (isAdmissableBranch(unop, unop.getPresentRegion()) &&
            isAdmissableBranch(unop, unop.getAbsentRegion()))
          return addExp(kUnary, e, Value(), def);
      }
    }
  }
  // Construct binary operations if subexpressions can be built.
  if (def->getNumOperands() == 2) {
    auto x = buildTensorExp(op, def->getOperand(0));
    auto y = buildTensorExp(op, def->getOperand(1));
    if (x.hasValue() && y.hasValue()) {
      unsigned e0 = x.getValue();
      unsigned e1 = y.getValue();
      if (isa<arith::MulFOp>(def))
        return addExp(kMulF, e0, e1);
      if (isa<complex::MulOp>(def))
        return addExp(kMulC, e0, e1);
      if (isa<arith::MulIOp>(def))
        return addExp(kMulI, e0, e1);
      if (isa<arith::DivFOp>(def) && !maybeZero(e1))
        return addExp(kDivF, e0, e1);
      if (isa<complex::DivOp>(def) && !maybeZero(e1))
        return addExp(kDivC, e0, e1);
      if (isa<arith::DivSIOp>(def) && !maybeZero(e1))
        return addExp(kDivS, e0, e1);
      if (isa<arith::DivUIOp>(def) && !maybeZero(e1))
        return addExp(kDivU, e0, e1);
      if (isa<arith::AddFOp>(def))
        return addExp(kAddF, e0, e1);
      if (isa<complex::AddOp>(def))
        return addExp(kAddC, e0, e1);
      if (isa<arith::AddIOp>(def))
        return addExp(kAddI, e0, e1);
      if (isa<arith::SubFOp>(def))
        return addExp(kSubF, e0, e1);
      if (isa<complex::SubOp>(def))
        return addExp(kSubC, e0, e1);
      if (isa<arith::SubIOp>(def))
        return addExp(kSubI, e0, e1);
      if (isa<arith::AndIOp>(def))
        return addExp(kAndI, e0, e1);
      if (isa<arith::OrIOp>(def))
        return addExp(kOrI, e0, e1);
      if (isa<arith::XOrIOp>(def))
        return addExp(kXorI, e0, e1);
      if (isa<arith::ShRSIOp>(def) && isInvariant(e1))
        return addExp(kShrS, e0, e1);
      if (isa<arith::ShRUIOp>(def) && isInvariant(e1))
        return addExp(kShrU, e0, e1);
      if (isa<arith::ShLIOp>(def) && isInvariant(e1))
        return addExp(kShlI, e0, e1);
      if (auto binop = dyn_cast<sparse_tensor::BinaryOp>(def)) {
        if (isAdmissableBranch(binop, binop.getOverlapRegion()) &&
            (binop.getLeftIdentity() ||
             isAdmissableBranch(binop, binop.getLeftRegion())) &&
            (binop.getRightIdentity() ||
             isAdmissableBranch(binop, binop.getRightRegion())))
          return addExp(kBinary, e0, e1, Value(), def);
      }
    }
  }
  // Cannot build.
  return None;
}
static Value insertYieldOp(RewriterBase &rewriter, Location loc, Region &region,
                           ValueRange vals) {
  // Make a clone of the region.
  Region tmpRegion;
  BlockAndValueMapping mapper;
  region.cloneInto(&tmpRegion, tmpRegion.begin(), mapper);
  Block &clonedBlock = tmpRegion.front();
  YieldOp clonedYield = cast<YieldOp>(clonedBlock.getTerminator());
  // Merge the cloned block at the insertion point (using a placeholder op to
  // anchor the merge), then return the yielded value and clean up.
  Operation *placeholder = rewriter.create<arith::ConstantIndexOp>(loc, 0);
  rewriter.mergeBlockBefore(&tmpRegion.front(), placeholder, vals);
  Value val = clonedYield.getResult();
  rewriter.eraseOp(clonedYield);
  rewriter.eraseOp(placeholder);
  return val;
}
static Value buildUnaryPresent(RewriterBase &rewriter, Location loc,
                               Operation *op, Value v0) {
  if (!v0)
    return Value(); // empty input value must be propagated
  UnaryOp unop = cast<UnaryOp>(op);
  Region &presentRegion = unop.getPresentRegion();
  if (presentRegion.empty())
    return Value(); // custom unary op did not present any results
  return insertYieldOp(rewriter, loc, presentRegion, {v0});
}
static Value buildBinaryOverlap(RewriterBase &rewriter, Location loc,
                                Operation *op, Value v0, Value v1) {
  if (!v0 || !v1)
    return Value(); // empty input values must be propagated
  BinaryOp binop = cast<BinaryOp>(op);
  Region &overlapRegion = binop.getOverlapRegion();
  if (overlapRegion.empty())
    return Value(); // custom binary op did not present any results
  return insertYieldOp(rewriter, loc, overlapRegion, {v0, v1});
}
Value Merger::buildExp(RewriterBase &rewriter, Location loc, unsigned e,
                       Value v0, Value v1) {
  switch (tensorExps[e].kind) {
  case kTensor:
  case kInvariant:
  case kIndex:
    llvm_unreachable("unexpected non-op");
  // Unary ops.
  case kAbsF:
    return rewriter.create<math::AbsOp>(loc, v0);
  case kAbsC: {
    auto type = v0.getType().cast<ComplexType>();
    auto eltType = type.getElementType().cast<FloatType>();
    return rewriter.create<complex::AbsOp>(loc, eltType, v0);
  }
  case kCeilF:
    return rewriter.create<math::CeilOp>(loc, v0);
  case kFloorF:
    return rewriter.create<math::FloorOp>(loc, v0);
  case kSqrtF:
    return rewriter.create<math::SqrtOp>(loc, v0);
  case kSqrtC:
    return rewriter.create<complex::SqrtOp>(loc, v0);
  case kExpm1F:
    return rewriter.create<math::ExpM1Op>(loc, v0);
  case kExpm1C:
    return rewriter.create<complex::Expm1Op>(loc, v0);
  case kLog1pF:
    return rewriter.create<math::Log1pOp>(loc, v0);
  case kLog1pC:
    return rewriter.create<complex::Log1pOp>(loc, v0);
  case kSinF:
    return rewriter.create<math::SinOp>(loc, v0);
  case kSinC:
    return rewriter.create<complex::SinOp>(loc, v0);
  case kTanhF:
    return rewriter.create<math::TanhOp>(loc, v0);
  case kTanhC:
    return rewriter.create<complex::TanhOp>(loc, v0);
  case kNegF:
    return rewriter.create<arith::NegFOp>(loc, v0);
  case kNegC:
    return rewriter.create<complex::NegOp>(loc, v0);
  case kNegI: // no negi in std
    return rewriter.create<arith::SubIOp>(
        loc,
        rewriter.create<arith::ConstantOp>(loc, v0.getType(),
                                           rewriter.getZeroAttr(v0.getType())),
        v0);
  case kTruncF:
    return rewriter.create<arith::TruncFOp>(loc, inferType(e, v0), v0);
  case kExtF:
    return rewriter.create<arith::ExtFOp>(loc, inferType(e, v0), v0);
  case kCastFS:
    return rewriter.create<arith::FPToSIOp>(loc, inferType(e, v0), v0);
  case kCastFU:
    return rewriter.create<arith::FPToUIOp>(loc, inferType(e, v0), v0);
  case kCastSF:
    return rewriter.create<arith::SIToFPOp>(loc, inferType(e, v0), v0);
  case kCastUF:
    return rewriter.create<arith::UIToFPOp>(loc, inferType(e, v0), v0);
  case kCastS:
    return rewriter.create<arith::ExtSIOp>(loc, inferType(e, v0), v0);
  case kCastU:
    return rewriter.create<arith::ExtUIOp>(loc, inferType(e, v0), v0);
  case kCastIdx:
    return rewriter.create<arith::IndexCastOp>(loc, inferType(e, v0), v0);
  case kTruncI:
    return rewriter.create<arith::TruncIOp>(loc, inferType(e, v0), v0);
  case kCIm: {
    auto type = v0.getType().cast<ComplexType>();
    auto eltType = type.getElementType().cast<FloatType>();
    return rewriter.create<complex::ImOp>(loc, eltType, v0);
  }
  case kCRe: {
    auto type = v0.getType().cast<ComplexType>();
    auto eltType = type.getElementType().cast<FloatType>();
    return rewriter.create<complex::ReOp>(loc, eltType, v0);
  }
  case kBitCast:
    return rewriter.create<arith::BitcastOp>(loc, inferType(e, v0), v0);
  // Binary ops.
  case kMulF:
    return rewriter.create<arith::MulFOp>(loc, v0, v1);
  case kMulC:
    return rewriter.create<complex::MulOp>(loc, v0, v1);
  case kMulI:
    return rewriter.create<arith::MulIOp>(loc, v0, v1);
  case kDivF:
    return rewriter.create<arith::DivFOp>(loc, v0, v1);
  case kDivC:
    return rewriter.create<complex::DivOp>(loc, v0, v1);
  case kDivS:
    return rewriter.create<arith::DivSIOp>(loc, v0, v1);
  case kDivU:
    return rewriter.create<arith::DivUIOp>(loc, v0, v1);
  case kAddF:
    return rewriter.create<arith::AddFOp>(loc, v0, v1);
  case kAddC:
    return rewriter.create<complex::AddOp>(loc, v0, v1);
  case kAddI:
    return rewriter.create<arith::AddIOp>(loc, v0, v1);
  case kSubF:
    return rewriter.create<arith::SubFOp>(loc, v0, v1);
  case kSubC:
    return rewriter.create<complex::SubOp>(loc, v0, v1);
  case kSubI:
    return rewriter.create<arith::SubIOp>(loc, v0, v1);
  case kAndI:
    return rewriter.create<arith::AndIOp>(loc, v0, v1);
  case kOrI:
    return rewriter.create<arith::OrIOp>(loc, v0, v1);
  case kXorI:
    return rewriter.create<arith::XOrIOp>(loc, v0, v1);
  case kShrS:
    return rewriter.create<arith::ShRSIOp>(loc, v0, v1);
  case kShrU:
    return rewriter.create<arith::ShRUIOp>(loc, v0, v1);
  case kShlI:
    return rewriter.create<arith::ShLIOp>(loc, v0, v1);
  // ... the semi-ring kinds (kBinaryBranch, kUnary via buildUnaryPresent,
  // kBinary via buildBinaryOverlap) defer to the cloned custom regions ...
  }
  llvm_unreachable("unexpected expression kind in build");
}
Kind
Tensor expression kind.
TensorExp(Kind k, unsigned x, unsigned y, Value v, Operation *operation)
static Value buildUnaryPresent(RewriterBase &rewriter, Location loc, Operation *op, Value v0)
unsigned mapSet(Kind kind, unsigned s0, Value v=Value(), Operation *op=nullptr)
Maps the unary operator over the lattice set of the operand, i.e. each lattice point on an expression E is simply copied over, but with OP E as its new expression.
bool onlyDenseDiff(unsigned i, unsigned j)
Returns true if Li and Lj only differ in dense.
unsigned addLat(unsigned t, unsigned i, unsigned e)
Adds an iteration lattice point. Returns its index.
BitVector bits
Conjunction of tensor loop indices as bitvector.
static Value insertYieldOp(RewriterBase &rewriter, Location loc, Region &region, ValueRange vals)
static bool isAdmissableBranch(Operation *op, Region &region)
Ensures that sparse compiler can generate code for branch.
static Value buildBinaryOverlap(RewriterBase &rewriter, Location loc, Operation *op, Value v0, Value v1)
bool latGT(unsigned i, unsigned j) const
Returns true if Li > Lj.
unsigned optimizeSet(unsigned s0)
Optimizes the iteration lattice points in the given set.
static bool isAdmissableBranchExp(Operation *op, Block *block, Value v)
Ensures that sparse compiler can generate code for expression.
unsigned exp
Index of the tensor expression.
unsigned takeDisj(Kind kind, unsigned s0, unsigned s1, Operation *op=nullptr)
Disjunctive merge of two lattice sets L0 and L1 is (L0 /\_op L1, L0, L1).
void dumpBits(const BitVector &bits) const
void dumpSet(unsigned s) const
void dumpExp(unsigned e) const
Print methods (for debugging).
unsigned tensor
Expressions representing tensors simply have a tensor number.
unsigned addExp(Kind k, unsigned e0, unsigned e1=-1u, Value v=Value(), Operation *op=nullptr)
Adds a tensor expression. Returns its index.
Tensor expression. Represents an MLIR expression in tensor index notation.
Kind kind
Tensor expression kind.
unsigned takeConj(Kind kind, unsigned s0, unsigned s1, Operation *op=nullptr)
Conjunctive merge of two lattice sets L0 and L1 is the conjunction of their cartesian product.
unsigned takeCombi(Kind kind, unsigned s0, unsigned s1, Operation *orig, bool includeLeft, Kind ltrans, Operation *opleft, bool includeRight, Kind rtrans, Operation *opright)
Disjunctive merge of two lattice sets L0 and L1 with custom handling of the overlap, left, and right regions.
BitVector simple
Simplified conjunction of tensor loop indices as bitvector.
BitVector simplifyCond(unsigned s0, unsigned p0)
Simplifies the conditions in a conjunction of a given lattice point within the given set using just two basic rules: multiple dense conditions are reduced to a single dense condition, and a *singleton* sparse/dense condition is reduced to sparse/random access.
Dim
Dimension level type for a tensor (undef means index does not appear).
unsigned conjLatPoint(Kind kind, unsigned p0, unsigned p1, Operation *op=nullptr)
Computes a single conjunction of two lattice points by taking the "union" of loop indices (effectively constructing a larger "intersection" of those indices) with a newly constructed tensor (sub)expression of the given kind.
static const char * kindToOpSymbol(Kind kind)
unsigned index
Indices hold the index number.
void dumpLat(unsigned p) const
unsigned addSet()
Adds a new, initially empty, set. Returns its index.
unsigned buildLattices(unsigned e, unsigned i)
Builds the iteration lattices in a bottom-up traversal given the remaining tensor (sub)expression and the next loop index in the iteration graph.
bool isSingleCondition(unsigned t, unsigned e) const
Returns true if given tensor iterates only in the given tensor expression.
Value buildExp(RewriterBase &rewriter, Location loc, unsigned e, Value v0, Value v1)
Rebuilds SSA format from a tensor expression.
bool hasAnyDimOf(const BitVector &bits, Dim d) const
Returns true if any set bit corresponds to queried dim.
LatPoint(unsigned n, unsigned e, unsigned b)
Optional< unsigned > buildTensorExpFromLinalg(linalg::GenericOp op)
Builds a tensor expression from the given Linalg operation.
Children children
Tensor operations hold the indices of their children.