1 //===- LLVMMemorySlot.cpp - MemorySlot interfaces ---------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements MemorySlot-related interfaces for LLVM dialect
10 // operations.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "mlir/Dialect/LLVMIR/LLVMDialect.h"
15 #include "mlir/Dialect/LLVMIR/LLVMTypes.h"
16 #include "mlir/IR/Matchers.h"
17 #include "mlir/IR/PatternMatch.h"
18 #include "mlir/IR/ValueRange.h"
19 #include "mlir/Interfaces/MemorySlotInterfaces.h"
20 #include "llvm/ADT/STLExtras.h"
21 #include "llvm/ADT/TypeSwitch.h"
22 #include "llvm/Support/DebugLog.h"
23 
24 #define DEBUG_TYPE "sroa"
25 
26 using namespace mlir;
27 
28 //===----------------------------------------------------------------------===//
29 // Interfaces for AllocaOp
30 //===----------------------------------------------------------------------===//
31 
32 llvm::SmallVector<MemorySlot> LLVM::AllocaOp::getPromotableSlots() {
33  if (!getOperation()->getBlock()->isEntryBlock())
34  return {};
35 
36  return {MemorySlot{getResult(), getElemType()}};
37 }
38 
39 Value LLVM::AllocaOp::getDefaultValue(const MemorySlot &slot,
40  OpBuilder &builder) {
41  return LLVM::UndefOp::create(builder, getLoc(), slot.elemType);
42 }
43 
44 void LLVM::AllocaOp::handleBlockArgument(const MemorySlot &slot,
45  BlockArgument argument,
46  OpBuilder &builder) {
47  for (Operation *user : getOperation()->getUsers())
48  if (auto declareOp = llvm::dyn_cast<LLVM::DbgDeclareOp>(user))
49  LLVM::DbgValueOp::create(builder, declareOp.getLoc(), argument,
50  declareOp.getVarInfo(),
51  declareOp.getLocationExpr());
52 }
53 
54 std::optional<PromotableAllocationOpInterface>
55 LLVM::AllocaOp::handlePromotionComplete(const MemorySlot &slot,
56  Value defaultValue,
57  OpBuilder &builder) {
58  if (defaultValue && defaultValue.use_empty())
59  defaultValue.getDefiningOp()->erase();
60  this->erase();
61  return std::nullopt;
62 }
63 
64 SmallVector<DestructurableMemorySlot> LLVM::AllocaOp::getDestructurableSlots() {
65  if (!mlir::matchPattern(getArraySize(), m_One()))
66  return {};
67 
68  auto destructurable = dyn_cast<DestructurableTypeInterface>(getElemType());
69  if (!destructurable)
70  return {};
71 
72  std::optional<DenseMap<Attribute, Type>> destructuredType =
73  destructurable.getSubelementIndexMap();
74  if (!destructuredType)
75  return {};
76 
77  return {DestructurableMemorySlot{{getResult(), getElemType()},
78  *destructuredType}};
79 }
80 
81 DenseMap<Attribute, MemorySlot> LLVM::AllocaOp::destructure(
82  const DestructurableMemorySlot &slot,
83  const SmallPtrSetImpl<Attribute> &usedIndices, OpBuilder &builder,
84  SmallVectorImpl<DestructurableAllocationOpInterface> &newAllocators) {
85  assert(slot.ptr == getResult());
86  builder.setInsertionPointAfter(*this);
87 
88  auto destructurableType = cast<DestructurableTypeInterface>(getElemType());
89  DenseMap<Attribute, MemorySlot> slotMap;
90  for (Attribute index : usedIndices) {
91  Type elemType = destructurableType.getTypeAtIndex(index);
92  assert(elemType && "used index must exist");
93  auto subAlloca = LLVM::AllocaOp::create(
94  builder, getLoc(), LLVM::LLVMPointerType::get(getContext()), elemType,
95  getArraySize());
96  newAllocators.push_back(subAlloca);
97  slotMap.try_emplace<MemorySlot>(index, {subAlloca.getResult(), elemType});
98  }
99 
100  return slotMap;
101 }
102 
103 std::optional<DestructurableAllocationOpInterface>
104 LLVM::AllocaOp::handleDestructuringComplete(
105  const DestructurableMemorySlot &slot, OpBuilder &builder) {
106  assert(slot.ptr == getResult());
107  this->erase();
108  return std::nullopt;
109 }
110 
111 //===----------------------------------------------------------------------===//
112 // Interfaces for LoadOp/StoreOp
113 //===----------------------------------------------------------------------===//
114 
115 bool LLVM::LoadOp::loadsFrom(const MemorySlot &slot) {
116  return getAddr() == slot.ptr;
117 }
118 
119 bool LLVM::LoadOp::storesTo(const MemorySlot &slot) { return false; }
120 
121 Value LLVM::LoadOp::getStored(const MemorySlot &slot, OpBuilder &builder,
122  Value reachingDef, const DataLayout &dataLayout) {
123  llvm_unreachable("getStored should not be called on LoadOp");
124 }
125 
126 bool LLVM::StoreOp::loadsFrom(const MemorySlot &slot) { return false; }
127 
128 bool LLVM::StoreOp::storesTo(const MemorySlot &slot) {
129  return getAddr() == slot.ptr;
130 }
131 
132 /// Checks if `type` can be used in any kind of conversion sequence.
133 static bool isSupportedTypeForConversion(Type type) {
134  // Aggregate types are not bitcastable.
135  if (isa<LLVM::LLVMStructType, LLVM::LLVMArrayType>(type))
136  return false;
137 
138  if (auto vectorType = dyn_cast<VectorType>(type)) {
139  // Vectors of pointers cannot be cast.
140  if (isa<LLVM::LLVMPointerType>(vectorType.getElementType()))
141  return false;
142  // Scalable types are not supported.
143  return !vectorType.isScalable();
144  }
145  return true;
146 }
147 
148 /// Checks that `srcType` can be converted to `targetType` by a sequence of
149 /// casts and truncations. Checks for narrowing or widening conversion
150 /// compatibility depending on `narrowingConversion`.
151 static bool areConversionCompatible(const DataLayout &layout, Type targetType,
152  Type srcType, bool narrowingConversion) {
153  if (targetType == srcType)
154  return true;
155 
156  if (!isSupportedTypeForConversion(targetType) ||
157  !isSupportedTypeForConversion(srcType))
158  return false;
159 
160  uint64_t targetSize = layout.getTypeSize(targetType);
161  uint64_t srcSize = layout.getTypeSize(srcType);
162 
163  // Pointer casts will only be sane when the bitsize of both pointer types is
164  // the same.
165  if (isa<LLVM::LLVMPointerType>(targetType) &&
166  isa<LLVM::LLVMPointerType>(srcType))
167  return targetSize == srcSize;
168 
169  if (narrowingConversion)
170  return targetSize <= srcSize;
171  return targetSize >= srcSize;
172 }
173 
174 /// Checks if `dataLayout` describes a big endian layout.
175 static bool isBigEndian(const DataLayout &dataLayout) {
176  auto endiannessStr = dyn_cast_or_null<StringAttr>(dataLayout.getEndianness());
177  return endiannessStr && endiannessStr == "big";
178 }
179 
180 /// Converts a value to an integer type of the same size.
181 /// Assumes that the type can be converted.
182 static Value castToSameSizedInt(OpBuilder &builder, Location loc, Value val,
183  const DataLayout &dataLayout) {
184  Type type = val.getType();
185  assert(isSupportedTypeForConversion(type) &&
186  "expected value to have a convertible type");
187 
188  if (isa<IntegerType>(type))
189  return val;
190 
191  uint64_t typeBitSize = dataLayout.getTypeSizeInBits(type);
192  IntegerType valueSizeInteger = builder.getIntegerType(typeBitSize);
193 
194  if (isa<LLVM::LLVMPointerType>(type))
195  return builder.createOrFold<LLVM::PtrToIntOp>(loc, valueSizeInteger, val);
196  return builder.createOrFold<LLVM::BitcastOp>(loc, valueSizeInteger, val);
197 }
198 
199 /// Converts a value with an integer type to `targetType`.
200 static Value castIntValueToSameSizedType(OpBuilder &builder, Location loc,
201  Value val, Type targetType) {
202  assert(isa<IntegerType>(val.getType()) &&
203  "expected value to have an integer type");
204  assert(isSupportedTypeForConversion(targetType) &&
205  "expected the target type to be supported for conversions");
206  if (val.getType() == targetType)
207  return val;
208  if (isa<LLVM::LLVMPointerType>(targetType))
209  return builder.createOrFold<LLVM::IntToPtrOp>(loc, targetType, val);
210  return builder.createOrFold<LLVM::BitcastOp>(loc, targetType, val);
211 }
212 
213 /// Constructs operations that convert `srcValue` into a new value of type
214 /// `targetType`. Assumes the types have the same bitsize.
215 static Value castSameSizedTypes(OpBuilder &builder, Location loc,
216  Value srcValue, Type targetType,
217  const DataLayout &dataLayout) {
218  Type srcType = srcValue.getType();
219  assert(areConversionCompatible(dataLayout, targetType, srcType,
220  /*narrowingConversion=*/true) &&
221  "expected that the compatibility was checked before");
222 
223  // Nothing has to be done if the types are already the same.
224  if (srcType == targetType)
225  return srcValue;
226 
227  // In the special case of casting one pointer to another, we want to generate
228  // an address space cast. Bitcasts of pointers are not allowed and using
229  // pointer to integer conversions are not equivalent due to the loss of
230  // provenance.
231  if (isa<LLVM::LLVMPointerType>(targetType) &&
232  isa<LLVM::LLVMPointerType>(srcType))
233  return builder.createOrFold<LLVM::AddrSpaceCastOp>(loc, targetType,
234  srcValue);
235 
236  // For all other castable types, casting through integers is necessary.
237  Value replacement = castToSameSizedInt(builder, loc, srcValue, dataLayout);
238  return castIntValueToSameSizedType(builder, loc, replacement, targetType);
239 }
240 
241 /// Constructs operations that convert `srcValue` into a new value of type
242 /// `targetType`. Performs bit-level extraction if the source type is larger
243 /// than the target type. Assumes that this conversion is possible.
244 static Value createExtractAndCast(OpBuilder &builder, Location loc,
245  Value srcValue, Type targetType,
246  const DataLayout &dataLayout) {
247  // Get the types of the source and target values.
248  Type srcType = srcValue.getType();
249  assert(areConversionCompatible(dataLayout, targetType, srcType,
250  /*narrowingConversion=*/true) &&
251  "expected that the compatibility was checked before");
252 
253  uint64_t srcTypeSize = dataLayout.getTypeSizeInBits(srcType);
254  uint64_t targetTypeSize = dataLayout.getTypeSizeInBits(targetType);
255  if (srcTypeSize == targetTypeSize)
256  return castSameSizedTypes(builder, loc, srcValue, targetType, dataLayout);
257 
258  // First, cast the value to a same-sized integer type.
259  Value replacement = castToSameSizedInt(builder, loc, srcValue, dataLayout);
260 
261  // Truncate the integer if the size of the target is less than the value.
262  if (isBigEndian(dataLayout)) {
263  uint64_t shiftAmount = srcTypeSize - targetTypeSize;
264  auto shiftConstant = LLVM::ConstantOp::create(
265  builder, loc, builder.getIntegerAttr(srcType, shiftAmount));
266  replacement =
267  builder.createOrFold<LLVM::LShrOp>(loc, srcValue, shiftConstant);
268  }
269 
270  replacement = LLVM::TruncOp::create(
271  builder, loc, builder.getIntegerType(targetTypeSize), replacement);
272 
273  // Now cast the integer to the actual target type if required.
274  return castIntValueToSameSizedType(builder, loc, replacement, targetType);
275 }
276 
277 /// Constructs operations that insert the bits of `srcValue` into the
278 /// "beginning" of `reachingDef` (beginning is endianness dependent).
279 /// Assumes that this conversion is possible.
280 static Value createInsertAndCast(OpBuilder &builder, Location loc,
281  Value srcValue, Value reachingDef,
282  const DataLayout &dataLayout) {
283 
284  assert(areConversionCompatible(dataLayout, reachingDef.getType(),
285  srcValue.getType(),
286  /*narrowingConversion=*/false) &&
287  "expected that the compatibility was checked before");
288  uint64_t valueTypeSize = dataLayout.getTypeSizeInBits(srcValue.getType());
289  uint64_t slotTypeSize = dataLayout.getTypeSizeInBits(reachingDef.getType());
290  if (slotTypeSize == valueTypeSize)
291  return castSameSizedTypes(builder, loc, srcValue, reachingDef.getType(),
292  dataLayout);
293 
294  // In the case where the store only overwrites parts of the memory,
295  // bit fiddling is required to construct the new value.
296 
297  // First convert both values to integers of the same size.
298  Value defAsInt = castToSameSizedInt(builder, loc, reachingDef, dataLayout);
299  Value valueAsInt = castToSameSizedInt(builder, loc, srcValue, dataLayout);
300  // Extend the value to the size of the reaching definition.
301  valueAsInt =
302  builder.createOrFold<LLVM::ZExtOp>(loc, defAsInt.getType(), valueAsInt);
303  uint64_t sizeDifference = slotTypeSize - valueTypeSize;
304  if (isBigEndian(dataLayout)) {
305  // On big endian systems, a store to the base pointer overwrites the most
306  // significant bits. To accommodate this, the stored value needs to be
307  // shifted into the corresponding position.
308  Value bigEndianShift = LLVM::ConstantOp::create(
309  builder, loc,
310  builder.getIntegerAttr(defAsInt.getType(), sizeDifference));
311  valueAsInt =
312  builder.createOrFold<LLVM::ShlOp>(loc, valueAsInt, bigEndianShift);
313  }
314 
315  // Construct the mask that is used to erase the bits that are overwritten by
316  // the store.
317  APInt maskValue;
318  if (isBigEndian(dataLayout)) {
319  // Build a mask that has the most significant bits set to zero.
320  // Note: This is the same as 2^sizeDifference - 1
321  maskValue = APInt::getAllOnes(sizeDifference).zext(slotTypeSize);
322  } else {
323  // Build a mask that has the least significant bits set to zero.
324  // Note: This is the same as -(2^valueTypeSize)
325  maskValue = APInt::getAllOnes(valueTypeSize).zext(slotTypeSize);
326  maskValue.flipAllBits();
327  }
328 
329  // Mask out the affected bits ...
330  Value mask = LLVM::ConstantOp::create(
331  builder, loc, builder.getIntegerAttr(defAsInt.getType(), maskValue));
332  Value masked = builder.createOrFold<LLVM::AndOp>(loc, defAsInt, mask);
333 
334  // ... and combine the result with the new value.
335  Value combined = builder.createOrFold<LLVM::OrOp>(loc, masked, valueAsInt);
336 
337  return castIntValueToSameSizedType(builder, loc, combined,
338  reachingDef.getType());
339 }
340 
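// Illustrative sketch, not part of the upstream implementation: the mask
// construction above, restated as a standalone helper over plain APInt values
// so the two endianness cases can be read in isolation. The helper name and
// the i16-into-i64 numbers are hypothetical.
// For slotTypeSize = 64 and valueTypeSize = 16:
//   little endian: mask = ~(2^16 - 1) = 0xFFFFFFFFFFFF0000 (low bits cleared)
//   big endian:    mask = 2^48 - 1    = 0x0000FFFFFFFFFFFF (high bits cleared)
[[maybe_unused]] static llvm::APInt
examplePartialStoreMask(uint64_t slotTypeSize, uint64_t valueTypeSize,
                        bool bigEndian) {
  llvm::APInt maskValue;
  if (bigEndian) {
    // The store overwrites the most significant bits; keep the rest.
    uint64_t sizeDifference = slotTypeSize - valueTypeSize;
    maskValue = llvm::APInt::getAllOnes(sizeDifference).zext(slotTypeSize);
  } else {
    // The store overwrites the least significant bits; keep the rest.
    maskValue = llvm::APInt::getAllOnes(valueTypeSize).zext(slotTypeSize);
    maskValue.flipAllBits();
  }
  return maskValue;
}
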
341 Value LLVM::StoreOp::getStored(const MemorySlot &slot, OpBuilder &builder,
342  Value reachingDef,
343  const DataLayout &dataLayout) {
344  assert(reachingDef && reachingDef.getType() == slot.elemType &&
345  "expected the reaching definition's type to match the slot's type");
346  return createInsertAndCast(builder, getLoc(), getValue(), reachingDef,
347  dataLayout);
348 }
349 
350 bool LLVM::LoadOp::canUsesBeRemoved(
351  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
352  SmallVectorImpl<OpOperand *> &newBlockingUses,
353  const DataLayout &dataLayout) {
354  if (blockingUses.size() != 1)
355  return false;
356  Value blockingUse = (*blockingUses.begin())->get();
357  // If the blocking use is the slot ptr itself, there will be enough
358  // context to reconstruct the result of the load at removal time, so it can
359  // be removed (provided it is not volatile).
360  return blockingUse == slot.ptr && getAddr() == slot.ptr &&
361  areConversionCompatible(dataLayout, getResult().getType(),
362  slot.elemType, /*narrowingConversion=*/true) &&
363  !getVolatile_();
364 }
365 
366 DeletionKind LLVM::LoadOp::removeBlockingUses(
367  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
368  OpBuilder &builder, Value reachingDefinition,
369  const DataLayout &dataLayout) {
370  // `canUsesBeRemoved` checked this blocking use must be the loaded slot
371  // pointer.
372  Value newResult = createExtractAndCast(builder, getLoc(), reachingDefinition,
373  getResult().getType(), dataLayout);
374  getResult().replaceAllUsesWith(newResult);
375  return DeletionKind::Delete;
376 }
377 
378 bool LLVM::StoreOp::canUsesBeRemoved(
379  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
380  SmallVectorImpl<OpOperand *> &newBlockingUses,
381  const DataLayout &dataLayout) {
382  if (blockingUses.size() != 1)
383  return false;
384  Value blockingUse = (*blockingUses.begin())->get();
385  // If the blocking use is the slot ptr itself, dropping the store is
386  // fine, provided we are currently promoting its target value. Don't allow a
387  // store OF the slot pointer, only INTO the slot pointer.
388  return blockingUse == slot.ptr && getAddr() == slot.ptr &&
389  getValue() != slot.ptr &&
390  areConversionCompatible(dataLayout, slot.elemType,
391  getValue().getType(),
392  /*narrowingConversion=*/false) &&
393  !getVolatile_();
394 }
395 
396 DeletionKind LLVM::StoreOp::removeBlockingUses(
397  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
398  OpBuilder &builder, Value reachingDefinition,
399  const DataLayout &dataLayout) {
400  return DeletionKind::Delete;
401 }
402 
403 /// Checks if `slot` can be accessed through the provided access type.
404 static bool isValidAccessType(const MemorySlot &slot, Type accessType,
405  const DataLayout &dataLayout) {
406  return dataLayout.getTypeSize(accessType) <=
407  dataLayout.getTypeSize(slot.elemType);
408 }
409 
410 LogicalResult LLVM::LoadOp::ensureOnlySafeAccesses(
411  const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
412  const DataLayout &dataLayout) {
413  return success(getAddr() != slot.ptr ||
414  isValidAccessType(slot, getType(), dataLayout));
415 }
416 
417 LogicalResult LLVM::StoreOp::ensureOnlySafeAccesses(
418  const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
419  const DataLayout &dataLayout) {
420  return success(getAddr() != slot.ptr ||
421  isValidAccessType(slot, getValue().getType(), dataLayout));
422 }
423 
424 /// Returns the subslot's type at the requested index.
425 static Type getTypeAtIndex(const DestructurableMemorySlot &slot,
426  Attribute index) {
427  auto subelementIndexMap =
428  cast<DestructurableTypeInterface>(slot.elemType).getSubelementIndexMap();
429  if (!subelementIndexMap)
430  return {};
431  assert(!subelementIndexMap->empty());
432 
433  // Note: Returns a null-type when no entry was found.
434  return subelementIndexMap->lookup(index);
435 }
436 
437 bool LLVM::LoadOp::canRewire(const DestructurableMemorySlot &slot,
438  SmallPtrSetImpl<Attribute> &usedIndices,
439  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
440  const DataLayout &dataLayout) {
441  if (getVolatile_())
442  return false;
443 
444  // A load always accesses the first element of the destructured slot.
445  auto index = IntegerAttr::get(IntegerType::get(getContext(), 32), 0);
446  Type subslotType = getTypeAtIndex(slot, index);
447  if (!subslotType)
448  return false;
449 
450  // The access can only be replaced when the subslot is read within its bounds.
451  if (dataLayout.getTypeSize(getType()) > dataLayout.getTypeSize(subslotType))
452  return false;
453 
454  usedIndices.insert(index);
455  return true;
456 }
457 
458 DeletionKind LLVM::LoadOp::rewire(const DestructurableMemorySlot &slot,
459  DenseMap<Attribute, MemorySlot> &subslots,
460  OpBuilder &builder,
461  const DataLayout &dataLayout) {
462  auto index = IntegerAttr::get(IntegerType::get(getContext(), 32), 0);
463  auto it = subslots.find(index);
464  assert(it != subslots.end());
465 
466  getAddrMutable().set(it->getSecond().ptr);
467  return DeletionKind::Keep;
468 }
469 
470 bool LLVM::StoreOp::canRewire(const DestructurableMemorySlot &slot,
471  SmallPtrSetImpl<Attribute> &usedIndices,
472  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
473  const DataLayout &dataLayout) {
474  if (getVolatile_())
475  return false;
476 
477  // Storing the pointer to memory cannot be dealt with.
478  if (getValue() == slot.ptr)
479  return false;
480 
481  // A store always accesses the first element of the destructured slot.
482  auto index = IntegerAttr::get(IntegerType::get(getContext(), 32), 0);
483  Type subslotType = getTypeAtIndex(slot, index);
484  if (!subslotType)
485  return false;
486 
488  // The access can only be replaced when the subslot is written within its bounds.
488  if (dataLayout.getTypeSize(getValue().getType()) >
489  dataLayout.getTypeSize(subslotType))
490  return false;
491 
492  usedIndices.insert(index);
493  return true;
494 }
495 
496 DeletionKind LLVM::StoreOp::rewire(const DestructurableMemorySlot &slot,
497  DenseMap<Attribute, MemorySlot> &subslots,
498  OpBuilder &builder,
499  const DataLayout &dataLayout) {
500  auto index = IntegerAttr::get(IntegerType::get(getContext(), 32), 0);
501  auto it = subslots.find(index);
502  assert(it != subslots.end());
503 
504  getAddrMutable().set(it->getSecond().ptr);
505  return DeletionKind::Keep;
506 }
507 
508 //===----------------------------------------------------------------------===//
509 // Interfaces for discardable OPs
510 //===----------------------------------------------------------------------===//
511 
512 /// Makes deletion of the operation conditional on the removal of all its uses.
513 static bool forwardToUsers(Operation *op,
514  SmallVectorImpl<OpOperand *> &newBlockingUses) {
515  for (Value result : op->getResults())
516  for (OpOperand &use : result.getUses())
517  newBlockingUses.push_back(&use);
518  return true;
519 }
520 
521 bool LLVM::BitcastOp::canUsesBeRemoved(
522  const SmallPtrSetImpl<OpOperand *> &blockingUses,
523  SmallVectorImpl<OpOperand *> &newBlockingUses,
524  const DataLayout &dataLayout) {
525  return forwardToUsers(*this, newBlockingUses);
526 }
527 
528 DeletionKind LLVM::BitcastOp::removeBlockingUses(
529  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
530  return DeletionKind::Delete;
531 }
532 
533 bool LLVM::AddrSpaceCastOp::canUsesBeRemoved(
534  const SmallPtrSetImpl<OpOperand *> &blockingUses,
535  SmallVectorImpl<OpOperand *> &newBlockingUses,
536  const DataLayout &dataLayout) {
537  return forwardToUsers(*this, newBlockingUses);
538 }
539 
540 DeletionKind LLVM::AddrSpaceCastOp::removeBlockingUses(
541  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
542  return DeletionKind::Delete;
543 }
544 
545 bool LLVM::LifetimeStartOp::canUsesBeRemoved(
546  const SmallPtrSetImpl<OpOperand *> &blockingUses,
547  SmallVectorImpl<OpOperand *> &newBlockingUses,
548  const DataLayout &dataLayout) {
549  return true;
550 }
551 
552 DeletionKind LLVM::LifetimeStartOp::removeBlockingUses(
553  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
554  return DeletionKind::Delete;
555 }
556 
557 bool LLVM::LifetimeEndOp::canUsesBeRemoved(
558  const SmallPtrSetImpl<OpOperand *> &blockingUses,
559  SmallVectorImpl<OpOperand *> &newBlockingUses,
560  const DataLayout &dataLayout) {
561  return true;
562 }
563 
564 DeletionKind LLVM::LifetimeEndOp::removeBlockingUses(
565  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
566  return DeletionKind::Delete;
567 }
568 
569 bool LLVM::InvariantStartOp::canUsesBeRemoved(
570  const SmallPtrSetImpl<OpOperand *> &blockingUses,
571  SmallVectorImpl<OpOperand *> &newBlockingUses,
572  const DataLayout &dataLayout) {
573  return true;
574 }
575 
576 DeletionKind LLVM::InvariantStartOp::removeBlockingUses(
577  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
578  return DeletionKind::Delete;
579 }
580 
581 bool LLVM::InvariantEndOp::canUsesBeRemoved(
582  const SmallPtrSetImpl<OpOperand *> &blockingUses,
583  SmallVectorImpl<OpOperand *> &newBlockingUses,
584  const DataLayout &dataLayout) {
585  return true;
586 }
587 
588 DeletionKind LLVM::InvariantEndOp::removeBlockingUses(
589  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
590  return DeletionKind::Delete;
591 }
592 
593 bool LLVM::LaunderInvariantGroupOp::canUsesBeRemoved(
594  const SmallPtrSetImpl<OpOperand *> &blockingUses,
595  SmallVectorImpl<OpOperand *> &newBlockingUses,
596  const DataLayout &dataLayout) {
597  return forwardToUsers(*this, newBlockingUses);
598 }
599 
600 DeletionKind LLVM::LaunderInvariantGroupOp::removeBlockingUses(
601  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
602  return DeletionKind::Delete;
603 }
604 
605 bool LLVM::StripInvariantGroupOp::canUsesBeRemoved(
606  const SmallPtrSetImpl<OpOperand *> &blockingUses,
607  SmallVectorImpl<OpOperand *> &newBlockingUses,
608  const DataLayout &dataLayout) {
609  return forwardToUsers(*this, newBlockingUses);
610 }
611 
612 DeletionKind LLVM::StripInvariantGroupOp::removeBlockingUses(
613  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
614  return DeletionKind::Delete;
615 }
616 
617 bool LLVM::DbgDeclareOp::canUsesBeRemoved(
618  const SmallPtrSetImpl<OpOperand *> &blockingUses,
619  SmallVectorImpl<OpOperand *> &newBlockingUses,
620  const DataLayout &dataLayout) {
621  return true;
622 }
623 
624 DeletionKind LLVM::DbgDeclareOp::removeBlockingUses(
625  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
626  return DeletionKind::Delete;
627 }
628 
629 bool LLVM::DbgValueOp::canUsesBeRemoved(
630  const SmallPtrSetImpl<OpOperand *> &blockingUses,
631  SmallVectorImpl<OpOperand *> &newBlockingUses,
632  const DataLayout &dataLayout) {
633  // There is only one operand that we can remove the use of.
634  if (blockingUses.size() != 1)
635  return false;
636 
637  return (*blockingUses.begin())->get() == getValue();
638 }
639 
640 DeletionKind LLVM::DbgValueOp::removeBlockingUses(
641  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
642  // builder by default is after '*this', but we need it before '*this'.
643  builder.setInsertionPoint(*this);
644 
645  // Rather than dropping the debug value, replace it with undef to preserve the
646  // debug local variable info. This allows the debugger to inform the user that
647  // the variable has been optimized out.
648  auto undef =
649  UndefOp::create(builder, getValue().getLoc(), getValue().getType());
650  getValueMutable().assign(undef);
651  return DeletionKind::Keep;
652 }
653 
654 bool LLVM::DbgDeclareOp::requiresReplacedValues() { return true; }
655 
656 void LLVM::DbgDeclareOp::visitReplacedValues(
657  ArrayRef<std::pair<Operation *, Value>> definitions, OpBuilder &builder) {
658  for (auto [op, value] : definitions) {
659  builder.setInsertionPointAfter(op);
660  LLVM::DbgValueOp::create(builder, getLoc(), value, getVarInfo(),
661  getLocationExpr());
662  }
663 }
664 
665 //===----------------------------------------------------------------------===//
666 // Interfaces for GEPOp
667 //===----------------------------------------------------------------------===//
668 
669 static bool hasAllZeroIndices(LLVM::GEPOp gepOp) {
670  return llvm::all_of(gepOp.getIndices(), [](auto index) {
671  auto indexAttr = llvm::dyn_cast_if_present<IntegerAttr>(index);
672  return indexAttr && indexAttr.getValue() == 0;
673  });
674 }
675 
676 bool LLVM::GEPOp::canUsesBeRemoved(
677  const SmallPtrSetImpl<OpOperand *> &blockingUses,
678  SmallVectorImpl<OpOperand *> &newBlockingUses,
679  const DataLayout &dataLayout) {
680  // GEP can be removed as long as it is a no-op and its users can be removed.
681  if (!hasAllZeroIndices(*this))
682  return false;
683  return forwardToUsers(*this, newBlockingUses);
684 }
685 
686 DeletionKind LLVM::GEPOp::removeBlockingUses(
687  const SmallPtrSetImpl<OpOperand *> &blockingUses, OpBuilder &builder) {
688  return DeletionKind::Delete;
689 }
690 
691 /// Returns the number of bytes the provided GEP elements will offset the
692 /// pointer by. Returns nullopt if no constant offset could be computed.
693 static std::optional<uint64_t> gepToByteOffset(const DataLayout &dataLayout,
694  LLVM::GEPOp gep) {
695  // Collects all indices.
696  SmallVector<uint64_t> indices;
697  for (auto index : gep.getIndices()) {
698  auto constIndex = dyn_cast<IntegerAttr>(index);
699  if (!constIndex)
700  return {};
701  int64_t gepIndex = constIndex.getInt();
702  // Negative indices are not supported.
703  if (gepIndex < 0)
704  return {};
705  indices.push_back(gepIndex);
706  }
707 
708  Type currentType = gep.getElemType();
709  uint64_t offset = indices[0] * dataLayout.getTypeSize(currentType);
710 
711  for (uint64_t index : llvm::drop_begin(indices)) {
712  bool shouldCancel =
713  TypeSwitch<Type, bool>(currentType)
714  .Case([&](LLVM::LLVMArrayType arrayType) {
715  offset +=
716  index * dataLayout.getTypeSize(arrayType.getElementType());
717  currentType = arrayType.getElementType();
718  return false;
719  })
720  .Case([&](LLVM::LLVMStructType structType) {
721  ArrayRef<Type> body = structType.getBody();
722  assert(index < body.size() && "expected valid struct indexing");
723  for (uint32_t i : llvm::seq(index)) {
724  if (!structType.isPacked())
725  offset = llvm::alignTo(
726  offset, dataLayout.getTypeABIAlignment(body[i]));
727  offset += dataLayout.getTypeSize(body[i]);
728  }
729 
730  // Align for the current type as well.
731  if (!structType.isPacked())
732  offset = llvm::alignTo(
733  offset, dataLayout.getTypeABIAlignment(body[index]));
734  currentType = body[index];
735  return false;
736  })
737  .Default([&](Type type) {
738  LDBG() << "[sroa] Unsupported type for offset computations: "
739  << type;
740  return true;
741  });
742 
743  if (shouldCancel)
744  return std::nullopt;
745  }
746 
747  return offset;
748 }
749 
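// Illustrative sketch, not part of the upstream implementation: the same
// arithmetic gepToByteOffset performs, written out for a hypothetical
// non-packed struct {i32, i64, i16} (field sizes 4, 8, 2; ABI alignments
// 4, 8, 2; struct size 24) accessed with GEP indices [1, 2]. The first index
// scales by the size of the whole struct, the second accumulates aligned
// field sizes up to field #2: 24 + 4 = 28, aligned to 8 gives 32, plus 8
// gives 40 bytes.
[[maybe_unused]] static uint64_t exampleStructGepByteOffset() {
  const uint64_t fieldSizes[] = {4, 8, 2};
  const uint64_t fieldAligns[] = {4, 8, 2};
  const uint64_t structSize = 24;
  // First GEP index: whole-struct stride.
  uint64_t offset = 1 * structSize;
  // Second GEP index: skip over fields 0 and 1, honoring their alignment.
  for (unsigned i = 0; i < 2; ++i) {
    offset = llvm::alignTo(offset, fieldAligns[i]);
    offset += fieldSizes[i];
  }
  // Finally align for the selected field itself.
  offset = llvm::alignTo(offset, fieldAligns[2]);
  return offset; // 40
}
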
750 namespace {
751 /// A struct that stores both the index into the aggregate type of the slot
752 /// and the corresponding byte offset in memory.
753 struct SubslotAccessInfo {
754  /// The parent slot's index that the access falls into.
755  uint32_t index;
756  /// The offset into the subslot of the access.
757  uint64_t subslotOffset;
758 };
759 } // namespace
760 
761 /// Computes subslot access information for an access into `slot` with the given
762 /// offset.
763 /// Returns nullopt when the offset is out-of-bounds or when the access is into
764 /// the padding of `slot`.
765 static std::optional<SubslotAccessInfo>
766 getSubslotAccessInfo(const DestructurableMemorySlot &slot,
767  const DataLayout &dataLayout, LLVM::GEPOp gep) {
768  std::optional<uint64_t> offset = gepToByteOffset(dataLayout, gep);
769  if (!offset)
770  return {};
771 
772  // Helper to check that a constant index is in the bounds of the GEP index
773  // representation. The LLVM dialect's GEP arguments have a limited bitwidth,
774  // thus this additional check is necessary.
774  // this additional check is necessary.
775  auto isOutOfBoundsGEPIndex = [](uint64_t index) {
776  return index >= (1 << LLVM::kGEPConstantBitWidth);
777  };
778 
779  Type type = slot.elemType;
780  if (*offset >= dataLayout.getTypeSize(type))
781  return {};
782  return TypeSwitch<Type, std::optional<SubslotAccessInfo>>(type)
783  .Case([&](LLVM::LLVMArrayType arrayType)
784  -> std::optional<SubslotAccessInfo> {
785  // Find which element of the array contains the offset.
786  uint64_t elemSize = dataLayout.getTypeSize(arrayType.getElementType());
787  uint64_t index = *offset / elemSize;
788  if (isOutOfBoundsGEPIndex(index))
789  return {};
790  return SubslotAccessInfo{static_cast<uint32_t>(index),
791  *offset - (index * elemSize)};
792  })
793  .Case([&](LLVM::LLVMStructType structType)
794  -> std::optional<SubslotAccessInfo> {
795  uint64_t distanceToStart = 0;
796  // Walk over the elements of the struct to find in which of
797  // them the offset is.
798  for (auto [index, elem] : llvm::enumerate(structType.getBody())) {
799  uint64_t elemSize = dataLayout.getTypeSize(elem);
800  if (!structType.isPacked()) {
801  distanceToStart = llvm::alignTo(
802  distanceToStart, dataLayout.getTypeABIAlignment(elem));
803  // If the offset is in padding, cancel the rewrite.
804  if (offset < distanceToStart)
805  return {};
806  }
807 
808  if (offset < distanceToStart + elemSize) {
809  if (isOutOfBoundsGEPIndex(index))
810  return {};
811  // The offset is within this element, stop iterating the
812  // struct and return the index.
813  return SubslotAccessInfo{static_cast<uint32_t>(index),
814  *offset - distanceToStart};
815  }
816 
817  // The offset is not within this element, continue walking
818  // over the struct.
819  distanceToStart += elemSize;
820  }
821 
822  return {};
823  });
824 }
825 
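// Illustrative sketch, not part of the upstream implementation: the array case
// of getSubslotAccessInfo for a hypothetical slot of i32 elements (element
// size 4) and a GEP byte offset of 13. The access lands in element 3, with one
// byte of offset left inside that subslot.
[[maybe_unused]] static SubslotAccessInfo exampleArraySubslotAccess() {
  const uint64_t elemSize = 4;
  const uint64_t offset = 13;
  uint64_t index = offset / elemSize;
  return SubslotAccessInfo{static_cast<uint32_t>(index),
                           offset - index * elemSize}; // {3, 1}
}
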
826 /// Constructs a byte array type of the given size.
827 static LLVM::LLVMArrayType getByteArrayType(MLIRContext *context,
828  unsigned size) {
829  auto byteType = IntegerType::get(context, 8);
830  return LLVM::LLVMArrayType::get(context, byteType, size);
831 }
832 
833 LogicalResult LLVM::GEPOp::ensureOnlySafeAccesses(
834  const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
835  const DataLayout &dataLayout) {
836  if (getBase() != slot.ptr)
837  return success();
838  std::optional<uint64_t> gepOffset = gepToByteOffset(dataLayout, *this);
839  if (!gepOffset)
840  return failure();
841  uint64_t slotSize = dataLayout.getTypeSize(slot.elemType);
842  // Check that the access is strictly inside the slot.
843  if (*gepOffset >= slotSize)
844  return failure();
845  // Every access that remains in bounds of the remaining slot is considered
846  // legal.
847  mustBeSafelyUsed.emplace_back<MemorySlot>(
848  {getRes(), getByteArrayType(getContext(), slotSize - *gepOffset)});
849  return success();
850 }
851 
852 bool LLVM::GEPOp::canRewire(const DestructurableMemorySlot &slot,
853  SmallPtrSetImpl<Attribute> &usedIndices,
854  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
855  const DataLayout &dataLayout) {
856  if (!isa<LLVM::LLVMPointerType>(getBase().getType()))
857  return false;
858 
859  if (getBase() != slot.ptr)
860  return false;
861  std::optional<SubslotAccessInfo> accessInfo =
862  getSubslotAccessInfo(slot, dataLayout, *this);
863  if (!accessInfo)
864  return false;
865  auto indexAttr =
866  IntegerAttr::get(IntegerType::get(getContext(), 32), accessInfo->index);
867  assert(slot.subelementTypes.contains(indexAttr));
868  usedIndices.insert(indexAttr);
869 
870  // The remainder of the subslot should be accessed in-bounds. Thus, we create
871  // a dummy slot with the size of the remainder.
872  Type subslotType = slot.subelementTypes.lookup(indexAttr);
873  uint64_t slotSize = dataLayout.getTypeSize(subslotType);
874  LLVM::LLVMArrayType remainingSlotType =
875  getByteArrayType(getContext(), slotSize - accessInfo->subslotOffset);
876  mustBeSafelyUsed.emplace_back<MemorySlot>({getRes(), remainingSlotType});
877 
878  return true;
879 }
880 
881 DeletionKind LLVM::GEPOp::rewire(const DestructurableMemorySlot &slot,
882  DenseMap<Attribute, MemorySlot> &subslots,
883  OpBuilder &builder,
884  const DataLayout &dataLayout) {
885  std::optional<SubslotAccessInfo> accessInfo =
886  getSubslotAccessInfo(slot, dataLayout, *this);
887  assert(accessInfo && "expected access info to be checked before");
888  auto indexAttr =
889  IntegerAttr::get(IntegerType::get(getContext(), 32), accessInfo->index);
890  const MemorySlot &newSlot = subslots.at(indexAttr);
891 
892  auto byteType = IntegerType::get(builder.getContext(), 8);
893  auto newPtr = builder.createOrFold<LLVM::GEPOp>(
894  getLoc(), getResult().getType(), byteType, newSlot.ptr,
895  ArrayRef<GEPArg>(accessInfo->subslotOffset), getNoWrapFlags());
896  getResult().replaceAllUsesWith(newPtr);
897  return DeletionKind::Delete;
898 }
899 
900 //===----------------------------------------------------------------------===//
901 // Utilities for memory intrinsics
902 //===----------------------------------------------------------------------===//
903 
904 namespace {
905 
906 /// Returns the length of the given memory intrinsic in bytes if it can be known
907 /// at compile-time on a best-effort basis, nothing otherwise.
908 template <class MemIntr>
909 std::optional<uint64_t> getStaticMemIntrLen(MemIntr op) {
910  APInt memIntrLen;
911  if (!matchPattern(op.getLen(), m_ConstantInt(&memIntrLen)))
912  return {};
913  if (memIntrLen.getBitWidth() > 64)
914  return {};
915  return memIntrLen.getZExtValue();
916 }
917 
918 /// Returns the length of the given memory intrinsic in bytes if it can be known
919 /// at compile-time on a best-effort basis, nothing otherwise.
920 /// Because MemcpyInlineOp has its length encoded as an attribute, this requires
921 /// specialized handling.
922 template <>
923 std::optional<uint64_t> getStaticMemIntrLen(LLVM::MemcpyInlineOp op) {
924  APInt memIntrLen = op.getLen();
925  if (memIntrLen.getBitWidth() > 64)
926  return {};
927  return memIntrLen.getZExtValue();
928 }
929 
930 /// Returns the length of the given memory intrinsic in bytes if it can be known
931 /// at compile-time on a best-effort basis, nothing otherwise.
932 /// Because MemsetInlineOp has its length encoded as an attribute, this requires
933 /// specialized handling.
934 template <>
935 std::optional<uint64_t> getStaticMemIntrLen(LLVM::MemsetInlineOp op) {
936  APInt memIntrLen = op.getLen();
937  if (memIntrLen.getBitWidth() > 64)
938  return {};
939  return memIntrLen.getZExtValue();
940 }
941 
942 /// Returns an integer attribute representing the length of a memset intrinsic.
943 template <class MemsetIntr>
944 IntegerAttr createMemsetLenAttr(MemsetIntr op) {
945  IntegerAttr memsetLenAttr;
946  bool successfulMatch =
947  matchPattern(op.getLen(), m_Constant<IntegerAttr>(&memsetLenAttr));
948  (void)successfulMatch;
949  assert(successfulMatch);
950  return memsetLenAttr;
951 }
952 
953 /// Returns an integer attribute representing the length of a memset intrinsic.
954 /// Because MemsetInlineOp has its length encoded as an attribute, this requires
955 /// specialized handling.
956 template <>
957 IntegerAttr createMemsetLenAttr(LLVM::MemsetInlineOp op) {
958  return op.getLenAttr();
959 }
960 
961 /// Creates a memset intrinsic that matches the `toReplace` intrinsic
962 /// using the provided parameters. There are template specializations for
963 /// MemsetOp and MemsetInlineOp.
964 template <class MemsetIntr>
965 void createMemsetIntr(OpBuilder &builder, MemsetIntr toReplace,
966  IntegerAttr memsetLenAttr, uint64_t newMemsetSize,
967  DenseMap<Attribute, MemorySlot> &subslots,
968  Attribute index);
969 
970 template <>
971 void createMemsetIntr(OpBuilder &builder, LLVM::MemsetOp toReplace,
972  IntegerAttr memsetLenAttr, uint64_t newMemsetSize,
973  DenseMap<Attribute, MemorySlot> &subslots,
974  Attribute index) {
975  Value newMemsetSizeValue =
976  LLVM::ConstantOp::create(
977  builder, toReplace.getLen().getLoc(),
978  IntegerAttr::get(memsetLenAttr.getType(), newMemsetSize))
979  .getResult();
980 
981  LLVM::MemsetOp::create(builder, toReplace.getLoc(), subslots.at(index).ptr,
982  toReplace.getVal(), newMemsetSizeValue,
983  toReplace.getIsVolatile());
984 }
985 
986 template <>
987 void createMemsetIntr(OpBuilder &builder, LLVM::MemsetInlineOp toReplace,
988  IntegerAttr memsetLenAttr, uint64_t newMemsetSize,
989  DenseMap<Attribute, MemorySlot> &subslots,
990  Attribute index) {
991  auto newMemsetSizeValue =
992  IntegerAttr::get(memsetLenAttr.getType(), newMemsetSize);
993 
994  LLVM::MemsetInlineOp::create(builder, toReplace.getLoc(),
995  subslots.at(index).ptr, toReplace.getVal(),
996  newMemsetSizeValue, toReplace.getIsVolatile());
997 }
998 
999 } // namespace
1000 
1001 /// Returns whether one can be sure the memory intrinsic does not write outside
1002 /// of the bounds of the given slot, on a best-effort basis.
1003 template <class MemIntr>
1004 static bool definitelyWritesOnlyWithinSlot(MemIntr op, const MemorySlot &slot,
1005  const DataLayout &dataLayout) {
1006  if (!isa<LLVM::LLVMPointerType>(slot.ptr.getType()) ||
1007  op.getDst() != slot.ptr)
1008  return false;
1009 
1010  std::optional<uint64_t> memIntrLen = getStaticMemIntrLen(op);
1011  return memIntrLen && *memIntrLen <= dataLayout.getTypeSize(slot.elemType);
1012 }
1013 
1014 /// Checks whether all indices are i32. This is used to check that GEPs can
1015 /// index into the subslots.
1016 static bool areAllIndicesI32(const DestructurableMemorySlot &slot) {
1017  Type i32 = IntegerType::get(slot.ptr.getContext(), 32);
1018  return llvm::all_of(llvm::make_first_range(slot.subelementTypes),
1019  [&](Attribute index) {
1020  auto intIndex = dyn_cast<IntegerAttr>(index);
1021  return intIndex && intIndex.getType() == i32;
1022  });
1023 }
1024 
1025 //===----------------------------------------------------------------------===//
1026 // Interfaces for memset and memset.inline
1027 //===----------------------------------------------------------------------===//
1028 
1029 template <class MemsetIntr>
1030 static bool memsetCanRewire(MemsetIntr op, const DestructurableMemorySlot &slot,
1031  SmallPtrSetImpl<Attribute> &usedIndices,
1032  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1033  const DataLayout &dataLayout) {
1034  if (&slot.elemType.getDialect() != op.getOperation()->getDialect())
1035  return false;
1036 
1037  if (op.getIsVolatile())
1038  return false;
1039 
1040  if (!cast<DestructurableTypeInterface>(slot.elemType).getSubelementIndexMap())
1041  return false;
1042 
1043  if (!areAllIndicesI32(slot))
1044  return false;
1045 
1046  return definitelyWritesOnlyWithinSlot(op, slot, dataLayout);
1047 }
1048 
1049 template <class MemsetIntr>
1050 static Value memsetGetStored(MemsetIntr op, const MemorySlot &slot,
1051  OpBuilder &builder) {
1052  /// Returns an integer value that is `width` bits wide representing the value
1053  /// assigned to the slot by memset.
1054  auto buildMemsetValue = [&](unsigned width) -> Value {
1055  assert(width % 8 == 0);
1056  auto intType = IntegerType::get(op.getContext(), width);
1057 
1058  // If we know the pattern at compile time, we can compute and assign a
1059  // constant directly.
1060  IntegerAttr constantPattern;
1061  if (matchPattern(op.getVal(), m_Constant(&constantPattern))) {
1062  assert(constantPattern.getValue().getBitWidth() == 8);
1063  APInt memsetVal(/*numBits=*/width, /*val=*/0);
1064  for (unsigned loBit = 0; loBit < width; loBit += 8)
1065  memsetVal.insertBits(constantPattern.getValue(), loBit);
1066  return LLVM::ConstantOp::create(builder, op.getLoc(),
1067  IntegerAttr::get(intType, memsetVal));
1068  }
1069 
1070  // If the output is a single byte, we can return the pattern directly.
1071  if (width == 8)
1072  return op.getVal();
1073 
1074  // Otherwise build the memset integer at runtime by repeatedly shifting the
1075  // value and or-ing it with the previous value.
1076  uint64_t coveredBits = 8;
1077  Value currentValue =
1078  LLVM::ZExtOp::create(builder, op.getLoc(), intType, op.getVal());
1079  while (coveredBits < width) {
1080  Value shiftBy =
1081  LLVM::ConstantOp::create(builder, op.getLoc(), intType, coveredBits);
1082  Value shifted =
1083  LLVM::ShlOp::create(builder, op.getLoc(), currentValue, shiftBy);
1084  currentValue =
1085  LLVM::OrOp::create(builder, op.getLoc(), currentValue, shifted);
1086  coveredBits *= 2;
1087  }
1088 
1089  return currentValue;
1090  };
1091  return TypeSwitch<Type, Value>(slot.elemType)
1092  .Case([&](IntegerType type) -> Value {
1093  return buildMemsetValue(type.getWidth());
1094  })
1095  .Case([&](FloatType type) -> Value {
1096  Value intVal = buildMemsetValue(type.getWidth());
1097  return LLVM::BitcastOp::create(builder, op.getLoc(), type, intVal);
1098  })
1099  .Default([](Type) -> Value {
1100  llvm_unreachable(
1101  "getStored should not be called on memset to unsupported type");
1102  });
1103 }
1104 
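// Illustrative sketch, not part of the upstream implementation: the two ways
// memsetGetStored materializes the stored integer, restated over plain
// integers. For a known pattern byte the bytes are spliced together directly;
// for an unknown byte the same value is obtained by shift-and-or doubling.
// The helper names and the 0xAB example are hypothetical.
[[maybe_unused]] static llvm::APInt exampleSplatMemsetByte(uint8_t patternByte,
                                                           unsigned width) {
  assert(width % 8 == 0);
  llvm::APInt pattern(/*numBits=*/8, /*val=*/patternByte);
  llvm::APInt memsetVal(/*numBits=*/width, /*val=*/0);
  for (unsigned loBit = 0; loBit < width; loBit += 8)
    memsetVal.insertBits(pattern, loBit);
  return memsetVal; // exampleSplatMemsetByte(0xAB, 32) == 0xABABABAB
}

[[maybe_unused]] static uint64_t exampleSplatByDoubling(uint64_t byteVal,
                                                        unsigned width) {
  // Mirrors the runtime zext/shl/or loop above for widths up to 64 bits;
  // byteVal is assumed to fit in the low 8 bits.
  uint64_t coveredBits = 8;
  uint64_t currentValue = byteVal;
  while (coveredBits < width) {
    currentValue |= currentValue << coveredBits;
    coveredBits *= 2;
  }
  return currentValue;
}
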
1105 template <class MemsetIntr>
1106 static bool
1107 memsetCanUsesBeRemoved(MemsetIntr op, const MemorySlot &slot,
1108  const SmallPtrSetImpl<OpOperand *> &blockingUses,
1109  SmallVectorImpl<OpOperand *> &newBlockingUses,
1110  const DataLayout &dataLayout) {
1111  bool canConvertType =
1112  TypeSwitch<Type, bool>(slot.elemType)
1113  .Case<IntegerType, FloatType>([](auto type) {
1114  return type.getWidth() % 8 == 0 && type.getWidth() > 0;
1115  })
1116  .Default([](Type) { return false; });
1117  if (!canConvertType)
1118  return false;
1119 
1120  if (op.getIsVolatile())
1121  return false;
1122 
1123  return getStaticMemIntrLen(op) == dataLayout.getTypeSize(slot.elemType);
1124 }
1125 
1126 template <class MemsetIntr>
1127 static DeletionKind
1128 memsetRewire(MemsetIntr op, const DestructurableMemorySlot &slot,
1129  DenseMap<Attribute, MemorySlot> &subslots, OpBuilder &builder,
1130  const DataLayout &dataLayout) {
1131 
1132  std::optional<DenseMap<Attribute, Type>> types =
1133  cast<DestructurableTypeInterface>(slot.elemType).getSubelementIndexMap();
1134 
1135  IntegerAttr memsetLenAttr = createMemsetLenAttr(op);
1136 
1137  bool packed = false;
1138  if (auto structType = dyn_cast<LLVM::LLVMStructType>(slot.elemType))
1139  packed = structType.isPacked();
1140 
1141  Type i32 = IntegerType::get(op.getContext(), 32);
1142  uint64_t memsetLen = memsetLenAttr.getValue().getZExtValue();
1143  uint64_t covered = 0;
1144  for (size_t i = 0; i < types->size(); i++) {
1145  // Create indices on the fly to get elements in the right order.
1146  Attribute index = IntegerAttr::get(i32, i);
1147  Type elemType = types->at(index);
1148  uint64_t typeSize = dataLayout.getTypeSize(elemType);
1149 
1150  if (!packed)
1151  covered =
1152  llvm::alignTo(covered, dataLayout.getTypeABIAlignment(elemType));
1153 
1154  if (covered >= memsetLen)
1155  break;
1156 
1157  // If this subslot is used, apply a new memset to it.
1158  // Otherwise, only compute its offset within the original memset.
1159  if (subslots.contains(index)) {
1160  uint64_t newMemsetSize = std::min(memsetLen - covered, typeSize);
1161  createMemsetIntr(builder, op, memsetLenAttr, newMemsetSize, subslots,
1162  index);
1163  }
1164 
1165  covered += typeSize;
1166  }
1167 
1168  return DeletionKind::Delete;
1169 }
1170 
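// Illustrative sketch, not part of the upstream implementation: how
// memsetRewire above carves a single memset length across the fields of a
// hypothetical non-packed struct {i32, i32, i64} (sizes 4, 4, 8; alignments
// 4, 4, 8). A 12-byte memset yields per-field lengths {4, 4, 4}: the last
// field only receives the 4 bytes that remain of the original length.
[[maybe_unused]] static llvm::SmallVector<uint64_t> exampleSplitMemsetLen() {
  const uint64_t fieldSizes[] = {4, 4, 8};
  const uint64_t fieldAligns[] = {4, 4, 8};
  const uint64_t memsetLen = 12;
  llvm::SmallVector<uint64_t> perFieldLens;
  uint64_t covered = 0;
  for (unsigned i = 0; i < 3; ++i) {
    covered = llvm::alignTo(covered, fieldAligns[i]);
    if (covered >= memsetLen)
      break;
    perFieldLens.push_back(std::min(memsetLen - covered, fieldSizes[i]));
    covered += fieldSizes[i];
  }
  return perFieldLens; // {4, 4, 4}
}
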
1171 bool LLVM::MemsetOp::loadsFrom(const MemorySlot &slot) { return false; }
1172 
1173 bool LLVM::MemsetOp::storesTo(const MemorySlot &slot) {
1174  return getDst() == slot.ptr;
1175 }
1176 
1177 Value LLVM::MemsetOp::getStored(const MemorySlot &slot, OpBuilder &builder,
1178  Value reachingDef,
1179  const DataLayout &dataLayout) {
1180  return memsetGetStored(*this, slot, builder);
1181 }
1182 
1183 bool LLVM::MemsetOp::canUsesBeRemoved(
1184  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1185  SmallVectorImpl<OpOperand *> &newBlockingUses,
1186  const DataLayout &dataLayout) {
1187  return memsetCanUsesBeRemoved(*this, slot, blockingUses, newBlockingUses,
1188  dataLayout);
1189 }
1190 
1191 DeletionKind LLVM::MemsetOp::removeBlockingUses(
1192  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1193  OpBuilder &builder, Value reachingDefinition,
1194  const DataLayout &dataLayout) {
1195  return DeletionKind::Delete;
1196 }
1197 
1198 LogicalResult LLVM::MemsetOp::ensureOnlySafeAccesses(
1199  const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1200  const DataLayout &dataLayout) {
1201  return success(definitelyWritesOnlyWithinSlot(*this, slot, dataLayout));
1202 }
1203 
1204 bool LLVM::MemsetOp::canRewire(const DestructurableMemorySlot &slot,
1205  SmallPtrSetImpl<Attribute> &usedIndices,
1206  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1207  const DataLayout &dataLayout) {
1208  return memsetCanRewire(*this, slot, usedIndices, mustBeSafelyUsed,
1209  dataLayout);
1210 }
1211 
1212 DeletionKind LLVM::MemsetOp::rewire(const DestructurableMemorySlot &slot,
1213  DenseMap<Attribute, MemorySlot> &subslots,
1214  OpBuilder &builder,
1215  const DataLayout &dataLayout) {
1216  return memsetRewire(*this, slot, subslots, builder, dataLayout);
1217 }
1218 
1219 bool LLVM::MemsetInlineOp::loadsFrom(const MemorySlot &slot) { return false; }
1220 
1221 bool LLVM::MemsetInlineOp::storesTo(const MemorySlot &slot) {
1222  return getDst() == slot.ptr;
1223 }
1224 
1225 Value LLVM::MemsetInlineOp::getStored(const MemorySlot &slot,
1226  OpBuilder &builder, Value reachingDef,
1227  const DataLayout &dataLayout) {
1228  return memsetGetStored(*this, slot, builder);
1229 }
1230 
1231 bool LLVM::MemsetInlineOp::canUsesBeRemoved(
1232  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1233  SmallVectorImpl<OpOperand *> &newBlockingUses,
1234  const DataLayout &dataLayout) {
1235  return memsetCanUsesBeRemoved(*this, slot, blockingUses, newBlockingUses,
1236  dataLayout);
1237 }
1238 
1239 DeletionKind LLVM::MemsetInlineOp::removeBlockingUses(
1240  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1241  OpBuilder &builder, Value reachingDefinition,
1242  const DataLayout &dataLayout) {
1243  return DeletionKind::Delete;
1244 }
1245 
1246 LogicalResult LLVM::MemsetInlineOp::ensureOnlySafeAccesses(
1247  const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1248  const DataLayout &dataLayout) {
1249  return success(definitelyWritesOnlyWithinSlot(*this, slot, dataLayout));
1250 }
1251 
1252 bool LLVM::MemsetInlineOp::canRewire(
1253  const DestructurableMemorySlot &slot,
1254  SmallPtrSetImpl<Attribute> &usedIndices,
1255  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1256  const DataLayout &dataLayout) {
1257  return memsetCanRewire(*this, slot, usedIndices, mustBeSafelyUsed,
1258  dataLayout);
1259 }
1260 
1261 DeletionKind
1262 LLVM::MemsetInlineOp::rewire(const DestructurableMemorySlot &slot,
1263  DenseMap<Attribute, MemorySlot> &subslots,
1264  OpBuilder &builder, const DataLayout &dataLayout) {
1265  return memsetRewire(*this, slot, subslots, builder, dataLayout);
1266 }
1267 
1268 //===----------------------------------------------------------------------===//
1269 // Interfaces for memcpy/memmove
1270 //===----------------------------------------------------------------------===//
1271 
1272 template <class MemcpyLike>
1273 static bool memcpyLoadsFrom(MemcpyLike op, const MemorySlot &slot) {
1274  return op.getSrc() == slot.ptr;
1275 }
1276 
1277 template <class MemcpyLike>
1278 static bool memcpyStoresTo(MemcpyLike op, const MemorySlot &slot) {
1279  return op.getDst() == slot.ptr;
1280 }
1281 
1282 template <class MemcpyLike>
1283 static Value memcpyGetStored(MemcpyLike op, const MemorySlot &slot,
1284  OpBuilder &builder) {
1285  return LLVM::LoadOp::create(builder, op.getLoc(), slot.elemType, op.getSrc());
1286 }
1287 
1288 template <class MemcpyLike>
1289 static bool
1290 memcpyCanUsesBeRemoved(MemcpyLike op, const MemorySlot &slot,
1291  const SmallPtrSetImpl<OpOperand *> &blockingUses,
1292  SmallVectorImpl<OpOperand *> &newBlockingUses,
1293  const DataLayout &dataLayout) {
1294  // If source and destination are the same, memcpy behavior is undefined and
1295  // memmove is a no-op. Because there is no memory change happening here,
1296  // simplifying such operations is left to canonicalization.
1297  if (op.getDst() == op.getSrc())
1298  return false;
1299 
1300  if (op.getIsVolatile())
1301  return false;
1302 
1303  return getStaticMemIntrLen(op) == dataLayout.getTypeSize(slot.elemType);
1304 }
1305 
1306 template <class MemcpyLike>
1307 static DeletionKind
1308 memcpyRemoveBlockingUses(MemcpyLike op, const MemorySlot &slot,
1309  const SmallPtrSetImpl<OpOperand *> &blockingUses,
1310  OpBuilder &builder, Value reachingDefinition) {
1311  if (op.loadsFrom(slot))
1312  LLVM::StoreOp::create(builder, op.getLoc(), reachingDefinition,
1313  op.getDst());
1314  return DeletionKind::Delete;
1315 }
1316 
1317 template <class MemcpyLike>
1318 static LogicalResult
1319 memcpyEnsureOnlySafeAccesses(MemcpyLike op, const MemorySlot &slot,
1320  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed) {
1321  DataLayout dataLayout = DataLayout::closest(op);
1322  // While rewiring memcpy-like intrinsics only supports full copies, partial
1323  // copies are still safe accesses so it is enough to only check for writes
1324  // within bounds.
1325  return success(definitelyWritesOnlyWithinSlot(op, slot, dataLayout));
1326 }
1327 
1328 template <class MemcpyLike>
1329 static bool memcpyCanRewire(MemcpyLike op, const DestructurableMemorySlot &slot,
1330  SmallPtrSetImpl<Attribute> &usedIndices,
1331  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1332  const DataLayout &dataLayout) {
1333  if (op.getIsVolatile())
1334  return false;
1335 
1336  if (!cast<DestructurableTypeInterface>(slot.elemType).getSubelementIndexMap())
1337  return false;
1338 
1339  if (!areAllIndicesI32(slot))
1340  return false;
1341 
1342  // Only full copies are supported.
1343  if (getStaticMemIntrLen(op) != dataLayout.getTypeSize(slot.elemType))
1344  return false;
1345 
1346  if (op.getSrc() == slot.ptr)
1347  usedIndices.insert_range(llvm::make_first_range(slot.subelementTypes));
1348 
1349  return true;
1350 }
1351 
1352 namespace {
1353 
1354 template <class MemcpyLike>
1355 void createMemcpyLikeToReplace(OpBuilder &builder, const DataLayout &layout,
1356  MemcpyLike toReplace, Value dst, Value src,
1357  Type toCpy, bool isVolatile) {
1358  Value memcpySize =
1359  LLVM::ConstantOp::create(builder, toReplace.getLoc(),
1360  IntegerAttr::get(toReplace.getLen().getType(),
1361  layout.getTypeSize(toCpy)));
1362  MemcpyLike::create(builder, toReplace.getLoc(), dst, src, memcpySize,
1363  isVolatile);
1364 }
1365 
1366 template <>
1367 void createMemcpyLikeToReplace(OpBuilder &builder, const DataLayout &layout,
1368  LLVM::MemcpyInlineOp toReplace, Value dst,
1369  Value src, Type toCpy, bool isVolatile) {
1370  Type lenType = IntegerType::get(toReplace->getContext(),
1371  toReplace.getLen().getBitWidth());
1372  LLVM::MemcpyInlineOp::create(
1373  builder, toReplace.getLoc(), dst, src,
1374  IntegerAttr::get(lenType, layout.getTypeSize(toCpy)), isVolatile);
1375 }
1376 
1377 } // namespace
1378 
1379 /// Rewires a memcpy-like operation. Only copies to or from the full slot are
1380 /// supported.
1381 template <class MemcpyLike>
1382 static DeletionKind
1383 memcpyRewire(MemcpyLike op, const DestructurableMemorySlot &slot,
1384  DenseMap<Attribute, MemorySlot> &subslots, OpBuilder &builder,
1385  const DataLayout &dataLayout) {
1386  if (subslots.empty())
1387  return DeletionKind::Delete;
1388 
1389  assert((slot.ptr == op.getDst()) != (slot.ptr == op.getSrc()));
1390  bool isDst = slot.ptr == op.getDst();
1391 
1392 #ifndef NDEBUG
1393  size_t slotsTreated = 0;
1394 #endif
1395 
1396  // It was previously checked that index types are consistent, so this type can
1397  // be fetched now.
1398  Type indexType = cast<IntegerAttr>(subslots.begin()->first).getType();
1399  for (size_t i = 0, e = slot.subelementTypes.size(); i != e; i++) {
1400  Attribute index = IntegerAttr::get(indexType, i);
1401  if (!subslots.contains(index))
1402  continue;
1403  const MemorySlot &subslot = subslots.at(index);
1404 
1405 #ifndef NDEBUG
1406  slotsTreated++;
1407 #endif
1408 
1409  // First get a pointer to the equivalent of this subslot from the source
1410  // pointer.
1411  SmallVector<LLVM::GEPArg> gepIndices{
1412  0, static_cast<int32_t>(
1413  cast<IntegerAttr>(index).getValue().getZExtValue())};
1414  Value subslotPtrInOther = LLVM::GEPOp::create(
1415  builder, op.getLoc(), LLVM::LLVMPointerType::get(op.getContext()),
1416  slot.elemType, isDst ? op.getSrc() : op.getDst(), gepIndices);
1417 
1418  // Then create a new memcpy out of this source pointer.
1419  createMemcpyLikeToReplace(builder, dataLayout, op,
1420  isDst ? subslot.ptr : subslotPtrInOther,
1421  isDst ? subslotPtrInOther : subslot.ptr,
1422  subslot.elemType, op.getIsVolatile());
1423  }
1424 
1425  assert(subslots.size() == slotsTreated);
1426 
1427  return DeletionKind::Delete;
1428 }
1429 
1430 bool LLVM::MemcpyOp::loadsFrom(const MemorySlot &slot) {
1431  return memcpyLoadsFrom(*this, slot);
1432 }
1433 
1434 bool LLVM::MemcpyOp::storesTo(const MemorySlot &slot) {
1435  return memcpyStoresTo(*this, slot);
1436 }
1437 
1438 Value LLVM::MemcpyOp::getStored(const MemorySlot &slot, OpBuilder &builder,
1439  Value reachingDef,
1440  const DataLayout &dataLayout) {
1441  return memcpyGetStored(*this, slot, builder);
1442 }
1443 
1444 bool LLVM::MemcpyOp::canUsesBeRemoved(
1445  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1446  SmallVectorImpl<OpOperand *> &newBlockingUses,
1447  const DataLayout &dataLayout) {
1448  return memcpyCanUsesBeRemoved(*this, slot, blockingUses, newBlockingUses,
1449  dataLayout);
1450 }
1451 
1452 DeletionKind LLVM::MemcpyOp::removeBlockingUses(
1453  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1454  OpBuilder &builder, Value reachingDefinition,
1455  const DataLayout &dataLayout) {
1456  return memcpyRemoveBlockingUses(*this, slot, blockingUses, builder,
1457  reachingDefinition);
1458 }
1459 
1460 LogicalResult LLVM::MemcpyOp::ensureOnlySafeAccesses(
1461  const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1462  const DataLayout &dataLayout) {
1463  return memcpyEnsureOnlySafeAccesses(*this, slot, mustBeSafelyUsed);
1464 }
1465 
1466 bool LLVM::MemcpyOp::canRewire(const DestructurableMemorySlot &slot,
1467  SmallPtrSetImpl<Attribute> &usedIndices,
1468  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1469  const DataLayout &dataLayout) {
1470  return memcpyCanRewire(*this, slot, usedIndices, mustBeSafelyUsed,
1471  dataLayout);
1472 }
1473 
1474 DeletionKind LLVM::MemcpyOp::rewire(const DestructurableMemorySlot &slot,
1475  DenseMap<Attribute, MemorySlot> &subslots,
1476  OpBuilder &builder,
1477  const DataLayout &dataLayout) {
1478  return memcpyRewire(*this, slot, subslots, builder, dataLayout);
1479 }
1480 
1481 bool LLVM::MemcpyInlineOp::loadsFrom(const MemorySlot &slot) {
1482  return memcpyLoadsFrom(*this, slot);
1483 }
1484 
1485 bool LLVM::MemcpyInlineOp::storesTo(const MemorySlot &slot) {
1486  return memcpyStoresTo(*this, slot);
1487 }
1488 
1489 Value LLVM::MemcpyInlineOp::getStored(const MemorySlot &slot,
1490  OpBuilder &builder, Value reachingDef,
1491  const DataLayout &dataLayout) {
1492  return memcpyGetStored(*this, slot, builder);
1493 }
1494 
1495 bool LLVM::MemcpyInlineOp::canUsesBeRemoved(
1496  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1497  SmallVectorImpl<OpOperand *> &newBlockingUses,
1498  const DataLayout &dataLayout) {
1499  return memcpyCanUsesBeRemoved(*this, slot, blockingUses, newBlockingUses,
1500  dataLayout);
1501 }
1502 
1503 DeletionKind LLVM::MemcpyInlineOp::removeBlockingUses(
1504  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1505  OpBuilder &builder, Value reachingDefinition,
1506  const DataLayout &dataLayout) {
1507  return memcpyRemoveBlockingUses(*this, slot, blockingUses, builder,
1508  reachingDefinition);
1509 }
1510 
1511 LogicalResult LLVM::MemcpyInlineOp::ensureOnlySafeAccesses(
1512  const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1513  const DataLayout &dataLayout) {
1514  return memcpyEnsureOnlySafeAccesses(*this, slot, mustBeSafelyUsed);
1515 }
1516 
1517 bool LLVM::MemcpyInlineOp::canRewire(
1518  const DestructurableMemorySlot &slot,
1519  SmallPtrSetImpl<Attribute> &usedIndices,
1520  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1521  const DataLayout &dataLayout) {
1522  return memcpyCanRewire(*this, slot, usedIndices, mustBeSafelyUsed,
1523  dataLayout);
1524 }
1525 
1526 DeletionKind
1527 LLVM::MemcpyInlineOp::rewire(const DestructurableMemorySlot &slot,
1528  DenseMap<Attribute, MemorySlot> &subslots,
1529  OpBuilder &builder, const DataLayout &dataLayout) {
1530  return memcpyRewire(*this, slot, subslots, builder, dataLayout);
1531 }
1532 
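// MemmoveOp reuses the same memcpy-like helpers; the replacement copies keep
// the original operation kind.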
1533 bool LLVM::MemmoveOp::loadsFrom(const MemorySlot &slot) {
1534  return memcpyLoadsFrom(*this, slot);
1535 }
1536 
1537 bool LLVM::MemmoveOp::storesTo(const MemorySlot &slot) {
1538  return memcpyStoresTo(*this, slot);
1539 }
1540 
1541 Value LLVM::MemmoveOp::getStored(const MemorySlot &slot, OpBuilder &builder,
1542  Value reachingDef,
1543  const DataLayout &dataLayout) {
1544  return memcpyGetStored(*this, slot, builder);
1545 }
1546 
1547 bool LLVM::MemmoveOp::canUsesBeRemoved(
1548  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1549  SmallVectorImpl<OpOperand *> &newBlockingUses,
1550  const DataLayout &dataLayout) {
1551  return memcpyCanUsesBeRemoved(*this, slot, blockingUses, newBlockingUses,
1552  dataLayout);
1553 }
1554 
1555 DeletionKind LLVM::MemmoveOp::removeBlockingUses(
1556  const MemorySlot &slot, const SmallPtrSetImpl<OpOperand *> &blockingUses,
1557  OpBuilder &builder, Value reachingDefinition,
1558  const DataLayout &dataLayout) {
1559  return memcpyRemoveBlockingUses(*this, slot, blockingUses, builder,
1560  reachingDefinition);
1561 }
1562 
1563 LogicalResult LLVM::MemmoveOp::ensureOnlySafeAccesses(
1564  const MemorySlot &slot, SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1565  const DataLayout &dataLayout) {
1566  return memcpyEnsureOnlySafeAccesses(*this, slot, mustBeSafelyUsed);
1567 }
1568 
1569 bool LLVM::MemmoveOp::canRewire(const DestructurableMemorySlot &slot,
1570  SmallPtrSetImpl<Attribute> &usedIndices,
1571  SmallVectorImpl<MemorySlot> &mustBeSafelyUsed,
1572  const DataLayout &dataLayout) {
1573  return memcpyCanRewire(*this, slot, usedIndices, mustBeSafelyUsed,
1574  dataLayout);
1575 }
1576 
1577 DeletionKind LLVM::MemmoveOp::rewire(const DestructurableMemorySlot &slot,
1578  DenseMap<Attribute, MemorySlot> &subslots,
1579  OpBuilder &builder,
1580  const DataLayout &dataLayout) {
1581  return memcpyRewire(*this, slot, subslots, builder, dataLayout);
1582 }
1583 
1584 //===----------------------------------------------------------------------===//
1585 // Interfaces for destructurable types
1586 //===----------------------------------------------------------------------===//
1587 
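// LLVMStructType and LLVMArrayType implement the destructurable type
// interface by mapping i32 IntegerAttr indices to their subelement types.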
1588 std::optional<DenseMap<Attribute, Type>>
1589 LLVM::LLVMStructType::getSubelementIndexMap() const {
1590  Type i32 = IntegerType::get(getContext(), 32);
1591  DenseMap<Attribute, Type> destructured;
1592  for (const auto &[index, elemType] : llvm::enumerate(getBody()))
1593  destructured.insert({IntegerAttr::get(i32, index), elemType});
1594  return destructured;
1595 }
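// Illustrative example: for !llvm.struct<(i32, f64)> the returned map is
// {0 -> i32, 1 -> f64}, with the keys materialized as i32 IntegerAttrs.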
1596 
1597 Type LLVM::LLVMStructType::getTypeAtIndex(Attribute index) const {
1598  auto indexAttr = llvm::dyn_cast<IntegerAttr>(index);
1599  if (!indexAttr || !indexAttr.getType().isInteger(32))
1600  return {};
1601  int32_t indexInt = indexAttr.getInt();
1602  ArrayRef<Type> body = getBody();
1603  if (indexInt < 0 || body.size() <= static_cast<uint32_t>(indexInt))
1604  return {};
1605  return body[indexInt];
1606 }
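// A null Type is returned when the index is not an i32 attribute or is out of
// bounds; the array variant below follows the same convention.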
1607 
1608 std::optional<DenseMap<Attribute, Type>>
1609 LLVM::LLVMArrayType::getSubelementIndexMap() const {
1610  constexpr size_t maxArraySizeForDestructuring = 16;
1611  if (getNumElements() > maxArraySizeForDestructuring)
1612  return {};
1613  int32_t numElements = getNumElements();
1614 
1615  Type i32 = IntegerType::get(getContext(), 32);
1616  DenseMap<Attribute, Type> destructured;
1617  for (int32_t index = 0; index < numElements; ++index)
1618  destructured.insert({IntegerAttr::get(i32, index), getElementType()});
1619  return destructured;
1620 }
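// Illustrative example: !llvm.array<4 x i32> maps indices 0..3 to i32. Arrays
// with more than 16 elements are not destructured, presumably to bound the
// number of slots SROA has to create.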
1621 
1622 Type LLVM::LLVMArrayType::getTypeAtIndex(Attribute index) const {
1623  auto indexAttr = llvm::dyn_cast<IntegerAttr>(index);
1624  if (!indexAttr || !indexAttr.getType().isInteger(32))
1625  return {};
1626  int32_t indexInt = indexAttr.getInt();
1627  if (indexInt < 0 || getNumElements() <= static_cast<uint32_t>(indexInt))
1628  return {};
1629  return getElementType();
1630 }