MLIR  14.0.0git
NormalizeMemRefs.cpp
Go to the documentation of this file.
1 //===- NormalizeMemRefs.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an interprocedural pass to normalize memrefs to have
10 // identity layout maps.
11 //
12 //===----------------------------------------------------------------------===//
13 
#include "PassDetail.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/StandardOps/IR/Ops.h"
#include "mlir/Transforms/Passes.h"
#include "mlir/Transforms/Utils.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/Support/Debug.h"
21 
22 #define DEBUG_TYPE "normalize-memrefs"
23 
24 using namespace mlir;
25 
26 namespace {
27 
/// All memrefs passed across functions with non-trivial layout maps are
/// converted to ones with trivial identity layout ones.
/// If all the memref types/uses in a function are normalizable, we treat
/// such functions as normalizable. Also, if a normalizable function is known
/// to call a non-normalizable function, we treat that function as
/// non-normalizable as well. We assume external functions to be normalizable.
struct NormalizeMemRefs : public NormalizeMemRefsBase<NormalizeMemRefs> {
  /// Pass entry point: partitions the module's functions into normalizable /
  /// non-normalizable sets, then rewrites the normalizable ones.
  void runOnOperation() override;
  /// Rewrites memrefs in `funcOp` (alloc results, op results, block
  /// arguments) to identity layout; `moduleOp` is used for signature updates.
  void normalizeFuncOpMemRefs(FuncOp funcOp, ModuleOp moduleOp);
  /// Returns true if every memref in `funcOp` only has uses the replacement
  /// utilities can rewrite. External functions are assumed normalizable.
  bool areMemRefsNormalizable(FuncOp funcOp);
  /// Propagates `funcOp`'s (possibly changed) return types to its call sites,
  /// recursing into callers whose own signatures change as a result.
  void updateFunctionSignature(FuncOp funcOp, ModuleOp moduleOp);
  /// Removes `funcOp` and, transitively, its callers and callees from
  /// `normalizableFuncs`.
  void setCalleesAndCallersNonNormalizable(FuncOp funcOp, ModuleOp moduleOp,
                                           DenseSet<FuncOp> &normalizableFuncs);
  /// Returns a clone of `oldOp` with normalized memref result types, or
  /// `oldOp` itself when no result type changes.
  Operation *createOpResultsNormalized(FuncOp funcOp, Operation *oldOp);
};
43 
44 } // namespace
45 
46 std::unique_ptr<OperationPass<ModuleOp>> mlir::createNormalizeMemRefsPass() {
47  return std::make_unique<NormalizeMemRefs>();
48 }
49 
50 void NormalizeMemRefs::runOnOperation() {
51  LLVM_DEBUG(llvm::dbgs() << "Normalizing Memrefs...\n");
52  ModuleOp moduleOp = getOperation();
53  // We maintain all normalizable FuncOps in a DenseSet. It is initialized
54  // with all the functions within a module and then functions which are not
55  // normalizable are removed from this set.
56  // TODO: Change this to work on FuncLikeOp once there is an operation
57  // interface for it.
58  DenseSet<FuncOp> normalizableFuncs;
59  // Initialize `normalizableFuncs` with all the functions within a module.
60  moduleOp.walk([&](FuncOp funcOp) { normalizableFuncs.insert(funcOp); });
61 
62  // Traverse through all the functions applying a filter which determines
63  // whether that function is normalizable or not. All callers/callees of
64  // a non-normalizable function will also become non-normalizable even if
65  // they aren't passing any or specific non-normalizable memrefs. So,
66  // functions which calls or get called by a non-normalizable becomes non-
67  // normalizable functions themselves.
68  moduleOp.walk([&](FuncOp funcOp) {
69  if (normalizableFuncs.contains(funcOp)) {
70  if (!areMemRefsNormalizable(funcOp)) {
71  LLVM_DEBUG(llvm::dbgs()
72  << "@" << funcOp.getName()
73  << " contains ops that cannot normalize MemRefs\n");
74  // Since this function is not normalizable, we set all the caller
75  // functions and the callees of this function as not normalizable.
76  // TODO: Drop this conservative assumption in the future.
77  setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
78  normalizableFuncs);
79  }
80  }
81  });
82 
83  LLVM_DEBUG(llvm::dbgs() << "Normalizing " << normalizableFuncs.size()
84  << " functions\n");
85  // Those functions which can be normalized are subjected to normalization.
86  for (FuncOp &funcOp : normalizableFuncs)
87  normalizeFuncOpMemRefs(funcOp, moduleOp);
88 }
89 
90 /// Check whether all the uses of oldMemRef are either dereferencing uses or the
91 /// op is of type : DeallocOp, CallOp or ReturnOp. Only if these constraints
92 /// are satisfied will the value become a candidate for replacement.
93 /// TODO: Extend this for DimOps.
95  return llvm::all_of(opUsers, [](Operation *op) {
97  });
98 }
99 
/// Set all the calling functions and the callees of the function as not
/// normalizable. Recursive: marking a function non-normalizable propagates
/// outward through the whole caller/callee component containing `funcOp`.
/// The `normalizableFuncs` check at the top is also the recursion guard.
void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
    FuncOp funcOp, ModuleOp moduleOp, DenseSet<FuncOp> &normalizableFuncs) {
  // Already marked non-normalizable; nothing further to propagate.
  if (!normalizableFuncs.contains(funcOp))
    return;

  LLVM_DEBUG(
      llvm::dbgs() << "@" << funcOp.getName()
                   << " calls or is called by non-normalizable function\n");
  normalizableFuncs.erase(funcOp);
  // Caller of the function.
  Optional<SymbolTable::UseRange> symbolUses = funcOp.getSymbolUses(moduleOp);
  for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
    // TODO: Extend this for ops that are FunctionOpInterface. This would
    // require creating an OpInterface for FunctionOpInterface ops.
    FuncOp parentFuncOp = symbolUse.getUser()->getParentOfType<FuncOp>();
    for (FuncOp &funcOp : normalizableFuncs) {
      if (parentFuncOp == funcOp) {
        // The recursive call erases entries from `normalizableFuncs`, which
        // invalidates this inner loop's iterators — the `break` immediately
        // afterwards is required for correctness, not just an optimization.
        setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
                                            normalizableFuncs);
        break;
      }
    }
  }

  // Functions called by this function.
  funcOp.walk([&](CallOp callOp) {
    StringAttr callee = callOp.getCalleeAttr().getAttr();
    for (FuncOp &funcOp : normalizableFuncs) {
      // We compare FuncOp and callee's name.
      if (callee == funcOp.getNameAttr()) {
        // Same iterator-invalidation caveat as above: break right away after
        // the recursive call mutates the set.
        setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
                                            normalizableFuncs);
        break;
      }
    }
  });
}
139 
140 /// Check whether all the uses of AllocOps, CallOps and function arguments of a
141 /// function are either of dereferencing type or are uses in: DeallocOp, CallOp
142 /// or ReturnOp. Only if these constraints are satisfied will the function
143 /// become a candidate for normalization. We follow a conservative approach here
144 /// wherein even if the non-normalizable memref is not a part of the function's
145 /// argument or return type, we still label the entire function as
146 /// non-normalizable. We assume external functions to be normalizable.
147 bool NormalizeMemRefs::areMemRefsNormalizable(FuncOp funcOp) {
148  // We assume external functions to be normalizable.
149  if (funcOp.isExternal())
150  return true;
151 
152  if (funcOp
153  .walk([&](memref::AllocOp allocOp) -> WalkResult {
154  Value oldMemRef = allocOp.getResult();
155  if (!isMemRefNormalizable(oldMemRef.getUsers()))
156  return WalkResult::interrupt();
157  return WalkResult::advance();
158  })
159  .wasInterrupted())
160  return false;
161 
162  if (funcOp
163  .walk([&](CallOp callOp) -> WalkResult {
164  for (unsigned resIndex :
165  llvm::seq<unsigned>(0, callOp.getNumResults())) {
166  Value oldMemRef = callOp.getResult(resIndex);
167  if (oldMemRef.getType().isa<MemRefType>())
168  if (!isMemRefNormalizable(oldMemRef.getUsers()))
169  return WalkResult::interrupt();
170  }
171  return WalkResult::advance();
172  })
173  .wasInterrupted())
174  return false;
175 
176  for (unsigned argIndex : llvm::seq<unsigned>(0, funcOp.getNumArguments())) {
177  BlockArgument oldMemRef = funcOp.getArgument(argIndex);
178  if (oldMemRef.getType().isa<MemRefType>())
179  if (!isMemRefNormalizable(oldMemRef.getUsers()))
180  return false;
181  }
182 
183  return true;
184 }
185 
/// Fetch the updated argument list and result of the function and update the
/// function signature. This updates the function's return type at the caller
/// site and in case the return type is a normalized memref then it updates
/// the calling function's signature.
/// TODO: An update to the calling function signature is required only if the
/// returned value is in turn used in ReturnOp of the calling function.
///
/// \param funcOp   function whose normalized signature is being applied.
/// \param moduleOp module used to look up `funcOp`'s symbol uses (call sites).
void NormalizeMemRefs::updateFunctionSignature(FuncOp funcOp,
                                               ModuleOp moduleOp) {
  FunctionType functionType = funcOp.getType();
  SmallVector<Type, 4> resultTypes;
  FunctionType newFuncType;
  // Start from the current result types; entries are overwritten below when a
  // ReturnOp proves a result was normalized.
  resultTypes = llvm::to_vector<4>(functionType.getResults());

  // External function's signature was already updated in
  // 'normalizeFuncOpMemRefs()'.
  if (!funcOp.isExternal()) {
    SmallVector<Type, 8> argTypes;
    for (const auto &argEn : llvm::enumerate(funcOp.getArguments()))
      argTypes.push_back(argEn.value().getType());

    // Traverse ReturnOps to check if an update to the return type in the
    // function signature is required.
    funcOp.walk([&](ReturnOp returnOp) {
      for (const auto &operandEn : llvm::enumerate(returnOp.getOperands())) {
        Type opType = operandEn.value().getType();
        MemRefType memrefType = opType.dyn_cast<MemRefType>();
        // If type is not memref or if the memref type is same as that in
        // function's return signature then no update is required.
        if (!memrefType || memrefType == resultTypes[operandEn.index()])
          continue;
        // Update function's return type signature.
        // Return type gets normalized either as a result of function argument
        // normalization, AllocOp normalization or an update made at CallOp.
        // There can be many call flows inside a function and an update to a
        // specific ReturnOp has not yet been made. So we check that the result
        // memref type is normalized.
        // TODO: When selective normalization is implemented, handle multiple
        // results case where some are normalized, some aren't.
        if (memrefType.getLayout().isIdentity())
          resultTypes[operandEn.index()] = memrefType;
      }
    });

    // We create a new function type and modify the function signature with this
    // new type.
    newFuncType = FunctionType::get(&getContext(), /*inputs=*/argTypes,
                                    /*results=*/resultTypes);
  }

  // Since we update the function signature, it might affect the result types at
  // the caller site. Since this result might even be used by the caller
  // function in ReturnOps, the caller function's signature will also change.
  // Hence we record the caller function in 'funcOpsToUpdate' to update their
  // signature as well.
  llvm::SmallDenseSet<FuncOp, 8> funcOpsToUpdate;
  // We iterate over all symbolic uses of the function and update the return
  // type at the caller site.
  // NOTE(review): `symbolUses` is dereferenced without a check — this assumes
  // getSymbolUses() can always compute the uses within `moduleOp`; confirm.
  Optional<SymbolTable::UseRange> symbolUses = funcOp.getSymbolUses(moduleOp);
  for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
    Operation *userOp = symbolUse.getUser();
    OpBuilder builder(userOp);
    // When `userOp` can not be casted to `CallOp`, it is skipped. This assumes
    // that the non-CallOp has no memrefs to be replaced.
    // TODO: Handle cases where a non-CallOp symbol use of a function deals with
    // memrefs.
    auto callOp = dyn_cast<CallOp>(userOp);
    if (!callOp)
      continue;
    // Recreate the call with the (possibly updated) result types; the builder
    // inserts it right before the old call.
    Operation *newCallOp =
        builder.create<CallOp>(userOp->getLoc(), callOp.getCalleeAttr(),
                               resultTypes, userOp->getOperands());
    bool replacingMemRefUsesFailed = false;
    bool returnTypeChanged = false;
    for (unsigned resIndex : llvm::seq<unsigned>(0, userOp->getNumResults())) {
      OpResult oldResult = userOp->getResult(resIndex);
      OpResult newResult = newCallOp->getResult(resIndex);
      // This condition ensures that if the result is not of type memref or if
      // the resulting memref was already having a trivial map layout then we
      // need not perform any use replacement here.
      if (oldResult.getType() == newResult.getType())
        continue;
      AffineMap layoutMap =
          oldResult.getType().cast<MemRefType>().getLayout().getAffineMap();
      if (failed(replaceAllMemRefUsesWith(oldResult, /*newMemRef=*/newResult,
                                          /*extraIndices=*/{},
                                          /*indexRemap=*/layoutMap,
                                          /*extraOperands=*/{},
                                          /*symbolOperands=*/{},
                                          /*domOpFilter=*/nullptr,
                                          /*postDomOpFilter=*/nullptr,
                                          /*allowNonDereferencingOps=*/true,
                                          /*replaceInDeallocOp=*/true))) {
        // If it failed (due to escapes for example), bail out.
        // It should never hit this part of the code because it is called by
        // only those functions which are normalizable.
        newCallOp->erase();
        replacingMemRefUsesFailed = true;
        break;
      }
      returnTypeChanged = true;
    }
    if (replacingMemRefUsesFailed)
      continue;
    // Replace all uses for other non-memref result types.
    userOp->replaceAllUsesWith(newCallOp);
    userOp->erase();
    if (returnTypeChanged) {
      // Since the return type changed it might lead to a change in function's
      // signature.
      // TODO: If funcOp doesn't return any memref type then no need to update
      // signature.
      // TODO: Further optimization - Check if the memref is indeed part of
      // ReturnOp at the parentFuncOp and only then updation of signature is
      // required.
      // TODO: Extend this for ops that are FunctionOpInterface. This would
      // require creating an OpInterface for FunctionOpInterface ops.
      FuncOp parentFuncOp = newCallOp->getParentOfType<FuncOp>();
      funcOpsToUpdate.insert(parentFuncOp);
    }
  }
  // Because external function's signature is already updated in
  // 'normalizeFuncOpMemRefs()', we don't need to update it here again.
  if (!funcOp.isExternal())
    funcOp.setType(newFuncType);

  // Updating the signature type of those functions which call the current
  // function. Only if the return type of the current function has a normalized
  // memref will the caller function become a candidate for signature update.
  for (FuncOp parentFuncOp : funcOpsToUpdate)
    updateFunctionSignature(parentFuncOp, moduleOp);
}
317 
318 /// Normalizes the memrefs within a function which includes those arising as a
319 /// result of AllocOps, CallOps and function's argument. The ModuleOp argument
320 /// is used to help update function's signature after normalization.
321 void NormalizeMemRefs::normalizeFuncOpMemRefs(FuncOp funcOp,
322  ModuleOp moduleOp) {
323  // Turn memrefs' non-identity layouts maps into ones with identity. Collect
324  // alloc ops first and then process since normalizeMemRef replaces/erases ops
325  // during memref rewriting.
327  funcOp.walk([&](memref::AllocOp op) { allocOps.push_back(op); });
328  for (memref::AllocOp allocOp : allocOps)
329  (void)normalizeMemRef(&allocOp);
330 
331  // We use this OpBuilder to create new memref layout later.
332  OpBuilder b(funcOp);
333 
334  FunctionType functionType = funcOp.getType();
335  SmallVector<Location> functionArgLocs(llvm::map_range(
336  funcOp.getArguments(), [](BlockArgument arg) { return arg.getLoc(); }));
337  SmallVector<Type, 8> inputTypes;
338  // Walk over each argument of a function to perform memref normalization (if
339  for (unsigned argIndex :
340  llvm::seq<unsigned>(0, functionType.getNumInputs())) {
341  Type argType = functionType.getInput(argIndex);
342  MemRefType memrefType = argType.dyn_cast<MemRefType>();
343  // Check whether argument is of MemRef type. Any other argument type can
344  // simply be part of the final function signature.
345  if (!memrefType) {
346  inputTypes.push_back(argType);
347  continue;
348  }
349  // Fetch a new memref type after normalizing the old memref to have an
350  // identity map layout.
351  MemRefType newMemRefType = normalizeMemRefType(memrefType, b,
352  /*numSymbolicOperands=*/0);
353  if (newMemRefType == memrefType || funcOp.isExternal()) {
354  // Either memrefType already had an identity map or the map couldn't be
355  // transformed to an identity map.
356  inputTypes.push_back(newMemRefType);
357  continue;
358  }
359 
360  // Insert a new temporary argument with the new memref type.
361  BlockArgument newMemRef = funcOp.front().insertArgument(
362  argIndex, newMemRefType, functionArgLocs[argIndex]);
363  BlockArgument oldMemRef = funcOp.getArgument(argIndex + 1);
364  AffineMap layoutMap = memrefType.getLayout().getAffineMap();
365  // Replace all uses of the old memref.
366  if (failed(replaceAllMemRefUsesWith(oldMemRef, /*newMemRef=*/newMemRef,
367  /*extraIndices=*/{},
368  /*indexRemap=*/layoutMap,
369  /*extraOperands=*/{},
370  /*symbolOperands=*/{},
371  /*domOpFilter=*/nullptr,
372  /*postDomOpFilter=*/nullptr,
373  /*allowNonDereferencingOps=*/true,
374  /*replaceInDeallocOp=*/true))) {
375  // If it failed (due to escapes for example), bail out. Removing the
376  // temporary argument inserted previously.
377  funcOp.front().eraseArgument(argIndex);
378  continue;
379  }
380 
381  // All uses for the argument with old memref type were replaced
382  // successfully. So we remove the old argument now.
383  funcOp.front().eraseArgument(argIndex + 1);
384  }
385 
386  // Walk over normalizable operations to normalize memrefs of the operation
387  // results. When `op` has memrefs with affine map in the operation results,
388  // new operation containin normalized memrefs is created. Then, the memrefs
389  // are replaced. `CallOp` is skipped here because it is handled in
390  // `updateFunctionSignature()`.
391  funcOp.walk([&](Operation *op) {
393  op->getNumResults() > 0 && !isa<CallOp>(op) && !funcOp.isExternal()) {
394  // Create newOp containing normalized memref in the operation result.
395  Operation *newOp = createOpResultsNormalized(funcOp, op);
396  // When all of the operation results have no memrefs or memrefs without
397  // affine map, `newOp` is the same with `op` and following process is
398  // skipped.
399  if (op != newOp) {
400  bool replacingMemRefUsesFailed = false;
401  for (unsigned resIndex : llvm::seq<unsigned>(0, op->getNumResults())) {
402  // Replace all uses of the old memrefs.
403  Value oldMemRef = op->getResult(resIndex);
404  Value newMemRef = newOp->getResult(resIndex);
405  MemRefType oldMemRefType = oldMemRef.getType().dyn_cast<MemRefType>();
406  // Check whether the operation result is MemRef type.
407  if (!oldMemRefType)
408  continue;
409  MemRefType newMemRefType = newMemRef.getType().cast<MemRefType>();
410  if (oldMemRefType == newMemRefType)
411  continue;
412  // TODO: Assume single layout map. Multiple maps not supported.
413  AffineMap layoutMap = oldMemRefType.getLayout().getAffineMap();
414  if (failed(replaceAllMemRefUsesWith(oldMemRef,
415  /*newMemRef=*/newMemRef,
416  /*extraIndices=*/{},
417  /*indexRemap=*/layoutMap,
418  /*extraOperands=*/{},
419  /*symbolOperands=*/{},
420  /*domOpFilter=*/nullptr,
421  /*postDomOpFilter=*/nullptr,
422  /*allowNonDereferencingOps=*/true,
423  /*replaceInDeallocOp=*/true))) {
424  newOp->erase();
425  replacingMemRefUsesFailed = true;
426  continue;
427  }
428  }
429  if (!replacingMemRefUsesFailed) {
430  // Replace other ops with new op and delete the old op when the
431  // replacement succeeded.
432  op->replaceAllUsesWith(newOp);
433  op->erase();
434  }
435  }
436  }
437  });
438 
439  // In a normal function, memrefs in the return type signature gets normalized
440  // as a result of normalization of functions arguments, AllocOps or CallOps'
441  // result types. Since an external function doesn't have a body, memrefs in
442  // the return type signature can only get normalized by iterating over the
443  // individual return types.
444  if (funcOp.isExternal()) {
445  SmallVector<Type, 4> resultTypes;
446  for (unsigned resIndex :
447  llvm::seq<unsigned>(0, functionType.getNumResults())) {
448  Type resType = functionType.getResult(resIndex);
449  MemRefType memrefType = resType.dyn_cast<MemRefType>();
450  // Check whether result is of MemRef type. Any other argument type can
451  // simply be part of the final function signature.
452  if (!memrefType) {
453  resultTypes.push_back(resType);
454  continue;
455  }
456  // Computing a new memref type after normalizing the old memref to have an
457  // identity map layout.
458  MemRefType newMemRefType = normalizeMemRefType(memrefType, b,
459  /*numSymbolicOperands=*/0);
460  resultTypes.push_back(newMemRefType);
461  }
462 
463  FunctionType newFuncType =
464  FunctionType::get(&getContext(), /*inputs=*/inputTypes,
465  /*results=*/resultTypes);
466  // Setting the new function signature for this external function.
467  funcOp.setType(newFuncType);
468  }
469  updateFunctionSignature(funcOp, moduleOp);
470 }
471 
472 /// Create an operation containing normalized memrefs in the operation results.
473 /// When the results of `oldOp` have memrefs with affine map, the memrefs are
474 /// normalized, and new operation containing them in the operation results is
475 /// returned. If all of the results of `oldOp` have no memrefs or memrefs
476 /// without affine map, `oldOp` is returned without modification.
477 Operation *NormalizeMemRefs::createOpResultsNormalized(FuncOp funcOp,
478  Operation *oldOp) {
479  // Prepare OperationState to create newOp containing normalized memref in
480  // the operation results.
481  OperationState result(oldOp->getLoc(), oldOp->getName());
482  result.addOperands(oldOp->getOperands());
483  result.addAttributes(oldOp->getAttrs());
484  // Add normalized MemRefType to the OperationState.
485  SmallVector<Type, 4> resultTypes;
486  OpBuilder b(funcOp);
487  bool resultTypeNormalized = false;
488  for (unsigned resIndex : llvm::seq<unsigned>(0, oldOp->getNumResults())) {
489  auto resultType = oldOp->getResult(resIndex).getType();
490  MemRefType memrefType = resultType.dyn_cast<MemRefType>();
491  // Check whether the operation result is MemRef type.
492  if (!memrefType) {
493  resultTypes.push_back(resultType);
494  continue;
495  }
496  // Fetch a new memref type after normalizing the old memref.
497  MemRefType newMemRefType = normalizeMemRefType(memrefType, b,
498  /*numSymbolicOperands=*/0);
499  if (newMemRefType == memrefType) {
500  // Either memrefType already had an identity map or the map couldn't
501  // be transformed to an identity map.
502  resultTypes.push_back(memrefType);
503  continue;
504  }
505  resultTypes.push_back(newMemRefType);
506  resultTypeNormalized = true;
507  }
508  result.addTypes(resultTypes);
509  // When all of the results of `oldOp` have no memrefs or memrefs without
510  // affine map, `oldOp` is returned without modification.
511  if (resultTypeNormalized) {
512  OpBuilder bb(oldOp);
513  for (auto &oldRegion : oldOp->getRegions()) {
514  Region *newRegion = result.addRegion();
515  newRegion->takeBody(oldRegion);
516  }
517  return bb.createOperation(result);
518  }
519  return oldOp;
520 }
Include the generated interface declarations.
OpTy create(Location location, Args &&...args)
Create an operation of specific op type at the current insertion point.
Definition: Builders.h:430
This class contains a list of basic blocks and a link to the parent operation it is attached to...
Definition: Region.h:26
Operation is a basic unit of execution within MLIR.
Definition: Operation.h:28
MutableArrayRef< Region > getRegions()
Returns the regions held by this operation.
Definition: Operation.h:423
This is a value defined by a result of an operation.
Definition: Value.h:423
operand_range getOperands()
Returns an iterator on the underlying Value's.
Definition: Operation.h:247
ArrayRef< NamedAttribute > getAttrs()
Return all of the attributes on this operation.
Definition: Operation.h:308
bool failed(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a failure value...
Definition: LogicalResult.h:72
static bool isMemRefNormalizable(Value::user_range opUsers)
Check whether all the uses of oldMemRef are either dereferencing uses or the op is of type : DeallocO...
user_range getUsers() const
Definition: Value.h:212
void takeBody(Region &other)
Takes body of another region (that region will have no body after this operation completes).
Definition: Region.h:237
OpTy getParentOfType()
Return the closest surrounding parent operation that is of type 'OpTy'.
Definition: Operation.h:120
LogicalResult normalizeMemRef(memref::AllocOp *op)
Rewrites the memref defined by this alloc op to have an identity layout map and updates all its index...
Definition: Utils.cpp:634
void erase()
Remove this operation from its parent block and delete it.
Definition: Operation.cpp:424
void addOperands(ValueRange newOperands)
U dyn_cast() const
Definition: Types.h:244
Operation * createOperation(const OperationState &state)
Creates an operation given the fields represented as an OperationState.
Definition: Builders.cpp:380
bool hasTrait()
Returns true if the operation was registered with a particular trait, e.g.
Definition: Operation.h:470
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:206
void replaceAllUsesWith(ValuesT &&values)
Replace all uses of results of this operation with the provided 'values'.
Definition: Operation.h:154
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Definition: Operation.h:276
static WalkResult advance()
Definition: Visitors.h:51
Location getLoc()
The source location the operation was defined or derived from.
Definition: Operation.h:106
This represents an operation in an abstracted form, suitable for use with the builder APIs...
A multi-dimensional affine map. Affine map's are immutable like Type's, and they are uniqued...
Definition: AffineMap.h:38
static WalkResult interrupt()
Definition: Visitors.h:50
A utility result that is used to signal how to proceed with an ongoing walk:
Definition: Visitors.h:34
This class represents an argument of a Block.
Definition: Value.h:298
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition: Types.h:72
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:84
MemRefType normalizeMemRefType(MemRefType memrefType, OpBuilder builder, unsigned numSymbolicOperands)
Uses the old memref type map layout and computes the new memref type to have a new shape and a layout...
Definition: Utils.cpp:692
Type getType() const
Return the type of this value.
Definition: Value.h:117
void walk(Operation *op, function_ref< void(Region *)> callback, WalkOrder order)
Walk all of the regions, blocks, or operations nested under (and including) the given operation...
Definition: Visitors.cpp:24
std::unique_ptr< OperationPass< ModuleOp > > createNormalizeMemRefsPass()
Creates an interprocedural pass to normalize memrefs to have a trivial (identity) layout map...
unsigned getNumResults()
Return the number of results held by this operation.
Definition: Operation.h:273
OperationName getName()
The name of an operation is the key identifier for it.
Definition: Operation.h:57
bool isa() const
Definition: Types.h:234
This class represents a specific symbol use.
Definition: SymbolTable.h:144
This class helps build Operations.
Definition: Builders.h:177
U cast() const
Definition: Types.h:250
LogicalResult replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, ArrayRef< Value > extraIndices={}, AffineMap indexRemap=AffineMap(), ArrayRef< Value > extraOperands={}, ArrayRef< Value > symbolOperands={}, Operation *domOpFilter=nullptr, Operation *postDomOpFilter=nullptr, bool allowNonDereferencingOps=false, bool replaceInDeallocOp=false)
Replaces all "dereferencing" uses of oldMemRef with newMemRef while optionally remapping the old memr...
Definition: Utils.cpp:211