MLIR  16.0.0git
NormalizeMemRefs.cpp
Go to the documentation of this file.
1 //===- NormalizeMemRefs.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an interprocedural pass to normalize memrefs to have
10 // identity layout maps.
11 //
12 //===----------------------------------------------------------------------===//
13 
19 #include "llvm/ADT/SmallSet.h"
20 #include "llvm/Support/Debug.h"
21 
22 namespace mlir {
23 namespace memref {
24 #define GEN_PASS_DEF_NORMALIZEMEMREFS
25 #include "mlir/Dialect/MemRef/Transforms/Passes.h.inc"
26 } // namespace memref
27 } // namespace mlir
28 
29 #define DEBUG_TYPE "normalize-memrefs"
30 
31 using namespace mlir;
32 
namespace {

/// All memrefs passed across functions with non-trivial layout maps are
/// converted to ones with trivial identity layout ones.
/// If all the memref types/uses in a function are normalizable, we treat
/// such functions as normalizable. Also, if a normalizable function is known
/// to call a non-normalizable function, we treat that function as
/// non-normalizable as well. We assume external functions to be normalizable.
struct NormalizeMemRefs
    : public memref::impl::NormalizeMemRefsBase<NormalizeMemRefs> {
  // Pass entry point: collects all functions in the module, prunes the
  // non-normalizable ones (and their call neighborhoods), and normalizes
  // the rest.
  void runOnOperation() override;
  // Rewrites memrefs in `funcOp` (alloc results, op results, arguments) to
  // identity-layout types; `moduleOp` is used to update call sites.
  void normalizeFuncOpMemRefs(func::FuncOp funcOp, ModuleOp moduleOp);
  // Returns true if every non-identity-layout memref in `funcOp` only has
  // uses that the memref-replacement utility can rewrite.
  bool areMemRefsNormalizable(func::FuncOp funcOp);
  // Propagates `funcOp`'s updated result types to its call sites and,
  // transitively, to the calling functions' signatures.
  void updateFunctionSignature(func::FuncOp funcOp, ModuleOp moduleOp);
  // Removes `funcOp` and (recursively) all of its callers and callees from
  // `normalizableFuncs`.
  void setCalleesAndCallersNonNormalizable(
      func::FuncOp funcOp, ModuleOp moduleOp,
      DenseSet<func::FuncOp> &normalizableFuncs);
  // Returns a clone of `oldOp` whose result memref types are normalized, or
  // `oldOp` itself when no result type changes.
  Operation *createOpResultsNormalized(func::FuncOp funcOp, Operation *oldOp);
};

} // namespace
54 
55 std::unique_ptr<OperationPass<ModuleOp>>
57  return std::make_unique<NormalizeMemRefs>();
58 }
59 
60 void NormalizeMemRefs::runOnOperation() {
61  LLVM_DEBUG(llvm::dbgs() << "Normalizing Memrefs...\n");
62  ModuleOp moduleOp = getOperation();
63  // We maintain all normalizable FuncOps in a DenseSet. It is initialized
64  // with all the functions within a module and then functions which are not
65  // normalizable are removed from this set.
66  // TODO: Change this to work on FuncLikeOp once there is an operation
67  // interface for it.
68  DenseSet<func::FuncOp> normalizableFuncs;
69  // Initialize `normalizableFuncs` with all the functions within a module.
70  moduleOp.walk([&](func::FuncOp funcOp) { normalizableFuncs.insert(funcOp); });
71 
72  // Traverse through all the functions applying a filter which determines
73  // whether that function is normalizable or not. All callers/callees of
74  // a non-normalizable function will also become non-normalizable even if
75  // they aren't passing any or specific non-normalizable memrefs. So,
76  // functions which calls or get called by a non-normalizable becomes non-
77  // normalizable functions themselves.
78  moduleOp.walk([&](func::FuncOp funcOp) {
79  if (normalizableFuncs.contains(funcOp)) {
80  if (!areMemRefsNormalizable(funcOp)) {
81  LLVM_DEBUG(llvm::dbgs()
82  << "@" << funcOp.getName()
83  << " contains ops that cannot normalize MemRefs\n");
84  // Since this function is not normalizable, we set all the caller
85  // functions and the callees of this function as not normalizable.
86  // TODO: Drop this conservative assumption in the future.
87  setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
88  normalizableFuncs);
89  }
90  }
91  });
92 
93  LLVM_DEBUG(llvm::dbgs() << "Normalizing " << normalizableFuncs.size()
94  << " functions\n");
95  // Those functions which can be normalized are subjected to normalization.
96  for (func::FuncOp &funcOp : normalizableFuncs)
97  normalizeFuncOpMemRefs(funcOp, moduleOp);
98 }
99 
100 /// Check whether all the uses of oldMemRef are either dereferencing uses or the
101 /// op is of type : DeallocOp, CallOp or ReturnOp. Only if these constraints
102 /// are satisfied will the value become a candidate for replacement.
103 /// TODO: Extend this for DimOps.
105  return llvm::all_of(opUsers, [](Operation *op) {
107  });
108 }
109 
/// Set all the calling functions and the callees of the function as not
/// normalizable.
void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
    func::FuncOp funcOp, ModuleOp moduleOp,
    DenseSet<func::FuncOp> &normalizableFuncs) {
  // Already marked non-normalizable; nothing to propagate.
  if (!normalizableFuncs.contains(funcOp))
    return;

  LLVM_DEBUG(
      llvm::dbgs() << "@" << funcOp.getName()
                   << " calls or is called by non-normalizable function\n");
  normalizableFuncs.erase(funcOp);
  // Caller of the function.
  Optional<SymbolTable::UseRange> symbolUses = funcOp.getSymbolUses(moduleOp);
  for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
    // TODO: Extend this for ops that are FunctionOpInterface. This would
    // require creating an OpInterface for FunctionOpInterface ops.
    func::FuncOp parentFuncOp =
        symbolUse.getUser()->getParentOfType<func::FuncOp>();
    for (func::FuncOp &funcOp : normalizableFuncs) {
      if (parentFuncOp == funcOp) {
        // The recursive call erases `funcOp` from `normalizableFuncs`;
        // breaking out immediately avoids touching the invalidated iterator.
        setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
                                            normalizableFuncs);
        break;
      }
    }
  }

  // Functions called by this function.
  funcOp.walk([&](func::CallOp callOp) {
    StringAttr callee = callOp.getCalleeAttr().getAttr();
    for (func::FuncOp &funcOp : normalizableFuncs) {
      // We compare func::FuncOp and callee's name.
      if (callee == funcOp.getNameAttr()) {
        // Same pattern as above: recurse (which erases the entry), then
        // break before the loop advances the now-invalid iterator.
        setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
                                            normalizableFuncs);
        break;
      }
    }
  });
}
151 
152 /// Check whether all the uses of AllocOps, CallOps and function arguments of a
153 /// function are either of dereferencing type or are uses in: DeallocOp, CallOp
154 /// or ReturnOp. Only if these constraints are satisfied will the function
155 /// become a candidate for normalization. When the uses of a memref are
156 /// non-normalizable and the memref map layout is trivial (identity), we can
157 /// still label the entire function as normalizable. We assume external
158 /// functions to be normalizable.
159 bool NormalizeMemRefs::areMemRefsNormalizable(func::FuncOp funcOp) {
160  // We assume external functions to be normalizable.
161  if (funcOp.isExternal())
162  return true;
163 
164  if (funcOp
165  .walk([&](memref::AllocOp allocOp) -> WalkResult {
166  Value oldMemRef = allocOp.getResult();
167  if (!allocOp.getType().getLayout().isIdentity() &&
168  !isMemRefNormalizable(oldMemRef.getUsers()))
169  return WalkResult::interrupt();
170  return WalkResult::advance();
171  })
172  .wasInterrupted())
173  return false;
174 
175  if (funcOp
176  .walk([&](func::CallOp callOp) -> WalkResult {
177  for (unsigned resIndex :
178  llvm::seq<unsigned>(0, callOp.getNumResults())) {
179  Value oldMemRef = callOp.getResult(resIndex);
180  if (auto oldMemRefType =
181  oldMemRef.getType().dyn_cast<MemRefType>())
182  if (!oldMemRefType.getLayout().isIdentity() &&
183  !isMemRefNormalizable(oldMemRef.getUsers()))
184  return WalkResult::interrupt();
185  }
186  return WalkResult::advance();
187  })
188  .wasInterrupted())
189  return false;
190 
191  for (unsigned argIndex : llvm::seq<unsigned>(0, funcOp.getNumArguments())) {
192  BlockArgument oldMemRef = funcOp.getArgument(argIndex);
193  if (auto oldMemRefType = oldMemRef.getType().dyn_cast<MemRefType>())
194  if (!oldMemRefType.getLayout().isIdentity() &&
195  !isMemRefNormalizable(oldMemRef.getUsers()))
196  return false;
197  }
198 
199  return true;
200 }
201 
/// Fetch the updated argument list and result of the function and update the
/// function signature. This updates the function's return type at the caller
/// site and in case the return type is a normalized memref then it updates
/// the calling function's signature.
/// TODO: An update to the calling function signature is required only if the
/// returned value is in turn used in ReturnOp of the calling function.
void NormalizeMemRefs::updateFunctionSignature(func::FuncOp funcOp,
                                               ModuleOp moduleOp) {
  FunctionType functionType = funcOp.getFunctionType();
  SmallVector<Type, 4> resultTypes;
  FunctionType newFuncType;
  // Start from the current result types; normalized entries are patched in
  // below by inspecting the function's ReturnOps.
  resultTypes = llvm::to_vector<4>(functionType.getResults());

  // External function's signature was already updated in
  // 'normalizeFuncOpMemRefs()'.
  if (!funcOp.isExternal()) {
    SmallVector<Type, 8> argTypes;
    // Argument types are read from the (already rewritten) block arguments.
    for (const auto &argEn : llvm::enumerate(funcOp.getArguments()))
      argTypes.push_back(argEn.value().getType());

    // Traverse ReturnOps to check if an update to the return type in the
    // function signature is required.
    funcOp.walk([&](func::ReturnOp returnOp) {
      for (const auto &operandEn : llvm::enumerate(returnOp.getOperands())) {
        Type opType = operandEn.value().getType();
        MemRefType memrefType = opType.dyn_cast<MemRefType>();
        // If type is not memref or if the memref type is same as that in
        // function's return signature then no update is required.
        if (!memrefType || memrefType == resultTypes[operandEn.index()])
          continue;
        // Update function's return type signature.
        // Return type gets normalized either as a result of function argument
        // normalization, AllocOp normalization or an update made at CallOp.
        // There can be many call flows inside a function and an update to a
        // specific ReturnOp has not yet been made. So we check that the result
        // memref type is normalized.
        // TODO: When selective normalization is implemented, handle multiple
        // results case where some are normalized, some aren't.
        if (memrefType.getLayout().isIdentity())
          resultTypes[operandEn.index()] = memrefType;
      }
    });

    // We create a new function type and modify the function signature with this
    // new type.
    newFuncType = FunctionType::get(&getContext(), /*inputs=*/argTypes,
                                    /*results=*/resultTypes);
  }

  // Since we update the function signature, it might affect the result types at
  // the caller site. Since this result might even be used by the caller
  // function in ReturnOps, the caller function's signature will also change.
  // Hence we record the caller function in 'funcOpsToUpdate' to update their
  // signature as well.
  llvm::SmallDenseSet<func::FuncOp, 8> funcOpsToUpdate;
  // We iterate over all symbolic uses of the function and update the return
  // type at the caller site.
  Optional<SymbolTable::UseRange> symbolUses = funcOp.getSymbolUses(moduleOp);
  for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
    Operation *userOp = symbolUse.getUser();
    OpBuilder builder(userOp);
    // When `userOp` can not be casted to `CallOp`, it is skipped. This assumes
    // that the non-CallOp has no memrefs to be replaced.
    // TODO: Handle cases where a non-CallOp symbol use of a function deals with
    // memrefs.
    auto callOp = dyn_cast<func::CallOp>(userOp);
    if (!callOp)
      continue;
    // Rebuild the call with the (possibly) updated result types; the operands
    // are reused as-is.
    Operation *newCallOp =
        builder.create<func::CallOp>(userOp->getLoc(), callOp.getCalleeAttr(),
                                     resultTypes, userOp->getOperands());
    bool replacingMemRefUsesFailed = false;
    bool returnTypeChanged = false;
    for (unsigned resIndex : llvm::seq<unsigned>(0, userOp->getNumResults())) {
      OpResult oldResult = userOp->getResult(resIndex);
      OpResult newResult = newCallOp->getResult(resIndex);
      // This condition ensures that if the result is not of type memref or if
      // the resulting memref was already having a trivial map layout then we
      // need not perform any use replacement here.
      if (oldResult.getType() == newResult.getType())
        continue;
      AffineMap layoutMap =
          oldResult.getType().cast<MemRefType>().getLayout().getAffineMap();
      if (failed(replaceAllMemRefUsesWith(oldResult, /*newMemRef=*/newResult,
                                          /*extraIndices=*/{},
                                          /*indexRemap=*/layoutMap,
                                          /*extraOperands=*/{},
                                          /*symbolOperands=*/{},
                                          /*domOpFilter=*/nullptr,
                                          /*postDomOpFilter=*/nullptr,
                                          /*allowNonDereferencingOps=*/true,
                                          /*replaceInDeallocOp=*/true))) {
        // If it failed (due to escapes for example), bail out.
        // It should never hit this part of the code because it is called by
        // only those functions which are normalizable.
        newCallOp->erase();
        replacingMemRefUsesFailed = true;
        break;
      }
      returnTypeChanged = true;
    }
    if (replacingMemRefUsesFailed)
      continue;
    // Replace all uses for other non-memref result types.
    userOp->replaceAllUsesWith(newCallOp);
    userOp->erase();
    if (returnTypeChanged) {
      // Since the return type changed it might lead to a change in function's
      // signature.
      // TODO: If funcOp doesn't return any memref type then no need to update
      // signature.
      // TODO: Further optimization - Check if the memref is indeed part of
      // ReturnOp at the parentFuncOp and only then updation of signature is
      // required.
      // TODO: Extend this for ops that are FunctionOpInterface. This would
      // require creating an OpInterface for FunctionOpInterface ops.
      func::FuncOp parentFuncOp = newCallOp->getParentOfType<func::FuncOp>();
      funcOpsToUpdate.insert(parentFuncOp);
    }
  }
  // Because external function's signature is already updated in
  // 'normalizeFuncOpMemRefs()', we don't need to update it here again.
  if (!funcOp.isExternal())
    funcOp.setType(newFuncType);

  // Updating the signature type of those functions which call the current
  // function. Only if the return type of the current function has a normalized
  // memref will the caller function become a candidate for signature update.
  for (func::FuncOp parentFuncOp : funcOpsToUpdate)
    updateFunctionSignature(parentFuncOp, moduleOp);
}
333 
334 /// Normalizes the memrefs within a function which includes those arising as a
335 /// result of AllocOps, CallOps and function's argument. The ModuleOp argument
336 /// is used to help update function's signature after normalization.
337 void NormalizeMemRefs::normalizeFuncOpMemRefs(func::FuncOp funcOp,
338  ModuleOp moduleOp) {
339  // Turn memrefs' non-identity layouts maps into ones with identity. Collect
340  // alloc ops first and then process since normalizeMemRef replaces/erases ops
341  // during memref rewriting.
343  funcOp.walk([&](memref::AllocOp op) { allocOps.push_back(op); });
344  for (memref::AllocOp allocOp : allocOps)
345  (void)normalizeMemRef(&allocOp);
346 
347  // We use this OpBuilder to create new memref layout later.
348  OpBuilder b(funcOp);
349 
350  FunctionType functionType = funcOp.getFunctionType();
351  SmallVector<Location> functionArgLocs(llvm::map_range(
352  funcOp.getArguments(), [](BlockArgument arg) { return arg.getLoc(); }));
353  SmallVector<Type, 8> inputTypes;
354  // Walk over each argument of a function to perform memref normalization (if
355  for (unsigned argIndex :
356  llvm::seq<unsigned>(0, functionType.getNumInputs())) {
357  Type argType = functionType.getInput(argIndex);
358  MemRefType memrefType = argType.dyn_cast<MemRefType>();
359  // Check whether argument is of MemRef type. Any other argument type can
360  // simply be part of the final function signature.
361  if (!memrefType) {
362  inputTypes.push_back(argType);
363  continue;
364  }
365  // Fetch a new memref type after normalizing the old memref to have an
366  // identity map layout.
367  MemRefType newMemRefType = normalizeMemRefType(memrefType,
368  /*numSymbolicOperands=*/0);
369  if (newMemRefType == memrefType || funcOp.isExternal()) {
370  // Either memrefType already had an identity map or the map couldn't be
371  // transformed to an identity map.
372  inputTypes.push_back(newMemRefType);
373  continue;
374  }
375 
376  // Insert a new temporary argument with the new memref type.
377  BlockArgument newMemRef = funcOp.front().insertArgument(
378  argIndex, newMemRefType, functionArgLocs[argIndex]);
379  BlockArgument oldMemRef = funcOp.getArgument(argIndex + 1);
380  AffineMap layoutMap = memrefType.getLayout().getAffineMap();
381  // Replace all uses of the old memref.
382  if (failed(replaceAllMemRefUsesWith(oldMemRef, /*newMemRef=*/newMemRef,
383  /*extraIndices=*/{},
384  /*indexRemap=*/layoutMap,
385  /*extraOperands=*/{},
386  /*symbolOperands=*/{},
387  /*domOpFilter=*/nullptr,
388  /*postDomOpFilter=*/nullptr,
389  /*allowNonDereferencingOps=*/true,
390  /*replaceInDeallocOp=*/true))) {
391  // If it failed (due to escapes for example), bail out. Removing the
392  // temporary argument inserted previously.
393  funcOp.front().eraseArgument(argIndex);
394  continue;
395  }
396 
397  // All uses for the argument with old memref type were replaced
398  // successfully. So we remove the old argument now.
399  funcOp.front().eraseArgument(argIndex + 1);
400  }
401 
402  // Walk over normalizable operations to normalize memrefs of the operation
403  // results. When `op` has memrefs with affine map in the operation results,
404  // new operation containin normalized memrefs is created. Then, the memrefs
405  // are replaced. `CallOp` is skipped here because it is handled in
406  // `updateFunctionSignature()`.
407  funcOp.walk([&](Operation *op) {
409  op->getNumResults() > 0 && !isa<func::CallOp>(op) &&
410  !funcOp.isExternal()) {
411  // Create newOp containing normalized memref in the operation result.
412  Operation *newOp = createOpResultsNormalized(funcOp, op);
413  // When all of the operation results have no memrefs or memrefs without
414  // affine map, `newOp` is the same with `op` and following process is
415  // skipped.
416  if (op != newOp) {
417  bool replacingMemRefUsesFailed = false;
418  for (unsigned resIndex : llvm::seq<unsigned>(0, op->getNumResults())) {
419  // Replace all uses of the old memrefs.
420  Value oldMemRef = op->getResult(resIndex);
421  Value newMemRef = newOp->getResult(resIndex);
422  MemRefType oldMemRefType = oldMemRef.getType().dyn_cast<MemRefType>();
423  // Check whether the operation result is MemRef type.
424  if (!oldMemRefType)
425  continue;
426  MemRefType newMemRefType = newMemRef.getType().cast<MemRefType>();
427  if (oldMemRefType == newMemRefType)
428  continue;
429  // TODO: Assume single layout map. Multiple maps not supported.
430  AffineMap layoutMap = oldMemRefType.getLayout().getAffineMap();
431  if (failed(replaceAllMemRefUsesWith(oldMemRef,
432  /*newMemRef=*/newMemRef,
433  /*extraIndices=*/{},
434  /*indexRemap=*/layoutMap,
435  /*extraOperands=*/{},
436  /*symbolOperands=*/{},
437  /*domOpFilter=*/nullptr,
438  /*postDomOpFilter=*/nullptr,
439  /*allowNonDereferencingOps=*/true,
440  /*replaceInDeallocOp=*/true))) {
441  newOp->erase();
442  replacingMemRefUsesFailed = true;
443  continue;
444  }
445  }
446  if (!replacingMemRefUsesFailed) {
447  // Replace other ops with new op and delete the old op when the
448  // replacement succeeded.
449  op->replaceAllUsesWith(newOp);
450  op->erase();
451  }
452  }
453  }
454  });
455 
456  // In a normal function, memrefs in the return type signature gets normalized
457  // as a result of normalization of functions arguments, AllocOps or CallOps'
458  // result types. Since an external function doesn't have a body, memrefs in
459  // the return type signature can only get normalized by iterating over the
460  // individual return types.
461  if (funcOp.isExternal()) {
462  SmallVector<Type, 4> resultTypes;
463  for (unsigned resIndex :
464  llvm::seq<unsigned>(0, functionType.getNumResults())) {
465  Type resType = functionType.getResult(resIndex);
466  MemRefType memrefType = resType.dyn_cast<MemRefType>();
467  // Check whether result is of MemRef type. Any other argument type can
468  // simply be part of the final function signature.
469  if (!memrefType) {
470  resultTypes.push_back(resType);
471  continue;
472  }
473  // Computing a new memref type after normalizing the old memref to have an
474  // identity map layout.
475  MemRefType newMemRefType = normalizeMemRefType(memrefType,
476  /*numSymbolicOperands=*/0);
477  resultTypes.push_back(newMemRefType);
478  }
479 
480  FunctionType newFuncType =
481  FunctionType::get(&getContext(), /*inputs=*/inputTypes,
482  /*results=*/resultTypes);
483  // Setting the new function signature for this external function.
484  funcOp.setType(newFuncType);
485  }
486  updateFunctionSignature(funcOp, moduleOp);
487 }
488 
489 /// Create an operation containing normalized memrefs in the operation results.
490 /// When the results of `oldOp` have memrefs with affine map, the memrefs are
491 /// normalized, and new operation containing them in the operation results is
492 /// returned. If all of the results of `oldOp` have no memrefs or memrefs
493 /// without affine map, `oldOp` is returned without modification.
494 Operation *NormalizeMemRefs::createOpResultsNormalized(func::FuncOp funcOp,
495  Operation *oldOp) {
496  // Prepare OperationState to create newOp containing normalized memref in
497  // the operation results.
498  OperationState result(oldOp->getLoc(), oldOp->getName());
499  result.addOperands(oldOp->getOperands());
500  result.addAttributes(oldOp->getAttrs());
501  // Add normalized MemRefType to the OperationState.
502  SmallVector<Type, 4> resultTypes;
503  OpBuilder b(funcOp);
504  bool resultTypeNormalized = false;
505  for (unsigned resIndex : llvm::seq<unsigned>(0, oldOp->getNumResults())) {
506  auto resultType = oldOp->getResult(resIndex).getType();
507  MemRefType memrefType = resultType.dyn_cast<MemRefType>();
508  // Check whether the operation result is MemRef type.
509  if (!memrefType) {
510  resultTypes.push_back(resultType);
511  continue;
512  }
513  // Fetch a new memref type after normalizing the old memref.
514  MemRefType newMemRefType = normalizeMemRefType(memrefType,
515  /*numSymbolicOperands=*/0);
516  if (newMemRefType == memrefType) {
517  // Either memrefType already had an identity map or the map couldn't
518  // be transformed to an identity map.
519  resultTypes.push_back(memrefType);
520  continue;
521  }
522  resultTypes.push_back(newMemRefType);
523  resultTypeNormalized = true;
524  }
525  result.addTypes(resultTypes);
526  // When all of the results of `oldOp` have no memrefs or memrefs without
527  // affine map, `oldOp` is returned without modification.
528  if (resultTypeNormalized) {
529  OpBuilder bb(oldOp);
530  for (auto &oldRegion : oldOp->getRegions()) {
531  Region *newRegion = result.addRegion();
532  newRegion->takeBody(oldRegion);
533  }
534  return bb.create(result);
535  }
536  return oldOp;
537 }
static bool isMemRefNormalizable(Value::user_range opUsers)
Check whether all the uses of oldMemRef are either dereferencing uses or the op is of type : DeallocO...
A multi-dimensional affine map Affine map's are immutable like Type's, and they are uniqued.
Definition: AffineMap.h:42
This class represents an argument of a Block.
Definition: Value.h:296
This class helps build Operations.
Definition: Builders.h:198
This is a value defined by a result of an operation.
Definition: Value.h:442
Operation is a basic unit of execution within MLIR.
Definition: Operation.h:31
bool hasTrait()
Returns true if the operation was registered with a particular trait, e.g.
Definition: Operation.h:528
OpResult getResult(unsigned idx)
Get the 'idx'th result of this operation.
Definition: Operation.h:324
Location getLoc()
The source location the operation was defined or derived from.
Definition: Operation.h:154
static Operation * create(Location location, OperationName name, TypeRange resultTypes, ValueRange operands, NamedAttrList &&attributes, BlockRange successors, unsigned numRegions)
Create a new Operation with the specific fields.
Definition: Operation.cpp:49
ArrayRef< NamedAttribute > getAttrs()
Return all of the attributes on this operation.
Definition: Operation.h:356
OpTy getParentOfType()
Return the closest surrounding parent operation that is of type 'OpTy'.
Definition: Operation.h:169
MutableArrayRef< Region > getRegions()
Returns the regions held by this operation.
Definition: Operation.h:480
OperationName getName()
The name of an operation is the key identifier for it.
Definition: Operation.h:50
operand_range getOperands()
Returns an iterator on the underlying Value's.
Definition: Operation.h:295
void replaceAllUsesWith(ValuesT &&values)
Replace all uses of results of this operation with the provided 'values'.
Definition: Operation.h:203
void erase()
Remove this operation from its parent block and delete it.
Definition: Operation.cpp:418
unsigned getNumResults()
Return the number of results held by this operation.
Definition: Operation.h:321
This class contains a list of basic blocks and a link to the parent operation it is attached to.
Definition: Region.h:26
void takeBody(Region &other)
Takes body of another region (that region will have no body after this operation completes).
Definition: Region.h:242
This class represents a specific symbol use.
Definition: SymbolTable.h:147
Instances of the Type class are uniqued, have an immutable identifier and an optional mutable compone...
Definition: Types.h:74
U cast() const
Definition: Types.h:280
U dyn_cast() const
Definition: Types.h:270
This class represents an instance of an SSA value in the MLIR system, representing a computable value...
Definition: Value.h:85
Type getType() const
Return the type of this value.
Definition: Value.h:114
user_range getUsers() const
Definition: Value.h:209
A utility result that is used to signal how to proceed with an ongoing walk:
Definition: Visitors.h:34
void walk(Operation *op, function_ref< void(Region *)> callback, WalkOrder order)
Walk all of the regions, blocks, or operations nested under (and including) the given operation.
Definition: Visitors.cpp:24
constexpr void enumerate(std::tuple< Tys... > &tuple, CallbackT &&callback)
Definition: Matchers.h:230
std::unique_ptr< OperationPass< ModuleOp > > createNormalizeMemRefsPass()
Creates an interprocedural pass to normalize memrefs to have a trivial (identity) layout map.
Include the generated interface declarations.
LogicalResult replaceAllMemRefUsesWith(Value oldMemRef, Value newMemRef, ArrayRef< Value > extraIndices={}, AffineMap indexRemap=AffineMap(), ArrayRef< Value > extraOperands={}, ArrayRef< Value > symbolOperands={}, Operation *domOpFilter=nullptr, Operation *postDomOpFilter=nullptr, bool allowNonDereferencingOps=false, bool replaceInDeallocOp=false)
Replaces all "dereferencing" uses of oldMemRef with newMemRef while optionally remapping the old memr...
Definition: Utils.cpp:1261
LogicalResult normalizeMemRef(memref::AllocOp *op)
Rewrites the memref defined by this alloc op to have an identity layout map and updates all its index...
Definition: Utils.cpp:1685
MemRefType normalizeMemRefType(MemRefType memrefType, unsigned numSymbolicOperands)
Normalizes memrefType so that the affine layout map of the memref is transformed to an identity map w...
Definition: Utils.cpp:1743
bool failed(LogicalResult result)
Utility function that returns true if the provided LogicalResult corresponds to a failure value.
Definition: LogicalResult.h:72
This represents an operation in an abstracted form, suitable for use with the builder APIs.