//===- NormalizeMemRefs.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an interprocedural pass to normalize memrefs to have
// identity layout maps.
//
//===----------------------------------------------------------------------===//

#include "mlir/Dialect/Affine/Utils.h"
#include "mlir/Dialect/Func/IR/FuncOps.h"
#include "mlir/Dialect/MemRef/IR/MemRef.h"
#include "mlir/Dialect/MemRef/Transforms/Passes.h"
#include "llvm/Support/Debug.h"

namespace mlir {
namespace memref {
#define GEN_PASS_DEF_NORMALIZEMEMREFSPASS
#include "mlir/Dialect/MemRef/Transforms/Passes.h.inc"
} // namespace memref
} // namespace mlir

#define DEBUG_TYPE "normalize-memrefs"

using namespace mlir;
using namespace mlir::affine;
using namespace mlir::memref;

namespace {
/// All memrefs passed across functions with non-trivial layout maps are
/// converted to ones with identity layout maps.
/// If all the memref types/uses in a function are normalizable, we treat
/// the function as normalizable. Also, if a normalizable function is known
/// to call a non-normalizable function, we treat that function as
/// non-normalizable as well. We assume external functions to be normalizable.
struct NormalizeMemRefs
    : public memref::impl::NormalizeMemRefsPassBase<NormalizeMemRefs> {
  void runOnOperation() override;
  void normalizeFuncOpMemRefs(func::FuncOp funcOp, ModuleOp moduleOp);
  bool areMemRefsNormalizable(func::FuncOp funcOp);
  void updateFunctionSignature(func::FuncOp funcOp, ModuleOp moduleOp);
  void setCalleesAndCallersNonNormalizable(
      func::FuncOp funcOp, ModuleOp moduleOp,
      DenseSet<func::FuncOp> &normalizableFuncs);
  Operation *createOpResultsNormalized(func::FuncOp funcOp, Operation *oldOp);
};

} // namespace
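
// Illustrative example (assumption, not part of the original source): with
//   #tile = affine_map<(d0) -> (d0 floordiv 4, d0 mod 4)>
// normalization rewrites a type such as
//   memref<16xf64, #tile>
// into the identity-layout type
//   memref<4x4xf64>
// and remaps every dereferencing use through #tile accordingly.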

void NormalizeMemRefs::runOnOperation() {
  LLVM_DEBUG(llvm::dbgs() << "Normalizing Memrefs...\n");
  ModuleOp moduleOp = getOperation();
  // We maintain all normalizable FuncOps in a DenseSet. It is initialized
  // with all the functions within a module; functions that turn out not to
  // be normalizable are then removed from this set.
  // TODO: Change this to work on FuncLikeOp once there is an operation
  // interface for it.
  DenseSet<func::FuncOp> normalizableFuncs;
  // Initialize `normalizableFuncs` with all the functions within a module.
  moduleOp.walk([&](func::FuncOp funcOp) { normalizableFuncs.insert(funcOp); });

  // Traverse all functions, applying a filter that determines whether each
  // function is normalizable. All callers/callees of a non-normalizable
  // function also become non-normalizable, even if they don't pass any
  // non-normalizable memrefs themselves: functions that call or are called
  // by a non-normalizable function become non-normalizable as well.
  moduleOp.walk([&](func::FuncOp funcOp) {
    if (normalizableFuncs.contains(funcOp)) {
      if (!areMemRefsNormalizable(funcOp)) {
        LLVM_DEBUG(llvm::dbgs()
                   << "@" << funcOp.getName()
                   << " contains ops that cannot normalize MemRefs\n");
        // Since this function is not normalizable, we set all the caller
        // functions and the callees of this function as not normalizable.
        // TODO: Drop this conservative assumption in the future.
        setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
                                            normalizableFuncs);
      }
    }
  });

  LLVM_DEBUG(llvm::dbgs() << "Normalizing " << normalizableFuncs.size()
                          << " functions\n");
  // Those functions which can be normalized are subjected to normalization.
  for (func::FuncOp &funcOp : normalizableFuncs)
    normalizeFuncOpMemRefs(funcOp, moduleOp);
}

/// Check whether all the uses of oldMemRef are either dereferencing uses, or
/// the user op is one of: DeallocOp, CallOp or ReturnOp. Only if these
/// constraints are satisfied will the value become a candidate for
/// replacement.
/// TODO: Extend this for DimOps.
static bool isMemRefNormalizable(Value::user_range opUsers) {
  return llvm::all_of(opUsers, [](Operation *op) {
    return op->hasTrait<OpTrait::MemRefsNormalizable>();
  });
}
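
// Illustrative note (assumption, not part of the original source): ops such as
// memref.dealloc, func.call and func.return declare the MemRefsNormalizable
// trait, as do affine dereferencing ops like affine.load/affine.store. A user
// op that lacks the trait makes its memref, and hence the enclosing function,
// non-normalizable.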

/// Set all the calling functions and the callees of the function as not
/// normalizable.
void NormalizeMemRefs::setCalleesAndCallersNonNormalizable(
    func::FuncOp funcOp, ModuleOp moduleOp,
    DenseSet<func::FuncOp> &normalizableFuncs) {
  if (!normalizableFuncs.contains(funcOp))
    return;

  LLVM_DEBUG(
      llvm::dbgs() << "@" << funcOp.getName()
                   << " calls or is called by non-normalizable function\n");
  normalizableFuncs.erase(funcOp);
  // Callers of the function.
  std::optional<SymbolTable::UseRange> symbolUses =
      funcOp.getSymbolUses(moduleOp);
  for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
    // TODO: Extend this for ops that are FunctionOpInterface. This would
    // require creating an OpInterface for FunctionOpInterface ops.
    func::FuncOp parentFuncOp =
        symbolUse.getUser()->getParentOfType<func::FuncOp>();
    for (func::FuncOp &funcOp : normalizableFuncs) {
      if (parentFuncOp == funcOp) {
        setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
                                            normalizableFuncs);
        break;
      }
    }
  }

  // Functions called by this function.
  funcOp.walk([&](func::CallOp callOp) {
    StringAttr callee = callOp.getCalleeAttr().getAttr();
    for (func::FuncOp &funcOp : normalizableFuncs) {
      // We compare the func::FuncOp's name with the callee's name.
      if (callee == funcOp.getNameAttr()) {
        setCalleesAndCallersNonNormalizable(funcOp, moduleOp,
                                            normalizableFuncs);
        break;
      }
    }
  });
}
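
// Illustrative example (assumption, not part of the original source): if @A
// calls @B and @B calls @C, and @C is found non-normalizable, the recursion
// above first erases @C from `normalizableFuncs`, then its caller @B, and
// then @B's caller @A, so the whole call chain is conservatively left
// unnormalized.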

/// Check whether all the uses of AllocOps, AllocaOps, CallOps and function
/// arguments of a function are either of dereferencing type or are uses in:
/// DeallocOp, CallOp or ReturnOp. Only if these constraints are satisfied will
/// the function become a candidate for normalization. When the uses of a memref
/// are non-normalizable but the memref's layout map is trivial (identity), we
/// can still label the entire function as normalizable. We assume external
/// functions to be normalizable.
bool NormalizeMemRefs::areMemRefsNormalizable(func::FuncOp funcOp) {
  // We assume external functions to be normalizable.
  if (funcOp.isExternal())
    return true;

  if (funcOp
          .walk([&](AllocOp allocOp) -> WalkResult {
            Value oldMemRef = allocOp.getResult();
            if (!allocOp.getType().getLayout().isIdentity() &&
                !isMemRefNormalizable(oldMemRef.getUsers()))
              return WalkResult::interrupt();
            return WalkResult::advance();
          })
          .wasInterrupted())
    return false;

  if (funcOp
          .walk([&](AllocaOp allocaOp) -> WalkResult {
            Value oldMemRef = allocaOp.getResult();
            if (!allocaOp.getType().getLayout().isIdentity() &&
                !isMemRefNormalizable(oldMemRef.getUsers()))
              return WalkResult::interrupt();
            return WalkResult::advance();
          })
          .wasInterrupted())
    return false;

  if (funcOp
          .walk([&](func::CallOp callOp) -> WalkResult {
            for (unsigned resIndex :
                 llvm::seq<unsigned>(0, callOp.getNumResults())) {
              Value oldMemRef = callOp.getResult(resIndex);
              if (auto oldMemRefType =
                      dyn_cast<MemRefType>(oldMemRef.getType()))
                if (!oldMemRefType.getLayout().isIdentity() &&
                    !isMemRefNormalizable(oldMemRef.getUsers()))
                  return WalkResult::interrupt();
            }
            return WalkResult::advance();
          })
          .wasInterrupted())
    return false;

  for (unsigned argIndex : llvm::seq<unsigned>(0, funcOp.getNumArguments())) {
    BlockArgument oldMemRef = funcOp.getArgument(argIndex);
    if (auto oldMemRefType = dyn_cast<MemRefType>(oldMemRef.getType()))
      if (!oldMemRefType.getLayout().isIdentity() &&
          !isMemRefNormalizable(oldMemRef.getUsers()))
        return false;
  }

  return true;
}
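
// Illustrative example (assumption, not part of the original source): a
// function
//   func.func @f(%m: memref<16xf64, #tile>) { %v = affine.load %m[%i] ... }
// is normalizable because the non-identity memref is only dereferenced,
// whereas handing %m to an op without the MemRefsNormalizable trait would
// make @f (and, transitively, its callers/callees) non-normalizable.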

/// Fetch the updated argument list and result types of the function, and
/// update the function signature. This also updates the result types at each
/// caller site; if a returned value is a normalized memref, the calling
/// function's signature is updated as well.
/// TODO: An update to the calling function's signature is required only if the
/// returned value is in turn used in a ReturnOp of the calling function.
void NormalizeMemRefs::updateFunctionSignature(func::FuncOp funcOp,
                                               ModuleOp moduleOp) {
  FunctionType functionType = funcOp.getFunctionType();
  SmallVector<Type, 4> resultTypes;
  FunctionType newFuncType;
  resultTypes = llvm::to_vector<4>(functionType.getResults());

  // An external function's signature was already updated in
  // 'normalizeFuncOpMemRefs()'.
  if (!funcOp.isExternal()) {
    SmallVector<Type, 8> argTypes;
    for (const auto &argEn : llvm::enumerate(funcOp.getArguments()))
      argTypes.push_back(argEn.value().getType());

    // Traverse ReturnOps to check if an update to the return type in the
    // function signature is required.
    funcOp.walk([&](func::ReturnOp returnOp) {
      for (const auto &operandEn : llvm::enumerate(returnOp.getOperands())) {
        Type opType = operandEn.value().getType();
        MemRefType memrefType = dyn_cast<MemRefType>(opType);
        // If the type is not a memref, or if the memref type is the same as
        // that in the function's return signature, no update is required.
        if (!memrefType || memrefType == resultTypes[operandEn.index()])
          continue;
        // Update the function's return type signature.
        // A return type gets normalized either as a result of function
        // argument normalization, AllocOp normalization, or an update made at
        // a CallOp. There can be many call flows inside a function, and an
        // update to a specific ReturnOp may not have been made yet. So we
        // check that the result memref type is normalized.
        // TODO: When selective normalization is implemented, handle the
        // multiple-results case where some are normalized and some aren't.
        if (memrefType.getLayout().isIdentity())
          resultTypes[operandEn.index()] = memrefType;
      }
    });

    // We create a new function type and modify the function signature with
    // this new type.
    newFuncType = FunctionType::get(&getContext(), /*inputs=*/argTypes,
                                    /*results=*/resultTypes);
  }

  // Since we update the function signature, the result types at the caller
  // site may change. Because such a result might in turn be used in a
  // ReturnOp of the caller, the caller function's signature can also change.
  // Hence we record the caller functions in 'funcOpsToUpdate' to update their
  // signatures as well.
  llvm::SmallDenseSet<func::FuncOp, 8> funcOpsToUpdate;
  // We iterate over all symbolic uses of the function and update the return
  // type at the caller site.
  std::optional<SymbolTable::UseRange> symbolUses =
      funcOp.getSymbolUses(moduleOp);
  for (SymbolTable::SymbolUse symbolUse : *symbolUses) {
    Operation *userOp = symbolUse.getUser();
    OpBuilder builder(userOp);
    // When `userOp` cannot be cast to `CallOp`, it is skipped. This assumes
    // that a non-CallOp use has no memrefs to be replaced.
    // TODO: Handle cases where a non-CallOp symbol use of a function deals
    // with memrefs.
    auto callOp = dyn_cast<func::CallOp>(userOp);
    if (!callOp)
      continue;
    Operation *newCallOp =
        func::CallOp::create(builder, userOp->getLoc(), callOp.getCalleeAttr(),
                             resultTypes, userOp->getOperands());
    bool replacingMemRefUsesFailed = false;
    bool returnTypeChanged = false;
    for (unsigned resIndex : llvm::seq<unsigned>(0, userOp->getNumResults())) {
      OpResult oldResult = userOp->getResult(resIndex);
      OpResult newResult = newCallOp->getResult(resIndex);
      // If the result is not a memref, or the resulting memref already had a
      // trivial layout map, no use replacement is needed here.
      if (oldResult.getType() == newResult.getType())
        continue;
      AffineMap layoutMap =
          cast<MemRefType>(oldResult.getType()).getLayout().getAffineMap();
      if (failed(replaceAllMemRefUsesWith(oldResult, /*newMemRef=*/newResult,
                                          /*extraIndices=*/{},
                                          /*indexRemap=*/layoutMap,
                                          /*extraOperands=*/{},
                                          /*symbolOperands=*/{},
                                          /*userFilterFn=*/nullptr,
                                          /*allowNonDereferencingOps=*/true,
                                          /*replaceInDeallocOp=*/true))) {
        // If the replacement failed (due to escapes, for example), bail out.
        // This should never be reached, because this function is only called
        // for functions that are normalizable.
        newCallOp->erase();
        replacingMemRefUsesFailed = true;
        break;
      }
      returnTypeChanged = true;
    }
    if (replacingMemRefUsesFailed)
      continue;
    // Replace all uses for other non-memref result types.
    userOp->replaceAllUsesWith(newCallOp);
    userOp->erase();
    if (returnTypeChanged) {
      // Since the return type changed, it might lead to a change in the
      // caller function's signature.
      // TODO: If funcOp doesn't return any memref type, there is no need to
      // update the signature.
      // TODO: Further optimization - check whether the memref is indeed part
      // of a ReturnOp in the parentFuncOp, and only then is a signature
      // update required.
      // TODO: Extend this for ops that are FunctionOpInterface. This would
      // require creating an OpInterface for FunctionOpInterface ops.
      func::FuncOp parentFuncOp = newCallOp->getParentOfType<func::FuncOp>();
      funcOpsToUpdate.insert(parentFuncOp);
    }
  }
  // Because an external function's signature was already updated in
  // 'normalizeFuncOpMemRefs()', we don't need to update it here again.
  if (!funcOp.isExternal())
    funcOp.setType(newFuncType);

  // Update the signature types of the functions that call the current
  // function. Only if the return type of the current function has a
  // normalized memref does the caller function become a candidate for a
  // signature update.
  for (func::FuncOp parentFuncOp : funcOpsToUpdate)
    updateFunctionSignature(parentFuncOp, moduleOp);
}
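
// Illustrative example (assumption, not part of the original source): once
// @g's return type is normalized, a caller such as
//   %0 = call @g() : () -> memref<16xf64, #tile>
// is recreated as
//   %0 = call @g() : () -> memref<4x4xf64>
// and uses of the old result are remapped through #tile's affine map.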

/// Normalizes the memrefs within a function, including those arising as a
/// result of AllocOps, AllocaOps, CallOps, ReinterpretCastOps and function
/// arguments. The ModuleOp argument is used to help update the function's
/// signature after normalization.
void NormalizeMemRefs::normalizeFuncOpMemRefs(func::FuncOp funcOp,
                                              ModuleOp moduleOp) {
  // Turn memrefs' non-identity layout maps into identity ones. Collect
  // alloc, alloca and reinterpret_cast ops first and then process them, since
  // normalizeMemRef replaces/erases ops during memref rewriting.
  SmallVector<AllocOp, 4> allocOps;
  SmallVector<AllocaOp> allocaOps;
  SmallVector<ReinterpretCastOp> reinterpretCastOps;
  funcOp.walk([&](Operation *op) {
    if (auto allocOp = dyn_cast<AllocOp>(op))
      allocOps.push_back(allocOp);
    else if (auto allocaOp = dyn_cast<AllocaOp>(op))
      allocaOps.push_back(allocaOp);
    else if (auto reinterpretCastOp = dyn_cast<ReinterpretCastOp>(op))
      reinterpretCastOps.push_back(reinterpretCastOp);
  });
  for (AllocOp allocOp : allocOps)
    (void)normalizeMemRef(allocOp);
  for (AllocaOp allocaOp : allocaOps)
    (void)normalizeMemRef(allocaOp);
  for (ReinterpretCastOp reinterpretCastOp : reinterpretCastOps)
    (void)normalizeMemRef(reinterpretCastOp);

  // We use this OpBuilder to create a new memref layout later.
  OpBuilder b(funcOp);

  FunctionType functionType = funcOp.getFunctionType();
  SmallVector<Location> functionArgLocs(llvm::map_range(
      funcOp.getArguments(), [](BlockArgument arg) { return arg.getLoc(); }));
  SmallVector<Type, 8> inputTypes;
  // Walk over each argument of the function to perform memref normalization
  // (if necessary).
  for (unsigned argIndex :
       llvm::seq<unsigned>(0, functionType.getNumInputs())) {
    Type argType = functionType.getInput(argIndex);
    MemRefType memrefType = dyn_cast<MemRefType>(argType);
    // Check whether the argument is of MemRef type. Any other argument type
    // can simply be part of the final function signature.
    if (!memrefType) {
      inputTypes.push_back(argType);
      continue;
    }
    // Fetch a new memref type after normalizing the old memref to have an
    // identity map layout.
    MemRefType newMemRefType = normalizeMemRefType(memrefType);
    if (newMemRefType == memrefType || funcOp.isExternal()) {
      // Either memrefType already had an identity map or the map couldn't be
      // transformed to an identity map.
      inputTypes.push_back(newMemRefType);
      continue;
    }

    // Insert a new temporary argument with the new memref type.
    BlockArgument newMemRef = funcOp.front().insertArgument(
        argIndex, newMemRefType, functionArgLocs[argIndex]);
    BlockArgument oldMemRef = funcOp.getArgument(argIndex + 1);
    AffineMap layoutMap = memrefType.getLayout().getAffineMap();
    // Replace all uses of the old memref.
    if (failed(replaceAllMemRefUsesWith(oldMemRef, /*newMemRef=*/newMemRef,
                                        /*extraIndices=*/{},
                                        /*indexRemap=*/layoutMap,
                                        /*extraOperands=*/{},
                                        /*symbolOperands=*/{},
                                        /*userFilterFn=*/nullptr,
                                        /*allowNonDereferencingOps=*/true,
                                        /*replaceInDeallocOp=*/true))) {
      // If the replacement failed (due to escapes, for example), bail out,
      // removing the temporary argument inserted previously.
      funcOp.front().eraseArgument(argIndex);
      continue;
    }

    // All uses of the argument with the old memref type were replaced
    // successfully, so we remove the old argument now.
    funcOp.front().eraseArgument(argIndex + 1);
  }
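  // Illustrative sketch of the argument dance above (assumption, not part of
  // the original source): for an argument %old : memref<16xf64, #tile> at
  // index i, a new argument %new : memref<4x4xf64> is inserted at i (pushing
  // %old to i + 1), all uses of %old are rewritten through #tile to index
  // %new, and %old is erased, leaving %new as the i-th argument.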

  // Walk over normalizable operations to normalize memrefs of the operation
  // results. When `op` has memrefs with affine maps in its results, a new
  // operation containing normalized memrefs is created, and the memrefs are
  // then replaced. `CallOp` is skipped here because it is handled in
  // `updateFunctionSignature()`.
  funcOp.walk([&](Operation *op) {
    if (op->hasTrait<OpTrait::MemRefsNormalizable>() &&
        op->getNumResults() > 0 && !isa<func::CallOp>(op) &&
        !funcOp.isExternal()) {
      // Create newOp containing normalized memrefs in the operation results.
      Operation *newOp = createOpResultsNormalized(funcOp, op);
      // When none of the operation results is a memref with a non-trivial
      // affine map, `newOp` is the same as `op` and the following process is
      // skipped.
      if (op != newOp) {
        bool replacingMemRefUsesFailed = false;
        for (unsigned resIndex : llvm::seq<unsigned>(0, op->getNumResults())) {
          // Replace all uses of the old memrefs.
          Value oldMemRef = op->getResult(resIndex);
          Value newMemRef = newOp->getResult(resIndex);
          MemRefType oldMemRefType = dyn_cast<MemRefType>(oldMemRef.getType());
          // Check whether the operation result is of MemRef type.
          if (!oldMemRefType)
            continue;
          MemRefType newMemRefType = cast<MemRefType>(newMemRef.getType());
          if (oldMemRefType == newMemRefType)
            continue;
          // TODO: Assume single layout map. Multiple maps not supported.
          AffineMap layoutMap = oldMemRefType.getLayout().getAffineMap();
          if (failed(replaceAllMemRefUsesWith(oldMemRef,
                                              /*newMemRef=*/newMemRef,
                                              /*extraIndices=*/{},
                                              /*indexRemap=*/layoutMap,
                                              /*extraOperands=*/{},
                                              /*symbolOperands=*/{},
                                              /*userFilterFn=*/nullptr,
                                              /*allowNonDereferencingOps=*/true,
                                              /*replaceInDeallocOp=*/true))) {
            newOp->erase();
            replacingMemRefUsesFailed = true;
            continue;
          }
        }
        if (!replacingMemRefUsesFailed) {
          // Replace other ops with the new op and delete the old op when the
          // replacement succeeded.
          op->replaceAllUsesWith(newOp);
          op->erase();
        }
      }
    }
  });

  // In a normal function, memrefs in the return type signature get normalized
  // as a result of normalization of function arguments, AllocOps or CallOps'
  // result types. Since an external function doesn't have a body, memrefs in
  // its return type signature can only get normalized by iterating over the
  // individual return types.
  if (funcOp.isExternal()) {
    SmallVector<Type, 4> resultTypes;
    for (unsigned resIndex :
         llvm::seq<unsigned>(0, functionType.getNumResults())) {
      Type resType = functionType.getResult(resIndex);
      MemRefType memrefType = dyn_cast<MemRefType>(resType);
      // Check whether the result is of MemRef type. Any other result type can
      // simply be part of the final function signature.
      if (!memrefType) {
        resultTypes.push_back(resType);
        continue;
      }
      // Compute a new memref type after normalizing the old memref to have an
      // identity map layout.
      MemRefType newMemRefType = normalizeMemRefType(memrefType);
      resultTypes.push_back(newMemRefType);
    }

    FunctionType newFuncType =
        FunctionType::get(&getContext(), /*inputs=*/inputTypes,
                          /*results=*/resultTypes);
    // Set the new function signature for this external function.
    funcOp.setType(newFuncType);
  }
  updateFunctionSignature(funcOp, moduleOp);
}
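
// Illustrative example (assumption, not part of the original source): an
// external declaration
//   func.func private @ext(memref<16xf64, #tile>) -> memref<16xf64, #tile>
// has no body whose ops could drive the rewrite, so both its input and result
// types are normalized directly, yielding
//   func.func private @ext(memref<4x4xf64>) -> memref<4x4xf64>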

/// Create an operation containing normalized memrefs in the operation results.
/// When the results of `oldOp` have memrefs with affine maps, the memrefs are
/// normalized, and a new operation containing them in its results is
/// returned. If none of the results of `oldOp` is a memref with a non-trivial
/// affine map, `oldOp` is returned without modification.
Operation *NormalizeMemRefs::createOpResultsNormalized(func::FuncOp funcOp,
                                                       Operation *oldOp) {
  // Prepare OperationState to create newOp containing normalized memrefs in
  // the operation results.
  OperationState result(oldOp->getLoc(), oldOp->getName());
  result.addOperands(oldOp->getOperands());
  result.addAttributes(oldOp->getAttrs());
  // Add normalized MemRefTypes to the OperationState.
  SmallVector<Type, 4> resultTypes;
  OpBuilder b(funcOp);
  bool resultTypeNormalized = false;
  for (unsigned resIndex : llvm::seq<unsigned>(0, oldOp->getNumResults())) {
    auto resultType = oldOp->getResult(resIndex).getType();
    MemRefType memrefType = dyn_cast<MemRefType>(resultType);
    // Check whether the operation result is of MemRef type.
    if (!memrefType) {
      resultTypes.push_back(resultType);
      continue;
    }

    // Fetch a new memref type after normalizing the old memref.
    MemRefType newMemRefType = normalizeMemRefType(memrefType);
    if (newMemRefType == memrefType) {
      // Either memrefType already had an identity map or the map couldn't
      // be transformed to an identity map.
      resultTypes.push_back(memrefType);
      continue;
    }
    resultTypes.push_back(newMemRefType);
    resultTypeNormalized = true;
  }
  result.addTypes(resultTypes);
  // When all of the results of `oldOp` have no memrefs or memrefs without
  // affine maps, `oldOp` is returned without modification.
  if (resultTypeNormalized) {
    OpBuilder bb(oldOp);
    for (auto &oldRegion : oldOp->getRegions()) {
      Region *newRegion = result.addRegion();
      newRegion->takeBody(oldRegion);
    }
    return bb.create(result);
  }
  return oldOp;
}