
Commit 767e509

[SSADestroyHoisting] Fold into access scopes.
If the destroy_addr's barrier is an end_access, try to fold the destroy_addr with copies or loads that occur inside the access scope, so long as there are no barriers between the destroy_addr and the instruction it is to be folded with.
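
For example, in the fold_scoped_load test added below, the destroy_addr's barrier is the end_access of a read scope, and the destroy_addr folds into the load [copy] inside that scope. A condensed before/after sketch of that test:

    %scope = begin_access [read] [static] %addr : $*S
    %value = load [copy] %scope : $*S
    end_access %scope : $*S
    destroy_addr %addr : $*S

becomes

    %scope = begin_access [read] [static] %addr : $*S
    %value = load [take] %scope : $*S
    end_access %scope : $*S
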
1 parent 4ad25e9 commit 767e509

2 files changed: 175 additions, 5 deletions

lib/SILOptimizer/Transforms/SSADestroyHoisting.cpp

Lines changed: 31 additions & 5 deletions
@@ -217,6 +217,11 @@ class DeinitBarriers {
 
   void compute() { DestroyReachability(*this).solveBackward(); }
 
+  bool isBarrier(SILInstruction *instruction) const {
+    return classificationIsBarrier(classifyInstruction(
+        instruction, ignoreDeinitBarriers, storageDefInst, knownUses));
+  };
+
 private:
   DeinitBarriers(DeinitBarriers const &) = delete;
   DeinitBarriers &operator=(DeinitBarriers const &) = delete;
@@ -367,7 +372,10 @@ class HoistDestroys {
 protected:
   SILFunction *getFunction() const { return storageRoot->getFunction(); }
 
-  bool foldBarrier(SILInstruction *barrier);
+  bool foldBarrier(SILInstruction *barrier, SILValue accessScope);
+
+  bool foldBarrier(SILInstruction *barrier, const KnownStorageUses &knownUses,
+                   const DeinitBarriers &deinitBarriers);
 
   void insertDestroy(SILInstruction *barrier, SILInstruction *insertBefore,
                      const KnownStorageUses &knownUses);
@@ -408,7 +416,7 @@ bool HoistDestroys::rewriteDestroys(const KnownStorageUses &knownUses,
   for (SILInstruction *barrier : deinitBarriers.barriers) {
     auto *barrierBlock = barrier->getParent();
     if (barrier != barrierBlock->getTerminator()) {
-      if (!foldBarrier(barrier))
+      if (!foldBarrier(barrier, knownUses, deinitBarriers))
         insertDestroy(barrier, barrier->getNextInstruction(), knownUses);
       continue;
     }
@@ -455,9 +463,10 @@ bool HoistDestroys::rewriteDestroys(const KnownStorageUses &knownUses,
   return deleter.hadCallbackInvocation();
 }
 
-bool HoistDestroys::foldBarrier(SILInstruction *barrier) {
+bool HoistDestroys::foldBarrier(SILInstruction *barrier, SILValue storageRoot) {
   if (auto *load = dyn_cast<LoadInst>(barrier)) {
-    if (load->getOperand() == storageRoot) {
+    if (stripAccessMarkers(load->getOperand()) ==
+        stripAccessMarkers(storageRoot)) {
       if (load->getOwnershipQualifier() == LoadOwnershipQualifier::Copy) {
         load->setOwnershipQualifier(LoadOwnershipQualifier::Take);
         return true;
@@ -468,7 +477,7 @@ bool HoistDestroys::foldBarrier(SILInstruction *barrier) {
     }
   }
   if (auto *copy = dyn_cast<CopyAddrInst>(barrier)) {
-    if (copy->getSrc() == storageRoot) {
+    if (stripAccessMarkers(copy->getSrc()) == stripAccessMarkers(storageRoot)) {
       assert(!copy->isTakeOfSrc());
       copy->setIsTakeOfSrc(IsTake);
       return true;
@@ -477,6 +486,23 @@ bool HoistDestroys::foldBarrier(SILInstruction *barrier) {
   return false;
 }
 
+bool HoistDestroys::foldBarrier(SILInstruction *barrier,
+                                const KnownStorageUses &knownUses,
+                                const DeinitBarriers &deinitBarriers) {
+  if (auto *eai = dyn_cast<EndAccessInst>(barrier)) {
+    SILInstruction *instruction = eai;
+    while ((instruction = instruction->getPreviousInstruction())) {
+      if (instruction == eai->getBeginAccess())
+        return false;
+      if (foldBarrier(instruction, storageRoot))
+        return true;
+      if (deinitBarriers.isBarrier(instruction))
+        return false;
+    }
+  }
+  return foldBarrier(barrier, storageRoot);
+}
+
 // \p barrier may be null if the destroy is at function entry.
 void HoistDestroys::insertDestroy(SILInstruction *barrier,
                                   SILInstruction *insertBefore,

test/SILOptimizer/hoist_destroy_addr.sil

Lines changed: 144 additions & 0 deletions
@@ -254,3 +254,147 @@ entry(%instance : @none $TrivialStruct):
   %retval = tuple ()
   return %retval : $()
 }
+
+// Fold destroy_addr and a load [copy] into a load [take] even when that
+// load [take] is guarded by an access scope.
+//
+// CHECK-LABEL: sil [ossa] @fold_scoped_load : {{.*}} {
+// CHECK: load [take]
+// CHECK-LABEL: // end sil function 'fold_scoped_load'
+sil [ossa] @fold_scoped_load : $@convention(thin) (@owned S) -> (@owned S) {
+entry(%instance : @owned $S):
+  %addr = alloc_stack $S
+  %store_scope = begin_access [modify] [static] %addr : $*S
+  store %instance to [init] %store_scope : $*S
+  end_access %store_scope : $*S
+  %load_scope = begin_access [read] [static] %addr : $*S
+  %value = load [copy] %load_scope : $*S
+  end_access %load_scope : $*S
+  destroy_addr %addr : $*S
+  dealloc_stack %addr : $*S
+  return %value : $S
+}
+
+// Don't fold when there's a deinit barrier in the way.
+//
+// CHECK-LABEL: sil [ossa] @nofold_scoped_load_barrier : {{.*}} {
+// CHECK: load [copy]
+// CHECK-LABEL: // end sil function 'nofold_scoped_load_barrier'
+sil [ossa] @nofold_scoped_load_barrier : $@convention(thin) (@owned S) -> (@owned S) {
+entry(%instance : @owned $S):
+  %addr = alloc_stack $S
+  %store_scope = begin_access [modify] [static] %addr : $*S
+  store %instance to [init] %store_scope : $*S
+  end_access %store_scope : $*S
+  %load_scope = begin_access [read] [static] %addr : $*S
+  %value = load [copy] %load_scope : $*S
+  %unknown = function_ref @unknown : $@convention(thin) () -> ()
+  apply %unknown() : $@convention(thin) () -> ()
+  end_access %load_scope : $*S
+  destroy_addr %addr : $*S
+  dealloc_stack %addr : $*S
+  return %value : $S
+}
+
+// Don't fold with a load [copy] of a struct_element_addr.
+//
+// CHECK-LABEL: sil [ossa] @nofold_scoped_copy_addr_projection : {{.*}} {
+// CHECK: load [copy]
+// CHECK-LABEL: // end sil function 'nofold_scoped_copy_addr_projection'
+sil [ossa] @nofold_scoped_copy_addr_projection : $@convention(thin) (@owned S) -> (@owned S) {
+entry(%instance : @owned $S):
+  %addr = alloc_stack $S
+  %store_scope = begin_access [modify] [static] %addr : $*S
+  store %instance to [init] %store_scope : $*S
+  end_access %store_scope : $*S
+  %load_scope = begin_access [read] [static] %addr : $*S
+  %field_addr = struct_element_addr %load_scope : $*S, #S.x
+  %field = load [copy] %field_addr : $*X
+  end_access %load_scope : $*S
+  destroy_addr %addr : $*S
+  dealloc_stack %addr : $*S
+  %value = struct $S (%field : $X)
+  return %value : $S
+}
+
+// Don't fold destroy of outer scope with struct_element_addr of inner scope.
+//
+// CHECK-LABEL: sil [ossa] @nofold_with_copy_addr_projection : {{.*}} {
+// CHECK: copy_addr {{%[^,]+}}
+// CHECK-LABEL: // end sil function 'nofold_with_copy_addr_projection'
+sil [ossa] @nofold_with_copy_addr_projection : $@convention(thin) (@owned S) -> (@owned S) {
+entry(%instance : @owned $S):
+  %addr = alloc_stack $S
+  %addr_2 = alloc_stack $S
+  %store_scope = begin_access [modify] [static] %addr : $*S
+  store %instance to [init] %store_scope : $*S
+  end_access %store_scope : $*S
+  %outer = begin_access [read] [static] %addr : $*S
+  apply undef(%outer) : $@convention(thin) (@inout S) -> ()
+  %inner = begin_access [read] [static] %outer : $*S
+  %field_addr = struct_element_addr %inner : $*S, #S.x
+  %field_addr_2 = struct_element_addr %addr_2 : $*S, #S.x
+  copy_addr %field_addr to [initialization] %field_addr_2 : $*X
+  end_access %inner : $*S
+  destroy_addr %outer : $*S
+  end_access %outer : $*S
+  %value = load [take] %addr_2 : $*S
+  dealloc_stack %addr_2 : $*S
+  dealloc_stack %addr : $*S
+  return %value : $S
+}
+
+// Fold destroy of outer scope with copy_addr of inner scope.
+//
+// CHECK-LABEL: sil [ossa] @fold_scoped_destroy_with_scoped_copy_addr : {{.*}} {
+// CHECK: copy_addr [take] {{%[^,]+}}
+// CHECK-LABEL: // end sil function 'fold_scoped_destroy_with_scoped_copy_addr'
+sil [ossa] @fold_scoped_destroy_with_scoped_copy_addr : $@convention(thin) (@owned S) -> (@owned S) {
+entry(%instance : @owned $S):
+  %addr = alloc_stack $S
+  %addr_2 = alloc_stack $S
+  %store_scope = begin_access [modify] [static] %addr : $*S
+  store %instance to [init] %store_scope : $*S
+  end_access %store_scope : $*S
+  %outer = begin_access [read] [static] %addr : $*S
+  apply undef(%outer) : $@convention(thin) (@inout S) -> ()
+  %inner = begin_access [read] [static] %outer : $*S
+  copy_addr %inner to [initialization] %addr_2 : $*S
+  end_access %inner : $*S
+  destroy_addr %outer : $*S
+  end_access %outer : $*S
+  %value = load [take] %addr_2 : $*S
+  dealloc_stack %addr_2 : $*S
+  dealloc_stack %addr : $*S
+  return %value : $S
+}
+
+// Don't fold with an unrelated load [copy].
+// CHECK-LABEL: sil [ossa] @nofold_unrelated_scoped_load_copy : {{.*}} {
+// CHECK: load [copy]
+// CHECK: destroy_addr
+// CHECK: destroy_addr
+// CHECK-LABEL: // end sil function 'nofold_unrelated_scoped_load_copy'
+sil [ossa] @nofold_unrelated_scoped_load_copy : $@convention(thin) (@owned X) -> (@owned X) {
+entry(%instance : @owned $X):
+  %copy = copy_value %instance : $X
+  %addr_1 = alloc_stack $X
+  %addr_2 = alloc_stack $X
+  store %instance to [init] %addr_1 : $*X
+  store %copy to [init] %addr_2 : $*X
+
+  %access = begin_access [read] [static] %addr_1 : $*X
+  %loaded = load [copy] %access : $*X
+  end_access %access : $*X
+  destroy_addr %addr_2 : $*X
+
+  %barrier = function_ref @unknown : $@convention(thin) () -> ()
+  apply %barrier() : $@convention(thin) () -> ()
+
+  destroy_addr %addr_1 : $*X
+
+  dealloc_stack %addr_2 : $*X
+  dealloc_stack %addr_1 : $*X
+
+  return %loaded : $X
+}
