@@ -536,6 +536,128 @@ void BytecodeGenerationTest::testInliningOfToDo() {
536536 BC_RETURN_SELF});
537537}
538538
539+ static std::vector<uint8_t > GetBytecodes (VMMethod* method) {
540+ std::vector<uint8_t > bcs (method->bcLength );
541+
542+ for (size_t i = 0 ; i < method->bcLength ; i += 1 ) {
543+ bcs.at (i) = method->GetBytecode (i);
544+ }
545+ return bcs;
546+ }
547+
// Checks the bytecode produced for a `to:do:` loop that is inlined into the
// enclosing method while its body still contains a genuine (non-inlined)
// block passed to #do:. The loop variable `a` therefore lives as a method
// local (local 2) and the inner block must reach it through the outer
// context (context level 1 in the BC_PUSH_LOCAL operands).
void BytecodeGenerationTest::testToDoBlockBlockInlinedSelf() {
  auto bytecodes = methodToBytecode(R"""(
    test = (
      | l1 l2 |
      1 to: 2 do: [:a |
        l1 do: [:b |
          b ifTrue: [
            a.
            l2 := l2 + 1 ] ] ]
    ) )""");

  // Expected bytecodes of the method itself: the inlined to:do: loop.
  check(bytecodes,
        {BC_PUSH_1, BC_PUSH_CONSTANT_0,
         BC_DUP_SECOND,  // stack: Top[1, 2, 1]

         BC(BC_JUMP_IF_GREATER, 15, 0),  // consume only on jump
         BC_DUP,

         BC_POP_LOCAL_2,   // store the `a`
         BC_PUSH_LOCAL_0,  // push the `l1` on the stack
         BC(BC_PUSH_BLOCK, 1), BC(BC_SEND, 2),  // send #do:
         BC_POP,
         BC_INC,  // increment top, the iteration counter

         // jump back to the jump_if_greater bytecode
         BC(BC_JUMP_BACKWARD, 12, 0),

         // jump_if_greater target
         BC_RETURN_SELF});

  // The #do: argument block is literal 1 of the method; verify its bytecodes
  // as well. Note the ifTrue: inside it IS inlined (jump, no extra block).
  auto* block = (VMMethod*)_mgenc->GetLiteral(1);
  check(GetBytecodes(block),
        {BC_PUSH_ARG_1, BC(BC_JUMP_ON_FALSE_TOP_NIL, 15, 0),
         BC(BC_PUSH_LOCAL, 2, 1),  // load the `a`
         BC_POP, BC(BC_PUSH_LOCAL, 1, 1), BC_INC, BC_DUP,
         BC(BC_POP_LOCAL, 1, 1), BC_RETURN_LOCAL},
        block);
}
585+
// Checks an inlined `to:do:` whose body contains a non-inlined block
// (#notInlined:) which itself embeds two further non-inlined blocks
// (#noInline:noInline:). Exercises that, from inside the nested blocks, the
// method argument `oldStorage` and the inlined loop locals `i`/`current`
// are addressed with the correct slot index and context level
// (the trailing 1 or 2 in the PUSH_ARGUMENT/PUSH_LOCAL operands).
void BytecodeGenerationTest::testToDoWithMoreEmbeddedBlocksAndArgAccess() {
  auto bytecodes = methodToBytecode(R"""(
    transferEntries: oldStorage = (
      1 to: oldStorage length do: [:i |
        | current |
        current := oldStorage at: i.
        current notInlined: [
          oldStorage at: i put: nil.
          current next
            noInline: [ i. current. #foo ]
            noInline: [
              self splitBucket: oldStorage bucket: i head: current ] ] ]
    ) )""");

  // Expected bytecodes of the method: the inlined to:do: loop driving #notInlined:.
  check(bytecodes,
        {BC_PUSH_1, BC_PUSH_ARG_1, BC(BC_SEND_1, 0),
         BC_DUP_SECOND,  // ~

         BC(BC_JUMP_IF_GREATER, 20, 0),  // consume only on jump
         BC_DUP,

         BC_POP_LOCAL_0,   // i
         BC_PUSH_ARG_1,    // oldStorage
         BC_PUSH_LOCAL_0,  // i
         BC(BC_SEND, 1),   // #at:

         BC_POP_LOCAL_1,        // current
         BC_PUSH_LOCAL_1,       // current
         BC(BC_PUSH_BLOCK, 2),  // ~
         BC(BC_SEND, 3),        // send #notInlined:
         BC_POP,
         BC_INC,  // increment top, the iteration counter

         // jump back to the jump_if_greater bytecode
         BC(BC_JUMP_BACKWARD, 17, 0),

         // jump_if_greater target
         BC_RETURN_SELF});

  // The #notInlined: argument block is literal 2 of the method; from here,
  // the method's scope is one context level up (operand 1).
  auto* block = (VMMethod*)_mgenc->GetLiteral(2);
  check(GetBytecodes(block),
        {BC(BC_PUSH_ARGUMENT, 1, 1),  // oldStorage
         BC(BC_PUSH_LOCAL, 0, 1),     // i
         BC_PUSH_NIL, BC(BC_SEND, 0), // #at:put:
         BC_POP,

         BC(BC_PUSH_LOCAL, 1, 1),  // current
         BC(BC_SEND_1, 1),         // #next
         BC(BC_PUSH_BLOCK, 2),     // ~
         BC(BC_PUSH_BLOCK, 3),     // ~
         BC(BC_SEND, 4),           // #noInline:noInline:
         BC_RETURN_LOCAL},
        block);

  // [ i. current. #foo ] — nested two levels deep, so the method's locals
  // are now at context level 2.
  auto* block2 = (VMMethod*)block->GetIndexableField(2);
  check(GetBytecodes(block2),
        {BC(BC_PUSH_LOCAL, 0, 2),  // i
         BC_POP,                   // ~
         BC(BC_PUSH_LOCAL, 1, 2),  // current
         BC_POP,                   // ~
         BC_PUSH_CONSTANT_0, BC_RETURN_LOCAL},
        block2);

  // [ self splitBucket: oldStorage bucket: i head: current ] — same nesting
  // depth; `self` is argument 0 at context level 2.
  auto* block3 = (VMMethod*)block->GetIndexableField(3);
  check(GetBytecodes(block3),
        {BC(BC_PUSH_ARGUMENT, 0, 2),  // self
         BC(BC_PUSH_ARGUMENT, 1, 2),  // oldStorage
         BC(BC_PUSH_LOCAL, 0, 2),     // i
         BC(BC_PUSH_LOCAL, 1, 2),     // current
         BC(BC_SEND, 0),              // #splitBucket:bucket:head:
         BC_RETURN_LOCAL},
        block3);
}
660+
539661void BytecodeGenerationTest::testIfArg () {
540662 ifArg (" ifTrue:" , BC_JUMP_ON_FALSE_TOP_NIL);
541663 ifArg (" ifFalse:" , BC_JUMP_ON_TRUE_TOP_NIL);
0 commit comments