
Commit 8327e55

vtjnash authored and aviatesk committed
optimizations: better modeling and codegen for apply and svec calls (#59548)
- Use svec instead of tuple for arguments (a better match for the ABI, which will require boxes).
- Directly forward a single svec argument, in both the runtime and codegen, without copying.
- Optimize all consistent builtin functions of constant arguments, not just those with special tfuncs, reducing code duplication and divergence.
- Codegen for `svec()` directly, so the optimizer can see each store (and doesn't have to build the whole thing on the stack first).

Written with help from Claude
1 parent db87ab5 commit 8327e55
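
For orientation (not part of the commit): the sketch below shows the call shape these changes target. The function `g` and the literal arguments are illustrative only.

```julia
# Splatting `f(xs...)` lowers to a Core._apply_iterate call whose trailing
# argument(s) are the collections being spread. This commit teaches the
# optimizer to pass such a collection as a Core.svec (boxed elements, matching
# the jlcall ABI) and the runtime/codegen to forward a single svec argument
# without copying. Observable semantics are unchanged:
g(x, y) = x + y
@assert Core._apply_iterate(Base.iterate, g, (1, 2)) == 3            # tuple argument
@assert Core._apply_iterate(Base.iterate, g, Core.svec(1, 2)) == 3   # svec argument
```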

File tree

8 files changed: +179 -38 lines

Compiler/src/ssair/passes.jl

Lines changed: 46 additions & 0 deletions
@@ -872,6 +872,49 @@ function perform_lifting!(compact::IncrementalCompact,
     return Pair{Any, PhiNest}(stmt_val, PhiNest(visited_philikes, lifted_philikes, lifted_leaves, reverse_mapping, walker_callback))
 end
 
+function lift_apply_args!(compact::IncrementalCompact, idx::Int, stmt::Expr, 𝕃ₒ::AbstractLattice)
+    # Handle _apply_iterate calls: convert arguments to use `Core.svec`. The behavior of Core.svec (with boxing) better matches the ABI of codegen.
+    compact[idx] = nothing
+    for i in 4:length(stmt.args) # Skip iterate function, f, and first iterator
+        arg = stmt.args[i]
+        arg_type = argextype(arg, compact)
+        svec_args = nothing
+        if isa(arg_type, DataType) && arg_type.name === Tuple.name
+            if isa(arg, SSAValue)
+                arg_stmt = compact[arg][:stmt]
+                if is_known_call(arg_stmt, Core.tuple, compact)
+                    svec_args = copy(arg_stmt.args)
+                end
+            end
+            if svec_args === nothing
+                # Fallback path: generate getfield calls for tuple elements
+                tuple_length = length(arg_type.parameters)
+                if tuple_length > 0 && !isvarargtype(arg_type.parameters[tuple_length])
+                    svec_args = Vector{Any}(undef, tuple_length + 1)
+                    for j in 1:tuple_length
+                        getfield_call = Expr(:call, GlobalRef(Core, :getfield), arg, j)
+                        getfield_type = arg_type.parameters[j]
+                        inst = compact[SSAValue(idx)]
+                        getfield_ssa = insert_node!(compact, SSAValue(idx), NewInstruction(getfield_call, getfield_type, NoCallInfo(), inst[:line], inst[:flag]))
+                        svec_args[j + 1] = getfield_ssa
+                    end
+                end
+            end
+        end
+        # Create Core.svec call if we have arguments
+        if svec_args !== nothing
+            svec_args[1] = GlobalRef(Core, :svec)
+            new_svec_call = Expr(:call)
+            new_svec_call.args = svec_args
+            inst = compact[SSAValue(idx)]
+            new_svec_ssa = insert_node!(compact, SSAValue(idx), NewInstruction(new_svec_call, SimpleVector, NoCallInfo(), inst[:line], inst[:flag]))
+            stmt.args[i] = new_svec_ssa
+        end
+    end
+    compact[idx] = stmt
+    nothing
+end
+
 function lift_svec_ref!(compact::IncrementalCompact, idx::Int, stmt::Expr)
     length(stmt.args) != 3 && return
 

@@ -1375,6 +1418,9 @@ function sroa_pass!(ir::IRCode, inlining::Union{Nothing,InliningState}=nothing)
             compact[SSAValue(idx)] = (compact[enter_ssa][:stmt]::EnterNode).scope
         elseif isexpr(stmt, :new)
             refine_new_effects!(𝕃ₒ, compact, idx, stmt)
+        elseif is_known_call(stmt, Core._apply_iterate, compact)
+            length(stmt.args) >= 4 || continue
+            lift_apply_args!(compact, idx, stmt, 𝕃ₒ)
         end
         continue
     end
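
A hedged sketch of the rewrite `lift_apply_args!` performs during SROA; the SSA numbers and names below are illustrative, not real compiler output.

```julia
# Illustrative pseudo-IR only.
#
# Before the pass:
#   %1 = Core.tuple(x, y)
#   %2 = Core._apply_iterate(Base.iterate, f, %1)
#
# After lift_apply_args!:
#   %1 = Core.tuple(x, y)          # may become dead and get removed later
#   %3 = Core.svec(x, y)           # reuses the recognized tuple call's arguments
#   %2 = Core._apply_iterate(Base.iterate, f, %3)
#
# If the argument is not a recognized Core.tuple call but has a concrete,
# non-vararg Tuple type, the fallback path emits one getfield per element and
# packs the results into the new Core.svec call instead.
```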

Compiler/src/tfuncs.jl

Lines changed: 28 additions & 15 deletions
@@ -585,6 +585,15 @@ end
 add_tfunc(nfields, 1, 1, nfields_tfunc, 1)
 add_tfunc(Core._expr, 1, INT_INF, @nospecs((𝕃::AbstractLattice, args...)->Expr), 100)
 add_tfunc(svec, 0, INT_INF, @nospecs((𝕃::AbstractLattice, args...)->SimpleVector), 20)
+
+@nospecs function _svec_len_tfunc(𝕃::AbstractLattice, s)
+    if isa(s, Const) && isa(s.val, SimpleVector)
+        return Const(length(s.val))
+    end
+    return Int
+end
+add_tfunc(Core._svec_len, 1, 1, _svec_len_tfunc, 1)
+
 @nospecs function _svec_ref_tfunc(𝕃::AbstractLattice, s, i)
     if isa(s, Const) && isa(i, Const)
         s, i = s.val, i.val

@@ -1986,15 +1995,8 @@ function tuple_tfunc(𝕃::AbstractLattice, argtypes::Vector{Any})
         # UnionAll context is missing around this.
         pop!(argtypes)
     end
-    all_are_const = true
-    for i in 1:length(argtypes)
-        if !isa(argtypes[i], Const)
-            all_are_const = false
-            break
-        end
-    end
-    if all_are_const
-        return Const(ntuple(i::Int->argtypes[i].val, length(argtypes)))
+    if is_all_const_arg(argtypes, 1) # repeated from builtin_tfunction for the benefit of callers that use this tfunc directly
+        return Const(tuple(collect_const_args(argtypes, 1)...))
     end
     params = Vector{Any}(undef, length(argtypes))
     anyinfo = false

@@ -2359,14 +2361,17 @@ function _builtin_nothrow(𝕃::AbstractLattice, @nospecialize(f::Builtin), argt
     elseif f === Core.compilerbarrier
         na == 2 || return false
         return compilerbarrier_nothrow(argtypes[1], nothing)
+    elseif f === Core._svec_len
+        na == 1 || return false
+        return _svec_len_tfunc(𝕃, argtypes[1]) isa Const
     elseif f === Core._svec_ref
         na == 2 || return false
         return _svec_ref_tfunc(𝕃, argtypes[1], argtypes[2]) isa Const
     end
     return false
 end
 
-# known to be always effect-free (in particular nothrow)
+# known to be always effect-free (in particular also nothrow)
 const _PURE_BUILTINS = Any[
     tuple,
     svec,

@@ -2395,6 +2400,8 @@ const _CONSISTENT_BUILTINS = Any[
     donotdelete,
     memoryrefnew,
     memoryrefoffset,
+    Core._svec_len,
+    Core._svec_ref,
 ]
 
 # known to be effect-free (but not necessarily nothrow)

@@ -2419,6 +2426,7 @@ const _EFFECT_FREE_BUILTINS = [
     Core.throw_methoderror,
     getglobal,
     compilerbarrier,
+    Core._svec_len,
     Core._svec_ref,
 ]
 

@@ -2453,6 +2461,7 @@ const _ARGMEM_BUILTINS = Any[
     replacefield!,
     setfield!,
     swapfield!,
+    Core._svec_len,
     Core._svec_ref,
 ]
 

@@ -2637,7 +2646,7 @@ function builtin_effects(𝕃::AbstractLattice, @nospecialize(f::Builtin), argty
     else
         if contains_is(_CONSISTENT_BUILTINS, f)
             consistent = ALWAYS_TRUE
-        elseif f === memoryrefget || f === memoryrefset! || f === memoryref_isassigned || f === Core._svec_ref
+        elseif f === memoryrefget || f === memoryrefset! || f === memoryref_isassigned || f === Core._svec_len || f === Core._svec_ref
             consistent = CONSISTENT_IF_INACCESSIBLEMEMONLY
         elseif f === Core._typevar || f === Core.memorynew
             consistent = CONSISTENT_IF_NOTRETURNED

@@ -2746,11 +2755,12 @@ end
 function builtin_tfunction(interp::AbstractInterpreter, @nospecialize(f), argtypes::Vector{Any},
                            sv::Union{AbsIntState, Nothing})
     𝕃ᵢ = typeinf_lattice(interp)
-    if isa(f, IntrinsicFunction)
-        if is_pure_intrinsic_infer(f) && all(@nospecialize(a) -> isa(a, Const), argtypes)
-            argvals = anymap(@nospecialize(a) -> (a::Const).val, argtypes)
+    # Early constant evaluation for foldable builtins with all const args
+    if isa(f, IntrinsicFunction) ? is_pure_intrinsic_infer(f) : (f in _PURE_BUILTINS || (f in _CONSISTENT_BUILTINS && f in _EFFECT_FREE_BUILTINS))
+        if is_all_const_arg(argtypes, 1)
+            argvals = collect_const_args(argtypes, 1)
             try
-                # unroll a few cases which have specialized codegen
+                # unroll a few common cases for better codegen
                 if length(argvals) == 1
                     return Const(f(argvals[1]))
                 elseif length(argvals) == 2

@@ -2764,6 +2774,8 @@ function builtin_tfunction(interp::AbstractInterpreter, @nospecialize(f), argtyp
                 return Bottom
             end
         end
+    end
+    if isa(f, IntrinsicFunction)
         iidx = Int(reinterpret(Int32, f)) + 1
         if iidx < 0 || iidx > length(T_IFUNC)
             # unknown intrinsic

@@ -2790,6 +2802,7 @@ function builtin_tfunction(interp::AbstractInterpreter, @nospecialize(f), argtyp
         end
         tf = T_FFUNC_VAL[fidx]
     end
+
     if hasvarargtype(argtypes)
         if length(argtypes) - 1 > tf[2]
             # definitely too many arguments
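
A hedged illustration of what the new `_svec_len` modeling enables; the function name `svlen` is made up for the example and the exact amount of folding depends on the build.

```julia
# With Core._svec_len registered as a consistent, effect-free builtin with its
# own tfunc, the length of a compile-time-constant SimpleVector can fold to a
# Const during inference.
svlen() = length(Core.svec(:a, :b, :c))
@assert svlen() == 3
@assert only(Base.return_types(svlen, Tuple{})) === Int
# On a build that includes this commit, code_typed(svlen, ()) is expected to
# show the body folded down to `return 3`.
```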

Compiler/test/effects.jl

Lines changed: 1 addition & 1 deletion
@@ -1466,7 +1466,7 @@ end
 let effects = Base.infer_effects((Core.SimpleVector,Int); optimize=false) do svec, i
         Core._svec_ref(svec, i)
     end
-    @test !Compiler.is_consistent(effects)
+    @test Compiler.is_consistent(effects)
     @test Compiler.is_effect_free(effects)
     @test !Compiler.is_nothrow(effects)
     @test Compiler.is_terminates(effects)
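
A hedged restatement of why the expectation flips: `Core._svec_ref` is now listed in `_CONSISTENT_BUILTINS` (an svec's contents never change after construction), while it can still throw on a bad index. Outside the test harness the same query might look like this, assuming `Core.Compiler` binds the compiler as usual.

```julia
effects = Base.infer_effects((Core.SimpleVector, Int); optimize=false) do sv, i
    Core._svec_ref(sv, i)
end
@assert Core.Compiler.is_consistent(effects)   # consistent on builds with this commit
@assert !Core.Compiler.is_nothrow(effects)     # indexing may still throw BoundsError
```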

base/essentials.jl

Lines changed: 1 addition & 5 deletions
@@ -934,11 +934,7 @@ setindex!(A::MemoryRef{Any}, @nospecialize(x)) = (memoryrefset!(A, x, :not_atomi
 
 getindex(v::SimpleVector, i::Int) = (@_foldable_meta; Core._svec_ref(v, i))
 function length(v::SimpleVector)
-    @_total_meta
-    t = @_gc_preserve_begin v
-    len = unsafe_load(Ptr{Int}(pointer_from_objref(v)))
-    @_gc_preserve_end t
-    return len
+    Core._svec_len(v)
 end
 firstindex(v::SimpleVector) = 1
 lastindex(v::SimpleVector) = length(v)
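
A short sanity check (values arbitrary) of the behavioral equivalence of the new `length` definition, which now goes through the `Core._svec_len` builtin instead of an unsafe header load.

```julia
sv = Core.svec("a", :b, 3)
@assert length(sv) == 3                    # same result as the old pointer-based path
@assert lastindex(sv) == length(sv)
@assert sv[firstindex(sv)] == "a"          # getindex still uses Core._svec_ref
```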

src/builtin_proto.h

Lines changed: 1 addition & 0 deletions
@@ -19,6 +19,7 @@ extern "C" {
     XX(_primitivetype,"_primitivetype") \
     XX(_setsuper,"_setsuper!") \
     XX(_structtype,"_structtype") \
+    XX(_svec_len,"_svec_len") \
     XX(_svec_ref,"_svec_ref") \
     XX(_typebody,"_typebody!") \
     XX(_typevar,"_typevar") \

src/builtins.c

Lines changed: 16 additions & 2 deletions
@@ -693,9 +693,15 @@ JL_CALLABLE(jl_f__apply_iterate)
                 return (jl_value_t*)t;
             }
         }
-        else if (f == BUILTIN(tuple) && jl_is_tuple(args[1])) {
-            return args[1];
+        else if (f == BUILTIN(tuple)) {
+            if (jl_is_tuple(args[1]))
+                return args[1];
+            if (jl_is_svec(args[1]))
+                return jl_f_tuple(NULL, jl_svec_data(args[1]), jl_svec_len(args[1]));
         }
+        // optimization for `f(svec...)`
+        if (jl_is_svec(args[1]))
+            return jl_apply_generic(f, jl_svec_data(args[1]), jl_svec_len(args[1]));
     }
     // estimate how many real arguments we appear to have
     size_t precount = 1;

@@ -2091,6 +2097,14 @@ JL_CALLABLE(jl_f__compute_sparams)
     return (jl_value_t*)env;
 }
 
+JL_CALLABLE(jl_f__svec_len)
+{
+    JL_NARGS(_svec_len, 1, 1);
+    jl_svec_t *s = (jl_svec_t*)args[0];
+    JL_TYPECHK(_svec_len, simplevector, (jl_value_t*)s);
+    return jl_box_long(jl_svec_len(s));
+}
+
 JL_CALLABLE(jl_f__svec_ref)
 {
     JL_NARGS(_svec_ref, 2, 2);
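
A hedged demonstration of the two runtime fast paths added to `jl_f__apply_iterate`; the values are arbitrary.

```julia
sv = Core.svec(1, 2, 3)
# `tuple(svec...)` now copies the svec's elements straight into a tuple, and
# `f(svec...)` for a generic f forwards the svec's data pointer and length to
# jl_apply_generic with no intermediate tuple allocation.
@assert Core.tuple(sv...) === (1, 2, 3)
@assert (+)(sv...) == 6
```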

src/cgutils.cpp

Lines changed: 24 additions & 5 deletions
@@ -2228,6 +2228,9 @@ static jl_cgval_t typed_load(jl_codectx_t &ctx, Value *ptr, Value *idx_0based, j
     }
     Value *instr = nullptr;
     if (!isboxed && jl_is_genericmemoryref_type(jltype)) {
+        //We don't specify the stronger expected memory ordering here because of fears it may interfere with vectorization and other optimizations
+        //if (Order == AtomicOrdering::NotAtomic)
+        //    Order = AtomicOrdering::Monotonic;
         // load these FCA as individual fields, so LLVM does not need to split them later
         Value *fld0 = ctx.builder.CreateStructGEP(elty, ptr, 0);
         LoadInst *load0 = ctx.builder.CreateAlignedLoad(elty->getStructElementType(0), fld0, Align(alignment), false);

@@ -2401,11 +2404,26 @@ static jl_cgval_t typed_store(jl_codectx_t &ctx,
         instr = load;
     }
     if (r) {
-        StoreInst *store = ctx.builder.CreateAlignedStore(r, ptr, Align(alignment));
-        store->setOrdering(Order == AtomicOrdering::NotAtomic && isboxed ? AtomicOrdering::Release : Order);
         jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, tbaa);
         ai.noalias = MDNode::concatenate(aliasscope, ai.noalias);
-        ai.decorateInst(store);
+        if (false && !isboxed && Order == AtomicOrdering::NotAtomic && jl_is_genericmemoryref_type(jltype)) {
+            // if enabled, store these FCA as individual fields, so LLVM does not need to split them later and they can use release ordering
+            assert(r->getType() == ctx.types().T_jlgenericmemory);
+            Value *f1 = ctx.builder.CreateExtractValue(r, 0);
+            Value *f2 = ctx.builder.CreateExtractValue(r, 1);
+            static_assert(offsetof(jl_genericmemoryref_t, ptr_or_offset) == 0, "wrong field order");
+            StoreInst *store = ctx.builder.CreateAlignedStore(f1, ctx.builder.CreateStructGEP(ctx.types().T_jlgenericmemory, ptr, 0), Align(alignment));
+            store->setOrdering(AtomicOrdering::Release);
+            ai.decorateInst(store);
+            store = ctx.builder.CreateAlignedStore(f2, ctx.builder.CreateStructGEP(ctx.types().T_jlgenericmemory, ptr, 1), Align(alignment));
+            store->setOrdering(AtomicOrdering::Release);
+            ai.decorateInst(store);
+        }
+        else {
+            StoreInst *store = ctx.builder.CreateAlignedStore(r, ptr, Align(alignment));
+            store->setOrdering(Order == AtomicOrdering::NotAtomic && isboxed ? AtomicOrdering::Release : Order);
+            ai.decorateInst(store);
+        }
     }
     else {
         assert(Order == AtomicOrdering::NotAtomic && !isboxed && rhs.typ == jltype);

@@ -4377,10 +4395,11 @@ static jl_cgval_t emit_new_struct(jl_codectx_t &ctx, jl_value_t *ty, size_t narg
     for (size_t i = nargs; i < nf; i++) {
         if (!jl_field_isptr(sty, i) && jl_is_uniontype(jl_field_type(sty, i))) {
             jl_aliasinfo_t ai = jl_aliasinfo_t::fromTBAA(ctx, strctinfo.tbaa);
-            ai.decorateInst(ctx.builder.CreateAlignedStore(
+            auto *store = ctx.builder.CreateAlignedStore(
                 ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0),
                 emit_ptrgep(ctx, strct, jl_field_offset(sty, i) + jl_field_size(sty, i) - 1),
-                Align(1)));
+                Align(1));
+            ai.decorateInst(store);
         }
     }
     // TODO: verify that nargs <= nf (currently handled by front-end)

src/codegen.cpp

Lines changed: 62 additions & 10 deletions
@@ -4072,21 +4072,38 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f,
         }
     }
 
-    else if ((f == BUILTIN(_apply_iterate) && nargs == 3) && ctx.vaSlot > 0) {
+    else if (f == BUILTIN(_apply_iterate) && nargs == 3) {
         // turn Core._apply_iterate(iter, f, Tuple) ==> f(Tuple...) using the jlcall calling convention if Tuple is the va allocation
-        if (LoadInst *load = dyn_cast_or_null<LoadInst>(argv[3].V)) {
-            if (load->getPointerOperand() == ctx.slots[ctx.vaSlot].boxroot && ctx.argArray) {
-                Value *theF = boxed(ctx, argv[2]);
-                Value *nva = emit_n_varargs(ctx);
+        if (ctx.vaSlot > 0) {
+            if (LoadInst *load = dyn_cast_or_null<LoadInst>(argv[3].V)) {
+                if (load->getPointerOperand() == ctx.slots[ctx.vaSlot].boxroot && ctx.argArray) {
+                    Value *theF = boxed(ctx, argv[2]);
+                    Value *nva = emit_n_varargs(ctx);
 #ifdef _P64
-                nva = ctx.builder.CreateTrunc(nva, getInt32Ty(ctx.builder.getContext()));
+                    nva = ctx.builder.CreateTrunc(nva, getInt32Ty(ctx.builder.getContext()));
 #endif
-                Value *theArgs = emit_ptrgep(ctx, ctx.argArray, ctx.nReqArgs * sizeof(jl_value_t*));
-                Value *r = ctx.builder.CreateCall(prepare_call(jlapplygeneric_func), { theF, theArgs, nva });
-                *ret = mark_julia_type(ctx, r, true, jl_any_type);
-                return true;
+                    Value *theArgs = emit_ptrgep(ctx, ctx.argArray, ctx.nReqArgs * sizeof(jl_value_t*));
+                    Value *r = ctx.builder.CreateCall(prepare_call(jlapplygeneric_func), { theF, theArgs, nva });
+                    *ret = mark_julia_type(ctx, r, true, jl_any_type);
+                    return true;
+                }
             }
         }
+        // optimization for _apply_iterate when there is one argument and it is a SimpleVector
+        const jl_cgval_t &arg = argv[3];
+        if (arg.typ == (jl_value_t*)jl_simplevector_type) {
+            Value *theF = boxed(ctx, argv[2]);
+            Value *svec_val = boxed(ctx, arg);
+            Value *svec_len = ctx.builder.CreateAlignedLoad(ctx.types().T_size, decay_derived(ctx, svec_val), Align(ctx.types().sizeof_ptr));
+#ifdef _P64
+            svec_len = ctx.builder.CreateTrunc(svec_len, getInt32Ty(ctx.builder.getContext()));
+#endif
+            Value *svec_data = emit_ptrgep(ctx, emit_pointer_from_objref(ctx, svec_val), ctx.types().sizeof_ptr);
+            OperandBundleDef OpBundle("jl_roots", svec_val);
+            Value *r = ctx.builder.CreateCall(prepare_call(jlapplygeneric_func), { theF, svec_data, svec_len }, OpBundle);
+            *ret = mark_julia_type(ctx, r, true, jl_any_type);
+            return true;
+        }
     }
 
     else if (f == BUILTIN(tuple)) {

@@ -4100,6 +4117,27 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f,
         }
     }
 
+    else if (f == BUILTIN(svec)) {
+        if (nargs == 0) {
+            *ret = mark_julia_const(ctx, (jl_value_t*)jl_emptysvec);
+            return true;
+        }
+        Value *svec = emit_allocobj(ctx, ctx.types().sizeof_ptr * (nargs + 1), ctx.builder.CreateIntToPtr(emit_tagfrom(ctx, jl_simplevector_type), ctx.types().T_pjlvalue), true, julia_alignment((jl_value_t*)jl_simplevector_type));
+        Value *svec_derived = decay_derived(ctx, svec);
+        ctx.builder.CreateAlignedStore(ConstantInt::get(ctx.types().T_size, nargs), svec_derived, Align(ctx.types().sizeof_ptr));
+        Value *svec_data = emit_ptrgep(ctx, svec_derived, ctx.types().sizeof_ptr);
+        ctx.builder.CreateMemSet(svec_data, ConstantInt::get(getInt8Ty(ctx.builder.getContext()), 0), ctx.types().sizeof_ptr * nargs, Align(ctx.types().sizeof_ptr));
+        for (size_t i = 0; i < nargs; i++) {
+            Value *elem = boxed(ctx, argv[i + 1]);
+            Value *elem_ptr = emit_ptrgep(ctx, svec_derived, ctx.types().sizeof_ptr * (i + 1));
+            auto *store = ctx.builder.CreateAlignedStore(elem, elem_ptr, Align(ctx.types().sizeof_ptr));
+            store->setOrdering(AtomicOrdering::Release);
+            emit_write_barrier(ctx, svec, elem);
+        }
+        *ret = mark_julia_type(ctx, svec, true, jl_simplevector_type);
+        return true;
+    }
+
     else if (f == BUILTIN(throw) && nargs == 1) {
         Value *arg1 = boxed(ctx, argv[1]);
         raise_exception(ctx, arg1);

@@ -4599,6 +4637,20 @@ static bool emit_builtin_call(jl_codectx_t &ctx, jl_cgval_t *ret, jl_value_t *f,
         return emit_f_opfield(ctx, ret, f, argv, nargs, nullptr);
     }
 
+    else if (f == BUILTIN(_svec_len) && nargs == 1) {
+        const jl_cgval_t &obj = argv[1];
+        Value *len;
+        if (obj.constant && jl_is_svec(obj.constant)) {
+            len = ConstantInt::get(ctx.types().T_size, jl_svec_len(obj.constant));
+        }
+        else {
+            Value *svec_val = decay_derived(ctx, boxed(ctx, obj));
+            len = ctx.builder.CreateAlignedLoad(ctx.types().T_size, svec_val, Align(ctx.types().sizeof_ptr));
+        }
+        *ret = mark_julia_type(ctx, len, false, jl_long_type);
+        return true;
+    }
+
     else if (f == BUILTIN(nfields) && nargs == 1) {
         const jl_cgval_t &obj = argv[1];
         if (ctx.vaSlot > 0) {
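
A hedged note on observing the new `svec` codegen; `make_pair` is an illustrative name and the exact IR depends on the build and optimization level.

```julia
# With the BUILTIN(svec) case in emit_builtin_call, a call such as
# Core.svec(a, b) is emitted inline: one GC allocation of (nargs + 1)
# pointer-sized words, a length store, zero-fill of the element slots, then a
# release-ordered store plus write barrier per element.
make_pair(a, b) = Core.svec(a, b)
@assert make_pair(1, 2) isa Core.SimpleVector
@assert length(make_pair(1, 2)) == 2
# On a build including this commit, InteractiveUtils.code_llvm(make_pair, (Int, Int))
# is expected to show the inline allocation and stores rather than a call into
# the generic svec builtin.
```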
