@@ -9,28 +9,27 @@ const MAX_RET_IN_REGS_SIZE: u64 = 2 * 32;
 fn classify_ret_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64) {
     // The rules for return and argument types are the same, so defer to
-    // classifyArgumentType.
-    classify_arg_ty(arg, xlen, &mut 2); // two as max return size
+    // classify_arg_ty.
+    let mut remaining_gpr = 2;
+    let fixed = true;
+    classify_arg_ty(arg, xlen, fixed, &mut remaining_gpr);
 }
 
-fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64, remaining_gpr: &mut u64) {
-    // Determine the number of GPRs needed to pass the current argument
-    // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
-    // register pairs, so may consume 3 registers.
+fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64, fixed: bool, remaining_gpr: &mut u64) {
+    assert!(*remaining_gpr <= NUM_ARG_GPR, "Arg GPR tracking underflow");
 
     let arg_size = arg.layout.size;
-    if arg_size.bits() > MAX_ARG_IN_REGS_SIZE {
-        arg.make_indirect();
-        return;
-    }
+    let alignment = arg.layout.details.align.abi;
 
-    let alignment = arg.layout.align.abi;
-    let mut required_gpr = 1u64; // at least one per arg
+    // Determine the number of GPRs needed to pass the current argument
+    // according to the ABI. 2*XLen-aligned varargs are passed in "aligned"
+    // register pairs, so may consume 3 registers.
+    let mut required_gpr = 1u64;
 
-    if alignment.bits() == 2 * xlen {
+    if !fixed && alignment.bits() == 2 * xlen {
         required_gpr = 2 + (*remaining_gpr % 2);
     } else if arg_size.bits() > xlen && arg_size.bits() <= MAX_ARG_IN_REGS_SIZE {
-        required_gpr = (arg_size.bits() + (xlen - 1)) / xlen;
+        required_gpr = (arg_size.bits() + xlen - 1) / xlen;
     }
 
     let mut stack_required = false;
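To make the register-counting rule in this hunk concrete: `(arg_size.bits() + xlen - 1) / xlen` is ceiling division over XLen-wide registers, and the vararg branch models the even/odd register-pair constraint. The following is a standalone sketch, not part of the patch; `required_gprs` is a hypothetical helper, the examples assume RV32 with `xlen = 32`, and the indirect/stack fallback is ignored:

```rust
/// Illustrative mirror of the patch's GPR-counting arithmetic
/// (hypothetical helper, not part of the patch).
fn required_gprs(arg_bits: u64, align_bits: u64, fixed: bool, remaining_gpr: u64, xlen: u64) -> u64 {
    if !fixed && align_bits == 2 * xlen {
        // A 2*XLen-aligned vararg must start in an even-numbered register:
        // if an odd number of GPRs remain, one is skipped, so 3 are consumed.
        2 + (remaining_gpr % 2)
    } else if arg_bits > xlen {
        // Ceiling division: the number of XLen registers the value spans.
        (arg_bits + xlen - 1) / xlen
    } else {
        1
    }
}

fn main() {
    let xlen = 32; // RV32
    assert_eq!(required_gprs(64, 32, true, 8, xlen), 2); // u64: a register pair
    assert_eq!(required_gprs(64, 64, false, 7, xlen), 3); // aligned vararg, odd slot next
    assert_eq!(required_gprs(8, 8, true, 8, xlen), 1); // u8: a single GPR
}
```

The `2 + (remaining_gpr % 2)` term is why the comment says a vararg "may consume 3 registers": the register skipped to reach an even slot still counts against the remaining-GPR budget.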
@@ -40,63 +39,53 @@ fn classify_arg_ty<Ty>(arg: &mut ArgAbi<'_, Ty>, xlen: u64, remaining_gpr: &mut
     }
     *remaining_gpr -= required_gpr;
 
-    // if a value can fit in a reg and the
-    // stack is not required, extend
     if !arg.layout.is_aggregate() {
-        // non-aggregate types
+        // All integral types are promoted to XLen width, unless passed on the
+        // stack.
         if arg_size.bits() < xlen && !stack_required {
             arg.extend_integer_width_to(xlen);
+            return;
         }
-    } else if arg_size.bits() as u64 <= MAX_ARG_IN_REGS_SIZE {
-        // aggregate types
-        // Aggregates which are <= 4*32 will be passed in registers if possible,
-        // so coerce to integers.
 
+        return;
+    }
+
+    // Aggregates which are <= 4 * 32 will be passed in registers if possible,
+    // so coerce to integers.
+    if arg_size.bits() as u64 <= MAX_ARG_IN_REGS_SIZE {
         // Use a single XLen int if possible, 2*XLen if 2*XLen alignment is
         // required, and a 2-element XLen array if only XLen alignment is
         // required.
-        // if alignment == 2 * xlen {
-        //     arg.extend_integer_width_to(xlen * 2);
-        // } else {
-        //     arg.extend_integer_width_to(arg_size + (xlen - 1) / xlen);
-        // }
-        if alignment.bits() == 2 * xlen {
+        if arg_size.bits() <= xlen {
+            arg.cast_to(Uniform { unit: Reg::i32(), total: arg_size });
+            return;
+        } else if alignment.bits() == 2 * xlen {
             arg.cast_to(Uniform { unit: Reg::i64(), total: arg_size });
+            return;
         } else {
-            //FIXME array type - this should be a homogenous array type
-            // arg.extend_integer_width_to(arg_size + (xlen - 1) / xlen);
+            // Only XLen alignment required: coerce to a 2-element XLen array
+            // (extend_integer_width_to is a no-op for aggregates).
+            arg.cast_to(Uniform { unit: Reg::i32(), total: arg_size });
+            return;
         }
-    } else {
-        // if we get here the stack is required
-        assert!(stack_required);
-        arg.make_indirect();
     }
 
-    // if arg_size as u64 <= MAX_ARG_IN_REGS_SIZE {
-    //     let align = arg.layout.align.abi.bytes();
-    //     let total = arg.layout.size;
-    //     arg.cast_to(Uniform {
-    //         unit: if align <= 4 { Reg::i32() } else { Reg::i64() },
-    //         total
-    //     });
-    //     return;
-    // }
+    arg.make_indirect();
 }
 
-pub fn compute_abi_info<Ty>(fabi: &mut FnAbi<'_, Ty>, xlen: u64) {
-    if !fabi.ret.is_ignore() {
-        classify_ret_ty(&mut fabi.ret, xlen);
+pub fn compute_abi_info<Ty>(fty: &mut FnAbi<'_, Ty>, xlen: u64) {
+    if !fty.ret.is_ignore() {
+        classify_ret_ty(&mut fty.ret, xlen);
     }
 
     let return_indirect =
-        fabi.ret.layout.size.bits() > MAX_RET_IN_REGS_SIZE || fabi.ret.is_indirect();
+        fty.ret.is_indirect() || fty.ret.layout.size.bits() > MAX_RET_IN_REGS_SIZE;
 
     let mut remaining_gpr = if return_indirect { NUM_ARG_GPR - 1 } else { NUM_ARG_GPR };
 
-    for arg in &mut fabi.args {
+    for arg in &mut fty.args {
         if arg.is_ignore() {
             continue;
         }
-        classify_arg_ty(arg, xlen, &mut remaining_gpr);
+        let fixed = true;
+        classify_arg_ty(arg, xlen, fixed, &mut remaining_gpr);
     }
 }
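For reference, the decision surface that `classify_arg_ty` now implements for aggregates can be summarized standalone. In the sketch below, `Coercion` and `coerce_aggregate` are hypothetical names, sizes and alignments are in bits, and the register-size limit is taken as a parameter since `MAX_ARG_IN_REGS_SIZE` is defined outside these hunks; the `main` block also mirrors how `compute_abi_info` reserves one GPR when the return is indirect, on the assumption that `NUM_ARG_GPR` is 8 (a0..a7) and the hidden return pointer occupies a0:

```rust
/// Hypothetical summary of the aggregate path in `classify_arg_ty`
/// (illustrative, not part of the patch).
#[derive(Debug, PartialEq)]
enum Coercion {
    /// Fits in one XLen register: cast to a single XLen int.
    IntXlen,
    /// 2*XLen-aligned: cast to one 2*XLen int (an aligned register pair).
    Int2Xlen,
    /// Only XLen-aligned: cast to a 2-element XLen array.
    XlenArray2,
    /// Too large for registers: pass indirectly by pointer.
    Indirect,
}

fn coerce_aggregate(size: u64, align: u64, xlen: u64, max_in_regs: u64) -> Coercion {
    if size > max_in_regs {
        Coercion::Indirect
    } else if size <= xlen {
        Coercion::IntXlen
    } else if align == 2 * xlen {
        Coercion::Int2Xlen
    } else {
        Coercion::XlenArray2
    }
}

fn main() {
    let (xlen, max_in_regs) = (32, 64); // RV32 assumption
    assert_eq!(coerce_aggregate(32, 32, xlen, max_in_regs), Coercion::IntXlen);
    assert_eq!(coerce_aggregate(64, 64, xlen, max_in_regs), Coercion::Int2Xlen);
    assert_eq!(coerce_aggregate(64, 32, xlen, max_in_regs), Coercion::XlenArray2);
    assert_eq!(coerce_aggregate(96, 32, xlen, max_in_regs), Coercion::Indirect);

    // compute_abi_info: an indirect return needs a pointer register (a0 on
    // RISC-V), so one fewer GPR remains for the actual arguments.
    const NUM_ARG_GPR: u64 = 8;
    let return_indirect = true;
    let remaining_gpr = if return_indirect { NUM_ARG_GPR - 1 } else { NUM_ARG_GPR };
    assert_eq!(remaining_gpr, 7);
}
```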