@@ -90,7 +90,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	struct ce_task *cet;
 	struct scatterlist *sg;
 	unsigned int todo, len, offset, ivsize;
+	dma_addr_t addr_iv = 0, addr_key = 0;
 	void *backup_iv = NULL;
+	u32 common, sym;
 	int flow, i;
 	int nr_sgs = 0;
 	int nr_sgd = 0;
@@ -115,38 +117,41 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	cet = chan->tl;
 	memset(cet, 0, sizeof(struct ce_task));
 
-	cet->t_id = flow;
-	cet->t_common_ctl = ce->variant->alg_cipher[algt->ce_algo_id];
-	cet->t_common_ctl |= rctx->op_dir | CE_COMM_INT;
-	cet->t_dlen = areq->cryptlen / 4;
+	cet->t_id = cpu_to_le32(flow);
+	common = ce->variant->alg_cipher[algt->ce_algo_id];
+	common |= rctx->op_dir | CE_COMM_INT;
+	cet->t_common_ctl = cpu_to_le32(common);
 	/* CTS and recent CE (H6) need length in bytes, in word otherwise */
 	if (ce->variant->has_t_dlen_in_bytes)
-		cet->t_dlen = areq->cryptlen;
+		cet->t_dlen = cpu_to_le32(areq->cryptlen);
+	else
+		cet->t_dlen = cpu_to_le32(areq->cryptlen / 4);
 
-	cet->t_sym_ctl = ce->variant->op_mode[algt->ce_blockmode];
+	sym = ce->variant->op_mode[algt->ce_blockmode];
 	len = op->keylen;
 	switch (len) {
 	case 128 / 8:
-		cet->t_sym_ctl |= CE_AES_128BITS;
+		sym |= CE_AES_128BITS;
 		break;
 	case 192 / 8:
-		cet->t_sym_ctl |= CE_AES_192BITS;
+		sym |= CE_AES_192BITS;
 		break;
 	case 256 / 8:
-		cet->t_sym_ctl |= CE_AES_256BITS;
+		sym |= CE_AES_256BITS;
 		break;
 	}
 
+	cet->t_sym_ctl = cpu_to_le32(sym);
 	cet->t_asym_ctl = 0;
 
 	chan->op_mode = ce->variant->op_mode[algt->ce_blockmode];
 	chan->op_dir = rctx->op_dir;
 	chan->method = ce->variant->alg_cipher[algt->ce_algo_id];
 	chan->keylen = op->keylen;
 
-	cet->t_key = dma_map_single(ce->dev, op->key, op->keylen,
-				    DMA_TO_DEVICE);
-	if (dma_mapping_error(ce->dev, cet->t_key)) {
+	addr_key = dma_map_single(ce->dev, op->key, op->keylen, DMA_TO_DEVICE);
+	cet->t_key = cpu_to_le32(addr_key);
+	if (dma_mapping_error(ce->dev, addr_key)) {
 		dev_err(ce->dev, "Cannot DMA MAP KEY\n");
 		err = -EFAULT;
 		goto theend;
@@ -171,9 +176,10 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 						 ivsize, 0);
 		}
 		memcpy(chan->bounce_iv, areq->iv, ivsize);
-		cet->t_iv = dma_map_single(ce->dev, chan->bounce_iv,
-					   chan->ivlen, DMA_TO_DEVICE);
-		if (dma_mapping_error(ce->dev, cet->t_iv)) {
+		addr_iv = dma_map_single(ce->dev, chan->bounce_iv, chan->ivlen,
+					 DMA_TO_DEVICE);
+		cet->t_iv = cpu_to_le32(addr_iv);
+		if (dma_mapping_error(ce->dev, addr_iv)) {
 			dev_err(ce->dev, "Cannot DMA MAP IV\n");
 			err = -ENOMEM;
 			goto theend_iv;
@@ -208,9 +214,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 
 	len = areq->cryptlen;
 	for_each_sg(areq->src, sg, nr_sgs, i) {
-		cet->t_src[i].addr = sg_dma_address(sg);
+		cet->t_src[i].addr = cpu_to_le32(sg_dma_address(sg));
 		todo = min(len, sg_dma_len(sg));
-		cet->t_src[i].len = todo / 4;
+		cet->t_src[i].len = cpu_to_le32(todo / 4);
 		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
 			areq->cryptlen, i, cet->t_src[i].len, sg->offset, todo);
 		len -= todo;
@@ -223,9 +229,9 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 
 	len = areq->cryptlen;
 	for_each_sg(areq->dst, sg, nr_sgd, i) {
-		cet->t_dst[i].addr = sg_dma_address(sg);
+		cet->t_dst[i].addr = cpu_to_le32(sg_dma_address(sg));
 		todo = min(len, sg_dma_len(sg));
-		cet->t_dst[i].len = todo / 4;
+		cet->t_dst[i].len = cpu_to_le32(todo / 4);
 		dev_dbg(ce->dev, "%s total=%u SG(%d %u off=%d) todo=%u\n", __func__,
 			areq->cryptlen, i, cet->t_dst[i].len, sg->offset, todo);
 		len -= todo;
@@ -250,8 +256,8 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 
 theend_iv:
 	if (areq->iv && ivsize > 0) {
-		if (cet->t_iv)
-			dma_unmap_single(ce->dev, cet->t_iv, chan->ivlen,
+		if (addr_iv)
+			dma_unmap_single(ce->dev, addr_iv, chan->ivlen,
 					 DMA_TO_DEVICE);
 		offset = areq->cryptlen - ivsize;
 		if (rctx->op_dir & CE_DECRYPTION) {
@@ -265,7 +271,7 @@ static int sun8i_ce_cipher(struct skcipher_request *areq)
 	}
 
 theend_key:
-	dma_unmap_single(ce->dev, cet->t_key, op->keylen, DMA_TO_DEVICE);
+	dma_unmap_single(ce->dev, addr_key, op->keylen, DMA_TO_DEVICE);
 
 theend:
 	return err;
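
For reference, the whole patch applies one pattern: descriptor fields of struct ce_task are written as little-endian values via cpu_to_le32(), while the CPU-native dma_addr_t is kept in a local variable (addr_key, addr_iv) so that dma_mapping_error() and dma_unmap_single() still operate on the unswapped address. The block below is only an illustrative sketch of that pattern, not part of the patch; the struct hw_desc and map_key_into_desc() names are hypothetical stand-ins for ce_task and the driver code.

/*
 * Minimal sketch of the "native dma_addr_t local + cpu_to_le32() in the
 * descriptor" pattern used throughout the diff above.
 */
#include <linux/dma-mapping.h>
#include <linux/types.h>

struct hw_desc {
	__le32 t_key;	/* device reads this field as little-endian */
};

static int map_key_into_desc(struct device *dev, struct hw_desc *desc,
			     void *key, size_t keylen, dma_addr_t *saved)
{
	dma_addr_t addr;

	addr = dma_map_single(dev, key, keylen, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, addr))	/* check the native address */
		return -EFAULT;

	desc->t_key = cpu_to_le32(addr);	/* descriptor gets the LE view */
	*saved = addr;				/* caller keeps this for unmap */
	return 0;
}

/*
 * Cleanup always uses the saved native address, never the __le32 field:
 *	dma_unmap_single(dev, saved_addr, keylen, DMA_TO_DEVICE);
 */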