Skip to content

Commit 8b13c22

Browse files
ebiggers
authored and herbertx committed
crypto: skcipher - optimize initializing skcipher_walk fields
The helper functions like crypto_skcipher_blocksize() take in a pointer to a tfm object, but they actually return properties of the algorithm. As the Linux kernel is compiled with -fno-strict-aliasing, the compiler has to assume that the writes to struct skcipher_walk could clobber the tfm's pointer to its algorithm. Thus it gets repeatedly reloaded in the generated code. Therefore, replace the use of these helper functions with staightforward accesses to the struct fields. Note that while *users* of the skcipher and aead APIs are supposed to use the helper functions, this particular code is part of the API *implementation* in crypto/skcipher.c, which already accesses the algorithm struct directly in many cases. So there is no reason to prefer the helper functions here. Signed-off-by: Eric Biggers <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
1 parent f248945 commit 8b13c22

File tree

1 file changed

+20
-10
lines changed

1 file changed

+20
-10
lines changed

crypto/skcipher.c

Lines changed: 20 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -308,8 +308,8 @@ static int skcipher_walk_first(struct skcipher_walk *walk)
308308
int skcipher_walk_virt(struct skcipher_walk *walk,
309309
struct skcipher_request *req, bool atomic)
310310
{
311-
struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
312-
struct skcipher_alg *alg = crypto_skcipher_alg(tfm);
311+
const struct skcipher_alg *alg =
312+
crypto_skcipher_alg(crypto_skcipher_reqtfm(req));
313313

314314
might_sleep_if(req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
315315

@@ -328,9 +328,14 @@ int skcipher_walk_virt(struct skcipher_walk *walk,
328328
scatterwalk_start(&walk->in, req->src);
329329
scatterwalk_start(&walk->out, req->dst);
330330

331-
walk->blocksize = crypto_skcipher_blocksize(tfm);
332-
walk->ivsize = crypto_skcipher_ivsize(tfm);
333-
walk->alignmask = crypto_skcipher_alignmask(tfm);
331+
/*
332+
* Accessing 'alg' directly generates better code than using the
333+
* crypto_skcipher_blocksize() and similar helper functions here, as it
334+
* prevents the algorithm pointer from being repeatedly reloaded.
335+
*/
336+
walk->blocksize = alg->base.cra_blocksize;
337+
walk->ivsize = alg->co.ivsize;
338+
walk->alignmask = alg->base.cra_alignmask;
334339

335340
if (alg->co.base.cra_type != &crypto_skcipher_type)
336341
walk->stride = alg->co.chunksize;
@@ -344,7 +349,7 @@ EXPORT_SYMBOL_GPL(skcipher_walk_virt);
344349
static int skcipher_walk_aead_common(struct skcipher_walk *walk,
345350
struct aead_request *req, bool atomic)
346351
{
347-
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
352+
const struct aead_alg *alg = crypto_aead_alg(crypto_aead_reqtfm(req));
348353

349354
walk->nbytes = 0;
350355
walk->iv = req->iv;
@@ -366,10 +371,15 @@ static int skcipher_walk_aead_common(struct skcipher_walk *walk,
366371
scatterwalk_done(&walk->in, 0, walk->total);
367372
scatterwalk_done(&walk->out, 0, walk->total);
368373

369-
walk->blocksize = crypto_aead_blocksize(tfm);
370-
walk->stride = crypto_aead_chunksize(tfm);
371-
walk->ivsize = crypto_aead_ivsize(tfm);
372-
walk->alignmask = crypto_aead_alignmask(tfm);
374+
/*
375+
* Accessing 'alg' directly generates better code than using the
376+
* crypto_aead_blocksize() and similar helper functions here, as it
377+
* prevents the algorithm pointer from being repeatedly reloaded.
378+
*/
379+
walk->blocksize = alg->base.cra_blocksize;
380+
walk->stride = alg->chunksize;
381+
walk->ivsize = alg->ivsize;
382+
walk->alignmask = alg->base.cra_alignmask;
373383

374384
return skcipher_walk_first(walk);
375385
}

0 commit comments

Comments
 (0)