
Commit c626910

ebiggers authored and herbertx committed
crypto: ahash - remove support for nonzero alignmask
Currently, the ahash API checks the alignment of all key and result buffers against the algorithm's declared alignmask, and for any unaligned buffers it falls back to manually aligned temporary buffers. This is virtually useless, however. First, since it does not apply to the message, its effect is much more limited than e.g. is the case for the alignmask for "skcipher". Second, the key and result buffers are given as virtual addresses and cannot (in general) be DMA'ed into, so drivers end up having to copy to/from them in software anyway. As a result it's easy to use memcpy() or the unaligned access helpers. The crypto_hash_walk_*() helper functions do use the alignmask to align the message. But with one exception those are only used for shash algorithms being exposed via the ahash API, not for native ahashes, and aligning the message is not required in this case, especially now that alignmask support has been removed from shash. The exception is the n2_core driver, which doesn't set an alignmask. In any case, no ahash algorithms actually set a nonzero alignmask anymore. Therefore, remove support for it from ahash. The benefit is that all the code to handle "misaligned" buffers in the ahash API goes away, reducing the overhead of the ahash API. This follows the same change that was made to shash. Signed-off-by: Eric Biggers <[email protected]> Signed-off-by: Herbert Xu <[email protected]>
1 parent 54eea8e commit c626910
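
To illustrate the commit message's point that drivers already copy keys and results in software, here is a rough sketch (not part of this commit) of how a driver's completion path might move a digest into req->result without any alignment assumption. The struct my_ahash_req_ctx, my_ahash_complete() and my_ahash_store_words() names are hypothetical; memcpy() and put_unaligned_le32() are the existing helpers the message refers to.

#include <linux/string.h>
#include <asm/unaligned.h>
#include <crypto/internal/hash.h>

/* Hypothetical per-request context holding a DMA-safe copy of the digest. */
struct my_ahash_req_ctx {
        __le32 hw_digest[8];            /* device writes the digest here */
};

/* req->result is just a virtual address with no alignment guarantee,
 * so copy the digest into it byte-wise. */
static void my_ahash_complete(struct ahash_request *req)
{
        struct my_ahash_req_ctx *rctx = ahash_request_ctx(req);
        unsigned int ds = crypto_ahash_digestsize(crypto_ahash_reqtfm(req));

        memcpy(req->result, rctx->hw_digest, ds);
}

/* Alternative when the digest is produced as host-endian words: the
 * unaligned access helpers store each word safely at any address. */
static void my_ahash_store_words(u8 *result, const u32 *state,
                                 unsigned int nwords)
{
        unsigned int i;

        for (i = 0; i < nwords; i++)
                put_unaligned_le32(state[i], result + 4 * i);
}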

File tree: 5 files changed (+28, -132 lines changed)


Documentation/crypto/devel-algos.rst

Lines changed: 1 addition & 3 deletions
@@ -235,6 +235,4 @@ Specifics Of Asynchronous HASH Transformation
 
 Some of the drivers will want to use the Generic ScatterWalk in case the
 implementation needs to be fed separate chunks of the scatterlist which
-contains the input data. The buffer containing the resulting hash will
-always be properly aligned to .cra_alignmask so there is no need to
-worry about this.
+contains the input data.
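
As a reminder of what "fed separate chunks of the scatterlist" looks like in practice, an ahash driver's ->update() typically walks req->src with the crypto_hash_walk helpers, roughly as sketched below. my_hw_update() is a hypothetical device hook; crypto_hash_walk_first() and crypto_hash_walk_done() are the real helpers touched by this commit.

#include <crypto/internal/hash.h>

/* Hypothetical hook that feeds one contiguous, page-contained chunk to
 * the hardware; returns 0 on success or a negative errno. */
static int my_hw_update(struct ahash_request *req, const u8 *data,
                        unsigned int len);

static int my_ahash_update(struct ahash_request *req)
{
        struct crypto_hash_walk walk;
        int nbytes;

        /* walk.data points to the kmapped current chunk; passing the hook's
         * return value to crypto_hash_walk_done() either advances to the
         * next chunk (on 0) or unmaps and propagates the error (negative). */
        for (nbytes = crypto_hash_walk_first(req, &walk); nbytes > 0;
             nbytes = crypto_hash_walk_done(&walk, nbytes))
                nbytes = my_hw_update(req, walk.data, nbytes);

        return nbytes;
}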

crypto/ahash.c

Lines changed: 8 additions & 109 deletions
@@ -35,21 +35,12 @@ struct ahash_request_priv {
 
 static int hash_walk_next(struct crypto_hash_walk *walk)
 {
-	unsigned int alignmask = walk->alignmask;
 	unsigned int offset = walk->offset;
 	unsigned int nbytes = min(walk->entrylen,
 				  ((unsigned int)(PAGE_SIZE)) - offset);
 
 	walk->data = kmap_local_page(walk->pg);
 	walk->data += offset;
-
-	if (offset & alignmask) {
-		unsigned int unaligned = alignmask + 1 - (offset & alignmask);
-
-		if (nbytes > unaligned)
-			nbytes = unaligned;
-	}
-
 	walk->entrylen -= nbytes;
 	return nbytes;
 }
@@ -73,23 +64,8 @@ static int hash_walk_new_entry(struct crypto_hash_walk *walk)
 
 int crypto_hash_walk_done(struct crypto_hash_walk *walk, int err)
 {
-	unsigned int alignmask = walk->alignmask;
-
 	walk->data -= walk->offset;
 
-	if (walk->entrylen && (walk->offset & alignmask) && !err) {
-		unsigned int nbytes;
-
-		walk->offset = ALIGN(walk->offset, alignmask + 1);
-		nbytes = min(walk->entrylen,
-			     (unsigned int)(PAGE_SIZE - walk->offset));
-		if (nbytes) {
-			walk->entrylen -= nbytes;
-			walk->data += walk->offset;
-			return nbytes;
-		}
-	}
-
 	kunmap_local(walk->data);
 	crypto_yield(walk->flags);
 
@@ -121,34 +97,13 @@ int crypto_hash_walk_first(struct ahash_request *req,
 		return 0;
 	}
 
-	walk->alignmask = crypto_ahash_alignmask(crypto_ahash_reqtfm(req));
 	walk->sg = req->src;
 	walk->flags = req->base.flags;
 
 	return hash_walk_new_entry(walk);
 }
 EXPORT_SYMBOL_GPL(crypto_hash_walk_first);
 
-static int ahash_setkey_unaligned(struct crypto_ahash *tfm, const u8 *key,
-				  unsigned int keylen)
-{
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
-	int ret;
-	u8 *buffer, *alignbuffer;
-	unsigned long absize;
-
-	absize = keylen + alignmask;
-	buffer = kmalloc(absize, GFP_KERNEL);
-	if (!buffer)
-		return -ENOMEM;
-
-	alignbuffer = (u8 *)ALIGN((unsigned long)buffer, alignmask + 1);
-	memcpy(alignbuffer, key, keylen);
-	ret = tfm->setkey(tfm, alignbuffer, keylen);
-	kfree_sensitive(buffer);
-	return ret;
-}
-
 static int ahash_nosetkey(struct crypto_ahash *tfm, const u8 *key,
 			  unsigned int keylen)
 {
@@ -167,13 +122,7 @@ static void ahash_set_needkey(struct crypto_ahash *tfm)
 int crypto_ahash_setkey(struct crypto_ahash *tfm, const u8 *key,
 			unsigned int keylen)
 {
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
-	int err;
-
-	if ((unsigned long)key & alignmask)
-		err = ahash_setkey_unaligned(tfm, key, keylen);
-	else
-		err = tfm->setkey(tfm, key, keylen);
+	int err = tfm->setkey(tfm, key, keylen);
 
 	if (unlikely(err)) {
 		ahash_set_needkey(tfm);
@@ -189,7 +138,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
 			  bool has_state)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
 	unsigned int ds = crypto_ahash_digestsize(tfm);
 	struct ahash_request *subreq;
 	unsigned int subreq_size;
@@ -203,7 +151,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
 	reqsize = ALIGN(reqsize, crypto_tfm_ctx_alignment());
 	subreq_size += reqsize;
 	subreq_size += ds;
-	subreq_size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
 
 	flags = ahash_request_flags(req);
 	gfp = (flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? GFP_KERNEL : GFP_ATOMIC;
@@ -215,7 +162,6 @@ static int ahash_save_req(struct ahash_request *req, crypto_completion_t cplt,
 	ahash_request_set_callback(subreq, flags, cplt, req);
 
 	result = (u8 *)(subreq + 1) + reqsize;
-	result = PTR_ALIGN(result, alignmask + 1);
 
 	ahash_request_set_crypt(subreq, req->src, result, req->nbytes);
 
@@ -251,56 +197,6 @@ static void ahash_restore_req(struct ahash_request *req, int err)
 	kfree_sensitive(subreq);
 }
 
-static void ahash_op_unaligned_done(void *data, int err)
-{
-	struct ahash_request *areq = data;
-
-	if (err == -EINPROGRESS)
-		goto out;
-
-	/* First copy req->result into req->priv.result */
-	ahash_restore_req(areq, err);
-
-out:
-	/* Complete the ORIGINAL request. */
-	ahash_request_complete(areq, err);
-}
-
-static int ahash_op_unaligned(struct ahash_request *req,
-			      int (*op)(struct ahash_request *),
-			      bool has_state)
-{
-	int err;
-
-	err = ahash_save_req(req, ahash_op_unaligned_done, has_state);
-	if (err)
-		return err;
-
-	err = op(req->priv);
-	if (err == -EINPROGRESS || err == -EBUSY)
-		return err;
-
-	ahash_restore_req(req, err);
-
-	return err;
-}
-
-static int crypto_ahash_op(struct ahash_request *req,
-			   int (*op)(struct ahash_request *),
-			   bool has_state)
-{
-	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
-	unsigned long alignmask = crypto_ahash_alignmask(tfm);
-	int err;
-
-	if ((unsigned long)req->result & alignmask)
-		err = ahash_op_unaligned(req, op, has_state);
-	else
-		err = op(req);
-
-	return crypto_hash_errstat(crypto_hash_alg_common(tfm), err);
-}
-
 int crypto_ahash_final(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
@@ -309,7 +205,7 @@ int crypto_ahash_final(struct ahash_request *req)
 	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
 		atomic64_inc(&hash_get_stat(alg)->hash_cnt);
 
-	return crypto_ahash_op(req, tfm->final, true);
+	return crypto_hash_errstat(alg, tfm->final(req));
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_final);
 
@@ -325,14 +221,15 @@ int crypto_ahash_finup(struct ahash_request *req)
 		atomic64_add(req->nbytes, &istat->hash_tlen);
 	}
 
-	return crypto_ahash_op(req, tfm->finup, true);
+	return crypto_hash_errstat(alg, tfm->finup(req));
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_finup);
 
 int crypto_ahash_digest(struct ahash_request *req)
 {
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct hash_alg_common *alg = crypto_hash_alg_common(tfm);
+	int err;
 
 	if (IS_ENABLED(CONFIG_CRYPTO_STATS)) {
 		struct crypto_istat_hash *istat = hash_get_stat(alg);
@@ -342,9 +239,11 @@ int crypto_ahash_digest(struct ahash_request *req)
 	}
 
 	if (crypto_ahash_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
-		return crypto_hash_errstat(alg, -ENOKEY);
+		err = -ENOKEY;
+	else
+		err = tfm->digest(req);
 
-	return crypto_ahash_op(req, tfm->digest, false);
+	return crypto_hash_errstat(alg, err);
 }
 EXPORT_SYMBOL_GPL(crypto_ahash_digest);
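
For context on the entry points simplified above, this is roughly how a caller reaches crypto_ahash_digest(). The function name, buffers, and the "sha256" algorithm choice are illustrative only, not part of the commit, and error handling is abbreviated.

#include <crypto/hash.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

/* Sketch: compute SHA-256 of a linear buffer through the ahash API. */
static int sketch_sha256(const void *data, unsigned int len, u8 *out)
{
        struct crypto_ahash *tfm;
        struct ahash_request *req;
        struct scatterlist sg;
        DECLARE_CRYPTO_WAIT(wait);
        int err;

        tfm = crypto_alloc_ahash("sha256", 0, 0);
        if (IS_ERR(tfm))
                return PTR_ERR(tfm);

        req = ahash_request_alloc(tfm, GFP_KERNEL);
        if (!req) {
                crypto_free_ahash(tfm);
                return -ENOMEM;
        }

        sg_init_one(&sg, data, len);
        ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);
        /* 'out' is a plain virtual address; hash alignmasks are now always 0. */
        ahash_request_set_crypt(req, &sg, out, len);

        err = crypto_wait_req(crypto_ahash_digest(req), &wait);

        ahash_request_free(req);
        crypto_free_ahash(tfm);
        return err;
}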

crypto/shash.c

Lines changed: 4 additions & 4 deletions
@@ -541,6 +541,10 @@ int hash_prepare_alg(struct hash_alg_common *alg)
 	if (alg->digestsize > HASH_MAX_DIGESTSIZE)
 		return -EINVAL;
 
+	/* alignmask is not useful for hashes, so it is not supported. */
+	if (base->cra_alignmask)
+		return -EINVAL;
+
 	base->cra_flags &= ~CRYPTO_ALG_TYPE_MASK;
 
 	if (IS_ENABLED(CONFIG_CRYPTO_STATS))
@@ -557,10 +561,6 @@ static int shash_prepare_alg(struct shash_alg *alg)
 	if (alg->descsize > HASH_MAX_DESCSIZE)
 		return -EINVAL;
 
-	/* alignmask is not useful for shash, so it is not supported. */
-	if (base->cra_alignmask)
-		return -EINVAL;
-
 	if ((alg->export && !alg->import) || (alg->import && !alg->export))
 		return -EINVAL;
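
Since the check now lives in hash_prepare_alg(), it applies to both shash and ahash registrations. A purely illustrative shash declaration that satisfies it simply leaves .cra_alignmask at its zero default; every name below is hypothetical and the "algorithm" is a trivial byte sum, chosen only to keep the sketch self-contained.

#include <crypto/internal/hash.h>
#include <asm/unaligned.h>
#include <linux/module.h>

struct example_desc_ctx {
        u32 sum;
};

static int example_init(struct shash_desc *desc)
{
        struct example_desc_ctx *ctx = shash_desc_ctx(desc);

        ctx->sum = 0;
        return 0;
}

static int example_update(struct shash_desc *desc, const u8 *data,
                          unsigned int len)
{
        struct example_desc_ctx *ctx = shash_desc_ctx(desc);

        while (len--)
                ctx->sum += *data++;
        return 0;
}

static int example_final(struct shash_desc *desc, u8 *out)
{
        struct example_desc_ctx *ctx = shash_desc_ctx(desc);

        put_unaligned_le32(ctx->sum, out);
        return 0;
}

static struct shash_alg example_alg = {
        .digestsize     = 4,
        .init           = example_init,
        .update         = example_update,
        .final          = example_final,
        .descsize       = sizeof(struct example_desc_ctx),
        .base           = {
                .cra_name        = "example-sum",
                .cra_driver_name = "example-sum-generic",
                .cra_priority    = 100,
                .cra_blocksize   = 1,
                /* .cra_alignmask left at 0: nonzero is now rejected */
                .cra_module      = THIS_MODULE,
        },
};

Registration via crypto_register_shash(&example_alg) in the module init path is omitted for brevity.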

include/crypto/internal/hash.h

Lines changed: 1 addition & 3 deletions
@@ -18,15 +18,13 @@ struct crypto_hash_walk {
 	char *data;
 
 	unsigned int offset;
-	unsigned int alignmask;
+	unsigned int flags;
 
 	struct page *pg;
 	unsigned int entrylen;
 
 	unsigned int total;
 	struct scatterlist *sg;
-
-	unsigned int flags;
 };
 
 struct ahash_instance {

include/linux/crypto.h

Lines changed: 14 additions & 13 deletions
@@ -110,7 +110,6 @@
  *	  crypto_aead_walksize() (with the remainder going at the end), no chunk
  *	  can cross a page boundary or a scatterlist element boundary.
  *    ahash:
- *	- The result buffer must be aligned to the algorithm's alignmask.
  *	- crypto_ahash_finup() must not be used unless the algorithm implements
  *	  ->finup() natively.
  */
@@ -278,18 +277,20 @@ struct compress_alg {
  * @cra_ctxsize: Size of the operational context of the transformation. This
  *		 value informs the kernel crypto API about the memory size
  *		 needed to be allocated for the transformation context.
- * @cra_alignmask: Alignment mask for the input and output data buffer. The data
- *		   buffer containing the input data for the algorithm must be
- *		   aligned to this alignment mask. The data buffer for the
- *		   output data must be aligned to this alignment mask. Note that
- *		   the Crypto API will do the re-alignment in software, but
- *		   only under special conditions and there is a performance hit.
- *		   The re-alignment happens at these occasions for different
- *		   @cra_u types: cipher -- For both input data and output data
- *		   buffer; ahash -- For output hash destination buf; shash --
- *		   For output hash destination buf.
- *		   This is needed on hardware which is flawed by design and
- *		   cannot pick data from arbitrary addresses.
+ * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
+ *		   1 less than the alignment, in bytes, that the algorithm
+ *		   implementation requires for input and output buffers. When
+ *		   the crypto API is invoked with buffers that are not aligned
+ *		   to this alignment, the crypto API automatically utilizes
+ *		   appropriately aligned temporary buffers to comply with what
+ *		   the algorithm needs. (For scatterlists this happens only if
+ *		   the algorithm uses the skcipher_walk helper functions.) This
+ *		   misalignment handling carries a performance penalty, so it is
+ *		   preferred that algorithms do not set a nonzero alignmask.
+ *		   Also, crypto API users may wish to allocate buffers aligned
+ *		   to the alignmask of the algorithm being used, in order to
+ *		   avoid the API having to realign them. Note: the alignmask is
+ *		   not supported for hash algorithms and is always 0 for them.
  * @cra_priority: Priority of this transformation implementation. In case
  *		  multiple transformations with same @cra_name are available to
  *		  the Crypto API, the kernel will use the one with highest
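
The new text notes that API users "may wish to allocate buffers aligned to the alignmask of the algorithm being used". A minimal sketch of that idea for a skcipher (where the alignmask still applies), using the same over-allocate-and-align trick the removed ahash_setkey_unaligned() used; the struct and function names are hypothetical.

#include <crypto/skcipher.h>
#include <linux/kernel.h>
#include <linux/slab.h>

struct aligned_buf {
        void *raw;      /* pointer to pass to kfree() */
        u8 *ptr;        /* aligned pointer to hand to the crypto API */
};

/* Over-allocate by the alignmask, then round the pointer up so the API
 * never has to fall back to realigning the buffer on our behalf. */
static int alloc_aligned_buf(struct crypto_skcipher *tfm, size_t len,
                             struct aligned_buf *buf)
{
        unsigned int mask = crypto_skcipher_alignmask(tfm);

        buf->raw = kmalloc(len + mask, GFP_KERNEL);
        if (!buf->raw)
                return -ENOMEM;
        buf->ptr = PTR_ALIGN((u8 *)buf->raw, mask + 1);
        return 0;
}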
