
Commit 807c801

ebiggers authored and herbertx committed
crypto: skcipher - remove unnecessary page alignment of bounce buffer
In the slow path of skcipher_walk where it uses a slab bounce buffer for the data and/or IV, do not bother to avoid crossing a page boundary in the part(s) of this buffer that are used, and do not bother to allocate extra space in the buffer for that purpose. The buffer is accessed only by virtual address, so pages are irrelevant for it.

This logic may have been present due to the physical address support in skcipher_walk, but that has now been removed. Or it may have been present to be consistent with the fast path that currently does not hand back addresses that span pages, but that behavior is a side effect of the pages being "mapped" one by one and is not actually a requirement.

Signed-off-by: Eric Biggers <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
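The key claim above is that the bounce buffer is only ever touched through its kernel virtual address, so a region that happens to straddle a page boundary is still one contiguous range as far as memcpy-style access is concerned; only code that hands out per-page (physical) addresses has to split work at page boundaries. A minimal userspace sketch of that point, not taken from the kernel (PAGE_SIZE here is an assumption for illustration):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PAGE_SIZE 4096UL        /* assumed page size for the illustration */

int main(void)
{
        /* Allocate two pages' worth of memory, aligned to a page. */
        unsigned char *buf = aligned_alloc(PAGE_SIZE, 2 * PAGE_SIZE);
        if (!buf)
                return 1;

        /* Pick a 64-byte region that deliberately straddles the page
         * boundary inside the allocation. */
        unsigned char *spot = buf + PAGE_SIZE - 32;
        unsigned char src[64];
        memset(src, 0xab, sizeof(src));

        /* Plain virtual-address access works regardless of the boundary. */
        memcpy(spot, src, sizeof(src));
        printf("copied across the boundary at %p\n", (void *)(buf + PAGE_SIZE));

        free(buf);
        return 0;
}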
1 parent e71778c commit 807c801

1 file changed (+15, -47 lines)


crypto/skcipher.c

Lines changed: 15 additions & 47 deletions
@@ -63,16 +63,6 @@ static inline gfp_t skcipher_walk_gfp(struct skcipher_walk *walk)
         return walk->flags & SKCIPHER_WALK_SLEEP ? GFP_KERNEL : GFP_ATOMIC;
 }
 
-/* Get a spot of the specified length that does not straddle a page.
- * The caller needs to ensure that there is enough space for this operation.
- */
-static inline u8 *skcipher_get_spot(u8 *start, unsigned int len)
-{
-        u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
-
-        return max(start, end_page);
-}
-
 static inline struct skcipher_alg *__crypto_skcipher_alg(
         struct crypto_alg *alg)
 {
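For context, a standalone sketch of what the removed helper computed (PAGE_SIZE/PAGE_MASK are assumed values, and get_spot() below is only an illustration, not the kernel function): it took the page-aligned address of the region's last byte and, if that page start lay beyond start, returned it instead, so the returned region never crossed into a second page.

#include <stdio.h>

#define PAGE_SIZE 4096UL                 /* assumed page size */
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* Userspace illustration of the removed skcipher_get_spot() logic. */
static unsigned char *get_spot(unsigned char *start, unsigned int len)
{
        unsigned char *end_page =
                (unsigned char *)(((unsigned long)(start + len - 1)) & PAGE_MASK);

        return end_page > start ? end_page : start;   /* max(start, end_page) */
}

int main(void)
{
        /* A pointer 8 bytes short of a page boundary, used only as a value. */
        unsigned char *p = (unsigned char *)0x1000ff8UL;

        /* A 16-byte region starting here would straddle the boundary, so the
         * helper bumps it to the start of the next page (0x1001000). */
        printf("%p -> %p\n", (void *)p, (void *)get_spot(p, 16));
        return 0;
}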
@@ -81,10 +71,8 @@ static inline struct skcipher_alg *__crypto_skcipher_alg(
 
 static int skcipher_done_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
-        u8 *addr;
+        u8 *addr = PTR_ALIGN(walk->buffer, walk->alignmask + 1);
 
-        addr = (u8 *)ALIGN((unsigned long)walk->buffer, walk->alignmask + 1);
-        addr = skcipher_get_spot(addr, bsize);
         scatterwalk_copychunks(addr, &walk->out, bsize, 1);
         return 0;
 }
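In skcipher_done_slow() the open-coded cast through ALIGN() plus the skcipher_get_spot() call collapse into a single PTR_ALIGN(). A small sketch of that equivalence, with stand-in macros written in the spirit of the kernel's definitions (assumptions here, not quoted from a header; typeof is the GNU C extension the kernel relies on, so build with gcc or clang):

#include <stdio.h>

/* Rough userspace stand-ins for the kernel's ALIGN()/PTR_ALIGN() macros. */
#define ALIGN(x, a)     (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))

int main(void)
{
        unsigned long alignmask = 63;   /* e.g. an algorithm with 64-byte alignment */
        char buf[256];
        char *p = buf + 5;              /* some misaligned starting point */

        /* The old code spelled this as a cast through ALIGN(); PTR_ALIGN()
         * is the pointer-typed form of exactly the same computation. */
        char *a1 = (char *)ALIGN((unsigned long)p, alignmask + 1);
        char *a2 = PTR_ALIGN(p, alignmask + 1);

        printf("%s\n", a1 == a2 ? "identical" : "different");
        return 0;
}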
@@ -183,33 +171,22 @@ EXPORT_SYMBOL_GPL(skcipher_walk_done);
 static int skcipher_next_slow(struct skcipher_walk *walk, unsigned int bsize)
 {
         unsigned alignmask = walk->alignmask;
-        unsigned a;
         unsigned n;
         u8 *buffer;
 
         if (!walk->buffer)
                 walk->buffer = walk->page;
         buffer = walk->buffer;
-        if (buffer)
-                goto ok;
-
-        /* Start with the minimum alignment of kmalloc. */
-        a = crypto_tfm_ctx_alignment() - 1;
-        n = bsize;
-
-        /* Minimum size to align buffer by alignmask. */
-        n += alignmask & ~a;
-
-        /* Minimum size to ensure buffer does not straddle a page. */
-        n += (bsize - 1) & ~(alignmask | a);
-
-        buffer = kzalloc(n, skcipher_walk_gfp(walk));
-        if (!buffer)
-                return skcipher_walk_done(walk, -ENOMEM);
-        walk->buffer = buffer;
-ok:
+        if (!buffer) {
+                /* Min size for a buffer of bsize bytes aligned to alignmask */
+                n = bsize + (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
+
+                buffer = kzalloc(n, skcipher_walk_gfp(walk));
+                if (!buffer)
+                        return skcipher_walk_done(walk, -ENOMEM);
+                walk->buffer = buffer;
+        }
         walk->dst.virt.addr = PTR_ALIGN(buffer, alignmask + 1);
-        walk->dst.virt.addr = skcipher_get_spot(walk->dst.virt.addr, bsize);
         walk->src.virt.addr = walk->dst.virt.addr;
 
         scatterwalk_copychunks(walk->src.virt.addr, &walk->in, bsize, 0);
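A worked comparison of the old and new kzalloc() sizes in skcipher_next_slow(), using assumed example values (bsize and alignmask picked to make the difference visible, and crypto_tfm_ctx_alignment() taken to be 8, a typical kmalloc minimum alignment): the old formula added room so the bsize-byte region could be shifted off a page boundary, while the new one keeps only the slack needed to reach alignmask alignment.

#include <stdio.h>

int main(void)
{
        unsigned bsize = 128;      /* example block size handled by the walk */
        unsigned alignmask = 15;   /* example algorithm alignmask (16-byte) */
        unsigned a = 8 - 1;        /* assumed crypto_tfm_ctx_alignment() - 1 */

        /* Old sizing: alignment slack plus room to dodge a page boundary. */
        unsigned old_n = bsize + (alignmask & ~a) + ((bsize - 1) & ~(alignmask | a));

        /* New sizing: just the data plus alignment slack. */
        unsigned new_n = bsize + (alignmask & ~a);

        printf("old: %u bytes, new: %u bytes\n", old_n, new_n);  /* old: 248, new: 136 */
        return 0;
}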
@@ -296,30 +273,21 @@ static int skcipher_walk_next(struct skcipher_walk *walk)
 
 static int skcipher_copy_iv(struct skcipher_walk *walk)
 {
-        unsigned a = crypto_tfm_ctx_alignment() - 1;
         unsigned alignmask = walk->alignmask;
         unsigned ivsize = walk->ivsize;
-        unsigned bs = walk->stride;
-        unsigned aligned_bs;
+        unsigned aligned_stride = ALIGN(walk->stride, alignmask + 1);
         unsigned size;
         u8 *iv;
 
-        aligned_bs = ALIGN(bs, alignmask + 1);
-
-        /* Minimum size to align buffer by alignmask. */
-        size = alignmask & ~a;
-
-        size += aligned_bs + ivsize;
-
-        /* Minimum size to ensure buffer does not straddle a page. */
-        size += (bs - 1) & ~(alignmask | a);
+        /* Min size for a buffer of stride + ivsize, aligned to alignmask */
+        size = aligned_stride + ivsize +
+               (alignmask & ~(crypto_tfm_ctx_alignment() - 1));
 
         walk->buffer = kmalloc(size, skcipher_walk_gfp(walk));
         if (!walk->buffer)
                 return -ENOMEM;
 
-        iv = PTR_ALIGN(walk->buffer, alignmask + 1);
-        iv = skcipher_get_spot(iv, bs) + aligned_bs;
+        iv = PTR_ALIGN(walk->buffer, alignmask + 1) + aligned_stride;
 
         walk->iv = memcpy(iv, walk->iv, walk->ivsize);
         return 0;
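And a sketch of the resulting IV bounce-buffer layout in skcipher_copy_iv(), again with assumed example values (stride 16, ivsize 16, alignmask 15, crypto_tfm_ctx_alignment() of 8, malloc standing in for kmalloc): at most (alignmask & ~a) bytes of alignment slack come first, then aligned_stride bytes reserved for the data block, and the IV copy starts right after them at PTR_ALIGN(buffer, alignmask + 1) + aligned_stride.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Same stand-in macros as above, assumptions rather than kernel headers. */
#define ALIGN(x, a)     (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))

int main(void)
{
        unsigned alignmask = 15;                 /* example 16-byte alignmask */
        unsigned a = 8 - 1;                      /* assumed ctx alignment - 1 */
        unsigned stride = 16, ivsize = 16;
        unsigned aligned_stride = ALIGN(stride, alignmask + 1);

        /* Same sizing as the new skcipher_copy_iv(): data + IV + slack. */
        unsigned size = aligned_stride + ivsize + (alignmask & ~a);
        unsigned char *buffer = malloc(size);
        unsigned char iv_in[16] = { 0 };
        if (!buffer)
                return 1;

        /* The IV lands right after the aligned region reserved for the data. */
        unsigned char *iv = PTR_ALIGN(buffer, alignmask + 1) + aligned_stride;
        memcpy(iv, iv_in, ivsize);

        printf("size=%u, iv offset=%td\n", size, iv - buffer);
        free(buffer);
        return 0;
}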
