Skip to content

Commit 7271b33

Browse files
committed
crypto: ghash-clmulni - Fix cryptd reordering
This patch fixes an old bug where requests can be reordered because some are processed by cryptd while others are processed directly in softirq context. The fix is to always postpone to cryptd if there are currently requests outstanding from the same tfm. This patch also removes the redundant use of cryptd in the async init function as init never touches the FPU. Signed-off-by: Herbert Xu <[email protected]>
1 parent 88407a3 commit 7271b33

File tree

1 file changed

+17
-23
lines changed

1 file changed

+17
-23
lines changed

arch/x86/crypto/ghash-clmulni-intel_glue.c

Lines changed: 17 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -168,30 +168,23 @@ static int ghash_async_init(struct ahash_request *req)
168168
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
169169
struct ahash_request *cryptd_req = ahash_request_ctx(req);
170170
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
171+
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
172+
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
171173

172-
if (!irq_fpu_usable()) {
173-
memcpy(cryptd_req, req, sizeof(*req));
174-
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
175-
return crypto_ahash_init(cryptd_req);
176-
} else {
177-
struct shash_desc *desc = cryptd_shash_desc(cryptd_req);
178-
struct crypto_shash *child = cryptd_ahash_child(cryptd_tfm);
179-
180-
desc->tfm = child;
181-
desc->flags = req->base.flags;
182-
return crypto_shash_init(desc);
183-
}
174+
desc->tfm = child;
175+
desc->flags = req->base.flags;
176+
return crypto_shash_init(desc);
184177
}
185178

186179
static int ghash_async_update(struct ahash_request *req)
187180
{
188181
struct ahash_request *cryptd_req = ahash_request_ctx(req);
182+
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
183+
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
184+
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
189185

190-
if (!irq_fpu_usable()) {
191-
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
192-
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
193-
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
194-
186+
if (!irq_fpu_usable() ||
187+
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
195188
memcpy(cryptd_req, req, sizeof(*req));
196189
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
197190
return crypto_ahash_update(cryptd_req);
@@ -204,12 +197,12 @@ static int ghash_async_update(struct ahash_request *req)
204197
static int ghash_async_final(struct ahash_request *req)
205198
{
206199
struct ahash_request *cryptd_req = ahash_request_ctx(req);
200+
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
201+
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
202+
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
207203

208-
if (!irq_fpu_usable()) {
209-
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
210-
struct ghash_async_ctx *ctx = crypto_ahash_ctx(tfm);
211-
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
212-
204+
if (!irq_fpu_usable() ||
205+
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
213206
memcpy(cryptd_req, req, sizeof(*req));
214207
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
215208
return crypto_ahash_final(cryptd_req);
@@ -249,7 +242,8 @@ static int ghash_async_digest(struct ahash_request *req)
249242
struct ahash_request *cryptd_req = ahash_request_ctx(req);
250243
struct cryptd_ahash *cryptd_tfm = ctx->cryptd_tfm;
251244

252-
if (!irq_fpu_usable()) {
245+
if (!irq_fpu_usable() ||
246+
(in_atomic() && cryptd_ahash_queued(cryptd_tfm))) {
253247
memcpy(cryptd_req, req, sizeof(*req));
254248
ahash_request_set_tfm(cryptd_req, &cryptd_tfm->base);
255249
return crypto_ahash_digest(cryptd_req);

0 commit comments

Comments (0)