
Commit 331bf73

Megha Dey authored and herbertx committed
crypto: sha1-mb - async implementation for sha1-mb
Herbert wants the sha1-mb algorithm to have an async implementation:
https://lkml.org/lkml/2016/4/5/286. Currently, sha1-mb uses an async interface
for the outer algorithm and a sync interface for the inner algorithm. This
patch introduces an async interface for even the inner algorithm.

Signed-off-by: Megha Dey <[email protected]>
Signed-off-by: Tim Chen <[email protected]>
Signed-off-by: Herbert Xu <[email protected]>
1 parent 820573e commit 331bf73
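
For context (not part of the commit): the caller-visible difference between a synchronous shash and an asynchronous ahash is that an ahash may return -EINPROGRESS and complete later through a callback. Below is a minimal, hypothetical sketch of driving an async hash through the generic kernel crypto API of that era; the helper names and the completion-based wait are assumptions for illustration, and the algorithm is requested by its generic name "sha1" (the multi-buffer driver, when loaded, is picked by priority).

/*
 * Illustrative only -- not part of this patch.  Sketch of a caller using
 * the asynchronous ahash interface; example_* names are invented.
 */
#include <crypto/hash.h>
#include <linux/completion.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

struct example_wait {
	struct completion done;
	int err;
};

static void example_hash_done(struct crypto_async_request *req, int err)
{
	struct example_wait *wait = req->data;

	if (err == -EINPROGRESS)
		return;		/* left the backlog; final completion follows */
	wait->err = err;
	complete(&wait->done);
}

static int example_sha1_digest(const u8 *data, unsigned int len, u8 *out)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	struct example_wait wait;
	int err;

	tfm = crypto_alloc_ahash("sha1", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		crypto_free_ahash(tfm);
		return -ENOMEM;
	}

	init_completion(&wait.done);
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   example_hash_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* may finish synchronously or return -EINPROGRESS / -EBUSY */
	err = crypto_ahash_digest(req);
	if (err == -EINPROGRESS || err == -EBUSY) {
		wait_for_completion(&wait.done);
		err = wait.err;
	}

	ahash_request_free(req);
	crypto_free_ahash(tfm);
	return err;
}
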

File tree

4 files changed: +165 −169 lines changed


arch/x86/crypto/sha-mb/sha1_mb.c

Lines changed: 101 additions & 81 deletions
@@ -80,10 +80,10 @@ struct sha1_mb_ctx {
 static inline struct mcryptd_hash_request_ctx
 *cast_hash_to_mcryptd_ctx(struct sha1_hash_ctx *hash_ctx)
 {
-	struct shash_desc *desc;
+	struct ahash_request *areq;

-	desc = container_of((void *) hash_ctx, struct shash_desc, __ctx);
-	return container_of(desc, struct mcryptd_hash_request_ctx, desc);
+	areq = container_of((void *) hash_ctx, struct ahash_request, __ctx);
+	return container_of(areq, struct mcryptd_hash_request_ctx, areq);
 }

 static inline struct ahash_request
@@ -93,7 +93,7 @@ static inline struct ahash_request
 }

 static void req_ctx_init(struct mcryptd_hash_request_ctx *rctx,
-			 struct shash_desc *desc)
+			 struct ahash_request *areq)
 {
 	rctx->flag = HASH_UPDATE;
 }
@@ -375,9 +375,9 @@ static struct sha1_hash_ctx *sha1_ctx_mgr_flush(struct sha1_ctx_mgr *mgr)
 	}
 }

-static int sha1_mb_init(struct shash_desc *desc)
+static int sha1_mb_init(struct ahash_request *areq)
 {
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

 	hash_ctx_init(sctx);
 	sctx->job.result_digest[0] = SHA1_H0;
@@ -395,7 +395,7 @@ static int sha1_mb_init(struct shash_desc *desc)
 static int sha1_mb_set_results(struct mcryptd_hash_request_ctx *rctx)
 {
 	int i;
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(&rctx->desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(&rctx->areq);
 	__be32 *dst = (__be32 *) rctx->out;

 	for (i = 0; i < 5; ++i)
@@ -427,7 +427,7 @@ static int sha_finish_walk(struct mcryptd_hash_request_ctx **ret_rctx,

 		}
 		sha_ctx = (struct sha1_hash_ctx *)
-						shash_desc_ctx(&rctx->desc);
+						ahash_request_ctx(&rctx->areq);
 		kernel_fpu_begin();
 		sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx,
 						rctx->walk.data, nbytes, flag);
@@ -519,11 +519,10 @@ static void sha1_mb_add_list(struct mcryptd_hash_request_ctx *rctx,
 		mcryptd_arm_flusher(cstate, delay);
 }

-static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
-			  unsigned int len)
+static int sha1_mb_update(struct ahash_request *areq)
 {
 	struct mcryptd_hash_request_ctx *rctx =
-		container_of(desc, struct mcryptd_hash_request_ctx, desc);
+		container_of(areq, struct mcryptd_hash_request_ctx, areq);
 	struct mcryptd_alg_cstate *cstate =
 				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

@@ -539,7 +538,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
 	}

 	/* need to init context */
-	req_ctx_init(rctx, desc);
+	req_ctx_init(rctx, areq);

 	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

@@ -552,7 +551,7 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
 		rctx->flag |= HASH_DONE;

 	/* submit */
-	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
 	sha1_mb_add_list(rctx, cstate);
 	kernel_fpu_begin();
 	sha_ctx = sha1_ctx_mgr_submit(cstate->mgr, sha_ctx, rctx->walk.data,
@@ -579,11 +578,10 @@ static int sha1_mb_update(struct shash_desc *desc, const u8 *data,
 	return ret;
 }

-static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
-			 unsigned int len, u8 *out)
+static int sha1_mb_finup(struct ahash_request *areq)
 {
 	struct mcryptd_hash_request_ctx *rctx =
-		container_of(desc, struct mcryptd_hash_request_ctx, desc);
+		container_of(areq, struct mcryptd_hash_request_ctx, areq);
 	struct mcryptd_alg_cstate *cstate =
 				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

@@ -598,7 +596,7 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
 	}

 	/* need to init context */
-	req_ctx_init(rctx, desc);
+	req_ctx_init(rctx, areq);

 	nbytes = crypto_ahash_walk_first(req, &rctx->walk);

@@ -611,11 +609,10 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
 		rctx->flag |= HASH_DONE;
 		flag = HASH_LAST;
 	}
-	rctx->out = out;

 	/* submit */
 	rctx->flag |= HASH_FINAL;
-	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
 	sha1_mb_add_list(rctx, cstate);

 	kernel_fpu_begin();
@@ -641,10 +638,10 @@ static int sha1_mb_finup(struct shash_desc *desc, const u8 *data,
 	return ret;
 }

-static int sha1_mb_final(struct shash_desc *desc, u8 *out)
+static int sha1_mb_final(struct ahash_request *areq)
 {
 	struct mcryptd_hash_request_ctx *rctx =
-		container_of(desc, struct mcryptd_hash_request_ctx, desc);
+		container_of(areq, struct mcryptd_hash_request_ctx, areq);
 	struct mcryptd_alg_cstate *cstate =
 				this_cpu_ptr(sha1_mb_alg_state.alg_cstate);

@@ -659,12 +656,11 @@ static int sha1_mb_final(struct shash_desc *desc, u8 *out)
 	}

 	/* need to init context */
-	req_ctx_init(rctx, desc);
+	req_ctx_init(rctx, areq);

-	rctx->out = out;
 	rctx->flag |= HASH_DONE | HASH_FINAL;

-	sha_ctx = (struct sha1_hash_ctx *) shash_desc_ctx(desc);
+	sha_ctx = (struct sha1_hash_ctx *) ahash_request_ctx(areq);
 	/* flag HASH_FINAL and 0 data size */
 	sha1_mb_add_list(rctx, cstate);
 	kernel_fpu_begin();
@@ -691,48 +687,98 @@ static int sha1_mb_final(struct shash_desc *desc, u8 *out)
 	return ret;
 }

-static int sha1_mb_export(struct shash_desc *desc, void *out)
+static int sha1_mb_export(struct ahash_request *areq, void *out)
 {
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

 	memcpy(out, sctx, sizeof(*sctx));

 	return 0;
 }

-static int sha1_mb_import(struct shash_desc *desc, const void *in)
+static int sha1_mb_import(struct ahash_request *areq, const void *in)
 {
-	struct sha1_hash_ctx *sctx = shash_desc_ctx(desc);
+	struct sha1_hash_ctx *sctx = ahash_request_ctx(areq);

 	memcpy(sctx, in, sizeof(*sctx));

 	return 0;
 }

+static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
+{
+	struct mcryptd_ahash *mcryptd_tfm;
+	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+	struct mcryptd_hash_ctx *mctx;

-static struct shash_alg sha1_mb_shash_alg = {
-	.digestsize	= SHA1_DIGEST_SIZE,
+	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
+						CRYPTO_ALG_INTERNAL,
+						CRYPTO_ALG_INTERNAL);
+	if (IS_ERR(mcryptd_tfm))
+		return PTR_ERR(mcryptd_tfm);
+	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
+	mctx->alg_state = &sha1_mb_alg_state;
+	ctx->mcryptd_tfm = mcryptd_tfm;
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				sizeof(struct ahash_request) +
+				crypto_ahash_reqsize(&mcryptd_tfm->base));
+
+	return 0;
+}
+
+static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static int sha1_mb_areq_init_tfm(struct crypto_tfm *tfm)
+{
+	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
+				sizeof(struct ahash_request) +
+				sizeof(struct sha1_hash_ctx));
+
+	return 0;
+}
+
+static void sha1_mb_areq_exit_tfm(struct crypto_tfm *tfm)
+{
+	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+
+	mcryptd_free_ahash(ctx->mcryptd_tfm);
+}
+
+static struct ahash_alg sha1_mb_areq_alg = {
 	.init		= sha1_mb_init,
 	.update		= sha1_mb_update,
 	.final		= sha1_mb_final,
 	.finup		= sha1_mb_finup,
 	.export		= sha1_mb_export,
 	.import		= sha1_mb_import,
-	.descsize	= sizeof(struct sha1_hash_ctx),
-	.statesize	= sizeof(struct sha1_hash_ctx),
-	.base		= {
-		.cra_name	 = "__sha1-mb",
-		.cra_driver_name = "__intel_sha1-mb",
-		.cra_priority	 = 100,
-		/*
-		 * use ASYNC flag as some buffers in multi-buffer
-		 * algo may not have completed before hashing thread sleep
-		 */
-		.cra_flags	 = CRYPTO_ALG_TYPE_SHASH | CRYPTO_ALG_ASYNC |
-				   CRYPTO_ALG_INTERNAL,
-		.cra_blocksize	 = SHA1_BLOCK_SIZE,
-		.cra_module	 = THIS_MODULE,
-		.cra_list	 = LIST_HEAD_INIT(sha1_mb_shash_alg.base.cra_list),
+	.halg = {
+		.digestsize	= SHA1_DIGEST_SIZE,
+		.statesize	= sizeof(struct sha1_hash_ctx),
+		.base = {
+			.cra_name	 = "__sha1-mb",
+			.cra_driver_name = "__intel_sha1-mb",
+			.cra_priority	 = 100,
+			/*
+			 * use ASYNC flag as some buffers in multi-buffer
+			 * algo may not have completed before hashing thread
+			 * sleep
+			 */
+			.cra_flags	= CRYPTO_ALG_TYPE_AHASH |
+						CRYPTO_ALG_ASYNC |
+						CRYPTO_ALG_INTERNAL,
+			.cra_blocksize	= SHA1_BLOCK_SIZE,
+			.cra_module	= THIS_MODULE,
+			.cra_list	= LIST_HEAD_INIT
+					(sha1_mb_areq_alg.halg.base.cra_list),
+			.cra_init	= sha1_mb_areq_init_tfm,
+			.cra_exit	= sha1_mb_areq_exit_tfm,
+			.cra_ctxsize	= sizeof(struct sha1_hash_ctx),
+		}
 	}
 };

@@ -817,46 +863,20 @@ static int sha1_mb_async_import(struct ahash_request *req, const void *in)
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct sha1_mb_ctx *ctx = crypto_ahash_ctx(tfm);
 	struct mcryptd_ahash *mcryptd_tfm = ctx->mcryptd_tfm;
-	struct crypto_shash *child = mcryptd_ahash_child(mcryptd_tfm);
+	struct crypto_ahash *child = mcryptd_ahash_child(mcryptd_tfm);
 	struct mcryptd_hash_request_ctx *rctx;
-	struct shash_desc *desc;
+	struct ahash_request *areq;

 	memcpy(mcryptd_req, req, sizeof(*req));
 	ahash_request_set_tfm(mcryptd_req, &mcryptd_tfm->base);
 	rctx = ahash_request_ctx(mcryptd_req);
-	desc = &rctx->desc;
-	desc->tfm = child;
-	desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP;
-
-	return crypto_ahash_import(mcryptd_req, in);
-}
-
-static int sha1_mb_async_init_tfm(struct crypto_tfm *tfm)
-{
-	struct mcryptd_ahash *mcryptd_tfm;
-	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
-	struct mcryptd_hash_ctx *mctx;
+	areq = &rctx->areq;

-	mcryptd_tfm = mcryptd_alloc_ahash("__intel_sha1-mb",
-					  CRYPTO_ALG_INTERNAL,
-					  CRYPTO_ALG_INTERNAL);
-	if (IS_ERR(mcryptd_tfm))
-		return PTR_ERR(mcryptd_tfm);
-	mctx = crypto_ahash_ctx(&mcryptd_tfm->base);
-	mctx->alg_state = &sha1_mb_alg_state;
-	ctx->mcryptd_tfm = mcryptd_tfm;
-	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
-				sizeof(struct ahash_request) +
-				crypto_ahash_reqsize(&mcryptd_tfm->base));
-
-	return 0;
-}
-
-static void sha1_mb_async_exit_tfm(struct crypto_tfm *tfm)
-{
-	struct sha1_mb_ctx *ctx = crypto_tfm_ctx(tfm);
+	ahash_request_set_tfm(areq, child);
+	ahash_request_set_callback(areq, CRYPTO_TFM_REQ_MAY_SLEEP,
+					rctx->complete, req);

-	mcryptd_free_ahash(ctx->mcryptd_tfm);
+	return crypto_ahash_import(mcryptd_req, in);
 }

 static struct ahash_alg sha1_mb_async_alg = {
@@ -965,7 +985,7 @@ static int __init sha1_mb_mod_init(void)
 	}
 	sha1_mb_alg_state.flusher = &sha1_mb_flusher;

-	err = crypto_register_shash(&sha1_mb_shash_alg);
+	err = crypto_register_ahash(&sha1_mb_areq_alg);
 	if (err)
 		goto err2;
 	err = crypto_register_ahash(&sha1_mb_async_alg);
@@ -975,7 +995,7 @@ static int __init sha1_mb_mod_init(void)

 	return 0;
 err1:
-	crypto_unregister_shash(&sha1_mb_shash_alg);
+	crypto_unregister_ahash(&sha1_mb_areq_alg);
 err2:
 	for_each_possible_cpu(cpu) {
 		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
@@ -991,7 +1011,7 @@ static void __exit sha1_mb_mod_fini(void)
 	struct mcryptd_alg_cstate *cpu_state;

 	crypto_unregister_ahash(&sha1_mb_async_alg);
-	crypto_unregister_shash(&sha1_mb_shash_alg);
+	crypto_unregister_ahash(&sha1_mb_areq_alg);
 	for_each_possible_cpu(cpu) {
 		cpu_state = per_cpu_ptr(sha1_mb_alg_state.alg_cstate, cpu);
 		kfree(cpu_state->mgr);
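
The structural change running through the hunks above is that the outer (async) algorithm now hands work to the inner algorithm through an ahash_request embedded in the mcryptd request context instead of a shash_desc, chaining the completion callback as sha1_mb_async_import() does. The following is a hypothetical, stripped-down sketch of that request-stacking pattern, not code from this patch; all "my_*" names are invented for illustration.

/*
 * Illustrative only.  An outer ahash keeps a child ahash transform and
 * forwards a request through an ahash_request living in the outer
 * request's context, reusing the outer request's completion callback.
 */
#include <crypto/internal/hash.h>

struct my_tfm_ctx {
	struct crypto_ahash *child;	/* inner async transform */
};

struct my_req_ctx {
	/*
	 * Inner request; the transform's reqsize must also reserve
	 * crypto_ahash_reqsize(child) bytes after this member.
	 */
	struct ahash_request child_req;
};

static int my_import(struct ahash_request *req, const void *in)
{
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct my_tfm_ctx *ctx = crypto_ahash_ctx(tfm);
	struct my_req_ctx *rctx = ahash_request_ctx(req);

	/* point the inner request at the child and chain its completion */
	ahash_request_set_tfm(&rctx->child_req, ctx->child);
	ahash_request_set_callback(&rctx->child_req, CRYPTO_TFM_REQ_MAY_SLEEP,
				   req->base.complete, req->base.data);

	return crypto_ahash_import(&rctx->child_req, in);
}
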
