|
112 | 112 | #define FLAGS_DMA_READY 6 |
113 | 113 | #define FLAGS_AUTO_XOR 7 |
114 | 114 | #define FLAGS_BE32_SHA1 8 |
| 115 | +#define FLAGS_SGS_COPIED 9 |
| 116 | +#define FLAGS_SGS_ALLOCED 10 |
115 | 117 | /* context flags */ |
116 | 118 | #define FLAGS_FINUP 16 |
117 | 119 | #define FLAGS_SG 17 |
@@ -151,8 +153,10 @@ struct omap_sham_reqctx { |
151 | 153 |
|
152 | 154 | /* walk state */ |
153 | 155 | struct scatterlist *sg; |
| 156 | + struct scatterlist sgl[2]; |
154 | 157 | struct scatterlist sgl_tmp; |
155 | 158 | unsigned int offset; /* offset in current sg */ |
| 159 | + int sg_len; |
156 | 160 | unsigned int total; /* total request */ |
157 | 161 |
|
158 | 162 | u8 buffer[0] OMAP_ALIGNED; |
@@ -223,6 +227,7 @@ struct omap_sham_dev { |
223 | 227 | struct dma_chan *dma_lch; |
224 | 228 | struct tasklet_struct done_task; |
225 | 229 | u8 polling_mode; |
| 230 | + u8 xmit_buf[BUFLEN]; |
226 | 231 |
|
227 | 232 | unsigned long flags; |
228 | 233 | struct crypto_queue queue; |
@@ -626,6 +631,260 @@ static int omap_sham_xmit_dma(struct omap_sham_dev *dd, dma_addr_t dma_addr, |
626 | 631 | return -EINPROGRESS; |
627 | 632 | } |
628 | 633 |
|
| 634 | +static int omap_sham_copy_sg_lists(struct omap_sham_reqctx *ctx, |
| 635 | + struct scatterlist *sg, int bs, int new_len) |
| 636 | +{ |
| 637 | + int n = sg_nents(sg); |
| 638 | + struct scatterlist *tmp; |
| 639 | + int offset = ctx->offset; |
| 640 | + |
| 641 | + if (ctx->bufcnt) |
| 642 | + n++; |
| 643 | + |
| 644 | + ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL); |
| 645 | + if (!ctx->sg) |
| 646 | + return -ENOMEM; |
| 647 | + |
| 648 | + sg_init_table(ctx->sg, n); |
| 649 | + |
| 650 | + tmp = ctx->sg; |
| 651 | + |
| 652 | + ctx->sg_len = 0; |
| 653 | + |
| 654 | + if (ctx->bufcnt) { |
| 655 | + sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt); |
| 656 | + tmp = sg_next(tmp); |
| 657 | + ctx->sg_len++; |
| 658 | + } |
| 659 | + |
| 660 | + while (sg && new_len) { |
| 661 | + int len = sg->length - offset; |
| 662 | + |
| 663 | + if (offset) { |
| 664 | + offset -= sg->length; |
| 665 | + if (offset < 0) |
| 666 | + offset = 0; |
| 667 | + } |
| 668 | + |
| 669 | + if (new_len < len) |
| 670 | + len = new_len; |
| 671 | + |
| 672 | + if (len > 0) { |
| 673 | + new_len -= len; |
| 674 | + sg_set_page(tmp, sg_page(sg), len, sg->offset); |
| 675 | + if (new_len <= 0) |
| 676 | + sg_mark_end(tmp); |
| 677 | + tmp = sg_next(tmp); |
| 678 | + ctx->sg_len++; |
| 679 | + } |
| 680 | + |
| 681 | + sg = sg_next(sg); |
| 682 | + } |
| 683 | + |
| 684 | + set_bit(FLAGS_SGS_ALLOCED, &ctx->dd->flags); |
| 685 | + |
| 686 | + ctx->bufcnt = 0; |
| 687 | + |
| 688 | + return 0; |
| 689 | +} |
| 690 | + |
| 691 | +static int omap_sham_copy_sgs(struct omap_sham_reqctx *ctx, |
| 692 | + struct scatterlist *sg, int bs, int new_len) |
| 693 | +{ |
| 694 | + int pages; |
| 695 | + void *buf; |
| 696 | + int len; |
| 697 | + |
| 698 | + len = new_len + ctx->bufcnt; |
| 699 | + |
| 700 | + pages = get_order(ctx->total); |
| 701 | + |
| 702 | + buf = (void *)__get_free_pages(GFP_ATOMIC, pages); |
| 703 | + if (!buf) { |
| 704 | + pr_err("Couldn't allocate pages for unaligned cases.\n"); |
| 705 | + return -ENOMEM; |
| 706 | + } |
| 707 | + |
| 708 | + if (ctx->bufcnt) |
| 709 | + memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt); |
| 710 | + |
| 711 | + scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->offset, |
| 712 | + ctx->total - ctx->bufcnt, 0); |
| 713 | + sg_init_table(ctx->sgl, 1); |
| 714 | + sg_set_buf(ctx->sgl, buf, len); |
| 715 | + ctx->sg = ctx->sgl; |
| 716 | + set_bit(FLAGS_SGS_COPIED, &ctx->dd->flags); |
| 717 | + ctx->sg_len = 1; |
| 718 | + ctx->bufcnt = 0; |
| 719 | + ctx->offset = 0; |
| 720 | + |
| 721 | + return 0; |
| 722 | +} |
| 723 | + |
/*
 * omap_sham_align_sgs - decide whether req->src can be fed to the HW as-is
 * @sg:     source scatterlist
 * @nbytes: bytes of new payload in @sg (walk starts rctx->offset in)
 * @bs:     block size of the hash algorithm
 * @final:  true when this transfer finishes the hash
 * @rctx:   request context receiving sg/sg_len on success
 *
 * Walks the list checking each touched entry for 4-byte start alignment
 * and block-multiple length.  new_len is the byte count that will be
 * transmitted: rounded up to a whole block for a final request, rounded
 * down otherwise.  Fallbacks:
 *   - any misaligned entry            -> omap_sham_copy_sgs()  (flatten)
 *   - aligned but starts/ends
 *     mid-entry                       -> omap_sham_copy_sg_lists() (clone)
 *   - fully usable                    -> use @sg directly.
 * Returns 0 or a negative errno from the copy helpers.
 */
static int omap_sham_align_sgs(struct scatterlist *sg,
			       int nbytes, int bs, bool final,
			       struct omap_sham_reqctx *rctx)
{
	int n = 0;	/* number of SG entries the transfer will touch */
	bool aligned = true;
	bool list_ok = true;
	struct scatterlist *sg_tmp = sg;
	int new_len;
	int offset = rctx->offset;

	/* Nothing to send: caller will rely on buffered data only. */
	if (!sg || !sg->length || !nbytes)
		return 0;

	new_len = nbytes;

	/* A non-zero walk offset means we start mid-entry. */
	if (offset)
		list_ok = false;

	if (final)
		new_len = DIV_ROUND_UP(new_len, bs) * bs;
	else
		new_len = new_len / bs * bs;

	while (nbytes > 0 && sg_tmp) {
		n++;

		/* Only check entries the transfer actually reaches. */
		if (offset < sg_tmp->length) {
			/* DMA requires word-aligned start addresses... */
			if (!IS_ALIGNED(offset + sg_tmp->offset, 4)) {
				aligned = false;
				break;
			}

			/* ...and whole blocks per entry. */
			if (!IS_ALIGNED(sg_tmp->length - offset, bs)) {
				aligned = false;
				break;
			}
		}

		if (offset) {
			/* Consume the offset across leading entries. */
			offset -= sg_tmp->length;
			if (offset < 0) {
				nbytes += offset;
				offset = 0;
			}
		} else {
			nbytes -= sg_tmp->length;
		}

		sg_tmp = sg_next(sg_tmp);

		/* Payload ends mid-entry: raw list can't be used. */
		if (nbytes < 0) {
			list_ok = false;
			break;
		}
	}

	if (!aligned)
		return omap_sham_copy_sgs(rctx, sg, bs, new_len);
	else if (!list_ok)
		return omap_sham_copy_sg_lists(rctx, sg, bs, new_len);

	rctx->sg_len = n;
	rctx->sg = sg;

	return 0;
}
| 791 | + |
/*
 * omap_sham_prepare_request - stage one hash request for the HW
 * @req:    the ahash request (may be NULL -> no-op)
 * @update: true for an update operation (payload in req->src counts)
 *
 * Combines data buffered from previous updates (rctx->buffer, staged via
 * dd->xmit_buf) with the new payload, aligns everything to the block
 * size, and leaves rctx->sg/sg_len/total describing what to transmit.
 * Any sub-block tail is copied back into rctx->buffer for the next call.
 * Returns 0 or a negative errno from the SG fallback helpers.
 *
 * NOTE(review): the only visible caller (omap_sham_handle_queue) passes
 * NULL as @req, which makes this function return 0 immediately —
 * presumably it should pass the dequeued req; verify against the caller.
 */
static int omap_sham_prepare_request(struct ahash_request *req, bool update)
{
	/*
	 * NOTE(review): ahash_request_ctx(req) is evaluated before the
	 * NULL check below — pointer arithmetic on NULL; harmless in
	 * practice but technically UB. Consider reordering.
	 */
	struct omap_sham_reqctx *rctx = ahash_request_ctx(req);
	int bs;
	int ret;
	int nbytes;
	bool final = rctx->flags & BIT(FLAGS_FINUP);
	int xmit_len, hash_later;

	if (!req)
		return 0;

	bs = get_block_size(rctx);

	/* Only an update brings new payload; final/init contribute none. */
	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	rctx->total = nbytes + rctx->bufcnt;

	if (!rctx->total)
		return 0;

	/* Top up the carry buffer to a block boundary from req->src. */
	if (nbytes && (!IS_ALIGNED(rctx->bufcnt, bs))) {
		int len = bs - rctx->bufcnt % bs;

		if (len > nbytes)
			len = nbytes;
		scatterwalk_map_and_copy(rctx->buffer + rctx->bufcnt, req->src,
					 0, len, 0);
		rctx->bufcnt += len;
		nbytes -= len;
		/* SG walk must now skip the bytes consumed above. */
		rctx->offset = len;
	}

	/* Stage carried bytes in the DMA-able per-device bounce buffer. */
	if (rctx->bufcnt)
		memcpy(rctx->dd->xmit_buf, rctx->buffer, rctx->bufcnt);

	ret = omap_sham_align_sgs(req->src, nbytes, bs, final, rctx);
	if (ret)
		return ret;

	/* Round the transmit length: up to a block if finalizing, else down. */
	xmit_len = rctx->total;

	if (!IS_ALIGNED(xmit_len, bs)) {
		if (final)
			xmit_len = DIV_ROUND_UP(xmit_len, bs) * bs;
		else
			xmit_len = xmit_len / bs * bs;
	}

	/* Sub-block tail that must wait in the buffer for the next call. */
	hash_later = rctx->total - xmit_len;
	if (hash_later < 0)
		hash_later = 0;

	if (rctx->bufcnt && nbytes) {
		/* have data from previous operation and current */
		sg_init_table(rctx->sgl, 2);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, rctx->bufcnt);

		sg_chain(rctx->sgl, 2, req->src);

		rctx->sg = rctx->sgl;

		rctx->sg_len++;
	} else if (rctx->bufcnt) {
		/*
		 * have buffered data only
		 *
		 * NOTE(review): xmit_len may exceed bufcnt here when a
		 * final request rounds up, so the entry covers bytes of
		 * xmit_buf beyond what was copied in — presumably the HW
		 * padding makes that safe; confirm.
		 */
		sg_init_table(rctx->sgl, 1);
		sg_set_buf(rctx->sgl, rctx->dd->xmit_buf, xmit_len);

		rctx->sg = rctx->sgl;

		rctx->sg_len = 1;
	}

	if (hash_later) {
		if (req->nbytes) {
			/* Tail comes from the end of the request payload. */
			scatterwalk_map_and_copy(rctx->buffer, req->src,
						 req->nbytes - hash_later,
						 hash_later, 0);
		} else {
			/* Tail comes from previously buffered data. */
			memcpy(rctx->buffer, rctx->buffer + xmit_len,
			       hash_later);
		}
		rctx->bufcnt = hash_later;
	} else {
		rctx->bufcnt = 0;
	}

	/* For non-final transfers only whole blocks go out this round. */
	if (!final)
		rctx->total = xmit_len;

	return 0;
}
| 887 | + |
629 | 888 | static size_t omap_sham_append_buffer(struct omap_sham_reqctx *ctx, |
630 | 889 | const u8 *data, size_t length) |
631 | 890 | { |
@@ -1040,6 +1299,10 @@ static int omap_sham_handle_queue(struct omap_sham_dev *dd, |
1040 | 1299 | dd->req = req; |
1041 | 1300 | ctx = ahash_request_ctx(req); |
1042 | 1301 |
|
| 1302 | + err = omap_sham_prepare_request(NULL, ctx->op == OP_UPDATE); |
| 1303 | + if (err) |
| 1304 | + goto err1; |
| 1305 | + |
1043 | 1306 | dev_dbg(dd->dev, "handling new req, op: %lu, nbytes: %d\n", |
1044 | 1307 | ctx->op, req->nbytes); |
1045 | 1308 |
|
|
0 commit comments