@@ -15,6 +15,7 @@
 #include <linux/spinlock.h>
 #include <linux/gfp.h>
 #include <linux/module.h>
+#include <linux/of.h>
 
 #include <crypto/ctr.h>
 #include <crypto/internal/des.h>
@@ -71,15 +72,11 @@
 #define MOD_AES256 (0x0a00 | KEYLEN_256)
 
 #define MAX_IVLEN 16
-#define NPE_ID 2 /* NPE C */
 #define NPE_QLEN 16
 /* Space for registering when the first
  * NPE_QLEN crypt_ctl are busy */
 #define NPE_QLEN_TOTAL 64
 
-#define SEND_QID 29
-#define RECV_QID 30
-
 #define CTL_FLAG_UNUSED 0x0000
 #define CTL_FLAG_USED 0x1000
 #define CTL_FLAG_PERFORM_ABLK 0x0001
@@ -221,6 +218,9 @@ static const struct ix_hash_algo hash_alg_sha1 = {
 };
 
 static struct npe *npe_c;
+
+static unsigned int send_qid;
+static unsigned int recv_qid;
 static struct dma_pool *buffer_pool;
 static struct dma_pool *ctx_pool;
 
@@ -437,8 +437,7 @@ static void crypto_done_action(unsigned long arg)
 	int i;
 
 	for (i = 0; i < 4; i++) {
-		dma_addr_t phys = qmgr_get_entry(RECV_QID);
-
+		dma_addr_t phys = qmgr_get_entry(recv_qid);
 		if (!phys)
 			return;
 		one_packet(phys);
@@ -448,10 +447,52 @@ static void crypto_done_action(unsigned long arg)
 
 static int init_ixp_crypto(struct device *dev)
 {
-	int ret = -ENODEV;
+	struct device_node *np = dev->of_node;
 	u32 msg[2] = { 0, 0 };
+	int ret = -ENODEV;
+	u32 npe_id;
+
+	dev_info(dev, "probing...\n");
+
+	/* Locate the NPE and queue manager to use from device tree */
+	if (IS_ENABLED(CONFIG_OF) && np) {
+		struct of_phandle_args queue_spec;
+		struct of_phandle_args npe_spec;
+
+		ret = of_parse_phandle_with_fixed_args(np, "intel,npe-handle",
+						       1, 0, &npe_spec);
+		if (ret) {
+			dev_err(dev, "no NPE engine specified\n");
+			return -ENODEV;
+		}
+		npe_id = npe_spec.args[0];
 
-	npe_c = npe_request(NPE_ID);
+		ret = of_parse_phandle_with_fixed_args(np, "queue-rx", 1, 0,
+						       &queue_spec);
+		if (ret) {
+			dev_err(dev, "no rx queue phandle\n");
+			return -ENODEV;
+		}
+		recv_qid = queue_spec.args[0];
+
+		ret = of_parse_phandle_with_fixed_args(np, "queue-txready", 1, 0,
+						       &queue_spec);
+		if (ret) {
+			dev_err(dev, "no txready queue phandle\n");
+			return -ENODEV;
+		}
+		send_qid = queue_spec.args[0];
+	} else {
+		/*
+		 * Hardcoded engine when using platform data, this goes away
+		 * when we switch to using DT only.
+		 */
+		npe_id = 2;
+		send_qid = 29;
+		recv_qid = 30;
+	}
+
+	npe_c = npe_request(npe_id);
 	if (!npe_c)
 		return ret;
 
@@ -497,20 +538,20 @@ static int init_ixp_crypto(struct device *dev)
 	if (!ctx_pool)
 		goto err;
 
-	ret = qmgr_request_queue(SEND_QID, NPE_QLEN_TOTAL, 0, 0,
+	ret = qmgr_request_queue(send_qid, NPE_QLEN_TOTAL, 0, 0,
				 "ixp_crypto:out", NULL);
 	if (ret)
 		goto err;
-	ret = qmgr_request_queue(RECV_QID, NPE_QLEN, 0, 0,
+	ret = qmgr_request_queue(recv_qid, NPE_QLEN, 0, 0,
				 "ixp_crypto:in", NULL);
 	if (ret) {
-		qmgr_release_queue(SEND_QID);
+		qmgr_release_queue(send_qid);
 		goto err;
 	}
-	qmgr_set_irq(RECV_QID, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
+	qmgr_set_irq(recv_qid, QUEUE_IRQ_SRC_NOT_EMPTY, irqhandler, NULL);
 	tasklet_init(&crypto_done_tasklet, crypto_done_action, 0);
 
-	qmgr_enable_irq(RECV_QID);
+	qmgr_enable_irq(recv_qid);
 	return 0;
 
 npe_error:
@@ -526,11 +567,11 @@ static int init_ixp_crypto(struct device *dev)
 
 static void release_ixp_crypto(struct device *dev)
 {
-	qmgr_disable_irq(RECV_QID);
+	qmgr_disable_irq(recv_qid);
 	tasklet_kill(&crypto_done_tasklet);
 
-	qmgr_release_queue(SEND_QID);
-	qmgr_release_queue(RECV_QID);
+	qmgr_release_queue(send_qid);
+	qmgr_release_queue(recv_qid);
 
 	dma_pool_destroy(ctx_pool);
 	dma_pool_destroy(buffer_pool);
@@ -682,8 +723,8 @@ static int register_chain_var(struct crypto_tfm *tfm, u8 xpad, u32 target,
 	buf->phys_addr = pad_phys;
 
 	atomic_inc(&ctx->configuring);
-	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
-	BUG_ON(qmgr_stat_overflow(SEND_QID));
+	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+	BUG_ON(qmgr_stat_overflow(send_qid));
 	return 0;
 }
 
@@ -757,8 +798,8 @@ static int gen_rev_aes_key(struct crypto_tfm *tfm)
 	crypt->ctl_flags |= CTL_FLAG_GEN_REVAES;
 
 	atomic_inc(&ctx->configuring);
-	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
-	BUG_ON(qmgr_stat_overflow(SEND_QID));
+	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+	BUG_ON(qmgr_stat_overflow(send_qid));
 	return 0;
 }
 
@@ -943,7 +984,7 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
 	if (sg_nents(req->src) > 1 || sg_nents(req->dst) > 1)
 		return ixp4xx_cipher_fallback(req, encrypt);
 
-	if (qmgr_stat_full(SEND_QID))
+	if (qmgr_stat_full(send_qid))
 		return -EAGAIN;
 	if (atomic_read(&ctx->configuring))
 		return -EAGAIN;
@@ -993,8 +1034,8 @@ static int ablk_perform(struct skcipher_request *req, int encrypt)
 	req_ctx->src = src_hook.next;
 	crypt->src_buf = src_hook.phys_next;
 	crypt->ctl_flags |= CTL_FLAG_PERFORM_ABLK;
-	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
-	BUG_ON(qmgr_stat_overflow(SEND_QID));
+	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+	BUG_ON(qmgr_stat_overflow(send_qid));
 	return -EINPROGRESS;
 
 free_buf_src:
@@ -1057,7 +1098,7 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	enum dma_data_direction src_direction = DMA_BIDIRECTIONAL;
 	unsigned int lastlen;
 
-	if (qmgr_stat_full(SEND_QID))
+	if (qmgr_stat_full(send_qid))
 		return -EAGAIN;
 	if (atomic_read(&ctx->configuring))
 		return -EAGAIN;
@@ -1141,8 +1182,8 @@ static int aead_perform(struct aead_request *req, int encrypt,
 	}
 
 	crypt->ctl_flags |= CTL_FLAG_PERFORM_AEAD;
-	qmgr_put_entry(SEND_QID, crypt_virt2phys(crypt));
-	BUG_ON(qmgr_stat_overflow(SEND_QID));
+	qmgr_put_entry(send_qid, crypt_virt2phys(crypt));
+	BUG_ON(qmgr_stat_overflow(send_qid));
 	return -EINPROGRESS;
 
 free_buf_dst:
@@ -1436,12 +1477,13 @@ static struct ixp_aead_alg ixp4xx_aeads[] = {
 
 static int ixp_crypto_probe(struct platform_device *_pdev)
 {
+	struct device *dev = &_pdev->dev;
 	int num = ARRAY_SIZE(ixp4xx_algos);
 	int i, err;
 
 	pdev = _pdev;
 
-	err = init_ixp_crypto(&pdev->dev);
+	err = init_ixp_crypto(dev);
 	if (err)
 		return err;
 
@@ -1533,11 +1575,20 @@ static int ixp_crypto_remove(struct platform_device *pdev)
 
 	return 0;
 }
+static const struct of_device_id ixp4xx_crypto_of_match[] = {
+	{
+		.compatible = "intel,ixp4xx-crypto",
+	},
+	{},
+};
 
 static struct platform_driver ixp_crypto_driver = {
 	.probe = ixp_crypto_probe,
 	.remove = ixp_crypto_remove,
-	.driver = { .name = "ixp4xx_crypto" },
+	.driver = {
+		.name = "ixp4xx_crypto",
+		.of_match_table = ixp4xx_crypto_of_match,
+	},
 };
 module_platform_driver(ixp_crypto_driver);
 
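For context, the properties parsed in init_ixp_crypto() above (intel,npe-handle, queue-rx, queue-txready, each a phandle followed by one argument cell) together with the "intel,ixp4xx-crypto" compatible imply a device tree node along the following lines. This is only a sketch, not part of the commit: the node name and the &npe and &qmgr labels are assumed to be defined elsewhere in the tree, and the cell values simply mirror the hardcoded platform-data fallback (NPE C, queues 29/30):

/* Hypothetical consumer node; &npe and &qmgr labels are assumptions. */
crypto {
	compatible = "intel,ixp4xx-crypto";
	intel,npe-handle = <&npe 2>;   /* NPE C, mirrors the legacy npe_id = 2 */
	queue-rx = <&qmgr 30>;         /* mirrors the legacy recv_qid = 30 */
	queue-txready = <&qmgr 29>;    /* mirrors the legacy send_qid = 29 */
};

Each of_parse_phandle_with_fixed_args() call passes a cell count of 1, so it expects exactly one cell after the phandle; the driver then reads that cell back out of args[0] as the NPE engine or queue number.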