@@ -161,8 +161,8 @@ struct crypto_acomp_ctx {
        struct crypto_acomp *acomp;
        struct acomp_req *req;
        struct crypto_wait wait;
-       u8 *dstmem;
-       struct mutex *mutex;
+       u8 *buffer;
+       struct mutex mutex;
 };
 
 /*
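Embedding the buffer and the mutex directly in struct crypto_acomp_ctx ties their lifetime to the per-CPU context itself, so there is no separate allocation to manage and callers take the lock through the member's address. A minimal sketch of the resulting usage pattern (illustrative only; the surrounding code is elided):

        struct crypto_acomp_ctx *acomp_ctx;

        acomp_ctx = raw_cpu_ptr(pool->acomp_ctx);
        mutex_lock(&acomp_ctx->mutex);          /* a member now, hence the '&' */
        /* ... compress or decompress via acomp_ctx->buffer ... */
        mutex_unlock(&acomp_ctx->mutex);
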
@@ -688,72 +688,35 @@ static void zswap_alloc_shrinker(struct zswap_pool *pool)
 /*********************************
 * per-cpu code
 **********************************/
-static DEFINE_PER_CPU(u8 *, zswap_dstmem);
-/*
- * If users dynamically change the zpool type and compressor at runtime, i.e.
- * zswap is running, zswap can have more than one zpool on one cpu, but they
- * are sharing dtsmem. So we need this mutex to be per-cpu.
- */
-static DEFINE_PER_CPU(struct mutex *, zswap_mutex);
-
-static int zswap_dstmem_prepare(unsigned int cpu)
-{
-       struct mutex *mutex;
-       u8 *dst;
-
-       dst = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
-       if (!dst)
-               return -ENOMEM;
-
-       mutex = kmalloc_node(sizeof(*mutex), GFP_KERNEL, cpu_to_node(cpu));
-       if (!mutex) {
-               kfree(dst);
-               return -ENOMEM;
-       }
-
-       mutex_init(mutex);
-       per_cpu(zswap_dstmem, cpu) = dst;
-       per_cpu(zswap_mutex, cpu) = mutex;
-       return 0;
-}
-
-static int zswap_dstmem_dead(unsigned int cpu)
-{
-       struct mutex *mutex;
-       u8 *dst;
-
-       mutex = per_cpu(zswap_mutex, cpu);
-       kfree(mutex);
-       per_cpu(zswap_mutex, cpu) = NULL;
-
-       dst = per_cpu(zswap_dstmem, cpu);
-       kfree(dst);
-       per_cpu(zswap_dstmem, cpu) = NULL;
-
-       return 0;
-}
-
 static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
 {
        struct zswap_pool *pool = hlist_entry(node, struct zswap_pool, node);
        struct crypto_acomp_ctx *acomp_ctx = per_cpu_ptr(pool->acomp_ctx, cpu);
        struct crypto_acomp *acomp;
        struct acomp_req *req;
+       int ret;
+
+       mutex_init(&acomp_ctx->mutex);
+
+       acomp_ctx->buffer = kmalloc_node(PAGE_SIZE * 2, GFP_KERNEL, cpu_to_node(cpu));
+       if (!acomp_ctx->buffer)
+               return -ENOMEM;
 
        acomp = crypto_alloc_acomp_node(pool->tfm_name, 0, 0, cpu_to_node(cpu));
        if (IS_ERR(acomp)) {
                pr_err("could not alloc crypto acomp %s : %ld\n",
                       pool->tfm_name, PTR_ERR(acomp));
-               return PTR_ERR(acomp);
+               ret = PTR_ERR(acomp);
+               goto acomp_fail;
        }
        acomp_ctx->acomp = acomp;
 
        req = acomp_request_alloc(acomp_ctx->acomp);
        if (!req) {
                pr_err("could not alloc crypto acomp_request %s\n",
                       pool->tfm_name);
-               crypto_free_acomp(acomp_ctx->acomp);
-               return -ENOMEM;
+               ret = -ENOMEM;
+               goto req_fail;
        }
        acomp_ctx->req = req;
@@ -766,10 +729,13 @@ static int zswap_cpu_comp_prepare(unsigned int cpu, struct hlist_node *node)
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &acomp_ctx->wait);
 
-       acomp_ctx->mutex = per_cpu(zswap_mutex, cpu);
-       acomp_ctx->dstmem = per_cpu(zswap_dstmem, cpu);
-
        return 0;
+
+req_fail:
+       crypto_free_acomp(acomp_ctx->acomp);
+acomp_fail:
+       kfree(acomp_ctx->buffer);
+       return ret;
 }
 
 static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
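Taken together, the two hunks above convert zswap_cpu_comp_prepare() to the usual kernel goto-unwind idiom: each failure jumps to a label that releases only what was allocated before it, falling through the earlier cleanups in reverse order. A condensed sketch of the control flow (arguments elided with "..."; names as in the patch):

        acomp_ctx->buffer = kmalloc_node(...);
        if (!acomp_ctx->buffer)
                return -ENOMEM;                 /* nothing to unwind yet */

        acomp = crypto_alloc_acomp_node(...);
        if (IS_ERR(acomp)) {
                ret = PTR_ERR(acomp);
                goto acomp_fail;                /* free the buffer only */
        }

        req = acomp_request_alloc(...);
        if (!req) {
                ret = -ENOMEM;
                goto req_fail;                  /* free acomp, then buffer */
        }
        return 0;

req_fail:
        crypto_free_acomp(acomp_ctx->acomp);
acomp_fail:
        kfree(acomp_ctx->buffer);
        return ret;
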
@@ -782,6 +748,7 @@ static int zswap_cpu_comp_dead(unsigned int cpu, struct hlist_node *node)
                        acomp_request_free(acomp_ctx->req);
                if (!IS_ERR_OR_NULL(acomp_ctx->acomp))
                        crypto_free_acomp(acomp_ctx->acomp);
+               kfree(acomp_ctx->buffer);
        }
 
        return 0;
@@ -1391,12 +1358,12 @@ static void __zswap_load(struct zswap_entry *entry, struct page *page)
        u8 *src;
 
        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
-       mutex_lock(acomp_ctx->mutex);
+       mutex_lock(&acomp_ctx->mutex);
 
        src = zpool_map_handle(zpool, entry->handle, ZPOOL_MM_RO);
        if (!zpool_can_sleep_mapped(zpool)) {
-               memcpy(acomp_ctx->dstmem, src, entry->length);
-               src = acomp_ctx->dstmem;
+               memcpy(acomp_ctx->buffer, src, entry->length);
+               src = acomp_ctx->buffer;
                zpool_unmap_handle(zpool, entry->handle);
        }
 
@@ -1406,7 +1373,7 @@ static void __zswap_load(struct zswap_entry *entry, struct page *page)
        acomp_request_set_params(acomp_ctx->req, &input, &output, entry->length, PAGE_SIZE);
        BUG_ON(crypto_wait_req(crypto_acomp_decompress(acomp_ctx->req), &acomp_ctx->wait));
        BUG_ON(acomp_ctx->req->dlen != PAGE_SIZE);
-       mutex_unlock(acomp_ctx->mutex);
+       mutex_unlock(&acomp_ctx->mutex);
 
        if (zpool_can_sleep_mapped(zpool))
                zpool_unmap_handle(zpool, entry->handle);
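For readers unfamiliar with the crypto acomp API, the load path above follows the standard request lifecycle. A hedged sketch of the synchronous-wait pattern, restricted to calls that appear in this diff plus crypto_init_wait() (assumed for completeness; the input/output scatterlists are set up elsewhere):

        /* one-time setup (per CPU, as in zswap_cpu_comp_prepare()) */
        acomp = crypto_alloc_acomp_node(tfm_name, 0, 0, cpu_to_node(cpu));
        req = acomp_request_alloc(acomp);
        crypto_init_wait(&wait);        /* assumption: wait initialized here */
        acomp_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                   crypto_req_done, &wait);

        /* per operation: point the request at src/dst, then block on it */
        acomp_request_set_params(req, &input, &output, src_len, PAGE_SIZE);
        err = crypto_wait_req(crypto_acomp_decompress(req), &wait);
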
@@ -1622,13 +1589,17 @@ bool zswap_store(struct folio *folio)
        /* compress */
        acomp_ctx = raw_cpu_ptr(entry->pool->acomp_ctx);
 
-       mutex_lock(acomp_ctx->mutex);
+       mutex_lock(&acomp_ctx->mutex);
 
-       dst = acomp_ctx->dstmem;
+       dst = acomp_ctx->buffer;
        sg_init_table(&input, 1);
        sg_set_page(&input, page, PAGE_SIZE, 0);
 
-       /* zswap_dstmem is of size (PAGE_SIZE * 2). Reflect same in sg_list */
+       /*
+        * We need PAGE_SIZE * 2 here since there may be an over-compression
+        * case, and hardware accelerators may not check the dst buffer size,
+        * so give the dst buffer enough length to avoid buffer overflow.
+        */
        sg_init_one(&output, dst, PAGE_SIZE * 2);
        acomp_request_set_params(acomp_ctx->req, &input, &output, PAGE_SIZE, dlen);
        /*
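The rewritten comment is the key point of this hunk: "compressed" output can exceed the input for incompressible data, and an offload engine that ignores the destination length would overrun a one-page buffer. As a userspace analogy only (zlib's deflate, not the kernel acomp path; hypothetical demo, build with cc demo.c -lz), the worst-case bound is already larger than one 4 KiB page:

#include <stdio.h>
#include <zlib.h>

int main(void)
{
        unsigned long page = 4096;

        /* compressBound() gives deflate's worst-case output size, which is
         * slightly larger than the input -- hence zswap's two-page buffer. */
        printf("input %lu bytes -> worst case %lu bytes\n",
               page, compressBound(page));
        return 0;
}
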
@@ -1668,7 +1639,7 @@ bool zswap_store(struct folio *folio)
        buf = zpool_map_handle(zpool, handle, ZPOOL_MM_WO);
        memcpy(buf, dst, dlen);
        zpool_unmap_handle(zpool, handle);
-       mutex_unlock(acomp_ctx->mutex);
+       mutex_unlock(&acomp_ctx->mutex);
 
        /* populate entry */
        entry->swpentry = swp_entry(type, offset);
@@ -1711,7 +1682,7 @@ bool zswap_store(struct folio *folio)
        return true;
 
 put_dstmem:
-       mutex_unlock(acomp_ctx->mutex);
+       mutex_unlock(&acomp_ctx->mutex);
 put_pool:
        zswap_pool_put(entry->pool);
 freepage:
@@ -1886,13 +1857,6 @@ static int zswap_setup(void)
                goto cache_fail;
        }
 
-       ret = cpuhp_setup_state(CPUHP_MM_ZSWP_MEM_PREPARE, "mm/zswap:prepare",
-                               zswap_dstmem_prepare, zswap_dstmem_dead);
-       if (ret) {
-               pr_err("dstmem alloc failed\n");
-               goto dstmem_fail;
-       }
-
        ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
                                      "mm/zswap_pool:prepare",
                                      zswap_cpu_comp_prepare,
@@ -1924,8 +1888,6 @@ static int zswap_setup(void)
        if (pool)
                zswap_pool_destroy(pool);
 hp_fail:
-       cpuhp_remove_state(CPUHP_MM_ZSWP_MEM_PREPARE);
-dstmem_fail:
        kmem_cache_destroy(zswap_entry_cache);
 cache_fail:
        /* if built-in, we aren't unloaded on failure; don't allow use */
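With the buffer and mutex now owned by each pool's per-CPU acomp_ctx, the dedicated CPUHP_MM_ZSWP_MEM_PREPARE hotplug state (and its error-path cleanup) is simply deleted; only the multi-instance pool state remains. A sketch of the multi-instance pattern (the cpuhp_state_add_instance() call is an assumption about zswap_pool_create(), which is outside this diff):

        /* register once: the callbacks run for every instance on CPU up/down */
        ret = cpuhp_setup_state_multi(CPUHP_MM_ZSWP_POOL_PREPARE,
                                      "mm/zswap_pool:prepare",
                                      zswap_cpu_comp_prepare,
                                      zswap_cpu_comp_dead);

        /* per pool (assumed, in zswap_pool_create()): attach this pool as an
         * instance so its per-CPU contexts are set up on each online CPU */
        ret = cpuhp_state_add_instance(CPUHP_MM_ZSWP_POOL_PREPARE, &pool->node);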