@@ -690,14 +690,10 @@ static int sec_skcipher_fbtfm_init(struct crypto_skcipher *tfm)
 
 	c_ctx->fallback = false;
 
-	/* Currently, only XTS mode need fallback tfm when using 192bit key */
-	if (likely(strncmp(alg, "xts", SEC_XTS_NAME_SZ)))
-		return 0;
-
 	c_ctx->fbtfm = crypto_alloc_sync_skcipher(alg, 0,
 						  CRYPTO_ALG_NEED_FALLBACK);
 	if (IS_ERR(c_ctx->fbtfm)) {
-		pr_err("failed to alloc xts mode fallback tfm!\n");
+		pr_err("failed to alloc fallback tfm for %s!\n", alg);
 		return PTR_ERR(c_ctx->fbtfm);
 	}
 
@@ -857,7 +853,7 @@ static int sec_skcipher_setkey(struct crypto_skcipher *tfm, const u8 *key,
 	}
 
 	memcpy(c_ctx->c_key, key, keylen);
-	if (c_ctx->fallback && c_ctx->fbtfm) {
+	if (c_ctx->fbtfm) {
 		ret = crypto_sync_skcipher_setkey(c_ctx->fbtfm, key, keylen);
 		if (ret) {
 			dev_err(dev, "failed to set fallback skcipher key!\n");
@@ -1155,8 +1151,10 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 	}
 
 	ret = crypto_authenc_extractkeys(&keys, key, keylen);
-	if (ret)
+	if (ret) {
+		dev_err(dev, "sec extract aead keys err!\n");
 		goto bad_key;
+	}
 
 	ret = sec_aead_aes_set_key(c_ctx, &keys);
 	if (ret) {
@@ -1170,12 +1168,6 @@ static int sec_aead_setkey(struct crypto_aead *tfm, const u8 *key,
 		goto bad_key;
 	}
 
-	if (ctx->a_ctx.a_key_len & WORD_MASK) {
-		ret = -EINVAL;
-		dev_err(dev, "AUTH key length error!\n");
-		goto bad_key;
-	}
-
 	ret = sec_aead_fallback_setkey(a_ctx, tfm, key, keylen);
 	if (ret) {
 		dev_err(dev, "set sec fallback key err!\n");
@@ -1995,8 +1987,7 @@ static int sec_aead_sha512_ctx_init(struct crypto_aead *tfm)
 	return sec_aead_ctx_init(tfm, "sha512");
 }
 
-static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
-				       struct sec_req *sreq)
+static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx, struct sec_req *sreq)
 {
 	u32 cryptlen = sreq->c_req.sk_req->cryptlen;
 	struct device *dev = ctx->dev;
@@ -2018,10 +2009,6 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
 		}
 		break;
 	case SEC_CMODE_CTR:
-		if (unlikely(ctx->sec->qm.ver < QM_HW_V3)) {
-			dev_err(dev, "skcipher HW version error!\n");
-			ret = -EINVAL;
-		}
 		break;
 	default:
 		ret = -EINVAL;
@@ -2030,17 +2017,21 @@ static int sec_skcipher_cryptlen_check(struct sec_ctx *ctx,
 	return ret;
 }
 
-static int sec_skcipher_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_skcipher_param_check(struct sec_ctx *ctx,
+				    struct sec_req *sreq, bool *need_fallback)
 {
 	struct skcipher_request *sk_req = sreq->c_req.sk_req;
 	struct device *dev = ctx->dev;
 	u8 c_alg = ctx->c_ctx.c_alg;
 
-	if (unlikely(!sk_req->src || !sk_req->dst ||
-		     sk_req->cryptlen > MAX_INPUT_DATA_LEN)) {
+	if (unlikely(!sk_req->src || !sk_req->dst)) {
 		dev_err(dev, "skcipher input param error!\n");
 		return -EINVAL;
 	}
+
+	if (sk_req->cryptlen > MAX_INPUT_DATA_LEN)
+		*need_fallback = true;
+
 	sreq->c_req.c_len = sk_req->cryptlen;
 
 	if (ctx->pbuf_supported && sk_req->cryptlen <= SEC_PBUF_SZ)
@@ -2098,6 +2089,7 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
 	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(sk_req);
 	struct sec_req *req = skcipher_request_ctx(sk_req);
 	struct sec_ctx *ctx = crypto_skcipher_ctx(tfm);
+	bool need_fallback = false;
 	int ret;
 
 	if (!sk_req->cryptlen) {
@@ -2111,11 +2103,11 @@ static int sec_skcipher_crypto(struct skcipher_request *sk_req, bool encrypt)
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
 
-	ret = sec_skcipher_param_check(ctx, req);
+	ret = sec_skcipher_param_check(ctx, req, &need_fallback);
 	if (unlikely(ret))
 		return -EINVAL;
 
-	if (unlikely(ctx->c_ctx.fallback))
+	if (unlikely(ctx->c_ctx.fallback || need_fallback))
 		return sec_skcipher_soft_crypto(ctx, sk_req, encrypt);
 
 	return ctx->req_op->process(ctx, req);
@@ -2223,52 +2215,35 @@ static int sec_aead_spec_check(struct sec_ctx *ctx, struct sec_req *sreq)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
 	size_t sz = crypto_aead_authsize(tfm);
 	u8 c_mode = ctx->c_ctx.c_mode;
-	struct device *dev = ctx->dev;
 	int ret;
 
-	/* Hardware does not handle cases where authsize is not 4 bytes aligned */
-	if (c_mode == SEC_CMODE_CBC && (sz & WORD_MASK)) {
-		sreq->aead_req.fallback = true;
+	if (unlikely(ctx->sec->qm.ver == QM_HW_V2 && !sreq->c_req.c_len))
 		return -EINVAL;
-	}
 
 	if (unlikely(req->cryptlen + req->assoclen > MAX_INPUT_DATA_LEN ||
-		     req->assoclen > SEC_MAX_AAD_LEN)) {
-		dev_err(dev, "aead input spec error!\n");
+		     req->assoclen > SEC_MAX_AAD_LEN))
 		return -EINVAL;
-	}
 
 	if (c_mode == SEC_CMODE_CCM) {
-		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN)) {
-			dev_err_ratelimited(dev, "CCM input aad parameter is too long!\n");
+		if (unlikely(req->assoclen > SEC_MAX_CCM_AAD_LEN))
 			return -EINVAL;
-		}
-		ret = aead_iv_demension_check(req);
-		if (ret) {
-			dev_err(dev, "aead input iv param error!\n");
-			return ret;
-		}
-	}
 
-	if (sreq->c_req.encrypt)
-		sreq->c_req.c_len = req->cryptlen;
-	else
-		sreq->c_req.c_len = req->cryptlen - sz;
-	if (c_mode == SEC_CMODE_CBC) {
-		if (unlikely(sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
-			dev_err(dev, "aead crypto length error!\n");
+		ret = aead_iv_demension_check(req);
+		if (unlikely(ret))
+			return -EINVAL;
+	} else if (c_mode == SEC_CMODE_CBC) {
+		if (unlikely(sz & WORD_MASK))
+			return -EINVAL;
+		if (unlikely(ctx->a_ctx.a_key_len & WORD_MASK))
 			return -EINVAL;
-		}
 	}
 
 	return 0;
 }
 
-static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
+static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq, bool *need_fallback)
 {
 	struct aead_request *req = sreq->aead_req.aead_req;
-	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
-	size_t authsize = crypto_aead_authsize(tfm);
 	struct device *dev = ctx->dev;
 	u8 c_alg = ctx->c_ctx.c_alg;
 
@@ -2277,12 +2252,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 		return -EINVAL;
 	}
 
-	if (ctx->sec->qm.ver == QM_HW_V2) {
-		if (unlikely(!req->cryptlen || (!sreq->c_req.encrypt &&
-		    req->cryptlen <= authsize))) {
-			sreq->aead_req.fallback = true;
-			return -EINVAL;
-		}
+	if (unlikely(ctx->c_ctx.c_mode == SEC_CMODE_CBC &&
+		     sreq->c_req.c_len & (AES_BLOCK_SIZE - 1))) {
+		dev_err(dev, "aead cbc mode input data length error!\n");
+		return -EINVAL;
 	}
 
 	/* Support AES or SM4 */
@@ -2291,8 +2264,10 @@ static int sec_aead_param_check(struct sec_ctx *ctx, struct sec_req *sreq)
 		return -EINVAL;
 	}
 
-	if (unlikely(sec_aead_spec_check(ctx, sreq)))
+	if (unlikely(sec_aead_spec_check(ctx, sreq))) {
+		*need_fallback = true;
 		return -EINVAL;
+	}
 
 	if (ctx->pbuf_supported && (req->cryptlen + req->assoclen) <=
 		SEC_PBUF_SZ)
@@ -2336,17 +2311,19 @@ static int sec_aead_crypto(struct aead_request *a_req, bool encrypt)
 	struct crypto_aead *tfm = crypto_aead_reqtfm(a_req);
 	struct sec_req *req = aead_request_ctx(a_req);
 	struct sec_ctx *ctx = crypto_aead_ctx(tfm);
+	size_t sz = crypto_aead_authsize(tfm);
+	bool need_fallback = false;
 	int ret;
 
 	req->flag = a_req->base.flags;
 	req->aead_req.aead_req = a_req;
 	req->c_req.encrypt = encrypt;
 	req->ctx = ctx;
-	req->aead_req.fallback = false;
+	req->c_req.c_len = a_req->cryptlen - (req->c_req.encrypt ? 0 : sz);
 
-	ret = sec_aead_param_check(ctx, req);
+	ret = sec_aead_param_check(ctx, req, &need_fallback);
 	if (unlikely(ret)) {
-		if (req->aead_req.fallback)
+		if (need_fallback)
 			return sec_aead_soft_crypto(ctx, a_req, encrypt);
 		return -EINVAL;
 	}
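
The change that recurs across these hunks is the new need_fallback out-parameter: conditions the hardware cannot handle (over-long input, failing spec checks) no longer return a hard error but redirect the request to the software fallback tfm. Below is a minimal, self-contained user-space sketch of that pattern; the names (demo_req, demo_param_check, demo_crypto) and the length limit are illustrative placeholders, not the driver's real structures.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Placeholder limit standing in for the driver's MAX_INPUT_DATA_LEN. */
#define DEMO_MAX_INPUT_DATA_LEN (1U << 20)

/* Simplified stand-in for the request: only the fields the check looks at. */
struct demo_req {
	unsigned int cryptlen;
	bool have_src_dst;
};

/*
 * Parameter check in the spirit of the patched sec_skcipher_param_check():
 * hard errors still fail with -EINVAL, but an over-long request no longer
 * returns an error - it only asks the caller to take the software path.
 */
static int demo_param_check(const struct demo_req *req, bool *need_fallback)
{
	if (!req->have_src_dst)
		return -EINVAL;

	if (req->cryptlen > DEMO_MAX_INPUT_DATA_LEN)
		*need_fallback = true;

	return 0;
}

/* Caller mirrors sec_skcipher_crypto(): the flag routes to soft crypto. */
static int demo_crypto(const struct demo_req *req)
{
	bool need_fallback = false;

	if (demo_param_check(req, &need_fallback))
		return -EINVAL;

	if (need_fallback) {
		puts("dispatching to software fallback tfm");
		return 0;
	}

	puts("dispatching to hardware queue");
	return 0;
}

int main(void)
{
	struct demo_req small = { .cryptlen = 512, .have_src_dst = true };
	struct demo_req huge = { .cryptlen = DEMO_MAX_INPUT_DATA_LEN + 1, .have_src_dst = true };

	demo_crypto(&small);	/* hardware path */
	demo_crypto(&huge);	/* software fallback path */
	return 0;
}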