@@ -98,13 +98,14 @@ struct caam_hash_ctx {
 	u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
 	u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
+	u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
 	dma_addr_t sh_desc_update_dma ____cacheline_aligned;
 	dma_addr_t sh_desc_update_first_dma;
 	dma_addr_t sh_desc_fin_dma;
 	dma_addr_t sh_desc_digest_dma;
+	dma_addr_t key_dma;
 	enum dma_data_direction dir;
 	struct device *jrdev;
-	u8 key[CAAM_MAX_HASH_KEY_SIZE];
 	int ctx_len;
 	struct alginfo adata;
 };
@@ -158,6 +159,12 @@ static inline int *alt_buflen(struct caam_hash_state *state)
 	return state->current_buf ? &state->buflen_0 : &state->buflen_1;
 }
 
+static inline bool is_xcbc_aes(u32 algtype)
+{
+	return (algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK)) ==
+	       (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC);
+}
+
 /* Common job descriptor seq in/out ptr routines */
 
 /* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -292,6 +299,62 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
 	return 0;
 }
 
+static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	int digestsize = crypto_ahash_digestsize(ahash);
+	struct device *jrdev = ctx->jrdev;
+	u32 *desc;
+
+	/* key is loaded from memory for UPDATE and FINALIZE states */
+	ctx->adata.key_dma = ctx->key_dma;
+
+	/* shared descriptor for ahash_update */
+	desc = ctx->sh_desc_update;
+	cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
+			  ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	/* shared descriptor for ahash_{final,finup} */
+	desc = ctx->sh_desc_fin;
+	cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
+			  ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	/* key is immediate data for INIT and INITFINAL states */
+	ctx->adata.key_virt = ctx->key;
+
+	/* shared descriptor for first invocation of ahash_update */
+	desc = ctx->sh_desc_update_first;
+	cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
+			  ctx->ctx_len, ctx->key_dma);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	/* shared descriptor for ahash_digest */
+	desc = ctx->sh_desc_digest;
+	cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
+			  ctx->ctx_len, 0);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
+				   desc_bytes(desc), ctx->dir);
+	print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	return 0;
+}
+
 /* Digest hash size if it is too large */
 static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
 			   u32 *keylen, u8 *key_out, u32 digestsize)
@@ -424,6 +487,21 @@ static int ahash_setkey(struct crypto_ahash *ahash,
 	return -EINVAL;
 }
 
+static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
+			unsigned int keylen)
+{
+	struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
+	struct device *jrdev = ctx->jrdev;
+
+	memcpy(ctx->key, key, keylen);
+	dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
+	ctx->adata.keylen = keylen;
+
+	print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
+
+	return axcbc_set_sh_desc(ahash);
+}
 /*
  * ahash_edesc - s/w-extended ahash descriptor
  * @dst_dma: physical mapped address of req->result
@@ -688,6 +766,7 @@ static int ahash_update_ctx(struct ahash_request *req)
 	u8 *buf = current_buf(state);
 	int *buflen = current_buflen(state);
 	u8 *next_buf = alt_buf(state);
+	int blocksize = crypto_ahash_blocksize(ahash);
 	int *next_buflen = alt_buflen(state), last_buflen;
 	int in_len = *buflen + req->nbytes, to_hash;
 	u32 *desc;
@@ -696,9 +775,19 @@ static int ahash_update_ctx(struct ahash_request *req)
 	int ret = 0;
 
 	last_buflen = *next_buflen;
-	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	*next_buflen = in_len & (blocksize - 1);
 	to_hash = in_len - *next_buflen;
 
+	/*
+	 * For XCBC, if to_hash is multiple of block size,
+	 * keep last block in internal buffer
+	 */
+	if (is_xcbc_aes(ctx->adata.algtype) && to_hash >= blocksize &&
+	    (*next_buflen == 0)) {
+		*next_buflen = blocksize;
+		to_hash -= blocksize;
+	}
+
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
 					     req->nbytes - (*next_buflen));
@@ -1122,6 +1211,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 		       GFP_KERNEL : GFP_ATOMIC;
 	u8 *buf = current_buf(state);
 	int *buflen = current_buflen(state);
+	int blocksize = crypto_ahash_blocksize(ahash);
 	u8 *next_buf = alt_buf(state);
 	int *next_buflen = alt_buflen(state);
 	int in_len = *buflen + req->nbytes, to_hash;
@@ -1130,9 +1220,19 @@ static int ahash_update_no_ctx(struct ahash_request *req)
 	u32 *desc;
 	int ret = 0;
 
-	*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
+	*next_buflen = in_len & (blocksize - 1);
 	to_hash = in_len - *next_buflen;
 
+	/*
+	 * For XCBC, if to_hash is multiple of block size,
+	 * keep last block in internal buffer
+	 */
+	if (is_xcbc_aes(ctx->adata.algtype) && to_hash >= blocksize &&
+	    (*next_buflen == 0)) {
+		*next_buflen = blocksize;
+		to_hash -= blocksize;
+	}
+
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
 					     req->nbytes - *next_buflen);
@@ -1338,15 +1438,25 @@ static int ahash_update_first(struct ahash_request *req)
 	u8 *next_buf = alt_buf(state);
 	int *next_buflen = alt_buflen(state);
 	int to_hash;
+	int blocksize = crypto_ahash_blocksize(ahash);
 	u32 *desc;
 	int src_nents, mapped_nents;
 	struct ahash_edesc *edesc;
 	int ret = 0;
 
-	*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
-				      1);
+	*next_buflen = req->nbytes & (blocksize - 1);
 	to_hash = req->nbytes - *next_buflen;
 
+	/*
+	 * For XCBC, if to_hash is multiple of block size,
+	 * keep last block in internal buffer
+	 */
+	if (is_xcbc_aes(ctx->adata.algtype) && to_hash >= blocksize &&
+	    (*next_buflen == 0)) {
+		*next_buflen = blocksize;
+		to_hash -= blocksize;
+	}
+
 	if (to_hash) {
 		src_nents = sg_nents_for_len(req->src,
 					     req->nbytes - *next_buflen);
@@ -1654,6 +1764,25 @@ static struct caam_hash_template driver_hash[] = {
 			},
 		},
 		.alg_type = OP_ALG_ALGSEL_MD5,
+	}, {
+		.hmac_name = "xcbc(aes)",
+		.hmac_driver_name = "xcbc-aes-caam",
+		.blocksize = AES_BLOCK_SIZE,
+		.template_ahash = {
+			.init = ahash_init,
+			.update = ahash_update,
+			.final = ahash_final,
+			.finup = ahash_finup,
+			.digest = ahash_digest,
+			.export = ahash_export,
+			.import = ahash_import,
+			.setkey = axcbc_setkey,
+			.halg = {
+				.digestsize = AES_BLOCK_SIZE,
+				.statesize = sizeof(struct caam_export_state),
+			},
+		},
+		.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
 	},
 };
 
@@ -1695,14 +1824,42 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 	}
 
 	priv = dev_get_drvdata(ctx->jrdev->parent);
-	ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+
+	if (is_xcbc_aes(caam_hash->alg_type)) {
+		ctx->dir = DMA_TO_DEVICE;
+		ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
+		ctx->ctx_len = 48;
+
+		ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
+						    ARRAY_SIZE(ctx->key),
+						    DMA_BIDIRECTIONAL,
+						    DMA_ATTR_SKIP_CPU_SYNC);
+		if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
+			dev_err(ctx->jrdev, "unable to map key\n");
+			caam_jr_free(ctx->jrdev);
+			return -ENOMEM;
+		}
+	} else {
+		ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
+		ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
+		ctx->ctx_len = runninglen[(ctx->adata.algtype &
+					   OP_ALG_ALGSEL_SUBMASK) >>
+					   OP_ALG_ALGSEL_SHIFT];
+	}
 
 	dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
 					offsetof(struct caam_hash_ctx,
 						 sh_desc_update_dma),
 					ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
 	if (dma_mapping_error(ctx->jrdev, dma_addr)) {
 		dev_err(ctx->jrdev, "unable to map shared descriptors\n");
+
+		if (is_xcbc_aes(caam_hash->alg_type))
+			dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
+					       ARRAY_SIZE(ctx->key),
+					       DMA_BIDIRECTIONAL,
+					       DMA_ATTR_SKIP_CPU_SYNC);
+
 		caam_jr_free(ctx->jrdev);
 		return -ENOMEM;
 	}
@@ -1716,13 +1873,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
 	ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
 						      sh_desc_digest);
 
-	/* copy descriptor header template value */
-	ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
-
-	ctx->ctx_len = runninglen[(ctx->adata.algtype &
-				   OP_ALG_ALGSEL_SUBMASK) >>
-				   OP_ALG_ALGSEL_SHIFT];
-
 	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
 				 sizeof(struct caam_hash_state));
 
@@ -1738,9 +1888,12 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
 	struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
 
 	dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
-			       offsetof(struct caam_hash_ctx,
-					sh_desc_update_dma),
+			       offsetof(struct caam_hash_ctx, key),
 			       ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
+	if (is_xcbc_aes(ctx->adata.algtype))
+		dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
+				       ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
+				       DMA_ATTR_SKIP_CPU_SYNC);
 	caam_jr_free(ctx->jrdev);
 }
 
@@ -1871,7 +2024,8 @@ static int __init caam_algapi_hash_init(void)
 		struct caam_hash_template *alg = driver_hash + i;
 
 		/* If MD size is not supported by device, skip registration */
-		if (alg->template_ahash.halg.digestsize > md_limit)
+		if (is_mdha(alg->alg_type) &&
+		    alg->template_ahash.halg.digestsize > md_limit)
 			continue;
 
 		/* register hmac version */
@@ -1892,6 +2046,9 @@ static int __init caam_algapi_hash_init(void)
 		} else
 			list_add_tail(&t_alg->entry, &hash_list);
 
+		if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
+			continue;
+
 		/* register unkeyed version */
 		t_alg = caam_hash_alloc(alg, false);
 		if (IS_ERR(t_alg)) {
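
Note: the same three-line adjustment recurs in ahash_update_ctx(), ahash_update_no_ctx() and ahash_update_first(). XCBC-MAC folds a derived key (K2 or K3) into the last block, so hardware must not consume a final full block until final/finup decides which key applies. A minimal standalone sketch of that shared bookkeeping, with a hypothetical helper name that is not part of the patch:

/*
 * Illustrative sketch (hypothetical helper, not driver code): split incoming
 * bytes into a portion sent to hardware now (*to_hash) and a tail kept in
 * the driver's internal buffer (*keep). blocksize must be a power of two.
 */
static void xcbc_split_for_update(unsigned int buffered, unsigned int nbytes,
				  unsigned int blocksize, bool xcbc,
				  unsigned int *to_hash, unsigned int *keep)
{
	unsigned int in_len = buffered + nbytes;

	/* a partial tail always stays buffered */
	*keep = in_len & (blocksize - 1);
	*to_hash = in_len - *keep;

	/*
	 * For XCBC, an exact multiple of the block size still holds back one
	 * full block, mirroring the is_xcbc_aes() checks in the patch.
	 */
	if (xcbc && *to_hash >= blocksize && *keep == 0) {
		*keep = blocksize;
		*to_hash -= blocksize;
	}
}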
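For reference, a hedged usage sketch of how a kernel caller might exercise the newly registered "xcbc(aes)" ahash (served by xcbc-aes-caam when CAAM hardware is present) through the standard crypto API. The function name and error paths below are illustrative; only documented crypto API calls are used.

#include <crypto/hash.h>
#include <linux/scatterlist.h>

static int xcbc_aes_digest_example(const u8 *key, unsigned int keylen,
				   const void *data, unsigned int len,
				   u8 *out /* AES_BLOCK_SIZE bytes */)
{
	struct crypto_ahash *tfm;
	struct ahash_request *req;
	struct scatterlist sg;
	DECLARE_CRYPTO_WAIT(wait);
	int err;

	tfm = crypto_alloc_ahash("xcbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	/* reaches axcbc_setkey() when the CAAM driver backs the tfm */
	err = crypto_ahash_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = ahash_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	/* data must be DMA-able (e.g. kmalloc'ed), not on the stack */
	sg_init_one(&sg, data, len);
	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				   crypto_req_done, &wait);
	ahash_request_set_crypt(req, &sg, out, len);

	/* one-shot digest: exercises the INITFINAL shared descriptor */
	err = crypto_wait_req(crypto_ahash_digest(req), &wait);

	ahash_request_free(req);
out_free_tfm:
	crypto_free_ahash(tfm);
	return err;
}

A keyed MAC such as XCBC requires the key before any digest operation; if no CAAM device is available, the same allocation falls back to the generic xcbc template implementation.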