Skip to content

Commit 12b8567

Browse files
iuliana-prodan and herbertx
authored and committed
crypto: caam - add support for xcbc(aes)
Add xcbc(aes) offloading support. Due to xcbc algorithm design and HW implementation in CAAM, driver must still have some bytes to send to the crypto engine when ahash_final() is called - such that HW correctly uses either K2 or K3 for the last block. Signed-off-by: Iuliana Prodan <iuliana.prodan@nxp.com> Signed-off-by: Horia Geantă <horia.geanta@nxp.com> Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent 9a2537d commit 12b8567

File tree

3 files changed

+232
-17
lines changed

3 files changed

+232
-17
lines changed

drivers/crypto/caam/caamhash.c

Lines changed: 173 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -98,13 +98,14 @@ struct caam_hash_ctx {
9898
u32 sh_desc_update_first[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
9999
u32 sh_desc_fin[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
100100
u32 sh_desc_digest[DESC_HASH_MAX_USED_LEN] ____cacheline_aligned;
101+
u8 key[CAAM_MAX_HASH_KEY_SIZE] ____cacheline_aligned;
101102
dma_addr_t sh_desc_update_dma ____cacheline_aligned;
102103
dma_addr_t sh_desc_update_first_dma;
103104
dma_addr_t sh_desc_fin_dma;
104105
dma_addr_t sh_desc_digest_dma;
106+
dma_addr_t key_dma;
105107
enum dma_data_direction dir;
106108
struct device *jrdev;
107-
u8 key[CAAM_MAX_HASH_KEY_SIZE];
108109
int ctx_len;
109110
struct alginfo adata;
110111
};
@@ -158,6 +159,12 @@ static inline int *alt_buflen(struct caam_hash_state *state)
158159
return state->current_buf ? &state->buflen_0 : &state->buflen_1;
159160
}
160161

162+
/*
 * is_xcbc_aes - check whether @algtype selects AES with the XCBC-MAC AAI
 * @algtype: OP_ALG_* algorithm/mode selector word
 *
 * Only the algorithm-select and AAI fields are compared; class and state
 * bits in @algtype are ignored.
 */
static inline bool is_xcbc_aes(u32 algtype)
{
	u32 masked = algtype & (OP_ALG_ALGSEL_MASK | OP_ALG_AAI_MASK);

	return masked == (OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC);
}
167+
161168
/* Common job descriptor seq in/out ptr routines */
162169

163170
/* Map state->caam_ctx, and append seq_out_ptr command that points to it */
@@ -292,6 +299,62 @@ static int ahash_set_sh_desc(struct crypto_ahash *ahash)
292299
return 0;
293300
}
294301

302+
static int axcbc_set_sh_desc(struct crypto_ahash *ahash)
303+
{
304+
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
305+
int digestsize = crypto_ahash_digestsize(ahash);
306+
struct device *jrdev = ctx->jrdev;
307+
u32 *desc;
308+
309+
/* key is loaded from memory for UPDATE and FINALIZE states */
310+
ctx->adata.key_dma = ctx->key_dma;
311+
312+
/* shared descriptor for ahash_update */
313+
desc = ctx->sh_desc_update;
314+
cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_UPDATE, ctx->ctx_len,
315+
ctx->ctx_len, 0);
316+
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_dma,
317+
desc_bytes(desc), ctx->dir);
318+
print_hex_dump_debug("axcbc update shdesc@" __stringify(__LINE__)" : ",
319+
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
320+
1);
321+
322+
/* shared descriptor for ahash_{final,finup} */
323+
desc = ctx->sh_desc_fin;
324+
cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_FINALIZE, digestsize,
325+
ctx->ctx_len, 0);
326+
dma_sync_single_for_device(jrdev, ctx->sh_desc_fin_dma,
327+
desc_bytes(desc), ctx->dir);
328+
print_hex_dump_debug("axcbc finup shdesc@" __stringify(__LINE__)" : ",
329+
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
330+
1);
331+
332+
/* key is immediate data for INIT and INITFINAL states */
333+
ctx->adata.key_virt = ctx->key;
334+
335+
/* shared descriptor for first invocation of ahash_update */
336+
desc = ctx->sh_desc_update_first;
337+
cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_INIT, ctx->ctx_len,
338+
ctx->ctx_len, ctx->key_dma);
339+
dma_sync_single_for_device(jrdev, ctx->sh_desc_update_first_dma,
340+
desc_bytes(desc), ctx->dir);
341+
print_hex_dump_debug("axcbc update first shdesc@" __stringify(__LINE__)" : ",
342+
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
343+
1);
344+
345+
/* shared descriptor for ahash_digest */
346+
desc = ctx->sh_desc_digest;
347+
cnstr_shdsc_axcbc(desc, &ctx->adata, OP_ALG_AS_INITFINAL, digestsize,
348+
ctx->ctx_len, 0);
349+
dma_sync_single_for_device(jrdev, ctx->sh_desc_digest_dma,
350+
desc_bytes(desc), ctx->dir);
351+
print_hex_dump_debug("axcbc digest shdesc@" __stringify(__LINE__)" : ",
352+
DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
353+
1);
354+
355+
return 0;
356+
}
357+
295358
/* Digest hash size if it is too large */
296359
static int hash_digest_key(struct caam_hash_ctx *ctx, const u8 *key_in,
297360
u32 *keylen, u8 *key_out, u32 digestsize)
@@ -424,6 +487,21 @@ static int ahash_setkey(struct crypto_ahash *ahash,
424487
return -EINVAL;
425488
}
426489

490+
static int axcbc_setkey(struct crypto_ahash *ahash, const u8 *key,
491+
unsigned int keylen)
492+
{
493+
struct caam_hash_ctx *ctx = crypto_ahash_ctx(ahash);
494+
struct device *jrdev = ctx->jrdev;
495+
496+
memcpy(ctx->key, key, keylen);
497+
dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE);
498+
ctx->adata.keylen = keylen;
499+
500+
print_hex_dump_debug("axcbc ctx.key@" __stringify(__LINE__)" : ",
501+
DUMP_PREFIX_ADDRESS, 16, 4, ctx->key, keylen, 1);
502+
503+
return axcbc_set_sh_desc(ahash);
504+
}
427505
/*
428506
* ahash_edesc - s/w-extended ahash descriptor
429507
* @dst_dma: physical mapped address of req->result
@@ -688,6 +766,7 @@ static int ahash_update_ctx(struct ahash_request *req)
688766
u8 *buf = current_buf(state);
689767
int *buflen = current_buflen(state);
690768
u8 *next_buf = alt_buf(state);
769+
int blocksize = crypto_ahash_blocksize(ahash);
691770
int *next_buflen = alt_buflen(state), last_buflen;
692771
int in_len = *buflen + req->nbytes, to_hash;
693772
u32 *desc;
@@ -696,9 +775,19 @@ static int ahash_update_ctx(struct ahash_request *req)
696775
int ret = 0;
697776

698777
last_buflen = *next_buflen;
699-
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
778+
*next_buflen = in_len & (blocksize - 1);
700779
to_hash = in_len - *next_buflen;
701780

781+
/*
782+
* For XCBC, if to_hash is multiple of block size,
783+
* keep last block in internal buffer
784+
*/
785+
if (is_xcbc_aes(ctx->adata.algtype) && to_hash >= blocksize &&
786+
(*next_buflen == 0)) {
787+
*next_buflen = blocksize;
788+
to_hash -= blocksize;
789+
}
790+
702791
if (to_hash) {
703792
src_nents = sg_nents_for_len(req->src,
704793
req->nbytes - (*next_buflen));
@@ -1122,6 +1211,7 @@ static int ahash_update_no_ctx(struct ahash_request *req)
11221211
GFP_KERNEL : GFP_ATOMIC;
11231212
u8 *buf = current_buf(state);
11241213
int *buflen = current_buflen(state);
1214+
int blocksize = crypto_ahash_blocksize(ahash);
11251215
u8 *next_buf = alt_buf(state);
11261216
int *next_buflen = alt_buflen(state);
11271217
int in_len = *buflen + req->nbytes, to_hash;
@@ -1130,9 +1220,19 @@ static int ahash_update_no_ctx(struct ahash_request *req)
11301220
u32 *desc;
11311221
int ret = 0;
11321222

1133-
*next_buflen = in_len & (crypto_tfm_alg_blocksize(&ahash->base) - 1);
1223+
*next_buflen = in_len & (blocksize - 1);
11341224
to_hash = in_len - *next_buflen;
11351225

1226+
/*
1227+
* For XCBC, if to_hash is multiple of block size,
1228+
* keep last block in internal buffer
1229+
*/
1230+
if (is_xcbc_aes(ctx->adata.algtype) && to_hash >= blocksize &&
1231+
(*next_buflen == 0)) {
1232+
*next_buflen = blocksize;
1233+
to_hash -= blocksize;
1234+
}
1235+
11361236
if (to_hash) {
11371237
src_nents = sg_nents_for_len(req->src,
11381238
req->nbytes - *next_buflen);
@@ -1338,15 +1438,25 @@ static int ahash_update_first(struct ahash_request *req)
13381438
u8 *next_buf = alt_buf(state);
13391439
int *next_buflen = alt_buflen(state);
13401440
int to_hash;
1441+
int blocksize = crypto_ahash_blocksize(ahash);
13411442
u32 *desc;
13421443
int src_nents, mapped_nents;
13431444
struct ahash_edesc *edesc;
13441445
int ret = 0;
13451446

1346-
*next_buflen = req->nbytes & (crypto_tfm_alg_blocksize(&ahash->base) -
1347-
1);
1447+
*next_buflen = req->nbytes & (blocksize - 1);
13481448
to_hash = req->nbytes - *next_buflen;
13491449

1450+
/*
1451+
* For XCBC, if to_hash is multiple of block size,
1452+
* keep last block in internal buffer
1453+
*/
1454+
if (is_xcbc_aes(ctx->adata.algtype) && to_hash >= blocksize &&
1455+
(*next_buflen == 0)) {
1456+
*next_buflen = blocksize;
1457+
to_hash -= blocksize;
1458+
}
1459+
13501460
if (to_hash) {
13511461
src_nents = sg_nents_for_len(req->src,
13521462
req->nbytes - *next_buflen);
@@ -1654,6 +1764,25 @@ static struct caam_hash_template driver_hash[] = {
16541764
},
16551765
},
16561766
.alg_type = OP_ALG_ALGSEL_MD5,
1767+
}, {
1768+
.hmac_name = "xcbc(aes)",
1769+
.hmac_driver_name = "xcbc-aes-caam",
1770+
.blocksize = AES_BLOCK_SIZE,
1771+
.template_ahash = {
1772+
.init = ahash_init,
1773+
.update = ahash_update,
1774+
.final = ahash_final,
1775+
.finup = ahash_finup,
1776+
.digest = ahash_digest,
1777+
.export = ahash_export,
1778+
.import = ahash_import,
1779+
.setkey = axcbc_setkey,
1780+
.halg = {
1781+
.digestsize = AES_BLOCK_SIZE,
1782+
.statesize = sizeof(struct caam_export_state),
1783+
},
1784+
},
1785+
.alg_type = OP_ALG_ALGSEL_AES | OP_ALG_AAI_XCBC_MAC,
16571786
},
16581787
};
16591788

@@ -1695,14 +1824,42 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
16951824
}
16961825

16971826
priv = dev_get_drvdata(ctx->jrdev->parent);
1698-
ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1827+
1828+
if (is_xcbc_aes(caam_hash->alg_type)) {
1829+
ctx->dir = DMA_TO_DEVICE;
1830+
ctx->adata.algtype = OP_TYPE_CLASS1_ALG | caam_hash->alg_type;
1831+
ctx->ctx_len = 48;
1832+
1833+
ctx->key_dma = dma_map_single_attrs(ctx->jrdev, ctx->key,
1834+
ARRAY_SIZE(ctx->key),
1835+
DMA_BIDIRECTIONAL,
1836+
DMA_ATTR_SKIP_CPU_SYNC);
1837+
if (dma_mapping_error(ctx->jrdev, ctx->key_dma)) {
1838+
dev_err(ctx->jrdev, "unable to map key\n");
1839+
caam_jr_free(ctx->jrdev);
1840+
return -ENOMEM;
1841+
}
1842+
} else {
1843+
ctx->dir = priv->era >= 6 ? DMA_BIDIRECTIONAL : DMA_TO_DEVICE;
1844+
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1845+
ctx->ctx_len = runninglen[(ctx->adata.algtype &
1846+
OP_ALG_ALGSEL_SUBMASK) >>
1847+
OP_ALG_ALGSEL_SHIFT];
1848+
}
16991849

17001850
dma_addr = dma_map_single_attrs(ctx->jrdev, ctx->sh_desc_update,
17011851
offsetof(struct caam_hash_ctx,
17021852
sh_desc_update_dma),
17031853
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
17041854
if (dma_mapping_error(ctx->jrdev, dma_addr)) {
17051855
dev_err(ctx->jrdev, "unable to map shared descriptors\n");
1856+
1857+
if (is_xcbc_aes(caam_hash->alg_type))
1858+
dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
1859+
ARRAY_SIZE(ctx->key),
1860+
DMA_BIDIRECTIONAL,
1861+
DMA_ATTR_SKIP_CPU_SYNC);
1862+
17061863
caam_jr_free(ctx->jrdev);
17071864
return -ENOMEM;
17081865
}
@@ -1716,13 +1873,6 @@ static int caam_hash_cra_init(struct crypto_tfm *tfm)
17161873
ctx->sh_desc_digest_dma = dma_addr + offsetof(struct caam_hash_ctx,
17171874
sh_desc_digest);
17181875

1719-
/* copy descriptor header template value */
1720-
ctx->adata.algtype = OP_TYPE_CLASS2_ALG | caam_hash->alg_type;
1721-
1722-
ctx->ctx_len = runninglen[(ctx->adata.algtype &
1723-
OP_ALG_ALGSEL_SUBMASK) >>
1724-
OP_ALG_ALGSEL_SHIFT];
1725-
17261876
crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
17271877
sizeof(struct caam_hash_state));
17281878

@@ -1738,9 +1888,12 @@ static void caam_hash_cra_exit(struct crypto_tfm *tfm)
17381888
struct caam_hash_ctx *ctx = crypto_tfm_ctx(tfm);
17391889

17401890
dma_unmap_single_attrs(ctx->jrdev, ctx->sh_desc_update_dma,
1741-
offsetof(struct caam_hash_ctx,
1742-
sh_desc_update_dma),
1891+
offsetof(struct caam_hash_ctx, key),
17431892
ctx->dir, DMA_ATTR_SKIP_CPU_SYNC);
1893+
if (is_xcbc_aes(ctx->adata.algtype))
1894+
dma_unmap_single_attrs(ctx->jrdev, ctx->key_dma,
1895+
ARRAY_SIZE(ctx->key), DMA_BIDIRECTIONAL,
1896+
DMA_ATTR_SKIP_CPU_SYNC);
17441897
caam_jr_free(ctx->jrdev);
17451898
}
17461899

@@ -1871,7 +2024,8 @@ static int __init caam_algapi_hash_init(void)
18712024
struct caam_hash_template *alg = driver_hash + i;
18722025

18732026
/* If MD size is not supported by device, skip registration */
1874-
if (alg->template_ahash.halg.digestsize > md_limit)
2027+
if (is_mdha(alg->alg_type) &&
2028+
alg->template_ahash.halg.digestsize > md_limit)
18752029
continue;
18762030

18772031
/* register hmac version */
@@ -1892,6 +2046,9 @@ static int __init caam_algapi_hash_init(void)
18922046
} else
18932047
list_add_tail(&t_alg->entry, &hash_list);
18942048

2049+
if ((alg->alg_type & OP_ALG_ALGSEL_MASK) == OP_ALG_ALGSEL_AES)
2050+
continue;
2051+
18952052
/* register unkeyed version */
18962053
t_alg = caam_hash_alloc(alg, false);
18972054
if (IS_ERR(t_alg)) {

drivers/crypto/caam/caamhash_desc.c

Lines changed: 57 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2,7 +2,7 @@
22
/*
33
* Shared descriptors for ahash algorithms
44
*
5-
* Copyright 2017 NXP
5+
* Copyright 2017-2018 NXP
66
*/
77

88
#include "compat.h"
@@ -75,6 +75,62 @@ void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
7575
}
7676
EXPORT_SYMBOL(cnstr_shdsc_ahash);
7777

78+
/**
 * cnstr_shdsc_axcbc - axcbc shared descriptor
 * @desc: pointer to buffer used for descriptor construction
 * @adata: pointer to authentication transform definitions.
 * @state: algorithm state OP_ALG_AS_{INIT, FINALIZE, INITFINALIZE, UPDATE}
 * @digestsize: algorithm's digest size
 * @ctx_len: size of Context Register
 * @key_dma: I/O Virtual Address of the key
 *
 * For INIT/INITFINAL the key (K1) is embedded as immediate data taken from
 * adata->key_virt; for UPDATE/FINALIZE it is loaded by DMA from
 * adata->key_dma.  An INIT descriptor additionally stores K1 back to
 * @key_dma so that subsequent UPDATE/FINALIZE descriptors can fetch it.
 */
void cnstr_shdsc_axcbc(u32 * const desc, struct alginfo *adata, u32 state,
		       int digestsize, int ctx_len, dma_addr_t key_dma)
{
	u32 *skip_key_load;

	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);

	/* Skip loading of key, context if already shared */
	skip_key_load = append_jump(desc, JUMP_TEST_ALL | JUMP_COND_SHRD);

	if (state == OP_ALG_AS_INIT || state == OP_ALG_AS_INITFINAL) {
		/* key is immediate data for INIT and INITFINAL states */
		append_key_as_imm(desc, adata->key_virt, adata->keylen,
				  adata->keylen, CLASS_1 | KEY_DEST_CLASS_REG);
	} else { /* UPDATE, FINALIZE */
		/* Load K1 from memory (saved there by the INIT descriptor) */
		append_key(desc, adata->key_dma, adata->keylen,
			   CLASS_1 | KEY_DEST_CLASS_REG | KEY_ENC);
		/* Restore context (partial hash, K2, K3) */
		append_seq_load(desc, ctx_len, LDST_CLASS_1_CCB |
				LDST_SRCDST_BYTE_CONTEXT);
	}

	set_jump_tgt_here(desc, skip_key_load);

	/* Class 1 operation */
	append_operation(desc, adata->algtype | state | OP_ALG_ENCRYPT);

	/*
	 * Load from buf and/or src and write to req->result or state->context
	 * Calculate remaining bytes to read
	 */
	append_math_add(desc, VARSEQINLEN, SEQINLEN, REG0, CAAM_CMD_SZ);

	/* Read remaining bytes */
	append_seq_fifo_load(desc, 0, FIFOLD_CLASS_CLASS1 | FIFOLD_TYPE_LAST1 |
			     FIFOLD_TYPE_MSG | FIFOLDST_VLF);

	/* Save context (partial hash, K2, K3) */
	append_seq_store(desc, digestsize, LDST_CLASS_1_CCB |
			 LDST_SRCDST_BYTE_CONTEXT);
	if (state == OP_ALG_AS_INIT)
		/* Save K1 so UPDATE/FINALIZE descriptors can reload it */
		append_fifo_store(desc, key_dma, adata->keylen,
				  LDST_CLASS_1_CCB | FIFOST_TYPE_KEY_KEK);
}
EXPORT_SYMBOL(cnstr_shdsc_axcbc);
133+
78134
MODULE_LICENSE("Dual BSD/GPL");
79135
MODULE_DESCRIPTION("FSL CAAM ahash descriptors support");
80136
MODULE_AUTHOR("NXP Semiconductors");

drivers/crypto/caam/caamhash_desc.h

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -18,4 +18,6 @@
1818
void cnstr_shdsc_ahash(u32 * const desc, struct alginfo *adata, u32 state,
1919
int digestsize, int ctx_len, bool import_ctx, int era);
2020

21+
void cnstr_shdsc_axcbc(u32 * const desc, struct alginfo *adata, u32 state,
22+
int digestsize, int ctx_len, dma_addr_t key_dma);
2123
#endif /* _CAAMHASH_DESC_H_ */

0 commit comments

Comments
 (0)