
Commit 7222d1a

Catalin Vasile authored and herbertx committed
crypto: caam - add support for givencrypt cbc(aes) and rfc3686(ctr(aes))
Add support for one-shot givencrypt algorithms. Givencrypt algorithms will generate their IV and encrypt data within the same shared job descriptors. Current algorithms merged from ablkcipher to givencrypt are:
- AES Cipher Block Chaining (CBC)
- AES Counter Mode (CTR) compliant with RFC3686

Signed-off-by: Catalin Vasile <catalin.vasile@freescale.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
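For orientation, a hypothetical caller-side sketch (not part of this commit) of how the givencrypt path would be exercised through the givcipher API of this era. The helper name caam_givencrypt_demo is made up, and the completion-callback plumbing a real asynchronous caller needs is omitted:

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <crypto/skcipher.h>

static int caam_givencrypt_demo(struct scatterlist *src,
				struct scatterlist *dst,
				unsigned int nbytes,
				const u8 *key, unsigned int keylen, u64 seq)
{
	struct crypto_ablkcipher *tfm;
	struct skcipher_givcrypt_request *req = NULL;
	u8 *giv;
	int err;

	/* the geniv lookup resolves to the built-in givencrypt for
	 * GIVCIPHER-type algorithms such as cbc-aes-caam */
	tfm = crypto_alloc_ablkcipher("cbc(aes)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_ablkcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	/* giv must be DMA-able memory; the driver dma_map_single()s it */
	giv = kmalloc(crypto_ablkcipher_ivsize(tfm), GFP_KERNEL);
	req = skcipher_givcrypt_alloc(tfm, GFP_KERNEL);
	if (!giv || !req) {
		err = -ENOMEM;
		goto out_free_req;
	}

	skcipher_givcrypt_set_crypt(req, src, dst, nbytes, NULL);
	skcipher_givcrypt_set_giv(req, giv, seq);

	/* typically returns -EINPROGRESS once the job ring accepts the
	 * descriptor; a real caller must wait for its completion
	 * callback before freeing req and giv */
	err = crypto_skcipher_givencrypt(req);

out_free_req:
	skcipher_givcrypt_free(req);
	kfree(giv);
out_free_tfm:
	crypto_free_ablkcipher(tfm);
	return err;
}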
1 parent daebc46 commit 7222d1a

1 file changed

drivers/crypto/caam/caamalg.c

Lines changed: 281 additions & 4 deletions
@@ -1835,6 +1835,7 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 	u32 *key_jump_cmd;
 	u32 *desc;
 	u32 *nonce;
+	u32 geniv;
 	u32 ctx1_iv_off = 0;
 	const bool ctr_mode = ((ctx->class1_alg_type & OP_ALG_AAI_MASK) ==
 			       OP_ALG_AAI_CTR_MOD128);
@@ -1993,6 +1994,83 @@ static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher,
 		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
 		       desc_bytes(desc), 1);
 #endif
+	/* ablkcipher_givencrypt shared descriptor */
+	desc = ctx->sh_desc_givenc;
+
+	init_sh_desc(desc, HDR_SHARE_SERIAL | HDR_SAVECTX);
+	/* Skip if already shared */
+	key_jump_cmd = append_jump(desc, JUMP_JSL | JUMP_TEST_ALL |
+				   JUMP_COND_SHRD);
+
+	/* Load class1 key only */
+	append_key_as_imm(desc, (void *)ctx->key, ctx->enckeylen,
+			  ctx->enckeylen, CLASS_1 |
+			  KEY_DEST_CLASS_REG);
+
+	/* Load Nonce into CONTEXT1 reg */
+	if (is_rfc3686) {
+		nonce = (u32 *)(key + keylen);
+		append_load_imm_u32(desc, *nonce, LDST_CLASS_IND_CCB |
+				    LDST_SRCDST_BYTE_OUTFIFO | LDST_IMM);
+		append_move(desc, MOVE_WAITCOMP |
+			    MOVE_SRC_OUTFIFO |
+			    MOVE_DEST_CLASS1CTX |
+			    (16 << MOVE_OFFSET_SHIFT) |
+			    (CTR_RFC3686_NONCE_SIZE << MOVE_LEN_SHIFT));
+	}
+	set_jump_tgt_here(desc, key_jump_cmd);
+
+	/* Generate IV */
+	geniv = NFIFOENTRY_STYPE_PAD | NFIFOENTRY_DEST_DECO |
+		NFIFOENTRY_DTYPE_MSG | NFIFOENTRY_LC1 |
+		NFIFOENTRY_PTYPE_RND | (crt->ivsize << NFIFOENTRY_DLEN_SHIFT);
+	append_load_imm_u32(desc, geniv, LDST_CLASS_IND_CCB |
+			    LDST_SRCDST_WORD_INFO_FIFO | LDST_IMM);
+	append_cmd(desc, CMD_LOAD | DISABLE_AUTO_INFO_FIFO);
+	append_move(desc, MOVE_WAITCOMP |
+		    MOVE_SRC_INFIFO |
+		    MOVE_DEST_CLASS1CTX |
+		    (crt->ivsize << MOVE_LEN_SHIFT) |
+		    (ctx1_iv_off << MOVE_OFFSET_SHIFT));
+	append_cmd(desc, CMD_LOAD | ENABLE_AUTO_INFO_FIFO);
+
+	/* Copy generated IV to memory */
+	append_seq_store(desc, crt->ivsize,
+			 LDST_SRCDST_BYTE_CONTEXT | LDST_CLASS_1_CCB |
+			 (ctx1_iv_off << LDST_OFFSET_SHIFT));
+
+	/* Load Counter into CONTEXT1 reg */
+	if (is_rfc3686)
+		append_load_imm_u32(desc, (u32)1, LDST_IMM |
+				    LDST_CLASS_1_CCB |
+				    LDST_SRCDST_BYTE_CONTEXT |
+				    ((ctx1_iv_off + CTR_RFC3686_IV_SIZE) <<
+				     LDST_OFFSET_SHIFT));
+
+	if (ctx1_iv_off)
+		append_jump(desc, JUMP_JSL | JUMP_TEST_ALL | JUMP_COND_NCP |
+			    (1 << JUMP_OFFSET_SHIFT));
+
+	/* Load operation */
+	append_operation(desc, ctx->class1_alg_type |
+			 OP_ALG_AS_INITFINAL | OP_ALG_ENCRYPT);
+
+	/* Perform operation */
+	ablkcipher_append_src_dst(desc);
+
+	ctx->sh_desc_givenc_dma = dma_map_single(jrdev, desc,
+						 desc_bytes(desc),
+						 DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, ctx->sh_desc_givenc_dma)) {
+		dev_err(jrdev, "unable to map shared descriptor\n");
+		return -ENOMEM;
+	}
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher givenc shdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, desc,
+		       desc_bytes(desc), 1);
+#endif
 
 	return ret;
 }
@@ -2479,6 +2557,54 @@ static void init_ablkcipher_job(u32 *sh_desc, dma_addr_t ptr,
 	append_seq_out_ptr(desc, dst_dma, req->nbytes, out_options);
 }
 
+/*
+ * Fill in ablkcipher givencrypt job descriptor
+ */
+static void init_ablkcipher_giv_job(u32 *sh_desc, dma_addr_t ptr,
+				    struct ablkcipher_edesc *edesc,
+				    struct ablkcipher_request *req,
+				    bool iv_contig)
+{
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	u32 *desc = edesc->hw_desc;
+	u32 out_options, in_options;
+	dma_addr_t dst_dma, src_dma;
+	int len, sec4_sg_index = 0;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR, "presciv@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, req->info,
+		       ivsize, 1);
+	print_hex_dump(KERN_ERR, "src @" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, sg_virt(req->src),
+		       edesc->src_nents ? 100 : req->nbytes, 1);
+#endif
+
+	len = desc_len(sh_desc);
+	init_job_desc_shared(desc, ptr, len, HDR_SHARE_DEFER | HDR_REVERSE);
+
+	if (!edesc->src_nents) {
+		src_dma = sg_dma_address(req->src);
+		in_options = 0;
+	} else {
+		src_dma = edesc->sec4_sg_dma;
+		sec4_sg_index += edesc->src_nents;
+		in_options = LDST_SGF;
+	}
+	append_seq_in_ptr(desc, src_dma, req->nbytes, in_options);
+
+	if (iv_contig) {
+		dst_dma = edesc->iv_dma;
+		out_options = 0;
+	} else {
+		dst_dma = edesc->sec4_sg_dma +
+			  sec4_sg_index * sizeof(struct sec4_sg_entry);
+		out_options = LDST_SGF;
+	}
+	append_seq_out_ptr(desc, dst_dma, req->nbytes + ivsize, out_options);
+}
+
 /*
  * allocate and map the aead extended descriptor
  */
@@ -3099,6 +3225,151 @@ static int ablkcipher_decrypt(struct ablkcipher_request *req)
 	return ret;
 }
 
+/*
+ * allocate and map the ablkcipher extended descriptor
+ * for ablkcipher givencrypt
+ */
+static struct ablkcipher_edesc *ablkcipher_giv_edesc_alloc(
+				struct skcipher_givcrypt_request *greq,
+				int desc_bytes,
+				bool *iv_contig_out)
+{
+	struct ablkcipher_request *req = &greq->creq;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	gfp_t flags = (req->base.flags & (CRYPTO_TFM_REQ_MAY_BACKLOG |
+					  CRYPTO_TFM_REQ_MAY_SLEEP)) ?
+		       GFP_KERNEL : GFP_ATOMIC;
+	int src_nents, dst_nents = 0, sec4_sg_bytes;
+	struct ablkcipher_edesc *edesc;
+	dma_addr_t iv_dma = 0;
+	bool iv_contig = false;
+	int sgc;
+	int ivsize = crypto_ablkcipher_ivsize(ablkcipher);
+	bool src_chained = false, dst_chained = false;
+	int sec4_sg_index;
+
+	src_nents = sg_count(req->src, req->nbytes, &src_chained);
+
+	if (unlikely(req->dst != req->src))
+		dst_nents = sg_count(req->dst, req->nbytes, &dst_chained);
+
+	if (likely(req->src == req->dst)) {
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_BIDIRECTIONAL, src_chained);
+	} else {
+		sgc = dma_map_sg_chained(jrdev, req->src, src_nents ? : 1,
+					 DMA_TO_DEVICE, src_chained);
+		sgc = dma_map_sg_chained(jrdev, req->dst, dst_nents ? : 1,
+					 DMA_FROM_DEVICE, dst_chained);
+	}
+
+	/*
+	 * Check if iv can be contiguous with source and destination.
+	 * If so, include it. If not, create scatterlist.
+	 */
+	iv_dma = dma_map_single(jrdev, greq->giv, ivsize, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, iv_dma)) {
+		dev_err(jrdev, "unable to map IV\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	if (!dst_nents && iv_dma + ivsize == sg_dma_address(req->dst))
+		iv_contig = true;
+	else
+		dst_nents = dst_nents ? : 1;
+	sec4_sg_bytes = ((iv_contig ? 0 : 1) + src_nents + dst_nents) *
+			sizeof(struct sec4_sg_entry);
+
+	/* allocate space for base edesc and hw desc commands, link tables */
+	edesc = kmalloc(sizeof(*edesc) + desc_bytes +
+			sec4_sg_bytes, GFP_DMA | flags);
+	if (!edesc) {
+		dev_err(jrdev, "could not allocate extended descriptor\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	edesc->src_nents = src_nents;
+	edesc->src_chained = src_chained;
+	edesc->dst_nents = dst_nents;
+	edesc->dst_chained = dst_chained;
+	edesc->sec4_sg_bytes = sec4_sg_bytes;
+	edesc->sec4_sg = (void *)edesc + sizeof(struct ablkcipher_edesc) +
+			 desc_bytes;
+
+	sec4_sg_index = 0;
+	if (src_nents) {
+		sg_to_sec4_sg_last(req->src, src_nents, edesc->sec4_sg, 0);
+		sec4_sg_index += src_nents;
+	}
+
+	if (!iv_contig) {
+		dma_to_sec4_sg_one(edesc->sec4_sg + sec4_sg_index,
+				   iv_dma, ivsize, 0);
+		sec4_sg_index += 1;
+		sg_to_sec4_sg_last(req->dst, dst_nents,
+				   edesc->sec4_sg + sec4_sg_index, 0);
+	}
+
+	edesc->sec4_sg_dma = dma_map_single(jrdev, edesc->sec4_sg,
+					    sec4_sg_bytes, DMA_TO_DEVICE);
+	if (dma_mapping_error(jrdev, edesc->sec4_sg_dma)) {
+		dev_err(jrdev, "unable to map S/G table\n");
+		return ERR_PTR(-ENOMEM);
+	}
+	edesc->iv_dma = iv_dma;
+
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher sec4_sg@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->sec4_sg,
+		       sec4_sg_bytes, 1);
+#endif
+
+	*iv_contig_out = iv_contig;
+	return edesc;
+}
+
+static int ablkcipher_givencrypt(struct skcipher_givcrypt_request *creq)
+{
+	struct ablkcipher_request *req = &creq->creq;
+	struct ablkcipher_edesc *edesc;
+	struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req);
+	struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher);
+	struct device *jrdev = ctx->jrdev;
+	bool iv_contig;
+	u32 *desc;
+	int ret = 0;
+
+	/* allocate extended descriptor */
+	edesc = ablkcipher_giv_edesc_alloc(creq, DESC_JOB_IO_LEN *
+					   CAAM_CMD_SZ, &iv_contig);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	/* Create and submit job descriptor*/
+	init_ablkcipher_giv_job(ctx->sh_desc_givenc, ctx->sh_desc_givenc_dma,
+				edesc, req, iv_contig);
+#ifdef DEBUG
+	print_hex_dump(KERN_ERR,
+		       "ablkcipher jobdesc@" __stringify(__LINE__) ": ",
+		       DUMP_PREFIX_ADDRESS, 16, 4, edesc->hw_desc,
+		       desc_bytes(edesc->hw_desc), 1);
+#endif
+	desc = edesc->hw_desc;
+	ret = caam_jr_enqueue(jrdev, desc, ablkcipher_encrypt_done, req);
+
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		ablkcipher_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
 #define template_aead		template_u.aead
 #define template_ablkcipher	template_u.ablkcipher
 struct caam_alg_template {
@@ -3769,12 +4040,13 @@ static struct caam_alg_template driver_algs[] = {
 		.name = "cbc(aes)",
 		.driver_name = "cbc-aes-caam",
 		.blocksize = AES_BLOCK_SIZE,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
 		.template_ablkcipher = {
 			.setkey = ablkcipher_setkey,
 			.encrypt = ablkcipher_encrypt,
 			.decrypt = ablkcipher_decrypt,
-			.geniv = "eseqiv",
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
 			.min_keysize = AES_MIN_KEY_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE,
 			.ivsize = AES_BLOCK_SIZE,
@@ -3833,12 +4105,13 @@ static struct caam_alg_template driver_algs[] = {
 		.name = "rfc3686(ctr(aes))",
 		.driver_name = "rfc3686-ctr-aes-caam",
 		.blocksize = 1,
-		.type = CRYPTO_ALG_TYPE_ABLKCIPHER,
+		.type = CRYPTO_ALG_TYPE_GIVCIPHER,
 		.template_ablkcipher = {
 			.setkey = ablkcipher_setkey,
 			.encrypt = ablkcipher_encrypt,
 			.decrypt = ablkcipher_decrypt,
-			.geniv = "seqiv",
+			.givencrypt = ablkcipher_givencrypt,
+			.geniv = "<built-in>",
 			.min_keysize = AES_MIN_KEY_SIZE +
 				       CTR_RFC3686_NONCE_SIZE,
 			.max_keysize = AES_MAX_KEY_SIZE +
@@ -3946,6 +4219,10 @@ static struct caam_crypto_alg *caam_alg_alloc(struct caam_alg_template
 	alg->cra_flags = CRYPTO_ALG_ASYNC | CRYPTO_ALG_KERN_DRIVER_ONLY |
 			 template->type;
 	switch (template->type) {
+	case CRYPTO_ALG_TYPE_GIVCIPHER:
+		alg->cra_type = &crypto_givcipher_type;
+		alg->cra_ablkcipher = template->template_ablkcipher;
+		break;
 	case CRYPTO_ALG_TYPE_ABLKCIPHER:
 		alg->cra_type = &crypto_ablkcipher_type;
 		alg->cra_ablkcipher = template->template_ablkcipher;
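A note on the new output layout, with a hedged consumer-side sketch (not part of this commit): init_ablkcipher_giv_job() programs req->nbytes + ivsize output bytes because the shared descriptor seq-stores the generated IV ahead of the ciphertext, so the IV lands in greq->giv and the ciphertext in req->dst. A later decryption feeds that IV back through the request's info field; the helper name decrypt_with_generated_iv is made up:

#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int decrypt_with_generated_iv(struct crypto_ablkcipher *tfm,
				     struct scatterlist *ct,
				     unsigned int nbytes,
				     u8 *giv /* IV returned in greq->giv */)
{
	struct ablkcipher_request *req;
	int err;

	req = ablkcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	/* decrypt in place, using the device-generated IV */
	ablkcipher_request_set_crypt(req, ct, ct, nbytes, giv);
	err = crypto_ablkcipher_decrypt(req);	/* async: may be -EINPROGRESS */

	ablkcipher_request_free(req);
	return err;
}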
