@@ -72,6 +72,8 @@
 #define AUTHENC_DESC_JOB_IO_LEN	(AEAD_DESC_JOB_IO_LEN + \
 					 CAAM_CMD_SZ * 5)
 
+#define CHACHAPOLY_DESC_JOB_IO_LEN	(AEAD_DESC_JOB_IO_LEN + CAAM_CMD_SZ * 6)
+
 #define DESC_MAX_USED_BYTES		(CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN)
 #define DESC_MAX_USED_LEN		(DESC_MAX_USED_BYTES / CAAM_CMD_SZ)
 
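A CAAM descriptor holds at most 64 command words, and the `DESC_JOB_IO_LEN` family reserves the words the job descriptor consumes around the shared descriptor; the new macro budgets six extra words for the per-request ChaCha20-Poly1305 commands added further down (the assoclen immediate and the IV/nonce load). A minimal standalone sketch of the arithmetic, with the base overheads hedged as assumptions from my reading of the driver headers:

```c
/*
 * Standalone sketch of the descriptor budget. CAAM_CMD_SZ and the
 * 64-word descriptor cap match the driver headers; the job-descriptor
 * I/O overhead below is an assumption (pointer size varies by SoC).
 */
#include <stdint.h>
#include <stdio.h>

#define CAAM_CMD_SZ		sizeof(uint32_t)	/* one command word */
#define CAAM_DESC_BYTES_MAX	(CAAM_CMD_SZ * 64)	/* descriptor cap */

int main(void)
{
	size_t desc_job_io = CAAM_CMD_SZ * 5 + sizeof(uint64_t) * 3; /* assumed */
	size_t aead_job_io = desc_job_io + CAAM_CMD_SZ * 2;
	size_t chachapoly_job_io = aead_job_io + CAAM_CMD_SZ * 6;

	printf("bytes left for the shared descriptor: %zu\n",
	       CAAM_DESC_BYTES_MAX - chachapoly_job_io);
	return 0;
}
```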
@@ -513,6 +515,61 @@ static int rfc4543_setauthsize(struct crypto_aead *authenc,
 	return 0;
 }
 
+static int chachapoly_set_sh_desc(struct crypto_aead *aead)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	u32 *desc;
+
+	if (!ctx->cdata.keylen || !ctx->authsize)
+		return 0;
+
+	desc = ctx->sh_desc_enc;
+	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, true);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_enc_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	desc = ctx->sh_desc_dec;
+	cnstr_shdsc_chachapoly(desc, &ctx->cdata, &ctx->adata, ivsize,
+			       ctx->authsize, false);
+	dma_sync_single_for_device(jrdev, ctx->sh_desc_dec_dma,
+				   desc_bytes(desc), ctx->dir);
+
+	return 0;
+}
+
+static int chachapoly_setauthsize(struct crypto_aead *aead,
+				  unsigned int authsize)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+
+	if (authsize != POLY1305_DIGEST_SIZE)
+		return -EINVAL;
+
+	ctx->authsize = authsize;
+	return chachapoly_set_sh_desc(aead);
+}
+
+static int chachapoly_setkey(struct crypto_aead *aead, const u8 *key,
+			     unsigned int keylen)
+{
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;
+
+	if (keylen != CHACHA20_KEY_SIZE + saltlen) {
+		crypto_aead_set_flags(aead, CRYPTO_TFM_RES_BAD_KEY_LEN);
+		return -EINVAL;
+	}
+
+	ctx->cdata.key_virt = key;
+	ctx->cdata.keylen = keylen - saltlen;
+
+	return chachapoly_set_sh_desc(aead);
+}
+
 static int aead_setkey(struct crypto_aead *aead,
 			const u8 *key, unsigned int keylen)
 {
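One setkey handler serves both templates registered later: the generic rfc7539 transform (12-byte IV, so saltlen is 0 and the key must be exactly CHACHA20_KEY_SIZE bytes) and the IPsec rfc7539esp transform (8-byte IV, so a 4-byte salt trails the 32-byte key). A compilable sketch of just that length check, using the kernel's constant values (the helper name is mine):

```c
/*
 * Minimal sketch of the keylen check in chachapoly_setkey(). Sizes
 * mirror the kernel definitions (CHACHA20_KEY_SIZE = 32,
 * CHACHAPOLY_IV_SIZE = 12); the helper is hypothetical.
 */
#include <stdbool.h>
#include <stdio.h>

#define CHACHA20_KEY_SIZE	32
#define CHACHAPOLY_IV_SIZE	12

static bool chachapoly_keylen_ok(unsigned int keylen, unsigned int ivsize)
{
	/* rfc7539esp (ivsize 8) carries a 4-byte salt after the key */
	unsigned int saltlen = CHACHAPOLY_IV_SIZE - ivsize;

	return keylen == CHACHA20_KEY_SIZE + saltlen;
}

int main(void)
{
	printf("rfc7539,    32-byte key: %d\n", chachapoly_keylen_ok(32, 12));
	printf("rfc7539esp, 36-byte key: %d\n", chachapoly_keylen_ok(36, 8));
	printf("rfc7539esp, 32-byte key: %d\n", chachapoly_keylen_ok(32, 8));
	return 0;
}
```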
@@ -1031,6 +1088,40 @@ static void init_gcm_job(struct aead_request *req,
 	/* End of blank commands */
 }
 
+static void init_chachapoly_job(struct aead_request *req,
+				struct aead_edesc *edesc, bool all_contig,
+				bool encrypt)
+{
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	unsigned int ivsize = crypto_aead_ivsize(aead);
+	unsigned int assoclen = req->assoclen;
+	u32 *desc = edesc->hw_desc;
+	u32 ctx_iv_off = 4;
+
+	init_aead_job(req, edesc, all_contig, encrypt);
+
+	if (ivsize != CHACHAPOLY_IV_SIZE) {
+		/* IPsec specific: CONTEXT1[223:128] = {NONCE, IV} */
+		ctx_iv_off += 4;
+
+		/*
+		 * The associated data comes already with the IV but we need
+		 * to skip it when we authenticate or encrypt...
+		 */
+		assoclen -= ivsize;
+	}
+
+	append_math_add_imm_u32(desc, REG3, ZERO, IMM, assoclen);
+
+	/*
+	 * For IPsec load the IV further in the same register.
+	 * For RFC7539 simply load the 12 bytes nonce in a single operation
+	 */
+	append_load_as_imm(desc, req->iv, ivsize, LDST_CLASS_1_CCB |
+			   LDST_SRCDST_BYTE_CONTEXT |
+			   ctx_iv_off << LDST_OFFSET_SHIFT);
+}
+
 static void init_authenc_job(struct aead_request *req,
 			     struct aead_edesc *edesc,
 			     bool all_contig, bool encrypt)
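The nonce is loaded into the class 1 context register rather than passed with the data. For the generic transform the full 12-byte nonce lands at byte offset 4; for the IPsec transform the shared descriptor already placed the 4-byte salt there, so the 8-byte per-packet IV is loaded behind it at offset 8, reproducing the CONTEXT1[223:128] = {NONCE, IV} layout noted in the comment. A standalone sketch of that offset computation:

```c
/*
 * Standalone sketch of the CONTEXT1 offset arithmetic in
 * init_chachapoly_job(). Offsets are byte offsets into the class 1
 * context register.
 */
#include <stdio.h>

#define CHACHAPOLY_IV_SIZE	12

static unsigned int ctx_iv_offset(unsigned int ivsize)
{
	unsigned int ctx_iv_off = 4;	/* 12-byte RFC 7539 nonce */

	if (ivsize != CHACHAPOLY_IV_SIZE)
		ctx_iv_off += 4;	/* IPsec: 4-byte salt sits first */

	return ctx_iv_off;
}

int main(void)
{
	printf("rfc7539:    load 12 bytes at offset %u\n", ctx_iv_offset(12));
	printf("rfc7539esp: load  8 bytes at offset %u\n", ctx_iv_offset(8));
	return 0;
}
```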
@@ -1289,6 +1380,72 @@ static int gcm_encrypt(struct aead_request *req)
 	return ret;
 }
 
+static int chachapoly_encrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret;
+
+	edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
+				 true);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	desc = edesc->hw_desc;
+
+	init_chachapoly_job(req, edesc, all_contig, true);
+	print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	ret = caam_jr_enqueue(jrdev, desc, aead_encrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
+static int chachapoly_decrypt(struct aead_request *req)
+{
+	struct aead_edesc *edesc;
+	struct crypto_aead *aead = crypto_aead_reqtfm(req);
+	struct caam_ctx *ctx = crypto_aead_ctx(aead);
+	struct device *jrdev = ctx->jrdev;
+	bool all_contig;
+	u32 *desc;
+	int ret;
+
+	edesc = aead_edesc_alloc(req, CHACHAPOLY_DESC_JOB_IO_LEN, &all_contig,
+				 false);
+	if (IS_ERR(edesc))
+		return PTR_ERR(edesc);
+
+	desc = edesc->hw_desc;
+
+	init_chachapoly_job(req, edesc, all_contig, false);
+	print_hex_dump_debug("chachapoly jobdesc@" __stringify(__LINE__)": ",
+			     DUMP_PREFIX_ADDRESS, 16, 4, desc, desc_bytes(desc),
+			     1);
+
+	ret = caam_jr_enqueue(jrdev, desc, aead_decrypt_done, req);
+	if (!ret) {
+		ret = -EINPROGRESS;
+	} else {
+		aead_unmap(jrdev, edesc, req);
+		kfree(edesc);
+	}
+
+	return ret;
+}
+
 static int ipsec_gcm_encrypt(struct aead_request *req)
 {
 	if (req->assoclen < 8)
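Both entry points are fully asynchronous: a successful caam_jr_enqueue() hands the job to the ring and the handler returns -EINPROGRESS, with the result delivered later through aead_encrypt_done()/aead_decrypt_done(). A hedged sketch, not part of this patch, of how an in-kernel caller might exercise the transform through the generic AEAD API, which absorbs that -EINPROGRESS via crypto_wait_req():

```c
/*
 * Hedged usage sketch (mine, not from the patch): an in-kernel caller
 * driving the new transform through the generic AEAD API. Buffer
 * layout follows the API contract: src/dst hold AD || text (|| tag).
 */
#include <crypto/aead.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

static int chachapoly_demo(u8 *buf, unsigned int assoclen,
			   unsigned int cryptlen, u8 *iv)
{
	static const u8 key[32];	/* demo key: all zeros */
	DECLARE_CRYPTO_WAIT(wait);
	struct crypto_aead *tfm;
	struct aead_request *req;
	struct scatterlist sg;
	int err;

	tfm = crypto_alloc_aead("rfc7539(chacha20,poly1305)", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_aead_setkey(tfm, key, sizeof(key));
	if (err)
		goto out_tfm;

	req = aead_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_tfm;
	}

	/* room for AD, payload and the 16-byte Poly1305 tag */
	sg_init_one(&sg, buf, assoclen + cryptlen + 16);
	aead_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
				  crypto_req_done, &wait);
	aead_request_set_ad(req, assoclen);
	aead_request_set_crypt(req, &sg, &sg, cryptlen, iv);

	/* crypto_wait_req() absorbs the driver's -EINPROGRESS */
	err = crypto_wait_req(crypto_aead_encrypt(req), &wait);

	aead_request_free(req);
out_tfm:
	crypto_free_aead(tfm);
	return err;
}
```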
@@ -3002,6 +3159,50 @@ static struct caam_aead_alg driver_aeads[] = {
 			.geniv = true,
 		},
 	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc7539(chacha20,poly1305)",
+				.cra_driver_name = "rfc7539-chacha20-poly1305-"
+						   "caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = chachapoly_setkey,
+			.setauthsize = chachapoly_setauthsize,
+			.encrypt = chachapoly_encrypt,
+			.decrypt = chachapoly_decrypt,
+			.ivsize = CHACHAPOLY_IV_SIZE,
+			.maxauthsize = POLY1305_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+					   OP_ALG_AAI_AEAD,
+			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+					   OP_ALG_AAI_AEAD,
+		},
+	},
+	{
+		.aead = {
+			.base = {
+				.cra_name = "rfc7539esp(chacha20,poly1305)",
+				.cra_driver_name = "rfc7539esp-chacha20-"
+						   "poly1305-caam",
+				.cra_blocksize = 1,
+			},
+			.setkey = chachapoly_setkey,
+			.setauthsize = chachapoly_setauthsize,
+			.encrypt = chachapoly_encrypt,
+			.decrypt = chachapoly_decrypt,
+			.ivsize = 8,
+			.maxauthsize = POLY1305_DIGEST_SIZE,
+		},
+		.caam = {
+			.class1_alg_type = OP_ALG_ALGSEL_CHACHA20 |
+					   OP_ALG_AAI_AEAD,
+			.class2_alg_type = OP_ALG_ALGSEL_POLY1305 |
+					   OP_ALG_AAI_AEAD,
+		},
+	},
 };
 
 static int caam_init_common(struct caam_ctx *ctx, struct caam_alg_entry *caam,
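The two entries differ only in cra_name and ivsize: the generic transform receives the whole 12-byte nonce with each request, while rfc7539esp receives an 8-byte per-packet IV and the driver prepends the 4-byte salt stored at setkey time, matching the salt-then-IV nonce construction used for ESP (RFC 7634). A small standalone sketch of that construction, with made-up values for illustration:

```c
/*
 * Sketch (mine, not from the patch) of the rfc7539esp nonce: the
 * 4-byte salt kept with the key, concatenated with the 8-byte
 * per-packet IV, yields the 12-byte ChaCha20-Poly1305 nonce.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	const uint8_t salt[4] = { 0x00, 0x11, 0x22, 0x33 };	/* from key */
	const uint8_t iv[8] = { 1, 2, 3, 4, 5, 6, 7, 8 };	/* per packet */
	uint8_t nonce[12];

	memcpy(nonce, salt, sizeof(salt));
	memcpy(nonce + sizeof(salt), iv, sizeof(iv));

	for (size_t i = 0; i < sizeof(nonce); i++)
		printf("%02x", nonce[i]);
	printf("\n");
	return 0;
}
```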
@@ -3135,7 +3336,7 @@ static int __init caam_algapi_init(void)
 	struct device *ctrldev;
 	struct caam_drv_private *priv;
 	int i = 0, err = 0;
-	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst;
+	u32 aes_vid, aes_inst, des_inst, md_vid, md_inst, ccha_inst, ptha_inst;
 	unsigned int md_limit = SHA512_DIGEST_SIZE;
 	bool registered = false;
 
@@ -3180,6 +3381,8 @@ static int __init caam_algapi_init(void)
 			   CHA_ID_LS_DES_SHIFT;
 		aes_inst = cha_inst & CHA_ID_LS_AES_MASK;
 		md_inst = (cha_inst & CHA_ID_LS_MD_MASK) >> CHA_ID_LS_MD_SHIFT;
+		ccha_inst = 0;
+		ptha_inst = 0;
 	} else {
 		u32 aesa, mdha;
 
@@ -3192,6 +3395,8 @@ static int __init caam_algapi_init(void)
 		des_inst = rd_reg32(&priv->ctrl->vreg.desa) & CHA_VER_NUM_MASK;
 		aes_inst = aesa & CHA_VER_NUM_MASK;
 		md_inst = mdha & CHA_VER_NUM_MASK;
+		ccha_inst = rd_reg32(&priv->ctrl->vreg.ccha) & CHA_VER_NUM_MASK;
+		ptha_inst = rd_reg32(&priv->ctrl->vreg.ptha) & CHA_VER_NUM_MASK;
 	}
 
 	/* If MD is present, limit digest size based on LP256 */
@@ -3252,6 +3457,14 @@ static int __init caam_algapi_init(void)
 		if (!aes_inst && (c1_alg_sel == OP_ALG_ALGSEL_AES))
 			continue;
 
+		/* Skip CHACHA20 algorithms if not supported by device */
+		if (c1_alg_sel == OP_ALG_ALGSEL_CHACHA20 && !ccha_inst)
+			continue;
+
+		/* Skip POLY1305 algorithms if not supported by device */
+		if (c2_alg_sel == OP_ALG_ALGSEL_POLY1305 && !ptha_inst)
+			continue;
+
 		/*
 		 * Check support for AES algorithms not available
 		 * on LP devices.
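This gate mirrors the instantiation counts collected above: pre-Era-10 parts have no CCHA/PTHA version fields, so ccha_inst and ptha_inst stay zero there and both new templates are skipped, while Era-10 parts report real counts through the version registers. A compact, compilable restatement of the predicate (struct and helper names are mine):

```c
/*
 * Sketch of the registration gate: an algorithm is registered only if
 * every accelerator it selects is instantiated on this part.
 */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct caam_insts {
	uint32_t ccha_inst;	/* # of ChaCha20 accelerators */
	uint32_t ptha_inst;	/* # of Poly1305 accelerators */
};

static bool chachapoly_supported(const struct caam_insts *c)
{
	return c->ccha_inst > 0 && c->ptha_inst > 0;
}

int main(void)
{
	struct caam_insts era9 = { 0, 0 }, era10 = { 1, 1 };

	printf("pre-Era-10: %s\n", chachapoly_supported(&era9) ? "register" : "skip");
	printf("Era 10:     %s\n", chachapoly_supported(&era10) ? "register" : "skip");
	return 0;
}
```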
@@ -3263,9 +3476,9 @@ static int __init caam_algapi_init(void)
 		 * Skip algorithms requiring message digests
 		 * if MD or MD size is not supported by device.
 		 */
-		if (c2_alg_sel &&
-		    (!md_inst || (t_alg->aead.maxauthsize > md_limit)))
-			continue;
+		if ((c2_alg_sel & ~OP_ALG_ALGSEL_SUBMASK) == 0x40 &&
+		    (!md_inst || t_alg->aead.maxauthsize > md_limit))
+			continue;
 
 		caam_aead_alg_init(t_alg);
 
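The final hunk narrows the message-digest gate: previously any class 2 selector implied MDHA, but Poly1305 is a class 2 algorithm served by PTHA, so the check now matches only the MDHA selector family before enforcing md_inst/md_limit. A sketch of the mask test, assuming the selector has been reduced to its 8-bit value, where the MDHA hashes occupy the 0x4x family (MD5 0x40, SHA-1 0x41, SHA-256 0x43, ...):

```c
/*
 * Standalone sketch of the narrowed gate. The 8-bit selector values
 * are assumptions from my reading of desc.h; the point is the shape
 * of the test: mask off the sub-selector nibble, compare against the
 * MDHA family base.
 */
#include <stdbool.h>
#include <stdio.h>

#define ALGSEL_SUBMASK	0x0f	/* which hash within the MDHA family */

static bool needs_mdha(unsigned int algsel)
{
	return (algsel & ~ALGSEL_SUBMASK) == 0x40;
}

int main(void)
{
	printf("MD5     (0x40): %d\n", needs_mdha(0x40));	/* 1: gated on MDHA */
	printf("SHA-256 (0x43): %d\n", needs_mdha(0x43));	/* 1: gated on MDHA */
	printf("none    (0x00): %d\n", needs_mdha(0x00));	/* 0: passes */
	return 0;
}
```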