@@ -123,7 +123,7 @@ static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
 
 static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
 {
-	return ctx->dev->u_ctx;
+	return container_of(ctx->dev, struct uld_ctx, dev);
 }
 
 static inline int is_ofld_imm(const struct sk_buff *skb)
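The rewritten ULD_CTX() works because this patch also turns struct chcr_dev into a member embedded by value inside struct uld_ctx (see the chcr_device_init hunk below), so the outer context can be recovered from the inner device with pointer arithmetic. A minimal userspace sketch of the container_of idiom, using placeholder fields rather than the driver's real layouts:

    #include <stddef.h>
    #include <stdio.h>

    /* Simplified form of the kernel's container_of() macro. */
    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct chcr_dev { int state; };

    struct uld_ctx {
            int lldi_placeholder;   /* stands in for the lld info */
            struct chcr_dev dev;    /* embedded, not a pointer */
    };

    int main(void)
    {
            struct uld_ctx u = { .lldi_placeholder = 42 };
            struct chcr_dev *d = &u.dev;

            /* Recover the enclosing uld_ctx from its embedded member. */
            struct uld_ctx *back = container_of(d, struct uld_ctx, dev);
            printf("%d\n", back->lldi_placeholder);  /* prints 42 */
            return 0;
    }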
@@ -198,17 +198,40 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
 		*err = 0;
 }
 
+static int chcr_inc_wrcount(struct chcr_dev *dev)
+{
+	int err = 0;
+
+	spin_lock_bh(&dev->lock_chcr_dev);
+	if (dev->state == CHCR_DETACH)
+		err = 1;
+	else
+		atomic_inc(&dev->inflight);
+
+	spin_unlock_bh(&dev->lock_chcr_dev);
+
+	return err;
+}
+
+static inline void chcr_dec_wrcount(struct chcr_dev *dev)
+{
+	atomic_dec(&dev->inflight);
+}
+
 static inline void chcr_handle_aead_resp(struct aead_request *req,
 					 unsigned char *input,
 					 int err)
 {
 	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
+	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_dev *dev = a_ctx(tfm)->dev;
 
 	chcr_aead_common_exit(req);
 	if (reqctx->verify == VERIFY_SW) {
 		chcr_verify_tag(req, input, &err);
 		reqctx->verify = VERIFY_HW;
 	}
+	chcr_dec_wrcount(dev);
 	req->base.complete(&req->base, err);
 }
 
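chcr_inc_wrcount()/chcr_dec_wrcount() implement a simple drain protocol: the state check and the increment happen under lock_chcr_dev, so once a teardown path flips dev->state to CHCR_DETACH under the same lock, no new work can be counted in and it only has to wait for inflight to reach zero. A hedged sketch of that other half, which lives outside this excerpt (the loop shape and msleep() interval are illustrative, not the driver's actual detach code):

    static void chcr_detach_sketch(struct chcr_dev *dev)
    {
            spin_lock_bh(&dev->lock_chcr_dev);
            dev->state = CHCR_DETACH;  /* chcr_inc_wrcount() now fails */
            spin_unlock_bh(&dev->lock_chcr_dev);

            /* Wait out requests that incremented before the flip. */
            while (atomic_read(&dev->inflight))
                    msleep(10);
    }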
@@ -1100,6 +1123,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 	struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
 	struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
 	struct cipher_wr_param wrparam;
+	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	int bytes;
 
 	if (err)
@@ -1161,6 +1185,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
 unmap:
 	chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
 complete:
+	chcr_dec_wrcount(dev);
 	req->base.complete(&req->base, err);
 	return err;
 }
@@ -1187,7 +1212,10 @@ static int process_cipher(struct ablkcipher_request *req,
 		       ablkctx->enckey_len, req->nbytes, ivsize);
 		goto error;
 	}
-	chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+
+	err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
+	if (err)
+		goto error;
 	if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
 					    AES_MIN_KEY_SIZE +
 					    sizeof(struct cpl_rx_phys_dsgl) +
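The process_cipher() change stops ignoring the mapping result. This matters because the underlying DMA API signals failure in-band: dma_map_sg() returns the number of mapped entries, and 0 on failure, so a helper in the style of chcr_cipher_dma_map() plausibly converts that into an errno. An illustrative sketch, not the driver's actual helper:

    static int example_dma_map(struct device *dev, struct scatterlist *sg,
                               int nents)
    {
            /* dma_map_sg() returns 0 when the mapping fails. */
            if (!dma_map_sg(dev, sg, nents, DMA_BIDIRECTIONAL))
                    return -ENOMEM;
            return 0;
    }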
@@ -1276,15 +1304,21 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
 static int chcr_aes_encrypt(struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
+	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	struct sk_buff *skb = NULL;
 	int err, isfull = 0;
 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
 
+	err = chcr_inc_wrcount(dev);
+	if (err)
+		return -ENXIO;
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
 					    c_ctx(tfm)->tx_qidx))) {
 		isfull = 1;
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -ENOSPC;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			err = -ENOSPC;
+			goto error;
+		}
 	}
 
 	err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
@@ -1295,15 +1329,23 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
 	set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
 	chcr_send_wr(skb);
 	return isfull ? -EBUSY : -EINPROGRESS;
+error:
+	chcr_dec_wrcount(dev);
+	return err;
 }
 
 static int chcr_aes_decrypt(struct ablkcipher_request *req)
 {
 	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
 	struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
+	struct chcr_dev *dev = c_ctx(tfm)->dev;
 	struct sk_buff *skb = NULL;
 	int err, isfull = 0;
 
+	err = chcr_inc_wrcount(dev);
+	if (err)
+		return -ENXIO;
+
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
 					    c_ctx(tfm)->tx_qidx))) {
 		isfull = 1;
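The chcr_aes_encrypt()/chcr_aes_decrypt() changes follow one pattern: take the inflight count first, then route every subsequent failure through a label that undoes it, so the count stays balanced on all exits. Reduced to a skeleton (names and arguments are illustrative):

    static int submit_sketch(struct chcr_dev *dev, bool queue_full,
                             bool may_backlog)
    {
            int err;

            err = chcr_inc_wrcount(dev);
            if (err)
                    return -ENXIO;          /* nothing to undo yet */

            if (queue_full && !may_backlog) {
                    err = -ENOSPC;
                    goto error;
            }
            return -EINPROGRESS;            /* completion handler decrements */
    error:
            chcr_dec_wrcount(dev);          /* keep the count balanced */
            return err;
    }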
@@ -1333,10 +1375,11 @@ static int chcr_device_init(struct chcr_context *ctx)
 	if (!ctx->dev) {
 		u_ctx = assign_chcr_device();
 		if (!u_ctx) {
+			err = -ENXIO;
 			pr_err("chcr device assignment fails\n");
 			goto out;
 		}
-		ctx->dev = u_ctx->dev;
+		ctx->dev = &u_ctx->dev;
 		adap = padap(ctx->dev);
 		ntxq = u_ctx->lldi.ntxq;
 		rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
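`ctx->dev = &u_ctx->dev` only type-checks if the accompanying chcr_core.h change (outside this excerpt) turns the pointer member into an embedded one, which is also what makes the container_of() in ULD_CTX() above valid. Assumed shape of that header change:

    struct uld_ctx {
            struct cxgb4_lld_info lldi;
            struct chcr_dev dev;    /* was: struct chcr_dev *dev; */
            /* ... */
    };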
@@ -1561,6 +1604,7 @@ static int chcr_ahash_update(struct ahash_request *req)
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
 	struct uld_ctx *u_ctx = NULL;
+	struct chcr_dev *dev = h_ctx(rtfm)->dev;
 	struct sk_buff *skb;
 	u8 remainder = 0, bs;
 	unsigned int nbytes = req->nbytes;
@@ -1569,12 +1613,6 @@ static int chcr_ahash_update(struct ahash_request *req)
 
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
 	u_ctx = ULD_CTX(h_ctx(rtfm));
-	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
-					    h_ctx(rtfm)->tx_qidx))) {
-		isfull = 1;
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -ENOSPC;
-	}
 
 	if (nbytes + req_ctx->reqlen >= bs) {
 		remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1585,10 +1623,27 @@ static int chcr_ahash_update(struct ahash_request *req)
 		req_ctx->reqlen += nbytes;
 		return 0;
 	}
+	error = chcr_inc_wrcount(dev);
+	if (error)
+		return -ENXIO;
+	/* Detach state for CHCR means lldi or padap is freed. Increasing
+	 * inflight count for dev guarantees that lldi and padap is valid
+	 */
+	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
+					    h_ctx(rtfm)->tx_qidx))) {
+		isfull = 1;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			error = -ENOSPC;
+			goto err;
+		}
+	}
+
 	chcr_init_hctx_per_wr(req_ctx);
 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-	if (error)
-		return -ENOMEM;
+	if (error) {
+		error = -ENOMEM;
+		goto err;
+	}
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
 	params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
@@ -1628,6 +1683,8 @@ static int chcr_ahash_update(struct ahash_request *req)
 	return isfull ? -EBUSY : -EINPROGRESS;
 unmap:
 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+	chcr_dec_wrcount(dev);
 	return error;
 }
 
@@ -1645,10 +1702,16 @@ static int chcr_ahash_final(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct chcr_dev *dev = h_ctx(rtfm)->dev;
 	struct hash_wr_param params;
 	struct sk_buff *skb;
 	struct uld_ctx *u_ctx = NULL;
 	u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+	int error = -EINVAL;
+
+	error = chcr_inc_wrcount(dev);
+	if (error)
+		return -ENXIO;
 
 	chcr_init_hctx_per_wr(req_ctx);
 	u_ctx = ULD_CTX(h_ctx(rtfm));
@@ -1685,19 +1748,25 @@ static int chcr_ahash_final(struct ahash_request *req)
 	}
 	params.hash_size = crypto_ahash_digestsize(rtfm);
 	skb = create_hash_wr(req, &params);
-	if (IS_ERR(skb))
-		return PTR_ERR(skb);
+	if (IS_ERR(skb)) {
+		error = PTR_ERR(skb);
+		goto err;
+	}
 	req_ctx->reqlen = 0;
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
 	chcr_send_wr(skb);
 	return -EINPROGRESS;
+err:
+	chcr_dec_wrcount(dev);
+	return error;
 }
 
 static int chcr_ahash_finup(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct chcr_dev *dev = h_ctx(rtfm)->dev;
 	struct uld_ctx *u_ctx = NULL;
 	struct sk_buff *skb;
 	struct hash_wr_param params;
@@ -1706,17 +1775,24 @@ static int chcr_ahash_finup(struct ahash_request *req)
 
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
 	u_ctx = ULD_CTX(h_ctx(rtfm));
+	error = chcr_inc_wrcount(dev);
+	if (error)
+		return -ENXIO;
 
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
 					    h_ctx(rtfm)->tx_qidx))) {
 		isfull = 1;
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -ENOSPC;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			error = -ENOSPC;
+			goto err;
+		}
 	}
 	chcr_init_hctx_per_wr(req_ctx);
 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-	if (error)
-		return -ENOMEM;
+	if (error) {
+		error = -ENOMEM;
+		goto err;
+	}
 
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1773,13 +1849,16 @@ static int chcr_ahash_finup(struct ahash_request *req)
 	return isfull ? -EBUSY : -EINPROGRESS;
 unmap:
 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+	chcr_dec_wrcount(dev);
 	return error;
 }
 
 static int chcr_ahash_digest(struct ahash_request *req)
 {
 	struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
 	struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
+	struct chcr_dev *dev = h_ctx(rtfm)->dev;
 	struct uld_ctx *u_ctx = NULL;
 	struct sk_buff *skb;
 	struct hash_wr_param params;
@@ -1788,19 +1867,26 @@ static int chcr_ahash_digest(struct ahash_request *req)
 
 	rtfm->init(req);
 	bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
+	error = chcr_inc_wrcount(dev);
+	if (error)
+		return -ENXIO;
 
 	u_ctx = ULD_CTX(h_ctx(rtfm));
 	if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
 					    h_ctx(rtfm)->tx_qidx))) {
 		isfull = 1;
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
-			return -ENOSPC;
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			error = -ENOSPC;
+			goto err;
+		}
 	}
 
 	chcr_init_hctx_per_wr(req_ctx);
 	error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
-	if (error)
-		return -ENOMEM;
+	if (error) {
+		error = -ENOMEM;
+		goto err;
+	}
 
 	get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
 	params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1853,6 +1939,8 @@ static int chcr_ahash_digest(struct ahash_request *req)
 	return isfull ? -EBUSY : -EINPROGRESS;
 unmap:
 	chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
+err:
+	chcr_dec_wrcount(dev);
 	return error;
 }
 
@@ -1924,6 +2012,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
 	int digestsize, updated_digestsize;
 	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
 	struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
+	struct chcr_dev *dev = h_ctx(tfm)->dev;
 
 	if (input == NULL)
 		goto out;
@@ -1966,6 +2055,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
 
 
 out:
+	chcr_dec_wrcount(dev);
 	req->base.complete(&req->base, err);
 }
 
@@ -3553,27 +3643,42 @@ static int chcr_aead_op(struct aead_request *req,
 			create_wr_t create_wr_fn)
 {
 	struct crypto_aead *tfm = crypto_aead_reqtfm(req);
+	struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
 	struct uld_ctx *u_ctx;
 	struct sk_buff *skb;
 	int isfull = 0;
+	struct chcr_dev *cdev;
 
-	if (!a_ctx(tfm)->dev) {
+	cdev = a_ctx(tfm)->dev;
+	if (!cdev) {
 		pr_err("chcr : %s : No crypto device.\n", __func__);
 		return -ENXIO;
 	}
+
+	if (chcr_inc_wrcount(cdev)) {
+		/* Detach state for CHCR means lldi or padap is freed.
+		 * We cannot increment fallback here.
+		 */
+		return chcr_aead_fallback(req, reqctx->op);
+	}
+
 	u_ctx = ULD_CTX(a_ctx(tfm));
 	if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
 				   a_ctx(tfm)->tx_qidx)) {
 		isfull = 1;
-		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
+		if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
+			chcr_dec_wrcount(cdev);
 			return -ENOSPC;
+		}
 	}
 
 	/* Form a WR from req */
 	skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
 
-	if (IS_ERR(skb) || !skb)
+	if (IS_ERR(skb) || !skb) {
+		chcr_dec_wrcount(cdev);
 		return PTR_ERR(skb);
+	}
 
 	skb->dev = u_ctx->lldi.ports[0];
 	set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);
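When the device is detaching, the AEAD path re-issues the request on a software implementation instead of failing outright. chcr_aead_fallback() is pre-existing driver code not shown in this excerpt; the usual shape of such a fallback, sketched with the generic crypto API (the nested-request placement inside the request context is an assumption of this sketch):

    static int aead_fallback_sketch(struct crypto_aead *sw_tfm,
                                    struct aead_request *req, bool decrypt)
    {
            /* Assumes the reqsize reserves room for a nested request. */
            struct aead_request *subreq = aead_request_ctx(req);

            aead_request_set_tfm(subreq, sw_tfm);
            aead_request_set_callback(subreq, req->base.flags,
                                      req->base.complete, req->base.data);
            aead_request_set_crypt(subreq, req->src, req->dst,
                                   req->cryptlen, req->iv);
            aead_request_set_ad(subreq, req->assoclen);

            return decrypt ? crypto_aead_decrypt(subreq) :
                             crypto_aead_encrypt(subreq);
    }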