Skip to content

Commit fef4912

Browse files
Harsh Jain authored and herbertx committed
crypto: chelsio - Handle PCI shutdown event
chcr receives "CXGB4_STATE_DETACH" event on PCI Shutdown. Wait for processing
of inflight requests and mark the device unavailable.

Signed-off-by: Harsh Jain <harsh@chelsio.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
1 parent c4f6d44 commit fef4912

File tree

3 files changed

+278
-93
lines changed

3 files changed

+278
-93
lines changed

drivers/crypto/chelsio/chcr_algo.c

Lines changed: 131 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -123,7 +123,7 @@ static inline struct chcr_authenc_ctx *AUTHENC_CTX(struct chcr_aead_ctx *gctx)
123123

124124
static inline struct uld_ctx *ULD_CTX(struct chcr_context *ctx)
125125
{
126-
return ctx->dev->u_ctx;
126+
return container_of(ctx->dev, struct uld_ctx, dev);
127127
}
128128

129129
static inline int is_ofld_imm(const struct sk_buff *skb)
@@ -198,17 +198,40 @@ void chcr_verify_tag(struct aead_request *req, u8 *input, int *err)
198198
*err = 0;
199199
}
200200

201+
static int chcr_inc_wrcount(struct chcr_dev *dev)
202+
{
203+
int err = 0;
204+
205+
spin_lock_bh(&dev->lock_chcr_dev);
206+
if (dev->state == CHCR_DETACH)
207+
err = 1;
208+
else
209+
atomic_inc(&dev->inflight);
210+
211+
spin_unlock_bh(&dev->lock_chcr_dev);
212+
213+
return err;
214+
}
215+
216+
static inline void chcr_dec_wrcount(struct chcr_dev *dev)
217+
{
218+
atomic_dec(&dev->inflight);
219+
}
220+
201221
static inline void chcr_handle_aead_resp(struct aead_request *req,
202222
unsigned char *input,
203223
int err)
204224
{
205225
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
226+
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
227+
struct chcr_dev *dev = a_ctx(tfm)->dev;
206228

207229
chcr_aead_common_exit(req);
208230
if (reqctx->verify == VERIFY_SW) {
209231
chcr_verify_tag(req, input, &err);
210232
reqctx->verify = VERIFY_HW;
211233
}
234+
chcr_dec_wrcount(dev);
212235
req->base.complete(&req->base, err);
213236
}
214237

@@ -1100,6 +1123,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
11001123
struct cpl_fw6_pld *fw6_pld = (struct cpl_fw6_pld *)input;
11011124
struct chcr_blkcipher_req_ctx *reqctx = ablkcipher_request_ctx(req);
11021125
struct cipher_wr_param wrparam;
1126+
struct chcr_dev *dev = c_ctx(tfm)->dev;
11031127
int bytes;
11041128

11051129
if (err)
@@ -1161,6 +1185,7 @@ static int chcr_handle_cipher_resp(struct ablkcipher_request *req,
11611185
unmap:
11621186
chcr_cipher_dma_unmap(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
11631187
complete:
1188+
chcr_dec_wrcount(dev);
11641189
req->base.complete(&req->base, err);
11651190
return err;
11661191
}
@@ -1187,7 +1212,10 @@ static int process_cipher(struct ablkcipher_request *req,
11871212
ablkctx->enckey_len, req->nbytes, ivsize);
11881213
goto error;
11891214
}
1190-
chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1215+
1216+
err = chcr_cipher_dma_map(&ULD_CTX(c_ctx(tfm))->lldi.pdev->dev, req);
1217+
if (err)
1218+
goto error;
11911219
if (req->nbytes < (SGE_MAX_WR_LEN - (sizeof(struct chcr_wr) +
11921220
AES_MIN_KEY_SIZE +
11931221
sizeof(struct cpl_rx_phys_dsgl) +
@@ -1276,15 +1304,21 @@ static int process_cipher(struct ablkcipher_request *req,
12761304
static int chcr_aes_encrypt(struct ablkcipher_request *req)
12771305
{
12781306
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
1307+
struct chcr_dev *dev = c_ctx(tfm)->dev;
12791308
struct sk_buff *skb = NULL;
12801309
int err, isfull = 0;
12811310
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
12821311

1312+
err = chcr_inc_wrcount(dev);
1313+
if (err)
1314+
return -ENXIO;
12831315
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
12841316
c_ctx(tfm)->tx_qidx))) {
12851317
isfull = 1;
1286-
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1287-
return -ENOSPC;
1318+
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1319+
err = -ENOSPC;
1320+
goto error;
1321+
}
12881322
}
12891323

12901324
err = process_cipher(req, u_ctx->lldi.rxq_ids[c_ctx(tfm)->rx_qidx],
@@ -1295,15 +1329,23 @@ static int chcr_aes_encrypt(struct ablkcipher_request *req)
12951329
set_wr_txq(skb, CPL_PRIORITY_DATA, c_ctx(tfm)->tx_qidx);
12961330
chcr_send_wr(skb);
12971331
return isfull ? -EBUSY : -EINPROGRESS;
1332+
error:
1333+
chcr_dec_wrcount(dev);
1334+
return err;
12981335
}
12991336

13001337
static int chcr_aes_decrypt(struct ablkcipher_request *req)
13011338
{
13021339
struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
13031340
struct uld_ctx *u_ctx = ULD_CTX(c_ctx(tfm));
1341+
struct chcr_dev *dev = c_ctx(tfm)->dev;
13041342
struct sk_buff *skb = NULL;
13051343
int err, isfull = 0;
13061344

1345+
err = chcr_inc_wrcount(dev);
1346+
if (err)
1347+
return -ENXIO;
1348+
13071349
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
13081350
c_ctx(tfm)->tx_qidx))) {
13091351
isfull = 1;
@@ -1333,10 +1375,11 @@ static int chcr_device_init(struct chcr_context *ctx)
13331375
if (!ctx->dev) {
13341376
u_ctx = assign_chcr_device();
13351377
if (!u_ctx) {
1378+
err = -ENXIO;
13361379
pr_err("chcr device assignment fails\n");
13371380
goto out;
13381381
}
1339-
ctx->dev = u_ctx->dev;
1382+
ctx->dev = &u_ctx->dev;
13401383
adap = padap(ctx->dev);
13411384
ntxq = u_ctx->lldi.ntxq;
13421385
rxq_perchan = u_ctx->lldi.nrxq / u_ctx->lldi.nchan;
@@ -1561,6 +1604,7 @@ static int chcr_ahash_update(struct ahash_request *req)
15611604
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
15621605
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
15631606
struct uld_ctx *u_ctx = NULL;
1607+
struct chcr_dev *dev = h_ctx(rtfm)->dev;
15641608
struct sk_buff *skb;
15651609
u8 remainder = 0, bs;
15661610
unsigned int nbytes = req->nbytes;
@@ -1569,12 +1613,6 @@ static int chcr_ahash_update(struct ahash_request *req)
15691613

15701614
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
15711615
u_ctx = ULD_CTX(h_ctx(rtfm));
1572-
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1573-
h_ctx(rtfm)->tx_qidx))) {
1574-
isfull = 1;
1575-
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1576-
return -ENOSPC;
1577-
}
15781616

15791617
if (nbytes + req_ctx->reqlen >= bs) {
15801618
remainder = (nbytes + req_ctx->reqlen) % bs;
@@ -1585,10 +1623,27 @@ static int chcr_ahash_update(struct ahash_request *req)
15851623
req_ctx->reqlen += nbytes;
15861624
return 0;
15871625
}
1626+
error = chcr_inc_wrcount(dev);
1627+
if (error)
1628+
return -ENXIO;
1629+
/* Detach state for CHCR means lldi or padap is freed. Increasing
1630+
* inflight count for dev guarantees that lldi and padap is valid
1631+
*/
1632+
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
1633+
h_ctx(rtfm)->tx_qidx))) {
1634+
isfull = 1;
1635+
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1636+
error = -ENOSPC;
1637+
goto err;
1638+
}
1639+
}
1640+
15881641
chcr_init_hctx_per_wr(req_ctx);
15891642
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1590-
if (error)
1591-
return -ENOMEM;
1643+
if (error) {
1644+
error = -ENOMEM;
1645+
goto err;
1646+
}
15921647
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
15931648
params.kctx_len = roundup(params.alg_prm.result_size, 16);
15941649
params.sg_len = chcr_hash_ent_in_wr(req->src, !!req_ctx->reqlen,
@@ -1628,6 +1683,8 @@ static int chcr_ahash_update(struct ahash_request *req)
16281683
return isfull ? -EBUSY : -EINPROGRESS;
16291684
unmap:
16301685
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1686+
err:
1687+
chcr_dec_wrcount(dev);
16311688
return error;
16321689
}
16331690

@@ -1645,10 +1702,16 @@ static int chcr_ahash_final(struct ahash_request *req)
16451702
{
16461703
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
16471704
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1705+
struct chcr_dev *dev = h_ctx(rtfm)->dev;
16481706
struct hash_wr_param params;
16491707
struct sk_buff *skb;
16501708
struct uld_ctx *u_ctx = NULL;
16511709
u8 bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1710+
int error = -EINVAL;
1711+
1712+
error = chcr_inc_wrcount(dev);
1713+
if (error)
1714+
return -ENXIO;
16521715

16531716
chcr_init_hctx_per_wr(req_ctx);
16541717
u_ctx = ULD_CTX(h_ctx(rtfm));
@@ -1685,19 +1748,25 @@ static int chcr_ahash_final(struct ahash_request *req)
16851748
}
16861749
params.hash_size = crypto_ahash_digestsize(rtfm);
16871750
skb = create_hash_wr(req, &params);
1688-
if (IS_ERR(skb))
1689-
return PTR_ERR(skb);
1751+
if (IS_ERR(skb)) {
1752+
error = PTR_ERR(skb);
1753+
goto err;
1754+
}
16901755
req_ctx->reqlen = 0;
16911756
skb->dev = u_ctx->lldi.ports[0];
16921757
set_wr_txq(skb, CPL_PRIORITY_DATA, h_ctx(rtfm)->tx_qidx);
16931758
chcr_send_wr(skb);
16941759
return -EINPROGRESS;
1760+
err:
1761+
chcr_dec_wrcount(dev);
1762+
return error;
16951763
}
16961764

16971765
static int chcr_ahash_finup(struct ahash_request *req)
16981766
{
16991767
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
17001768
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1769+
struct chcr_dev *dev = h_ctx(rtfm)->dev;
17011770
struct uld_ctx *u_ctx = NULL;
17021771
struct sk_buff *skb;
17031772
struct hash_wr_param params;
@@ -1706,17 +1775,24 @@ static int chcr_ahash_finup(struct ahash_request *req)
17061775

17071776
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
17081777
u_ctx = ULD_CTX(h_ctx(rtfm));
1778+
error = chcr_inc_wrcount(dev);
1779+
if (error)
1780+
return -ENXIO;
17091781

17101782
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
17111783
h_ctx(rtfm)->tx_qidx))) {
17121784
isfull = 1;
1713-
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1714-
return -ENOSPC;
1785+
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1786+
error = -ENOSPC;
1787+
goto err;
1788+
}
17151789
}
17161790
chcr_init_hctx_per_wr(req_ctx);
17171791
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1718-
if (error)
1719-
return -ENOMEM;
1792+
if (error) {
1793+
error = -ENOMEM;
1794+
goto err;
1795+
}
17201796

17211797
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
17221798
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1773,13 +1849,16 @@ static int chcr_ahash_finup(struct ahash_request *req)
17731849
return isfull ? -EBUSY : -EINPROGRESS;
17741850
unmap:
17751851
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1852+
err:
1853+
chcr_dec_wrcount(dev);
17761854
return error;
17771855
}
17781856

17791857
static int chcr_ahash_digest(struct ahash_request *req)
17801858
{
17811859
struct chcr_ahash_req_ctx *req_ctx = ahash_request_ctx(req);
17821860
struct crypto_ahash *rtfm = crypto_ahash_reqtfm(req);
1861+
struct chcr_dev *dev = h_ctx(rtfm)->dev;
17831862
struct uld_ctx *u_ctx = NULL;
17841863
struct sk_buff *skb;
17851864
struct hash_wr_param params;
@@ -1788,19 +1867,26 @@ static int chcr_ahash_digest(struct ahash_request *req)
17881867

17891868
rtfm->init(req);
17901869
bs = crypto_tfm_alg_blocksize(crypto_ahash_tfm(rtfm));
1870+
error = chcr_inc_wrcount(dev);
1871+
if (error)
1872+
return -ENXIO;
17911873

17921874
u_ctx = ULD_CTX(h_ctx(rtfm));
17931875
if (unlikely(cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
17941876
h_ctx(rtfm)->tx_qidx))) {
17951877
isfull = 1;
1796-
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
1797-
return -ENOSPC;
1878+
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
1879+
error = -ENOSPC;
1880+
goto err;
1881+
}
17981882
}
17991883

18001884
chcr_init_hctx_per_wr(req_ctx);
18011885
error = chcr_hash_dma_map(&u_ctx->lldi.pdev->dev, req);
1802-
if (error)
1803-
return -ENOMEM;
1886+
if (error) {
1887+
error = -ENOMEM;
1888+
goto err;
1889+
}
18041890

18051891
get_alg_config(&params.alg_prm, crypto_ahash_digestsize(rtfm));
18061892
params.kctx_len = roundup(params.alg_prm.result_size, 16);
@@ -1853,6 +1939,8 @@ static int chcr_ahash_digest(struct ahash_request *req)
18531939
return isfull ? -EBUSY : -EINPROGRESS;
18541940
unmap:
18551941
chcr_hash_dma_unmap(&u_ctx->lldi.pdev->dev, req);
1942+
err:
1943+
chcr_dec_wrcount(dev);
18561944
return error;
18571945
}
18581946

@@ -1924,6 +2012,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
19242012
int digestsize, updated_digestsize;
19252013
struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
19262014
struct uld_ctx *u_ctx = ULD_CTX(h_ctx(tfm));
2015+
struct chcr_dev *dev = h_ctx(tfm)->dev;
19272016

19282017
if (input == NULL)
19292018
goto out;
@@ -1966,6 +2055,7 @@ static inline void chcr_handle_ahash_resp(struct ahash_request *req,
19662055

19672056

19682057
out:
2058+
chcr_dec_wrcount(dev);
19692059
req->base.complete(&req->base, err);
19702060
}
19712061

@@ -3553,27 +3643,42 @@ static int chcr_aead_op(struct aead_request *req,
35533643
create_wr_t create_wr_fn)
35543644
{
35553645
struct crypto_aead *tfm = crypto_aead_reqtfm(req);
3646+
struct chcr_aead_reqctx *reqctx = aead_request_ctx(req);
35563647
struct uld_ctx *u_ctx;
35573648
struct sk_buff *skb;
35583649
int isfull = 0;
3650+
struct chcr_dev *cdev;
35593651

3560-
if (!a_ctx(tfm)->dev) {
3652+
cdev = a_ctx(tfm)->dev;
3653+
if (!cdev) {
35613654
pr_err("chcr : %s : No crypto device.\n", __func__);
35623655
return -ENXIO;
35633656
}
3657+
3658+
if (chcr_inc_wrcount(cdev)) {
3659+
/* Detach state for CHCR means lldi or padap is freed.
3660+
* We cannot increment fallback here.
3661+
*/
3662+
return chcr_aead_fallback(req, reqctx->op);
3663+
}
3664+
35643665
u_ctx = ULD_CTX(a_ctx(tfm));
35653666
if (cxgb4_is_crypto_q_full(u_ctx->lldi.ports[0],
35663667
a_ctx(tfm)->tx_qidx)) {
35673668
isfull = 1;
3568-
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG))
3669+
if (!(req->base.flags & CRYPTO_TFM_REQ_MAY_BACKLOG)) {
3670+
chcr_dec_wrcount(cdev);
35693671
return -ENOSPC;
3672+
}
35703673
}
35713674

35723675
/* Form a WR from req */
35733676
skb = create_wr_fn(req, u_ctx->lldi.rxq_ids[a_ctx(tfm)->rx_qidx], size);
35743677

3575-
if (IS_ERR(skb) || !skb)
3678+
if (IS_ERR(skb) || !skb) {
3679+
chcr_dec_wrcount(cdev);
35763680
return PTR_ERR(skb);
3681+
}
35773682

35783683
skb->dev = u_ctx->lldi.ports[0];
35793684
set_wr_txq(skb, CPL_PRIORITY_DATA, a_ctx(tfm)->tx_qidx);

0 commit comments

Comments
 (0)