@@ -4536,6 +4536,28 @@ int mlx5_ib_dealloc_xrcd(struct ib_xrcd *xrcd)
 	return 0;
 }
 
+static void mlx5_ib_wq_event(struct mlx5_core_qp *core_qp, int type)
+{
+	struct mlx5_ib_rwq *rwq = to_mibrwq(core_qp);
+	struct mlx5_ib_dev *dev = to_mdev(rwq->ibwq.device);
+	struct ib_event event;
+
+	if (rwq->ibwq.event_handler) {
+		event.device = rwq->ibwq.device;
+		event.element.wq = &rwq->ibwq;
+		switch (type) {
+		case MLX5_EVENT_TYPE_WQ_CATAS_ERROR:
+			event.event = IB_EVENT_WQ_FATAL;
+			break;
+		default:
+			mlx5_ib_warn(dev, "Unexpected event type %d on WQ %06x\n", type, core_qp->qpn);
+			return;
+		}
+
+		rwq->ibwq.event_handler(&event, rwq->ibwq.wq_context);
+	}
+}
+
 static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 		     struct ib_wq_init_attr *init_attr)
 {
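
Note: to_mibrwq() used above is defined elsewhere in this file, not in this hunk. A minimal sketch of how such a helper is typically written, assuming (as the hunks below suggest) that struct mlx5_ib_rwq embeds its mlx5_core_qp in a field named core_qp:

	static struct mlx5_ib_rwq *to_mibrwq(struct mlx5_core_qp *core_qp)
	{
		/* core_qp is embedded in mlx5_ib_rwq, so container_of()
		 * walks back from the core object handed to the event
		 * path to the full receive work queue wrapper.
		 */
		return container_of(core_qp, struct mlx5_ib_rwq, core_qp);
	}
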
@@ -4573,7 +4595,7 @@ static int create_rq(struct mlx5_ib_rwq *rwq, struct ib_pd *pd,
 	MLX5_SET64(wq, wq, dbr_addr, rwq->db.dma);
 	rq_pas0 = (__be64 *)MLX5_ADDR_OF(wq, wq, pas);
 	mlx5_ib_populate_pas(dev, rwq->umem, rwq->page_shift, rq_pas0, 0);
-	err = mlx5_core_create_rq(dev->mdev, in, inlen, &rwq->rqn);
+	err = mlx5_core_create_rq_tracked(dev->mdev, in, inlen, &rwq->core_qp);
 	kvfree(in);
 	return err;
 }
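
Switching from mlx5_core_create_rq() to mlx5_core_create_rq_tracked() is what makes the event dispatch above reachable: the tracked variant takes the whole mlx5_core_qp, rather than just an rqn out-parameter, so the core driver can record the RQ in its resource table and route firmware events carrying this QP number to core_qp->event. A rough sketch of the idea, where track_resource() is a hypothetical name for the core's bookkeeping, not its exact internals:

	/* Sketch only: create the RQ, then index it by its number so the
	 * async event demux can find the mlx5_core_qp and invoke ->event.
	 */
	static int create_rq_tracked_sketch(struct mlx5_core_dev *dev,
					    u32 *in, int inlen,
					    struct mlx5_core_qp *rq)
	{
		int err;

		err = mlx5_core_create_rq(dev, in, inlen, &rq->qpn);
		if (err)
			return err;

		err = track_resource(dev, rq->qpn, rq); /* hypothetical */
		if (err)
			mlx5_core_destroy_rq(dev, rq->qpn);
		return err;
	}
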
@@ -4689,7 +4711,7 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 		return ERR_PTR(-EINVAL);
 	}
 
-	rwq->ibwq.wq_num = rwq->rqn;
+	rwq->ibwq.wq_num = rwq->core_qp.qpn;
 	rwq->ibwq.state = IB_WQS_RESET;
 	if (udata->outlen) {
 		resp.response_length = offsetof(typeof(resp), response_length) +
@@ -4699,10 +4721,12 @@ struct ib_wq *mlx5_ib_create_wq(struct ib_pd *pd,
 			goto err_copy;
 	}
 
+	rwq->core_qp.event = mlx5_ib_wq_event;
+	rwq->ibwq.event_handler = init_attr->event_handler;
 	return &rwq->ibwq;
 
 err_copy:
-	mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
 err_user_rq:
 	destroy_user_rq(pd, rwq);
 err:
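
The two added lines complete the plumbing: core_qp.event points the low-level event path at mlx5_ib_wq_event(), and ibwq.event_handler stores the consumer callback that mlx5_ib_wq_event() invokes. From the consumer side, the handler is supplied through struct ib_wq_init_attr at creation time. A hedged usage sketch (not part of this patch; assumes an existing PD and CQ):

	/* Consumer-side sketch: IB_EVENT_WQ_FATAL now arrives through the
	 * standard verbs async event handler registered on the WQ.
	 */
	static void my_wq_event_handler(struct ib_event *event, void *ctx)
	{
		if (event->event == IB_EVENT_WQ_FATAL)
			pr_err("fatal event on WQ %u\n",
			       event->element.wq->wq_num);
	}

	static struct ib_wq *create_my_wq(struct ib_pd *pd, struct ib_cq *cq)
	{
		struct ib_wq_init_attr attr = {
			.wq_type	= IB_WQT_RQ,
			.max_wr		= 128,
			.max_sge	= 1,
			.cq		= cq,
			.event_handler	= my_wq_event_handler,
		};

		return ib_create_wq(pd, &attr);
	}
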
@@ -4715,7 +4739,7 @@ int mlx5_ib_destroy_wq(struct ib_wq *wq)
 	struct mlx5_ib_dev *dev = to_mdev(wq->device);
 	struct mlx5_ib_rwq *rwq = to_mrwq(wq);
 
-	mlx5_core_destroy_rq(dev->mdev, rwq->rqn);
+	mlx5_core_destroy_rq_tracked(dev->mdev, &rwq->core_qp);
 	destroy_user_rq(wq->pd, rwq);
 	kfree(rwq);
 
@@ -4847,7 +4871,7 @@ int mlx5_ib_modify_wq(struct ib_wq *wq, struct ib_wq_attr *wq_attr,
 	MLX5_SET(modify_rq_in, in, rq_state, curr_wq_state);
 	MLX5_SET(rqc, rqc, state, wq_state);
 
-	err = mlx5_core_modify_rq(dev->mdev, rwq->rqn, in, inlen);
+	err = mlx5_core_modify_rq(dev->mdev, rwq->core_qp.qpn, in, inlen);
 	kvfree(in);
 	if (!err)
 		rwq->ibwq.state = (wq_state == MLX5_RQC_STATE_ERR) ? IB_WQS_ERR : wq_state;
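
mlx5_core_modify_rq() still takes a raw RQ number, so the call site now reads the qpn out of the tracked core_qp instead of the removed rqn field. At the verbs level, a consumer that received IB_EVENT_WQ_FATAL would typically recover through this modify path, for example by driving the WQ back to RESET. A hedged consumer-side sketch:

	/* Sketch: after a fatal WQ event, move the WQ from ERR back to
	 * RESET before repopulating it with receive buffers.
	 */
	static int reset_wq_after_fatal(struct ib_wq *wq)
	{
		struct ib_wq_attr attr = {
			.curr_wq_state	= IB_WQS_ERR,
			.wq_state	= IB_WQS_RESET,
		};

		return ib_modify_wq(wq, &attr, IB_WQ_CUR_STATE | IB_WQ_STATE);
	}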