@@ -66,6 +66,7 @@ struct nvmet_rdma_rsp {
 
 	struct nvmet_req	req;
 
+	bool			allocated;
 	u8			n_rdma;
 	u32			flags;
 	u32			invalidate_rkey;
@@ -174,11 +175,19 @@ nvmet_rdma_get_rsp(struct nvmet_rdma_queue *queue)
 	unsigned long flags;
 
 	spin_lock_irqsave(&queue->rsps_lock, flags);
-	rsp = list_first_entry(&queue->free_rsps,
+	rsp = list_first_entry_or_null(&queue->free_rsps,
 				struct nvmet_rdma_rsp, free_list);
-	list_del(&rsp->free_list);
+	if (likely(rsp))
+		list_del(&rsp->free_list);
 	spin_unlock_irqrestore(&queue->rsps_lock, flags);
 
+	if (unlikely(!rsp)) {
+		rsp = kmalloc(sizeof(*rsp), GFP_KERNEL);
+		if (unlikely(!rsp))
+			return NULL;
+		rsp->allocated = true;
+	}
+
 	return rsp;
 }
 
@@ -187,6 +196,11 @@ nvmet_rdma_put_rsp(struct nvmet_rdma_rsp *rsp)
 {
 	unsigned long flags;
 
+	if (rsp->allocated) {
+		kfree(rsp);
+		return;
+	}
+
 	spin_lock_irqsave(&rsp->queue->rsps_lock, flags);
 	list_add_tail(&rsp->free_list, &rsp->queue->free_rsps);
 	spin_unlock_irqrestore(&rsp->queue->rsps_lock, flags);
@@ -776,6 +790,15 @@ static void nvmet_rdma_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 
 	cmd->queue = queue;
 	rsp = nvmet_rdma_get_rsp(queue);
+	if (unlikely(!rsp)) {
+		/*
+		 * we get here only under memory pressure,
+		 * silently drop and have the host retry
+		 * as we can't even fail it.
+		 */
+		nvmet_rdma_post_recv(queue->dev, cmd);
+		return;
+	}
 	rsp->queue = queue;
 	rsp->cmd = cmd;
 	rsp->flags = 0;
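For reference, the pattern this patch applies to nvmet_rdma_rsp is a pre-allocated pool whose get path falls back to a heap allocation when the pool runs dry, with an allocated flag so the put path knows whether to free or to re-pool. Below is a minimal userspace C sketch of that pattern, not the kernel code itself; the names (rsp_pool, rsp_get, rsp_put) and the pthread mutex standing in for the rsps_lock spinlock are illustrative assumptions.

/* Sketch only: pool with malloc() fallback, mirroring the patch's logic. */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct rsp {
	struct rsp *next;	/* free-list linkage while pooled           */
	bool allocated;		/* true if obtained via the malloc() fallback */
};

struct rsp_pool {
	pthread_mutex_t lock;
	struct rsp *free_head;	/* singly linked list of idle responses     */
};

/* Take a response from the pool, or fall back to the heap under pressure. */
static struct rsp *rsp_get(struct rsp_pool *pool)
{
	struct rsp *rsp;

	pthread_mutex_lock(&pool->lock);
	rsp = pool->free_head;
	if (rsp)
		pool->free_head = rsp->next;
	pthread_mutex_unlock(&pool->lock);

	if (!rsp) {
		rsp = malloc(sizeof(*rsp));
		if (!rsp)
			return NULL;	/* caller must tolerate failure */
		rsp->allocated = true;
	}
	return rsp;
}

/* Return a response: free fallback allocations, re-pool the rest. */
static void rsp_put(struct rsp_pool *pool, struct rsp *rsp)
{
	if (rsp->allocated) {
		free(rsp);
		return;
	}
	pthread_mutex_lock(&pool->lock);
	rsp->next = pool->free_head;
	pool->free_head = rsp;
	pthread_mutex_unlock(&pool->lock);
}

int main(void)
{
	struct rsp pooled = { .next = NULL, .allocated = false };
	struct rsp_pool pool = { .lock = PTHREAD_MUTEX_INITIALIZER,
				 .free_head = &pooled };

	struct rsp *a = rsp_get(&pool);	/* comes from the pool             */
	struct rsp *b = rsp_get(&pool);	/* pool empty -> malloc() fallback */

	printf("a allocated=%d, b allocated=%d\n", a->allocated, b->allocated);
	rsp_put(&pool, b);		/* freed                           */
	rsp_put(&pool, a);		/* returned to the free list       */
	return 0;
}

As in the patch, only the get path that could not obtain a response at all propagates failure to the caller, which in nvmet_rdma_recv_done() is handled by re-posting the receive buffer and letting the host retry.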