Skip to content

Commit 3da96be

Browse files
committed
xen/scsifront: don't request a slot on the ring until request is ready
Instead of requesting a new slot on the ring to the backend early, do so only after all has been setup for the request to be sent. This makes error handling easier as we don't need to undo the request id allocation and ring slot allocation. Suggested-by: Jan Beulich <jbeulich@suse.com> Signed-off-by: Juergen Gross <jgross@suse.com> Reviewed-by: Boris Ostrovsky <boris.ostrovsky@oracle.com> Signed-off-by: Juergen Gross <jgross@suse.com>
1 parent 738662c commit 3da96be

File tree

1 file changed

+83
-105
lines changed

1 file changed

+83
-105
lines changed

drivers/scsi/xen-scsifront.c

Lines changed: 83 additions & 105 deletions
Original file line numberDiff line numberDiff line change
@@ -79,10 +79,13 @@
7979
struct vscsifrnt_shadow {
8080
/* command between backend and frontend */
8181
unsigned char act;
82+
uint8_t nr_segments;
8283
uint16_t rqid;
84+
uint16_t ref_rqid;
8385

8486
unsigned int nr_grants; /* number of grants in gref[] */
8587
struct scsiif_request_segment *sg; /* scatter/gather elements */
88+
struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
8689

8790
/* Do reset or abort function. */
8891
wait_queue_head_t wq_reset; /* reset work queue */
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
172175
scsifront_wake_up(info);
173176
}
174177

175-
static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
178+
static int scsifront_do_request(struct vscsifrnt_info *info,
179+
struct vscsifrnt_shadow *shadow)
176180
{
177181
struct vscsiif_front_ring *ring = &(info->ring);
178182
struct vscsiif_request *ring_req;
183+
struct scsi_cmnd *sc = shadow->sc;
179184
uint32_t id;
185+
int i, notify;
186+
187+
if (RING_FULL(&info->ring))
188+
return -EBUSY;
180189

181190
id = scsifront_get_rqid(info); /* use id in response */
182191
if (id >= VSCSIIF_MAX_REQS)
183-
return NULL;
192+
return -EBUSY;
184193

185-
ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
194+
info->shadow[id] = shadow;
195+
shadow->rqid = id;
186196

197+
ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
187198
ring->req_prod_pvt++;
188199

189-
ring_req->rqid = (uint16_t)id;
200+
ring_req->rqid = id;
201+
ring_req->act = shadow->act;
202+
ring_req->ref_rqid = shadow->ref_rqid;
203+
ring_req->nr_segments = shadow->nr_segments;
190204

191-
return ring_req;
192-
}
205+
ring_req->id = sc->device->id;
206+
ring_req->lun = sc->device->lun;
207+
ring_req->channel = sc->device->channel;
208+
ring_req->cmd_len = sc->cmd_len;
193209

194-
static void scsifront_do_request(struct vscsifrnt_info *info)
195-
{
196-
struct vscsiif_front_ring *ring = &(info->ring);
197-
int notify;
210+
BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
211+
212+
memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
213+
214+
ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
215+
ring_req->timeout_per_command = sc->request->timeout / HZ;
216+
217+
for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
218+
ring_req->seg[i] = shadow->seg[i];
198219

199220
RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
200221
if (notify)
201222
notify_remote_via_irq(info->irq);
223+
224+
return 0;
202225
}
203226

204-
static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
227+
static void scsifront_gnttab_done(struct vscsifrnt_info *info,
228+
struct vscsifrnt_shadow *shadow)
205229
{
206-
struct vscsifrnt_shadow *s = info->shadow[id];
207230
int i;
208231

209-
if (s->sc->sc_data_direction == DMA_NONE)
232+
if (shadow->sc->sc_data_direction == DMA_NONE)
210233
return;
211234

212-
for (i = 0; i < s->nr_grants; i++) {
213-
if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
235+
for (i = 0; i < shadow->nr_grants; i++) {
236+
if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
214237
shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
215238
"grant still in use by backend\n");
216239
BUG();
217240
}
218-
gnttab_end_foreign_access(s->gref[i], 0, 0UL);
241+
gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
219242
}
220243

221-
kfree(s->sg);
244+
kfree(shadow->sg);
222245
}
223246

224247
static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
225248
struct vscsiif_response *ring_rsp)
226249
{
250+
struct vscsifrnt_shadow *shadow;
227251
struct scsi_cmnd *sc;
228252
uint32_t id;
229253
uint8_t sense_len;
230254

231255
id = ring_rsp->rqid;
232-
sc = info->shadow[id]->sc;
256+
shadow = info->shadow[id];
257+
sc = shadow->sc;
233258

234259
BUG_ON(sc == NULL);
235260

236-
scsifront_gnttab_done(info, id);
261+
scsifront_gnttab_done(info, shadow);
237262
scsifront_put_rqid(info, id);
238263

239264
sc->result = ring_rsp->rslt;
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info)
366391

367392
static int map_data_for_request(struct vscsifrnt_info *info,
368393
struct scsi_cmnd *sc,
369-
struct vscsiif_request *ring_req,
370394
struct vscsifrnt_shadow *shadow)
371395
{
372396
grant_ref_t gref_head;
@@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
379403
struct scatterlist *sg;
380404
struct scsiif_request_segment *seg;
381405

382-
ring_req->nr_segments = 0;
383406
if (sc->sc_data_direction == DMA_NONE || !data_len)
384407
return 0;
385408

@@ -398,7 +421,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
398421
if (!shadow->sg)
399422
return -ENOMEM;
400423
}
401-
seg = shadow->sg ? : ring_req->seg;
424+
seg = shadow->sg ? : shadow->seg;
402425

403426
err = gnttab_alloc_grant_references(seg_grants + data_grants,
404427
&gref_head);
@@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
423446
info->dev->otherend_id,
424447
xen_page_to_gfn(page), 1);
425448
shadow->gref[ref_cnt] = ref;
426-
ring_req->seg[ref_cnt].gref = ref;
427-
ring_req->seg[ref_cnt].offset = (uint16_t)off;
428-
ring_req->seg[ref_cnt].length = (uint16_t)bytes;
449+
shadow->seg[ref_cnt].gref = ref;
450+
shadow->seg[ref_cnt].offset = (uint16_t)off;
451+
shadow->seg[ref_cnt].length = (uint16_t)bytes;
429452

430453
page++;
431454
len -= bytes;
@@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info,
473496
}
474497

475498
if (seg_grants)
476-
ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
499+
shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
477500
else
478-
ring_req->nr_segments = (uint8_t)ref_cnt;
501+
shadow->nr_segments = (uint8_t)ref_cnt;
479502
shadow->nr_grants = ref_cnt;
480503

481504
return 0;
482505
}
483506

484-
static struct vscsiif_request *scsifront_command2ring(
485-
struct vscsifrnt_info *info, struct scsi_cmnd *sc,
486-
struct vscsifrnt_shadow *shadow)
487-
{
488-
struct vscsiif_request *ring_req;
489-
490-
memset(shadow, 0, sizeof(*shadow));
491-
492-
ring_req = scsifront_pre_req(info);
493-
if (!ring_req)
494-
return NULL;
495-
496-
info->shadow[ring_req->rqid] = shadow;
497-
shadow->rqid = ring_req->rqid;
498-
499-
ring_req->id = sc->device->id;
500-
ring_req->lun = sc->device->lun;
501-
ring_req->channel = sc->device->channel;
502-
ring_req->cmd_len = sc->cmd_len;
503-
504-
BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
505-
506-
memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
507-
508-
ring_req->sc_data_direction = (uint8_t)sc->sc_data_direction;
509-
ring_req->timeout_per_command = sc->request->timeout / HZ;
510-
511-
return ring_req;
512-
}
513-
514507
static int scsifront_enter(struct vscsifrnt_info *info)
515508
{
516509
if (info->pause)
@@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
536529
struct scsi_cmnd *sc)
537530
{
538531
struct vscsifrnt_info *info = shost_priv(shost);
539-
struct vscsiif_request *ring_req;
540532
struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
541533
unsigned long flags;
542534
int err;
543-
uint16_t rqid;
535+
536+
sc->result = 0;
537+
memset(shadow, 0, sizeof(*shadow));
538+
539+
shadow->sc = sc;
540+
shadow->act = VSCSIIF_ACT_SCSI_CDB;
544541

545542
spin_lock_irqsave(shost->host_lock, flags);
546543
if (scsifront_enter(info)) {
547544
spin_unlock_irqrestore(shost->host_lock, flags);
548545
return SCSI_MLQUEUE_HOST_BUSY;
549546
}
550-
if (RING_FULL(&info->ring))
551-
goto busy;
552547

553-
ring_req = scsifront_command2ring(info, sc, shadow);
554-
if (!ring_req)
555-
goto busy;
556-
557-
sc->result = 0;
558-
559-
rqid = ring_req->rqid;
560-
ring_req->act = VSCSIIF_ACT_SCSI_CDB;
561-
562-
shadow->sc = sc;
563-
shadow->act = VSCSIIF_ACT_SCSI_CDB;
564-
565-
err = map_data_for_request(info, sc, ring_req, shadow);
548+
err = map_data_for_request(info, sc, shadow);
566549
if (err < 0) {
567550
pr_debug("%s: err %d\n", __func__, err);
568-
scsifront_put_rqid(info, rqid);
569551
scsifront_return(info);
570552
spin_unlock_irqrestore(shost->host_lock, flags);
571553
if (err == -ENOMEM)
@@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
575557
return 0;
576558
}
577559

578-
scsifront_do_request(info);
560+
if (scsifront_do_request(info, shadow)) {
561+
scsifront_gnttab_done(info, shadow);
562+
goto busy;
563+
}
564+
579565
scsifront_return(info);
580566
spin_unlock_irqrestore(shost->host_lock, flags);
581567

@@ -598,50 +584,37 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
598584
struct Scsi_Host *host = sc->device->host;
599585
struct vscsifrnt_info *info = shost_priv(host);
600586
struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
601-
struct vscsiif_request *ring_req;
602587
int err = 0;
603588

604-
shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
589+
shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
605590
if (!shadow)
606591
return FAILED;
607592

593+
shadow->act = act;
594+
shadow->rslt_reset = RSLT_RESET_WAITING;
595+
shadow->sc = sc;
596+
shadow->ref_rqid = s->rqid;
597+
init_waitqueue_head(&shadow->wq_reset);
598+
608599
spin_lock_irq(host->host_lock);
609600

610601
for (;;) {
611-
if (!RING_FULL(&info->ring)) {
612-
ring_req = scsifront_command2ring(info, sc, shadow);
613-
if (ring_req)
614-
break;
615-
}
616-
if (err || info->pause) {
617-
spin_unlock_irq(host->host_lock);
618-
kfree(shadow);
619-
return FAILED;
620-
}
602+
if (scsifront_enter(info))
603+
goto fail;
604+
605+
if (!scsifront_do_request(info, shadow))
606+
break;
607+
608+
scsifront_return(info);
609+
if (err)
610+
goto fail;
621611
info->wait_ring_available = 1;
622612
spin_unlock_irq(host->host_lock);
623613
err = wait_event_interruptible(info->wq_sync,
624614
!info->wait_ring_available);
625615
spin_lock_irq(host->host_lock);
626616
}
627617

628-
if (scsifront_enter(info)) {
629-
spin_unlock_irq(host->host_lock);
630-
kfree(shadow);
631-
return FAILED;
632-
}
633-
634-
ring_req->act = act;
635-
ring_req->ref_rqid = s->rqid;
636-
637-
shadow->act = act;
638-
shadow->rslt_reset = RSLT_RESET_WAITING;
639-
init_waitqueue_head(&shadow->wq_reset);
640-
641-
ring_req->nr_segments = 0;
642-
643-
scsifront_do_request(info);
644-
645618
spin_unlock_irq(host->host_lock);
646619
err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
647620
spin_lock_irq(host->host_lock);
@@ -660,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
660633
scsifront_return(info);
661634
spin_unlock_irq(host->host_lock);
662635
return err;
636+
637+
fail:
638+
spin_unlock_irq(host->host_lock);
639+
kfree(shadow);
640+
return FAILED;
663641
}
664642

665643
static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)

0 commit comments

Comments (0)