@@ -79,10 +79,13 @@
 struct vscsifrnt_shadow {
 	/* command between backend and frontend */
 	unsigned char act;
+	uint8_t nr_segments;
 	uint16_t rqid;
+	uint16_t ref_rqid;
 
 	unsigned int nr_grants;		/* number of grants in gref[] */
 	struct scsiif_request_segment *sg;	/* scatter/gather elements */
+	struct scsiif_request_segment seg[VSCSIIF_SG_TABLESIZE];
 
 	/* Do reset or abort function. */
 	wait_queue_head_t wq_reset;	/* reset work queue */
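
Note: the three added fields (nr_segments, ref_rqid, seg[]) let a request be staged entirely in the frontend's private shadow copy, so the shared ring slot is only touched once submission is sure to succeed. A minimal sketch of the intended staging pattern, based on the hunks below — submit_cdb() is a hypothetical wrapper, not part of the patch, and locking is omitted:

	/* Hypothetical wrapper, assuming only the shadow fields added above;
	 * host_lock handling and midlayer error mapping are omitted. */
	static int submit_cdb(struct vscsifrnt_info *info, struct scsi_cmnd *sc)
	{
		struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
		int err;

		memset(shadow, 0, sizeof(*shadow));
		shadow->sc  = sc;
		shadow->act = VSCSIIF_ACT_SCSI_CDB;

		/* records grants in shadow->seg[]/nr_segments, not the ring */
		err = map_data_for_request(info, sc, shadow);
		if (err < 0)
			return err;

		/* only now is a ring slot claimed and the shadow copied out */
		return scsifront_do_request(info, shadow);
	}
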
@@ -172,68 +175,90 @@ static void scsifront_put_rqid(struct vscsifrnt_info *info, uint32_t id)
 	scsifront_wake_up(info);
 }
 
-static struct vscsiif_request *scsifront_pre_req(struct vscsifrnt_info *info)
+static int scsifront_do_request(struct vscsifrnt_info *info,
+				struct vscsifrnt_shadow *shadow)
 {
 	struct vscsiif_front_ring *ring = &(info->ring);
 	struct vscsiif_request *ring_req;
+	struct scsi_cmnd *sc = shadow->sc;
 	uint32_t id;
+	int i, notify;
+
+	if (RING_FULL(&info->ring))
+		return -EBUSY;
 
 	id = scsifront_get_rqid(info);	/* use id in response */
 	if (id >= VSCSIIF_MAX_REQS)
-		return NULL;
+		return -EBUSY;
 
-	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
+	info->shadow[id] = shadow;
+	shadow->rqid = id;
 
+	ring_req = RING_GET_REQUEST(&(info->ring), ring->req_prod_pvt);
 	ring->req_prod_pvt++;
 
-	ring_req->rqid = (uint16_t)id;
+	ring_req->rqid        = id;
+	ring_req->act         = shadow->act;
+	ring_req->ref_rqid    = shadow->ref_rqid;
+	ring_req->nr_segments = shadow->nr_segments;
 
-	return ring_req;
-}
+	ring_req->id      = sc->device->id;
+	ring_req->lun     = sc->device->lun;
+	ring_req->channel = sc->device->channel;
+	ring_req->cmd_len = sc->cmd_len;
 
-static void scsifront_do_request(struct vscsifrnt_info *info)
-{
-	struct vscsiif_front_ring *ring = &(info->ring);
-	int notify;
+	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
+
+	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
+
+	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
+	ring_req->timeout_per_command = sc->request->timeout / HZ;
+
+	for (i = 0; i < (shadow->nr_segments & ~VSCSIIF_SG_GRANT); i++)
+		ring_req->seg[i] = shadow->seg[i];
 
 	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
 	if (notify)
 		notify_remote_via_irq(info->irq);
+
+	return 0;
 }
 
-static void scsifront_gnttab_done(struct vscsifrnt_info *info, uint32_t id)
+static void scsifront_gnttab_done(struct vscsifrnt_info *info,
+				  struct vscsifrnt_shadow *shadow)
 {
-	struct vscsifrnt_shadow *s = info->shadow[id];
 	int i;
 
-	if (s->sc->sc_data_direction == DMA_NONE)
+	if (shadow->sc->sc_data_direction == DMA_NONE)
 		return;
 
-	for (i = 0; i < s->nr_grants; i++) {
-		if (unlikely(gnttab_query_foreign_access(s->gref[i]) != 0)) {
+	for (i = 0; i < shadow->nr_grants; i++) {
+		if (unlikely(gnttab_query_foreign_access(shadow->gref[i]))) {
 			shost_printk(KERN_ALERT, info->host, KBUILD_MODNAME
 				     "grant still in use by backend\n");
 			BUG();
 		}
-		gnttab_end_foreign_access(s->gref[i], 0, 0UL);
+		gnttab_end_foreign_access(shadow->gref[i], 0, 0UL);
 	}
 
-	kfree(s->sg);
+	kfree(shadow->sg);
 }
 
 static void scsifront_cdb_cmd_done(struct vscsifrnt_info *info,
 				   struct vscsiif_response *ring_rsp)
 {
+	struct vscsifrnt_shadow *shadow;
 	struct scsi_cmnd *sc;
 	uint32_t id;
 	uint8_t sense_len;
 
 	id = ring_rsp->rqid;
-	sc = info->shadow[id]->sc;
+	shadow = info->shadow[id];
+	sc = shadow->sc;
 
 	BUG_ON(sc == NULL);
 
-	scsifront_gnttab_done(info, id);
+	scsifront_gnttab_done(info, shadow);
 	scsifront_put_rqid(info, id);
 
 	sc->result = ring_rsp->rslt;
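
Note: scsifront_do_request() now folds the old scsifront_pre_req()/scsifront_command2ring() pair into one function that fails with -EBUSY before any ring state is modified. The caller contract this enables, roughly sketched (host_lock handling omitted; the real retry loop is in scsifront_action_handler further down):

	/* Sketch of the retry contract: 0 means the ring now owns the
	 * shadow until the response arrives; -EBUSY means no slot or no
	 * free rqid, so sleep until the ring drains and try again. */
	for (;;) {
		if (!scsifront_do_request(info, shadow))
			break;
		info->wait_ring_available = 1;
		wait_event_interruptible(info->wq_sync,
					 !info->wait_ring_available);
	}
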
@@ -366,7 +391,6 @@ static void scsifront_finish_all(struct vscsifrnt_info *info)
 
 static int map_data_for_request(struct vscsifrnt_info *info,
 				struct scsi_cmnd *sc,
-				struct vscsiif_request *ring_req,
 				struct vscsifrnt_shadow *shadow)
 {
 	grant_ref_t gref_head;
@@ -379,7 +403,6 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 	struct scatterlist *sg;
 	struct scsiif_request_segment *seg;
 
-	ring_req->nr_segments = 0;
 	if (sc->sc_data_direction == DMA_NONE || !data_len)
 		return 0;
 
@@ -398,7 +421,7 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 		if (!shadow->sg)
 			return -ENOMEM;
 	}
-	seg = shadow->sg ? : ring_req->seg;
+	seg = shadow->sg ? : shadow->seg;
 
 	err = gnttab_alloc_grant_references(seg_grants + data_grants,
 					    &gref_head);
@@ -423,9 +446,9 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 					info->dev->otherend_id,
 					xen_page_to_gfn(page), 1);
 			shadow->gref[ref_cnt] = ref;
-			ring_req->seg[ref_cnt].gref   = ref;
-			ring_req->seg[ref_cnt].offset = (uint16_t)off;
-			ring_req->seg[ref_cnt].length = (uint16_t)bytes;
+			shadow->seg[ref_cnt].gref   = ref;
+			shadow->seg[ref_cnt].offset = (uint16_t)off;
+			shadow->seg[ref_cnt].length = (uint16_t)bytes;
 
 			page++;
 			len -= bytes;
@@ -473,44 +496,14 @@ static int map_data_for_request(struct vscsifrnt_info *info,
 	}
 
 	if (seg_grants)
-		ring_req->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
+		shadow->nr_segments = VSCSIIF_SG_GRANT | seg_grants;
 	else
-		ring_req->nr_segments = (uint8_t)ref_cnt;
+		shadow->nr_segments = (uint8_t)ref_cnt;
 	shadow->nr_grants = ref_cnt;
 
 	return 0;
 }
 
-static struct vscsiif_request *scsifront_command2ring(
-	struct vscsifrnt_info *info, struct scsi_cmnd *sc,
-	struct vscsifrnt_shadow *shadow)
-{
-	struct vscsiif_request *ring_req;
-
-	memset(shadow, 0, sizeof(*shadow));
-
-	ring_req = scsifront_pre_req(info);
-	if (!ring_req)
-		return NULL;
-
-	info->shadow[ring_req->rqid] = shadow;
-	shadow->rqid = ring_req->rqid;
-
-	ring_req->id      = sc->device->id;
-	ring_req->lun     = sc->device->lun;
-	ring_req->channel = sc->device->channel;
-	ring_req->cmd_len = sc->cmd_len;
-
-	BUG_ON(sc->cmd_len > VSCSIIF_MAX_COMMAND_SIZE);
-
-	memcpy(ring_req->cmnd, sc->cmnd, sc->cmd_len);
-
-	ring_req->sc_data_direction   = (uint8_t)sc->sc_data_direction;
-	ring_req->timeout_per_command = sc->request->timeout / HZ;
-
-	return ring_req;
-}
-
 static int scsifront_enter(struct vscsifrnt_info *info)
 {
 	if (info->pause)
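
Note: with ring_req gone from its parameter list, map_data_for_request() no longer writes to the shared ring at all; grants and segment descriptors land only in the shadow. That is what makes the late-failure path in scsifront_queuecommand() below possible, roughly:

	/* Sketch of the pairing used below: a submit that fails after the
	 * grants were taken can be undone without touching the ring. */
	err = map_data_for_request(info, sc, shadow);	/* fills shadow->seg[] */
	if (!err && scsifront_do_request(info, shadow)) {
		scsifront_gnttab_done(info, shadow);	/* release grants */
		err = -EBUSY;
	}
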
@@ -536,36 +529,25 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 				  struct scsi_cmnd *sc)
 {
 	struct vscsifrnt_info *info = shost_priv(shost);
-	struct vscsiif_request *ring_req;
 	struct vscsifrnt_shadow *shadow = scsi_cmd_priv(sc);
 	unsigned long flags;
 	int err;
-	uint16_t rqid;
+
+	sc->result = 0;
+	memset(shadow, 0, sizeof(*shadow));
+
+	shadow->sc  = sc;
+	shadow->act = VSCSIIF_ACT_SCSI_CDB;
 
 	spin_lock_irqsave(shost->host_lock, flags);
 	if (scsifront_enter(info)) {
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		return SCSI_MLQUEUE_HOST_BUSY;
 	}
-	if (RING_FULL(&info->ring))
-		goto busy;
 
-	ring_req = scsifront_command2ring(info, sc, shadow);
-	if (!ring_req)
-		goto busy;
-
-	sc->result = 0;
-
-	rqid = ring_req->rqid;
-	ring_req->act = VSCSIIF_ACT_SCSI_CDB;
-
-	shadow->sc  = sc;
-	shadow->act = VSCSIIF_ACT_SCSI_CDB;
-
-	err = map_data_for_request(info, sc, ring_req, shadow);
+	err = map_data_for_request(info, sc, shadow);
 	if (err < 0) {
 		pr_debug("%s: err %d\n", __func__, err);
-		scsifront_put_rqid(info, rqid);
 		scsifront_return(info);
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		if (err == -ENOMEM)
@@ -575,7 +557,11 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 		return 0;
 	}
 
-	scsifront_do_request(info);
+	if (scsifront_do_request(info, shadow)) {
+		scsifront_gnttab_done(info, shadow);
+		goto busy;
+	}
+
 	scsifront_return(info);
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
@@ -598,50 +584,37 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 	struct Scsi_Host *host = sc->device->host;
 	struct vscsifrnt_info *info = shost_priv(host);
 	struct vscsifrnt_shadow *shadow, *s = scsi_cmd_priv(sc);
-	struct vscsiif_request *ring_req;
 	int err = 0;
 
-	shadow = kmalloc(sizeof(*shadow), GFP_NOIO);
+	shadow = kzalloc(sizeof(*shadow), GFP_NOIO);
 	if (!shadow)
 		return FAILED;
 
+	shadow->act = act;
+	shadow->rslt_reset = RSLT_RESET_WAITING;
+	shadow->sc = sc;
+	shadow->ref_rqid = s->rqid;
+	init_waitqueue_head(&shadow->wq_reset);
+
 	spin_lock_irq(host->host_lock);
 
 	for (;;) {
-		if (!RING_FULL(&info->ring)) {
-			ring_req = scsifront_command2ring(info, sc, shadow);
-			if (ring_req)
-				break;
-		}
-		if (err || info->pause) {
-			spin_unlock_irq(host->host_lock);
-			kfree(shadow);
-			return FAILED;
-		}
+		if (scsifront_enter(info))
+			goto fail;
+
+		if (!scsifront_do_request(info, shadow))
+			break;
+
+		scsifront_return(info);
+		if (err)
+			goto fail;
+
 		info->wait_ring_available = 1;
 		spin_unlock_irq(host->host_lock);
 		err = wait_event_interruptible(info->wq_sync,
 					       !info->wait_ring_available);
 		spin_lock_irq(host->host_lock);
 	}
 
-	if (scsifront_enter(info)) {
-		spin_unlock_irq(host->host_lock);
-		kfree(shadow);
-		return FAILED;
-	}
-
-	ring_req->act = act;
-	ring_req->ref_rqid = s->rqid;
-
-	shadow->act = act;
-	shadow->rslt_reset = RSLT_RESET_WAITING;
-	init_waitqueue_head(&shadow->wq_reset);
-
-	ring_req->nr_segments = 0;
-
-	scsifront_do_request(info);
-
 	spin_unlock_irq(host->host_lock);
 	err = wait_event_interruptible(shadow->wq_reset, shadow->wait_reset);
 	spin_lock_irq(host->host_lock);
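
Note: the reset/abort shadow is now kzalloc()ed and fully initialized (act, ref_rqid, wq_reset) before the retry loop rather than after slot allocation, so nr_segments starts at zero and the old explicit ring_req->nr_segments = 0 is no longer needed; both error exits collapse into the shared fail: label added in the next hunk.
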
@@ -660,6 +633,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 	scsifront_return(info);
 	spin_unlock_irq(host->host_lock);
 	return err;
+
+fail:
+	spin_unlock_irq(host->host_lock);
+	kfree(shadow);
+	return FAILED;
 }
 
 static int scsifront_eh_abort_handler(struct scsi_cmnd *sc)