 
 #define VSCSIFRONT_OP_ADD_LUN		1
 #define VSCSIFRONT_OP_DEL_LUN		2
+#define VSCSIFRONT_OP_READD_LUN		3
 
 /* Tuning point. */
 #define VSCSIIF_DEFAULT_CMD_PER_LUN	10
@@ -113,8 +114,13 @@ struct vscsifrnt_info {
 	DECLARE_BITMAP(shadow_free_bitmap, VSCSIIF_MAX_REQS);
 	struct vscsifrnt_shadow	*shadow[VSCSIIF_MAX_REQS];
 
+	/* Following items are protected by the host lock. */
 	wait_queue_head_t	wq_sync;
+	wait_queue_head_t	wq_pause;
 	unsigned int		wait_ring_available:1;
+	unsigned int		waiting_pause:1;
+	unsigned int		pause:1;
+	unsigned		callers;
 
 	char			dev_state_path[64];
 	struct task_struct	*curr;
@@ -274,31 +280,31 @@ static void scsifront_sync_cmd_done(struct vscsifrnt_info *info,
 	wake_up(&shadow->wq_reset);
 }
 
-static int scsifront_cmd_done(struct vscsifrnt_info *info)
+static void scsifront_do_response(struct vscsifrnt_info *info,
+				  struct vscsiif_response *ring_rsp)
+{
+	if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
+		 test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
+		 "illegal rqid %u returned by backend!\n", ring_rsp->rqid))
+		return;
+
+	if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
+		scsifront_cdb_cmd_done(info, ring_rsp);
+	else
+		scsifront_sync_cmd_done(info, ring_rsp);
+}
+
+static int scsifront_ring_drain(struct vscsifrnt_info *info)
 {
 	struct vscsiif_response *ring_rsp;
 	RING_IDX i, rp;
 	int more_to_do = 0;
-	unsigned long flags;
-
-	spin_lock_irqsave(info->host->host_lock, flags);
 
 	rp = info->ring.sring->rsp_prod;
 	rmb();	/* ordering required respective to dom0 */
 	for (i = info->ring.rsp_cons; i != rp; i++) {
-
 		ring_rsp = RING_GET_RESPONSE(&info->ring, i);
-
-		if (WARN(ring_rsp->rqid >= VSCSIIF_MAX_REQS ||
-			 test_bit(ring_rsp->rqid, info->shadow_free_bitmap),
-			 "illegal rqid %u returned by backend!\n",
-			 ring_rsp->rqid))
-			continue;
-
-		if (info->shadow[ring_rsp->rqid]->act == VSCSIIF_ACT_SCSI_CDB)
-			scsifront_cdb_cmd_done(info, ring_rsp);
-		else
-			scsifront_sync_cmd_done(info, ring_rsp);
+		scsifront_do_response(info, ring_rsp);
 	}
 
 	info->ring.rsp_cons = i;
@@ -308,6 +314,18 @@ static int scsifront_cmd_done(struct vscsifrnt_info *info)
 	else
 		info->ring.sring->rsp_event = i + 1;
 
+	return more_to_do;
+}
+
+static int scsifront_cmd_done(struct vscsifrnt_info *info)
+{
+	int more_to_do;
+	unsigned long flags;
+
+	spin_lock_irqsave(info->host->host_lock, flags);
+
+	more_to_do = scsifront_ring_drain(info);
+
 	info->wait_ring_available = 0;
 
 	spin_unlock_irqrestore(info->host->host_lock, flags);
@@ -328,6 +346,24 @@ static irqreturn_t scsifront_irq_fn(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }
 
+static void scsifront_finish_all(struct vscsifrnt_info *info)
+{
+	unsigned i;
+	struct vscsiif_response resp;
+
+	scsifront_ring_drain(info);
+
+	for (i = 0; i < VSCSIIF_MAX_REQS; i++) {
+		if (test_bit(i, info->shadow_free_bitmap))
+			continue;
+		resp.rqid = i;
+		resp.sense_len = 0;
+		resp.rslt = DID_RESET << 16;
+		resp.residual_len = 0;
+		scsifront_do_response(info, &resp);
+	}
+}
+
 static int map_data_for_request(struct vscsifrnt_info *info,
 				struct scsi_cmnd *sc,
 				struct vscsiif_request *ring_req,
@@ -475,6 +511,27 @@ static struct vscsiif_request *scsifront_command2ring(
 	return ring_req;
 }
 
+static int scsifront_enter(struct vscsifrnt_info *info)
+{
+	if (info->pause)
+		return 1;
+	info->callers++;
+	return 0;
+}
+
+static void scsifront_return(struct vscsifrnt_info *info)
+{
+	info->callers--;
+	if (info->callers)
+		return;
+
+	if (!info->waiting_pause)
+		return;
+
+	info->waiting_pause = 0;
+	wake_up(&info->wq_pause);
+}
+
 static int scsifront_queuecommand(struct Scsi_Host *shost,
 				  struct scsi_cmnd *sc)
 {
@@ -486,6 +543,10 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 	uint16_t rqid;
 
 	spin_lock_irqsave(shost->host_lock, flags);
+	if (scsifront_enter(info)) {
+		spin_unlock_irqrestore(shost->host_lock, flags);
+		return SCSI_MLQUEUE_HOST_BUSY;
+	}
 	if (RING_FULL(&info->ring))
 		goto busy;
 
@@ -505,6 +566,7 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 	if (err < 0) {
 		pr_debug("%s: err %d\n", __func__, err);
 		scsifront_put_rqid(info, rqid);
+		scsifront_return(info);
 		spin_unlock_irqrestore(shost->host_lock, flags);
 		if (err == -ENOMEM)
 			return SCSI_MLQUEUE_HOST_BUSY;
@@ -514,11 +576,13 @@ static int scsifront_queuecommand(struct Scsi_Host *shost,
 	}
 
 	scsifront_do_request(info);
+	scsifront_return(info);
 	spin_unlock_irqrestore(shost->host_lock, flags);
 
 	return 0;
 
 busy:
+	scsifront_return(info);
 	spin_unlock_irqrestore(shost->host_lock, flags);
 	pr_debug("%s: busy\n", __func__);
 	return SCSI_MLQUEUE_HOST_BUSY;
@@ -549,7 +613,7 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 		if (ring_req)
 			break;
 	}
-	if (err) {
+	if (err || info->pause) {
 		spin_unlock_irq(host->host_lock);
 		kfree(shadow);
 		return FAILED;
@@ -561,6 +625,11 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 		spin_lock_irq(host->host_lock);
 	}
 
+	if (scsifront_enter(info)) {
+		spin_unlock_irq(host->host_lock);
+		return FAILED;
+	}
+
 	ring_req->act = act;
 	ring_req->ref_rqid = s->rqid;
 
@@ -587,6 +656,7 @@ static int scsifront_action_handler(struct scsi_cmnd *sc, uint8_t act)
 		err = FAILED;
 	}
 
+	scsifront_return(info);
 	spin_unlock_irq(host->host_lock);
 	return err;
 }
@@ -698,6 +768,13 @@ static int scsifront_alloc_ring(struct vscsifrnt_info *info)
 	return err;
 }
 
+static void scsifront_free_ring(struct vscsifrnt_info *info)
+{
+	unbind_from_irqhandler(info->irq, info);
+	gnttab_end_foreign_access(info->ring_ref, 0,
+				  (unsigned long)info->ring.sring);
+}
+
 static int scsifront_init_ring(struct vscsifrnt_info *info)
 {
 	struct xenbus_device *dev = info->dev;
@@ -744,9 +821,7 @@ static int scsifront_init_ring(struct vscsifrnt_info *info)
 fail:
 	xenbus_transaction_end(xbt, 1);
 free_sring:
-	unbind_from_irqhandler(info->irq, info);
-	gnttab_end_foreign_access(info->ring_ref, 0,
-				  (unsigned long)info->ring.sring);
+	scsifront_free_ring(info);
 
 	return err;
 }
@@ -779,6 +854,7 @@ static int scsifront_probe(struct xenbus_device *dev,
 	}
 
 	init_waitqueue_head(&info->wq_sync);
+	init_waitqueue_head(&info->wq_pause);
 	spin_lock_init(&info->shadow_lock);
 
 	snprintf(name, TASK_COMM_LEN, "vscsiif.%d", host->host_no);
@@ -802,13 +878,60 @@ static int scsifront_probe(struct xenbus_device *dev,
 	return 0;
 
 free_sring:
-	unbind_from_irqhandler(info->irq, info);
-	gnttab_end_foreign_access(info->ring_ref, 0,
-				  (unsigned long)info->ring.sring);
+	scsifront_free_ring(info);
 	scsi_host_put(host);
 	return err;
 }
 
+static int scsifront_resume(struct xenbus_device *dev)
+{
+	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+	struct Scsi_Host *host = info->host;
+	int err;
+
+	spin_lock_irq(host->host_lock);
+
+	/* Finish all still pending commands. */
+	scsifront_finish_all(info);
+
+	spin_unlock_irq(host->host_lock);
+
+	/* Reconnect to dom0. */
+	scsifront_free_ring(info);
+	err = scsifront_init_ring(info);
+	if (err) {
+		dev_err(&dev->dev, "fail to resume %d\n", err);
+		scsi_host_put(host);
+		return err;
+	}
+
+	xenbus_switch_state(dev, XenbusStateInitialised);
+
+	return 0;
+}
+
+static int scsifront_suspend(struct xenbus_device *dev)
+{
+	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
+	struct Scsi_Host *host = info->host;
+	int err = 0;
+
+	/* No new commands for the backend. */
+	spin_lock_irq(host->host_lock);
+	info->pause = 1;
+	while (info->callers && !err) {
+		info->waiting_pause = 1;
+		info->wait_ring_available = 0;
+		spin_unlock_irq(host->host_lock);
+		wake_up(&info->wq_sync);
+		err = wait_event_interruptible(info->wq_pause,
+					       !info->waiting_pause);
+		spin_lock_irq(host->host_lock);
+	}
+	spin_unlock_irq(host->host_lock);
+	return err;
+}
+
 static int scsifront_remove(struct xenbus_device *dev)
 {
 	struct vscsifrnt_info *info = dev_get_drvdata(&dev->dev);
@@ -823,10 +946,7 @@ static int scsifront_remove(struct xenbus_device *dev)
 	}
 	mutex_unlock(&scsifront_mutex);
 
-	gnttab_end_foreign_access(info->ring_ref, 0,
-				  (unsigned long)info->ring.sring);
-	unbind_from_irqhandler(info->irq, info);
-
+	scsifront_free_ring(info);
 	scsi_host_put(info->host);
 
 	return 0;
@@ -919,6 +1039,12 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
 			scsi_device_put(sdev);
 		}
 		break;
+	case VSCSIFRONT_OP_READD_LUN:
+		if (device_state == XenbusStateConnected)
+			xenbus_printf(XBT_NIL, dev->nodename,
+				      info->dev_state_path,
+				      "%d", XenbusStateConnected);
+		break;
 	default:
 		break;
 	}
@@ -932,21 +1058,29 @@ static void scsifront_do_lun_hotplug(struct vscsifrnt_info *info, int op)
 static void scsifront_read_backend_params(struct xenbus_device *dev,
 					  struct vscsifrnt_info *info)
 {
-	unsigned int sg_grant;
+	unsigned int sg_grant, nr_segs;
 	int ret;
 	struct Scsi_Host *host = info->host;
 
 	ret = xenbus_scanf(XBT_NIL, dev->otherend, "feature-sg-grant", "%u",
 			   &sg_grant);
-	if (ret == 1 && sg_grant) {
-		sg_grant = min_t(unsigned int, sg_grant, SG_ALL);
-		sg_grant = max_t(unsigned int, sg_grant, VSCSIIF_SG_TABLESIZE);
-		host->sg_tablesize = min_t(unsigned int, sg_grant,
+	if (ret != 1)
+		sg_grant = 0;
+	nr_segs = min_t(unsigned int, sg_grant, SG_ALL);
+	nr_segs = max_t(unsigned int, nr_segs, VSCSIIF_SG_TABLESIZE);
+	nr_segs = min_t(unsigned int, nr_segs,
 			VSCSIIF_SG_TABLESIZE * PAGE_SIZE /
 			sizeof(struct scsiif_request_segment));
-		host->max_sectors = (host->sg_tablesize - 1) * PAGE_SIZE / 512;
-	}
-	dev_info(&dev->dev, "using up to %d SG entries\n", host->sg_tablesize);
+
+	if (!info->pause && sg_grant)
+		dev_info(&dev->dev, "using up to %d SG entries\n", nr_segs);
+	else if (info->pause && nr_segs < host->sg_tablesize)
+		dev_warn(&dev->dev,
+			 "SG entries decreased from %d to %u - device may not work properly anymore\n",
+			 host->sg_tablesize, nr_segs);
+
+	host->sg_tablesize = nr_segs;
+	host->max_sectors = (nr_segs - 1) * PAGE_SIZE / 512;
 }
 
 static void scsifront_backend_changed(struct xenbus_device *dev,
@@ -965,6 +1099,14 @@ static void scsifront_backend_changed(struct xenbus_device *dev,
 
 	case XenbusStateConnected:
 		scsifront_read_backend_params(dev, info);
+
+		if (info->pause) {
+			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_READD_LUN);
+			xenbus_switch_state(dev, XenbusStateConnected);
+			info->pause = 0;
+			return;
+		}
+
 		if (xenbus_read_driver_state(dev->nodename) ==
 		    XenbusStateInitialised)
 			scsifront_do_lun_hotplug(info, VSCSIFRONT_OP_ADD_LUN);
@@ -1002,6 +1144,8 @@ static struct xenbus_driver scsifront_driver = {
 	.ids			= scsifront_ids,
 	.probe			= scsifront_probe,
 	.remove			= scsifront_remove,
+	.resume			= scsifront_resume,
+	.suspend		= scsifront_suspend,
 	.otherend_changed	= scsifront_backend_changed,
 };
 
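The core of this change is the pause/callers gate: every path that touches the ring brackets itself with scsifront_enter()/scsifront_return() under the host lock, and scsifront_suspend() sets info->pause and sleeps on info->wq_pause until the last in-flight caller has left, so the shared ring can be torn down and reconnected across a save/restore or migration. The standalone userspace model below is only an illustrative sketch of that quiescing pattern, not driver code: the gate_t type and its functions are hypothetical names, the host lock is stood in for by a pthread mutex, and the wait queue by a condition variable.

/*
 * Minimal userspace model of the enter/return/pause quiescing pattern.
 * Build with: cc -pthread gate.c
 */
#include <pthread.h>
#include <stdio.h>

typedef struct {
	pthread_mutex_t lock;      /* stands in for host->host_lock */
	pthread_cond_t  wq_pause;  /* stands in for info->wq_pause */
	unsigned int    callers;   /* in-flight ring users */
	int             pause;     /* set while suspending */
} gate_t;

static int gate_enter(gate_t *g)
{
	if (g->pause)
		return 1;          /* caller must back off and retry later */
	g->callers++;
	return 0;
}

static void gate_return(gate_t *g)
{
	if (--g->callers == 0)
		pthread_cond_signal(&g->wq_pause);  /* last one out wakes the pauser */
}

static void gate_quiesce(gate_t *g)
{
	pthread_mutex_lock(&g->lock);
	g->pause = 1;                              /* refuse new entries */
	while (g->callers)                         /* wait for in-flight users to drain */
		pthread_cond_wait(&g->wq_pause, &g->lock);
	pthread_mutex_unlock(&g->lock);
}

static int submit_request(gate_t *g)
{
	pthread_mutex_lock(&g->lock);
	if (gate_enter(g)) {
		pthread_mutex_unlock(&g->lock);
		return -1;                         /* device paused, like SCSI_MLQUEUE_HOST_BUSY */
	}
	/* ... a real driver would put a request on the shared ring here ... */
	gate_return(g);
	pthread_mutex_unlock(&g->lock);
	return 0;
}

int main(void)
{
	gate_t g = { PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 0, 0 };

	printf("before pause: %d\n", submit_request(&g));  /* prints 0 */
	gate_quiesce(&g);                                  /* no callers, returns at once */
	printf("while paused: %d\n", submit_request(&g));  /* prints -1 */
	return 0;
}

The same reasoning explains why scsifront_queuecommand() returns SCSI_MLQUEUE_HOST_BUSY when scsifront_enter() fails: the SCSI midlayer simply requeues the command and retries once the device has resumed.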