@@ -129,11 +129,66 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);
 
+static const struct {
+	int		errno;
+	const char	*name;
+} blk_errors[] = {
+	[BLK_STS_OK]		= { 0,		"" },
+	[BLK_STS_NOTSUPP]	= { -EOPNOTSUPP, "operation not supported" },
+	[BLK_STS_TIMEOUT]	= { -ETIMEDOUT,	"timeout" },
+	[BLK_STS_NOSPC]		= { -ENOSPC,	"critical space allocation" },
+	[BLK_STS_TRANSPORT]	= { -ENOLINK,	"recoverable transport" },
+	[BLK_STS_TARGET]	= { -EREMOTEIO,	"critical target" },
+	[BLK_STS_NEXUS]		= { -EBADE,	"critical nexus" },
+	[BLK_STS_MEDIUM]	= { -ENODATA,	"critical medium" },
+	[BLK_STS_PROTECTION]	= { -EILSEQ,	"protection" },
+	[BLK_STS_RESOURCE]	= { -ENOMEM,	"kernel resource" },
+
+	/* everything else not covered above: */
+	[BLK_STS_IOERR]		= { -EIO,	"I/O" },
+};
+
+blk_status_t errno_to_blk_status(int errno)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
+		if (blk_errors[i].errno == errno)
+			return (__force blk_status_t)i;
+	}
+
+	return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(errno_to_blk_status);
+
+int blk_status_to_errno(blk_status_t status)
+{
+	int idx = (__force int)status;
+
+	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+		return -EIO;
+	return blk_errors[idx].errno;
+}
+EXPORT_SYMBOL_GPL(blk_status_to_errno);
+
+static void print_req_error(struct request *req, blk_status_t status)
+{
+	int idx = (__force int)status;
+
+	if (WARN_ON_ONCE(idx >= ARRAY_SIZE(blk_errors)))
+		return;
+
+	printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
+			   __func__, blk_errors[idx].name, req->rq_disk ?
+			   req->rq_disk->disk_name : "?",
+			   (unsigned long long)blk_rq_pos(req));
+}
+
 static void req_bio_endio(struct request *rq, struct bio *bio,
-			  unsigned int nbytes, int error)
+			  unsigned int nbytes, blk_status_t error)
 {
 	if (error)
-		bio->bi_error = error;
+		bio->bi_error = blk_status_to_errno(error);
 
 	if (unlikely(rq->rq_flags & RQF_QUIET))
 		bio_set_flag(bio, BIO_QUIET);
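
The `blk_errors` table above gives each `blk_status_t` code a single authoritative errno value and log string, and `errno_to_blk_status()` deliberately falls back to `BLK_STS_IOERR` for any errno it does not recognize. A minimal sketch, assuming a driver that still computes errno values internally (the `mydrv_*` name is invented, not part of this patch):

```c
#include <linux/blkdev.h>
#include <linux/blk_types.h>

/* Hypothetical completion callback, invented for illustration: the
 * driver-internal errno is converted exactly once, at the boundary to
 * the block layer.  Unknown errno values become BLK_STS_IOERR. */
static void mydrv_end_io(struct request *rq, int ret)
{
	blk_end_request_all(rq, errno_to_blk_status(ret));
}
```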
@@ -2177,29 +2232,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  * @q:  the queue to submit the request
  * @rq: the request being queued
  */
-int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
 	unsigned long flags;
 	int where = ELEVATOR_INSERT_BACK;
 
 	if (blk_cloned_rq_check_limits(q, rq))
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (rq->rq_disk &&
 	    should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
-		return -EIO;
+		return BLK_STS_IOERR;
 
 	if (q->mq_ops) {
 		if (blk_queue_io_stat(q))
 			blk_account_io_start(rq, true);
 		blk_mq_sched_insert_request(rq, false, true, false, false);
-		return 0;
+		return BLK_STS_OK;
 	}
 
 	spin_lock_irqsave(q->queue_lock, flags);
 	if (unlikely(blk_queue_dying(q))) {
 		spin_unlock_irqrestore(q->queue_lock, flags);
-		return -ENODEV;
+		return BLK_STS_IOERR;
 	}
 
 	/*
@@ -2216,7 +2271,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	__blk_run_queue(q);
 	spin_unlock_irqrestore(q->queue_lock, flags);
 
-	return 0;
+	return BLK_STS_OK;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);
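
`blk_insert_cloned_request()` now returns a `blk_status_t`, so request-based stacking drivers can forward the result straight to the completion path without an errno round trip. A hedged sketch of such a caller, assuming the clone lives on a blk-mq queue; `my_stack_dispatch` is invented for illustration:

```c
/* Hypothetical stacking-driver dispatch, invented for illustration. */
static void my_stack_dispatch(struct request *clone)
{
	blk_status_t status = blk_insert_cloned_request(clone->q, clone);

	/* On failure the very same status code flows back up unchanged. */
	if (status != BLK_STS_OK)
		blk_mq_end_request(clone, status);
}
```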
@@ -2450,15 +2505,14 @@ struct request *blk_peek_request(struct request_queue *q)
 			rq = NULL;
 			break;
 		} else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
-			int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
-
 			rq->rq_flags |= RQF_QUIET;
 			/*
 			 * Mark this request as started so we don't trigger
 			 * any debug logic in the end I/O path.
 			 */
 			blk_start_request(rq);
-			__blk_end_request_all(rq, err);
+			__blk_end_request_all(rq, ret == BLKPREP_INVALID ?
+					BLK_STS_TARGET : BLK_STS_IOERR);
 		} else {
 			printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
 			break;
@@ -2547,7 +2601,7 @@ EXPORT_SYMBOL(blk_fetch_request);
 /**
  * blk_update_request - Special helper function for request stacking drivers
  * @req:      the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete @req
  *
  * Description:
@@ -2566,49 +2620,19 @@ EXPORT_SYMBOL(blk_fetch_request);
  *     %false - this request doesn't have any more data
  *     %true  - this request has more data
  **/
-bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+bool blk_update_request(struct request *req, blk_status_t error,
+		unsigned int nr_bytes)
 {
 	int total_bytes;
 
-	trace_block_rq_complete(req, error, nr_bytes);
+	trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);
 
 	if (!req->bio)
 		return false;
 
-	if (error && !blk_rq_is_passthrough(req) &&
-	    !(req->rq_flags & RQF_QUIET)) {
-		char *error_type;
-
-		switch (error) {
-		case -ENOLINK:
-			error_type = "recoverable transport";
-			break;
-		case -EREMOTEIO:
-			error_type = "critical target";
-			break;
-		case -EBADE:
-			error_type = "critical nexus";
-			break;
-		case -ETIMEDOUT:
-			error_type = "timeout";
-			break;
-		case -ENOSPC:
-			error_type = "critical space allocation";
-			break;
-		case -ENODATA:
-			error_type = "critical medium";
-			break;
-		case -EIO:
-		default:
-			error_type = "I/O";
-			break;
-		}
-		printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
-				   __func__, error_type, req->rq_disk ?
-				   req->rq_disk->disk_name : "?",
-				   (unsigned long long)blk_rq_pos(req));
-
-	}
+	if (unlikely(error && !blk_rq_is_passthrough(req) &&
+		     !(req->rq_flags & RQF_QUIET)))
+		print_req_error(req, error);
 
 	blk_account_io_completion(req, nr_bytes);
@@ -2674,7 +2698,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 }
 EXPORT_SYMBOL_GPL(blk_update_request);
 
-static bool blk_update_bidi_request(struct request *rq, int error,
+static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
 				    unsigned int nr_bytes,
 				    unsigned int bidi_bytes)
 {
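
`blk_update_request()` keeps its partial-completion contract (%true while bios remain, %false once the request is drained); only the error argument changes type. The sketch below, with an invented `mydrv_*` name and the queue lock assumed held on this legacy path, mirrors roughly what `__blk_end_bidi_request()` does for a non-bidi request:

```c
/* Hypothetical chunk completion, invented for illustration. */
static void mydrv_complete_bytes(struct request *rq, unsigned int done,
				 blk_status_t status)
{
	if (blk_update_request(rq, status, done))
		return;		/* bios still pending, request stays active */

	blk_finish_request(rq, status);	/* all data done, retire the request */
}
```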
@@ -2715,7 +2739,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
 /*
  * queue lock must be held
  */
-void blk_finish_request(struct request *req, int error)
+void blk_finish_request(struct request *req, blk_status_t error)
 {
 	struct request_queue *q = req->q;
 
@@ -2752,7 +2776,7 @@ EXPORT_SYMBOL(blk_finish_request);
 /**
  * blk_end_bidi_request - Complete a bidi request
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
@@ -2766,7 +2790,7 @@ EXPORT_SYMBOL(blk_finish_request);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool blk_end_bidi_request(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
 				 unsigned int nr_bytes, unsigned int bidi_bytes)
 {
 	struct request_queue *q = rq->q;
@@ -2785,7 +2809,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
 /**
  * __blk_end_bidi_request - Complete a bidi request with queue lock held
  * @rq:         the request to complete
- * @error:      %0 for success, < %0 for error
+ * @error:      block status code
  * @nr_bytes:   number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
 *
@@ -2797,7 +2821,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-static bool __blk_end_bidi_request(struct request *rq, int error,
+static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
 				   unsigned int nr_bytes, unsigned int bidi_bytes)
 {
 	if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
@@ -2811,7 +2835,7 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
 /**
  * blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
 *
 * Description:
@@ -2822,7 +2846,8 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, blk_status_t error,
+		unsigned int nr_bytes)
 {
 	return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
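
At call sites of the `blk_end_request()` family the conversion is mechanical: a `BLK_STS_*` constant goes where a negative errno used to. A hedged before/after sketch; the timeout handler is invented for illustration:

```c
/* Hypothetical timeout path, invented for illustration.
 *
 * Before the patch:  blk_end_request_all(rq, -ETIMEDOUT);
 * After the patch:
 */
static void mydrv_timed_out(struct request *rq)
{
	blk_end_request_all(rq, BLK_STS_TIMEOUT);
}
```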
@@ -2831,12 +2856,12 @@ EXPORT_SYMBOL(blk_end_request);
 /**
  * blk_end_request_all - Helper function for drivers to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
 *
 * Description:
 *     Completely finish @rq.
 */
-void blk_end_request_all(struct request *rq, int error)
+void blk_end_request_all(struct request *rq, blk_status_t error)
 {
 	bool pending;
 	unsigned int bidi_bytes = 0;
@@ -2852,7 +2877,7 @@ EXPORT_SYMBOL(blk_end_request_all);
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq:       the request being processed
- * @error:    %0 for success, < %0 for error
+ * @error:    block status code
  * @nr_bytes: number of bytes to complete
 *
 * Description:
@@ -2862,7 +2887,8 @@ EXPORT_SYMBOL(blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  **/
-bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool __blk_end_request(struct request *rq, blk_status_t error,
+		unsigned int nr_bytes)
 {
 	return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
@@ -2871,12 +2897,12 @@ EXPORT_SYMBOL(__blk_end_request);
 /**
  * __blk_end_request_all - Helper function for drivers to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
 *
 * Description:
 *     Completely finish @rq.  Must be called with queue lock held.
 */
-void __blk_end_request_all(struct request *rq, int error)
+void __blk_end_request_all(struct request *rq, blk_status_t error)
 {
 	bool pending;
 	unsigned int bidi_bytes = 0;
@@ -2892,7 +2918,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
  * @rq: the request to finish the current chunk for
- * @error: %0 for success, < %0 for error
+ * @error: block status code
 *
 * Description:
 *     Complete the current consecutively mapped chunk from @rq.  Must
@@ -2902,7 +2928,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
  *     %false - we are done with this request
  *     %true  - still buffers pending for this request
  */
-bool __blk_end_request_cur(struct request *rq, int error)
+bool __blk_end_request_cur(struct request *rq, blk_status_t error)
 {
 	return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
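
`__blk_end_request_cur()` still retires only the current chunk of a request, so the classic legacy `request_fn` loop carries over unchanged apart from the status type. A minimal sketch under that assumption; both `mydrv_*` names are invented:

```c
/* Hypothetical helper, invented for illustration: moves data for the
 * current chunk and reports the outcome as a blk_status_t. */
static blk_status_t mydrv_transfer(struct request *rq);

/* Hypothetical legacy request_fn; the core calls it with the queue
 * lock held, which __blk_end_request_cur() requires. */
static void mydrv_request_fn(struct request_queue *q)
{
	struct request *rq;

	while ((rq = blk_fetch_request(q)) != NULL) {
		blk_status_t status = mydrv_transfer(rq);

		/* finish chunk by chunk until the request is drained */
		while (__blk_end_request_cur(rq, status))
			status = mydrv_transfer(rq);
	}
}
```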
@@ -3243,7 +3269,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		 * Short-circuit if @q is dead
 		 */
 		if (unlikely(blk_queue_dying(q))) {
-			__blk_end_request_all(rq, -ENODEV);
+			__blk_end_request_all(rq, BLK_STS_IOERR);
 			continue;
 		}