
Commit 2a842ac

Christoph Hellwig authored and axboe committed
block: introduce new block status code type
Currently we use normal Linux errno values in the block layer, and while we accept any error a few have overloaded magic meanings. This patch instead introduces a new blk_status_t value that holds block layer specific status codes and explicitly explains their meaning. Helpers to convert from and to the previous special meanings are provided for now, but I suspect we want to get rid of them in the long run - those drivers that have an errno input (e.g. networking) usually get errnos that don't know about the special block layer overloads, and similarly returning them to userspace will usually return something that strictly speaking isn't correct for file system operations, but that's left as an exercise for later.

For now the set of errors is a very limited set that closely corresponds to the previous overloaded errno values, but there is some low-hanging fruit to improve it.

blk_status_t (ab)uses the sparse __bitwise annotations to allow for sparse typechecking, so that we can easily catch places passing the wrong values.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
1 parent 1be5690 commit 2a842ac
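
To make the __bitwise trick concrete, here is a minimal standalone sketch of the pattern (not the kernel's actual header; demo_status_t and the DEMO_STS_* names are hypothetical stand-ins). Under sparse (__CHECKER__) the typedef becomes a restricted type, so passing a bare errno where the status type is expected triggers a warning, while in a normal build the annotations compile away.

/* Minimal sketch of the sparse __bitwise pattern; the demo_* names are hypothetical. */
#ifdef __CHECKER__
#define __bitwise __attribute__((bitwise))
#define __force   __attribute__((force))
#else
#define __bitwise
#define __force
#endif

typedef unsigned int __bitwise demo_status_t;

#define DEMO_STS_OK    ((__force demo_status_t)0)
#define DEMO_STS_IOERR ((__force demo_status_t)10)

/* sparse warns if a plain int (e.g. -EIO) is passed here instead of a DEMO_STS_* value */
static void demo_end_request(demo_status_t status)
{
        (void)status;   /* completion logic would go here */
}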

76 files changed: +474, -428 lines

arch/s390/include/asm/eadm.h

Lines changed: 4 additions & 2 deletions
@@ -3,6 +3,7 @@

 #include <linux/types.h>
 #include <linux/device.h>
+#include <linux/blkdev.h>

 struct arqb {
        u64 data;
@@ -105,13 +106,14 @@ struct scm_driver {
        int (*probe) (struct scm_device *scmdev);
        int (*remove) (struct scm_device *scmdev);
        void (*notify) (struct scm_device *scmdev, enum scm_event event);
-       void (*handler) (struct scm_device *scmdev, void *data, int error);
+       void (*handler) (struct scm_device *scmdev, void *data,
+                        blk_status_t error);
 };

 int scm_driver_register(struct scm_driver *scmdrv);
 void scm_driver_unregister(struct scm_driver *scmdrv);

 int eadm_start_aob(struct aob *aob);
-void scm_irq_handler(struct aob *aob, int error);
+void scm_irq_handler(struct aob *aob, blk_status_t error);

 #endif /* _ASM_S390_EADM_H */
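
For orientation, a hedged sketch of a handler matching the new prototype above (the function name and the assumption that the data cookie is the request are hypothetical, not taken from this commit): the status now arrives as a blk_status_t and can be passed straight to the block layer completion helpers.

/* Hypothetical handler matching the new scm_driver prototype shown above. */
static void demo_scm_handler(struct scm_device *scmdev, void *data,
                             blk_status_t error)
{
        struct request *rq = data;      /* assumption: the cookie is the request */

        blk_end_request_all(rq, error); /* status passes through, no errno conversion */
}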

arch/um/drivers/ubd_kern.c

Lines changed: 1 addition & 1 deletion
@@ -534,7 +534,7 @@ static void ubd_handler(void)
        for (count = 0; count < n/sizeof(struct io_thread_req *); count++) {
                blk_end_request(
                        (*irq_req_buffer)[count]->req,
-                       0,
+                       BLK_STS_OK,
                        (*irq_req_buffer)[count]->length
                );
                kfree((*irq_req_buffer)[count]);

block/blk-core.c

Lines changed: 91 additions & 65 deletions
@@ -129,11 +129,66 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 }
 EXPORT_SYMBOL(blk_rq_init);

+static const struct {
+        int             errno;
+        const char      *name;
+} blk_errors[] = {
+        [BLK_STS_OK]            = { 0,          "" },
+        [BLK_STS_NOTSUPP]       = { -EOPNOTSUPP, "operation not supported" },
+        [BLK_STS_TIMEOUT]       = { -ETIMEDOUT, "timeout" },
+        [BLK_STS_NOSPC]         = { -ENOSPC,    "critical space allocation" },
+        [BLK_STS_TRANSPORT]     = { -ENOLINK,   "recoverable transport" },
+        [BLK_STS_TARGET]        = { -EREMOTEIO, "critical target" },
+        [BLK_STS_NEXUS]         = { -EBADE,     "critical nexus" },
+        [BLK_STS_MEDIUM]        = { -ENODATA,   "critical medium" },
+        [BLK_STS_PROTECTION]    = { -EILSEQ,    "protection" },
+        [BLK_STS_RESOURCE]      = { -ENOMEM,    "kernel resource" },
+
+        /* everything else not covered above: */
+        [BLK_STS_IOERR]         = { -EIO,       "I/O" },
+};
+
+blk_status_t errno_to_blk_status(int errno)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(blk_errors); i++) {
+                if (blk_errors[i].errno == errno)
+                        return (__force blk_status_t)i;
+        }
+
+        return BLK_STS_IOERR;
+}
+EXPORT_SYMBOL_GPL(errno_to_blk_status);
+
+int blk_status_to_errno(blk_status_t status)
+{
+        int idx = (__force int)status;
+
+        if (WARN_ON_ONCE(idx > ARRAY_SIZE(blk_errors)))
+                return -EIO;
+        return blk_errors[idx].errno;
+}
+EXPORT_SYMBOL_GPL(blk_status_to_errno);
+
+static void print_req_error(struct request *req, blk_status_t status)
+{
+        int idx = (__force int)status;
+
+        if (WARN_ON_ONCE(idx > ARRAY_SIZE(blk_errors)))
+                return;
+
+        printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
+                           __func__, blk_errors[idx].name, req->rq_disk ?
+                           req->rq_disk->disk_name : "?",
+                           (unsigned long long)blk_rq_pos(req));
+}
+
 static void req_bio_endio(struct request *rq, struct bio *bio,
-                          unsigned int nbytes, int error)
+                          unsigned int nbytes, blk_status_t error)
 {
        if (error)
-               bio->bi_error = error;
+               bio->bi_error = blk_status_to_errno(error);

        if (unlikely(rq->rq_flags & RQF_QUIET))
                bio_set_flag(bio, BIO_QUIET);
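
As a hedged usage sketch of the helpers introduced above (the surrounding demo_* functions are hypothetical; only errno_to_blk_status(), blk_status_to_errno() and blk_end_request_all() come from this patch): a driver whose transport reports plain errnos converts at the boundary, and the reverse helper is used wherever an errno is still needed.

#include <linux/blkdev.h>

/* Hypothetical completion path for a driver whose transport hands back errnos. */
static void demo_transport_done(struct request *rq, int transport_errno)
{
        /* e.g. -ENOLINK maps to BLK_STS_TRANSPORT; unknown errnos fall back to BLK_STS_IOERR */
        blk_end_request_all(rq, errno_to_blk_status(transport_errno));
}

/* And the reverse direction, e.g. when an errno must be reported onwards. */
static int demo_report_errno(blk_status_t status)
{
        return blk_status_to_errno(status);     /* BLK_STS_OK becomes 0 */
}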
@@ -2177,29 +2232,29 @@ static int blk_cloned_rq_check_limits(struct request_queue *q,
  * @q:  the queue to submit the request
  * @rq: the request being queued
  */
-int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
+blk_status_t blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 {
        unsigned long flags;
        int where = ELEVATOR_INSERT_BACK;

        if (blk_cloned_rq_check_limits(q, rq))
-               return -EIO;
+               return BLK_STS_IOERR;

        if (rq->rq_disk &&
            should_fail_request(&rq->rq_disk->part0, blk_rq_bytes(rq)))
-               return -EIO;
+               return BLK_STS_IOERR;

        if (q->mq_ops) {
                if (blk_queue_io_stat(q))
                        blk_account_io_start(rq, true);
                blk_mq_sched_insert_request(rq, false, true, false, false);
-               return 0;
+               return BLK_STS_OK;
        }

        spin_lock_irqsave(q->queue_lock, flags);
        if (unlikely(blk_queue_dying(q))) {
                spin_unlock_irqrestore(q->queue_lock, flags);
-               return -ENODEV;
+               return BLK_STS_IOERR;
        }

        /*
@@ -2216,7 +2271,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
        __blk_run_queue(q);
        spin_unlock_irqrestore(q->queue_lock, flags);

-       return 0;
+       return BLK_STS_OK;
 }
 EXPORT_SYMBOL_GPL(blk_insert_cloned_request);

@@ -2450,15 +2505,14 @@ struct request *blk_peek_request(struct request_queue *q)
                        rq = NULL;
                        break;
                } else if (ret == BLKPREP_KILL || ret == BLKPREP_INVALID) {
-                       int err = (ret == BLKPREP_INVALID) ? -EREMOTEIO : -EIO;
-
                        rq->rq_flags |= RQF_QUIET;
                        /*
                         * Mark this request as started so we don't trigger
                         * any debug logic in the end I/O path.
                         */
                        blk_start_request(rq);
-                       __blk_end_request_all(rq, err);
+                       __blk_end_request_all(rq, ret == BLKPREP_INVALID ?
+                                       BLK_STS_TARGET : BLK_STS_IOERR);
                } else {
                        printk(KERN_ERR "%s: bad return=%d\n", __func__, ret);
                        break;
@@ -2547,7 +2601,7 @@ EXPORT_SYMBOL(blk_fetch_request);
 /**
  * blk_update_request - Special helper function for request stacking drivers
  * @req: the request being processed
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  * @nr_bytes: number of bytes to complete @req
  *
  * Description:
@@ -2566,49 +2620,19 @@ EXPORT_SYMBOL(blk_fetch_request);
  * %false - this request doesn't have any more data
  * %true - this request has more data
  **/
-bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
+bool blk_update_request(struct request *req, blk_status_t error,
+               unsigned int nr_bytes)
 {
        int total_bytes;

-       trace_block_rq_complete(req, error, nr_bytes);
+       trace_block_rq_complete(req, blk_status_to_errno(error), nr_bytes);

        if (!req->bio)
                return false;

-       if (error && !blk_rq_is_passthrough(req) &&
-           !(req->rq_flags & RQF_QUIET)) {
-               char *error_type;
-
-               switch (error) {
-               case -ENOLINK:
-                       error_type = "recoverable transport";
-                       break;
-               case -EREMOTEIO:
-                       error_type = "critical target";
-                       break;
-               case -EBADE:
-                       error_type = "critical nexus";
-                       break;
-               case -ETIMEDOUT:
-                       error_type = "timeout";
-                       break;
-               case -ENOSPC:
-                       error_type = "critical space allocation";
-                       break;
-               case -ENODATA:
-                       error_type = "critical medium";
-                       break;
-               case -EIO:
-               default:
-                       error_type = "I/O";
-                       break;
-               }
-               printk_ratelimited(KERN_ERR "%s: %s error, dev %s, sector %llu\n",
-                                  __func__, error_type, req->rq_disk ?
-                                  req->rq_disk->disk_name : "?",
-                                  (unsigned long long)blk_rq_pos(req));
-
-       }
+       if (unlikely(error && !blk_rq_is_passthrough(req) &&
+                    !(req->rq_flags & RQF_QUIET)))
+               print_req_error(req, error);

        blk_account_io_completion(req, nr_bytes);

@@ -2674,7 +2698,7 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 }
 EXPORT_SYMBOL_GPL(blk_update_request);

-static bool blk_update_bidi_request(struct request *rq, int error,
+static bool blk_update_bidi_request(struct request *rq, blk_status_t error,
                                     unsigned int nr_bytes,
                                     unsigned int bidi_bytes)
 {
@@ -2715,7 +2739,7 @@ EXPORT_SYMBOL_GPL(blk_unprep_request);
 /*
  * queue lock must be held
  */
-void blk_finish_request(struct request *req, int error)
+void blk_finish_request(struct request *req, blk_status_t error)
 {
        struct request_queue *q = req->q;

@@ -2752,7 +2776,7 @@ EXPORT_SYMBOL(blk_finish_request);
 /**
  * blk_end_bidi_request - Complete a bidi request
  * @rq: the request to complete
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  * @nr_bytes: number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2766,7 +2790,7 @@ EXPORT_SYMBOL(blk_finish_request);
  * %false - we are done with this request
  * %true - still buffers pending for this request
  **/
-static bool blk_end_bidi_request(struct request *rq, int error,
+static bool blk_end_bidi_request(struct request *rq, blk_status_t error,
                                  unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        struct request_queue *q = rq->q;
@@ -2785,7 +2809,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
 /**
  * __blk_end_bidi_request - Complete a bidi request with queue lock held
  * @rq: the request to complete
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  * @nr_bytes: number of bytes to complete @rq
  * @bidi_bytes: number of bytes to complete @rq->next_rq
  *
@@ -2797,7 +2821,7 @@ static bool blk_end_bidi_request(struct request *rq, int error,
  * %false - we are done with this request
  * %true - still buffers pending for this request
  **/
-static bool __blk_end_bidi_request(struct request *rq, int error,
+static bool __blk_end_bidi_request(struct request *rq, blk_status_t error,
                                    unsigned int nr_bytes, unsigned int bidi_bytes)
 {
        if (blk_update_bidi_request(rq, error, nr_bytes, bidi_bytes))
@@ -2811,7 +2835,7 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
 /**
  * blk_end_request - Helper function for drivers to complete the request.
  * @rq: the request being processed
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2822,7 +2846,8 @@ static bool __blk_end_bidi_request(struct request *rq, int error,
  * %false - we are done with this request
  * %true - still buffers pending for this request
  **/
-bool blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool blk_end_request(struct request *rq, blk_status_t error,
+               unsigned int nr_bytes)
 {
        return blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
@@ -2831,12 +2856,12 @@ EXPORT_SYMBOL(blk_end_request);
 /**
  * blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Completely finish @rq.
  */
-void blk_end_request_all(struct request *rq, int error)
+void blk_end_request_all(struct request *rq, blk_status_t error)
 {
        bool pending;
        unsigned int bidi_bytes = 0;
@@ -2852,7 +2877,7 @@ EXPORT_SYMBOL(blk_end_request_all);
 /**
  * __blk_end_request - Helper function for drivers to complete the request.
  * @rq: the request being processed
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  * @nr_bytes: number of bytes to complete
  *
  * Description:
@@ -2862,7 +2887,8 @@ EXPORT_SYMBOL(blk_end_request_all);
  * %false - we are done with this request
  * %true - still buffers pending for this request
  **/
-bool __blk_end_request(struct request *rq, int error, unsigned int nr_bytes)
+bool __blk_end_request(struct request *rq, blk_status_t error,
+               unsigned int nr_bytes)
 {
        return __blk_end_bidi_request(rq, error, nr_bytes, 0);
 }
@@ -2871,12 +2897,12 @@ EXPORT_SYMBOL(__blk_end_request);
 /**
  * __blk_end_request_all - Helper function for drives to finish the request.
  * @rq: the request to finish
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Completely finish @rq. Must be called with queue lock held.
  */
-void __blk_end_request_all(struct request *rq, int error)
+void __blk_end_request_all(struct request *rq, blk_status_t error)
 {
        bool pending;
        unsigned int bidi_bytes = 0;
@@ -2892,7 +2918,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
 /**
  * __blk_end_request_cur - Helper function to finish the current request chunk.
  * @rq: the request to finish the current chunk for
- * @error: %0 for success, < %0 for error
+ * @error: block status code
  *
  * Description:
  *     Complete the current consecutively mapped chunk from @rq. Must
@@ -2902,7 +2928,7 @@ EXPORT_SYMBOL(__blk_end_request_all);
  * %false - we are done with this request
  * %true - still buffers pending for this request
  */
-bool __blk_end_request_cur(struct request *rq, int error)
+bool __blk_end_request_cur(struct request *rq, blk_status_t error)
 {
        return __blk_end_request(rq, error, blk_rq_cur_bytes(rq));
 }
@@ -3243,7 +3269,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
                 * Short-circuit if @q is dead
                 */
                if (unlikely(blk_queue_dying(q))) {
-                       __blk_end_request_all(rq, -ENODEV);
+                       __blk_end_request_all(rq, BLK_STS_IOERR);
                        continue;
                }

block/blk-exec.c

Lines changed: 2 additions & 2 deletions
@@ -16,7 +16,7 @@
  * @rq: request to complete
  * @error: end I/O status of the request
  */
-static void blk_end_sync_rq(struct request *rq, int error)
+static void blk_end_sync_rq(struct request *rq, blk_status_t error)
 {
        struct completion *waiting = rq->end_io_data;

@@ -69,7 +69,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,

        if (unlikely(blk_queue_dying(q))) {
                rq->rq_flags |= RQF_QUIET;
-               __blk_end_request_all(rq, -ENXIO);
+               __blk_end_request_all(rq, BLK_STS_IOERR);
                spin_unlock_irq(q->queue_lock);
                return;
        }
