@@ -160,12 +160,6 @@ struct ubd {
 	spinlock_t lock;
 };
 
-struct ubd_pdu {
-	struct scatterlist sg[MAX_SG];
-	int start_sg, end_sg;
-	sector_t rq_pos;
-};
-
 #define DEFAULT_COW { \
 	.file = NULL, \
 	.fd = -1, \
@@ -197,9 +191,6 @@ static struct proc_dir_entry *proc_ide = NULL;
 
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 				 const struct blk_mq_queue_data *bd);
-static int ubd_init_request(struct blk_mq_tag_set *set,
-			    struct request *req, unsigned int hctx_idx,
-			    unsigned int numa_node);
 
 static void make_proc_ide(void)
 {
@@ -895,7 +886,6 @@ static int ubd_disk_register(int major, u64 size, int unit,
 
 static const struct blk_mq_ops ubd_mq_ops = {
 	.queue_rq = ubd_queue_rq,
-	.init_request = ubd_init_request,
 };
 
 static int ubd_add(int n, char **error_out)
@@ -918,7 +908,6 @@ static int ubd_add(int n, char **error_out)
 	ubd_dev->tag_set.queue_depth = 64;
 	ubd_dev->tag_set.numa_node = NUMA_NO_NODE;
 	ubd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
-	ubd_dev->tag_set.cmd_size = sizeof(struct ubd_pdu);
 	ubd_dev->tag_set.driver_data = ubd_dev;
 	ubd_dev->tag_set.nr_hw_queues = 1;
 
@@ -1300,123 +1289,84 @@ static void cowify_req(struct io_thread_req *req, unsigned long *bitmap,
 		      req->bitmap_words, bitmap_len);
 }
 
-/* Called with dev->lock held */
-static void prepare_request(struct request *req, struct io_thread_req *io_req,
-			    unsigned long long offset, int page_offset,
-			    int len, struct page *page)
+static int ubd_queue_one_vec(struct blk_mq_hw_ctx *hctx, struct request *req,
+			     u64 off, struct bio_vec *bvec)
 {
-	struct gendisk *disk = req->rq_disk;
-	struct ubd *ubd_dev = disk->private_data;
-
-	io_req->req = req;
-	io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
-		ubd_dev->fd;
-	io_req->fds[1] = ubd_dev->fd;
-	io_req->cow_offset = -1;
-	io_req->offset = offset;
-	io_req->length = len;
-	io_req->error = 0;
-	io_req->sector_mask = 0;
-
-	io_req->op = (rq_data_dir(req) == READ) ? UBD_READ : UBD_WRITE;
-	io_req->offsets[0] = 0;
-	io_req->offsets[1] = ubd_dev->cow.data_offset;
-	io_req->buffer = page_address(page) + page_offset;
-	io_req->sectorsize = 1 << 9;
-
-	if (ubd_dev->cow.file != NULL)
-		cowify_req(io_req, ubd_dev->cow.bitmap,
-			   ubd_dev->cow.bitmap_offset, ubd_dev->cow.bitmap_len);
-
-}
+	struct ubd *dev = hctx->queue->queuedata;
+	struct io_thread_req *io_req;
+	int ret;
 
-/* Called with dev->lock held */
-static void prepare_flush_request(struct request *req,
-				  struct io_thread_req *io_req)
-{
-	struct gendisk *disk = req->rq_disk;
-	struct ubd *ubd_dev = disk->private_data;
+	io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
+	if (!io_req)
+		return -ENOMEM;
 
 	io_req->req = req;
-	io_req->fds[0] = (ubd_dev->cow.file != NULL) ? ubd_dev->cow.fd :
-		ubd_dev->fd;
-	io_req->op = UBD_FLUSH;
-}
-
-static void submit_request(struct io_thread_req *io_req, struct ubd *dev)
-{
-	int n = os_write_file(thread_fd, &io_req,
-			      sizeof(io_req));
+	if (dev->cow.file)
+		io_req->fds[0] = dev->cow.fd;
+	else
+		io_req->fds[0] = dev->fd;
 
-	if (n != sizeof(io_req)) {
-		if (n != -EAGAIN)
-			pr_err("write to io thread failed: %d\n", -n);
+	if (req_op(req) == REQ_OP_FLUSH) {
+		io_req->op = UBD_FLUSH;
+	} else {
+		io_req->fds[1] = dev->fd;
+		io_req->cow_offset = -1;
+		io_req->offset = off;
+		io_req->length = bvec->bv_len;
+		io_req->error = 0;
+		io_req->sector_mask = 0;
+
+		io_req->op = rq_data_dir(req) == READ ? UBD_READ : UBD_WRITE;
+		io_req->offsets[0] = 0;
+		io_req->offsets[1] = dev->cow.data_offset;
+		io_req->buffer = page_address(bvec->bv_page) + bvec->bv_offset;
+		io_req->sectorsize = 1 << 9;
+
+		if (dev->cow.file) {
+			cowify_req(io_req, dev->cow.bitmap,
+				   dev->cow.bitmap_offset, dev->cow.bitmap_len);
+		}
+	}
 
-		blk_mq_requeue_request(io_req->req, true);
+	ret = os_write_file(thread_fd, &io_req, sizeof(io_req));
+	if (ret != sizeof(io_req)) {
+		if (ret != -EAGAIN)
+			pr_err("write to io thread failed: %d\n", -ret);
 		kfree(io_req);
 	}
+
+	return ret;
 }
 
 static blk_status_t ubd_queue_rq(struct blk_mq_hw_ctx *hctx,
 				 const struct blk_mq_queue_data *bd)
 {
 	struct request *req = bd->rq;
-	struct ubd *dev = hctx->queue->queuedata;
-	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
-	struct io_thread_req *io_req;
+	int ret = 0;
 
 	blk_mq_start_request(req);
 
-	pdu->rq_pos = blk_rq_pos(req);
-	pdu->start_sg = 0;
-	pdu->end_sg = blk_rq_map_sg(req->q, req, pdu->sg);
-
 	if (req_op(req) == REQ_OP_FLUSH) {
-		io_req = kmalloc(sizeof(struct io_thread_req), GFP_ATOMIC);
-		if (io_req == NULL) {
-			blk_mq_requeue_request(req, true);
-			goto done;
+		ret = ubd_queue_one_vec(hctx, req, 0, NULL);
+	} else {
+		struct req_iterator iter;
+		struct bio_vec bvec;
+		u64 off = (u64)blk_rq_pos(req) << 9;
+
+		rq_for_each_segment(bvec, req, iter) {
+			ret = ubd_queue_one_vec(hctx, req, off, &bvec);
+			if (ret < 0)
+				goto out;
+			off += bvec.bv_len;
 		}
-		prepare_flush_request(req, io_req);
-		submit_request(io_req, dev);
-
-		goto done;
 	}
-
-	while (pdu->start_sg < pdu->end_sg) {
-		struct scatterlist *sg = &pdu->sg[pdu->start_sg];
-
-		io_req = kmalloc(sizeof(struct io_thread_req),
-				 GFP_ATOMIC);
-		if (io_req == NULL) {
-			blk_mq_requeue_request(req, true);
-			goto done;
-		}
-		prepare_request(req, io_req,
-				(unsigned long long)pdu->rq_pos << 9,
-				sg->offset, sg->length, sg_page(sg));
-
-		submit_request(io_req, dev);
-
-		pdu->rq_pos += sg->length >> 9;
-		pdu->start_sg++;
+out:
+	if (ret < 0) {
+		blk_mq_requeue_request(req, true);
 	}
-
-done:
 	return BLK_STS_OK;
 }
 
-static int ubd_init_request(struct blk_mq_tag_set *set,
-			    struct request *req, unsigned int hctx_idx,
-			    unsigned int numa_node)
-{
-	struct ubd_pdu *pdu = blk_mq_rq_to_pdu(req);
-
-	sg_init_table(pdu->sg, MAX_SG);
-
-	return 0;
-}
-
 static int ubd_getgeo(struct block_device *bdev, struct hd_geometry *geo)
 {
 	struct ubd *ubd_dev = bdev->bd_disk->private_data;