Skip to content

Commit 3a48250

Browse files
committed
rbd: introduce rbd_obj_issue_copyup_ops()
In preparation for the deep-flatten feature, split rbd_obj_issue_copyup() into two functions and add a new write state to make the state machine slightly clearer. Make the copyup op optional and start using that for when the overlap goes to 0.

Signed-off-by: Ilya Dryomov <idryomov@gmail.com>
1 parent 13488d5 commit 3a48250

File tree

1 file changed

+43
-33
lines changed

1 file changed

+43
-33
lines changed

drivers/block/rbd.c

Lines changed: 43 additions & 33 deletions
Original file line number | Diff line number | Diff line change
@@ -236,7 +236,8 @@ enum obj_operation_type {
236236
enum rbd_obj_write_state {
237237
RBD_OBJ_WRITE_FLAT = 1,
238238
RBD_OBJ_WRITE_GUARD,
239-
RBD_OBJ_WRITE_COPYUP,
239+
RBD_OBJ_WRITE_READ_FROM_PARENT,
240+
RBD_OBJ_WRITE_COPYUP_OPS,
240241
};
241242

242243
struct rbd_obj_request {
@@ -2458,10 +2459,13 @@ static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
24582459
return true;
24592460
}
24602461

2461-
static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
2462+
#define MODS_ONLY U32_MAX
2463+
2464+
static int rbd_obj_issue_copyup_ops(struct rbd_obj_request *obj_req, u32 bytes)
24622465
{
24632466
struct rbd_img_request *img_req = obj_req->img_request;
2464-
unsigned int num_osd_ops = 1;
2467+
unsigned int num_osd_ops = (bytes != MODS_ONLY);
2468+
unsigned int which = 0;
24652469
int ret;
24662470

24672471
dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
@@ -2483,31 +2487,25 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
24832487
if (!obj_req->osd_req)
24842488
return -ENOMEM;
24852489

2486-
ret = osd_req_op_cls_init(obj_req->osd_req, 0, "rbd", "copyup");
2487-
if (ret)
2488-
return ret;
2490+
if (bytes != MODS_ONLY) {
2491+
ret = osd_req_op_cls_init(obj_req->osd_req, which, "rbd",
2492+
"copyup");
2493+
if (ret)
2494+
return ret;
24892495

2490-
/*
2491-
* Only send non-zero copyup data to save some I/O and network
2492-
* bandwidth -- zero copyup data is equivalent to the object not
2493-
* existing.
2494-
*/
2495-
if (is_zero_bvecs(obj_req->copyup_bvecs, bytes)) {
2496-
dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
2497-
bytes = 0;
2496+
osd_req_op_cls_request_data_bvecs(obj_req->osd_req, which++,
2497+
obj_req->copyup_bvecs,
2498+
obj_req->copyup_bvec_count,
2499+
bytes);
24982500
}
2499-
osd_req_op_cls_request_data_bvecs(obj_req->osd_req, 0,
2500-
obj_req->copyup_bvecs,
2501-
obj_req->copyup_bvec_count,
2502-
bytes);
25032501

25042502
switch (img_req->op_type) {
25052503
case OBJ_OP_WRITE:
2506-
__rbd_obj_setup_write(obj_req, 1);
2504+
__rbd_obj_setup_write(obj_req, which);
25072505
break;
25082506
case OBJ_OP_ZEROOUT:
25092507
rbd_assert(!rbd_obj_is_entire(obj_req));
2510-
__rbd_obj_setup_zeroout(obj_req, 1);
2508+
__rbd_obj_setup_zeroout(obj_req, which);
25112509
break;
25122510
default:
25132511
rbd_assert(0);
@@ -2521,6 +2519,22 @@ static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
25212519
return 0;
25222520
}
25232521

2522+
/*
 * Kick off a copyup for @obj_req.  The payload is dropped (a zero-length
 * copyup is sent instead) when the parent data is all zeroes, because a
 * zero-filled object is equivalent to one that does not exist.
 */
static int rbd_obj_issue_copyup(struct rbd_obj_request *obj_req, u32 bytes)
{
	u32 payload = bytes;

	/*
	 * Only send non-zero copyup data to save some I/O and network
	 * bandwidth -- zero copyup data is equivalent to the object not
	 * existing.
	 */
	if (is_zero_bvecs(obj_req->copyup_bvecs, payload)) {
		dout("%s obj_req %p detected zeroes\n", __func__, obj_req);
		payload = 0;
	}

	obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS;
	return rbd_obj_issue_copyup_ops(obj_req, payload);
}
2537+
25242538
static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
25252539
{
25262540
u32 i;
@@ -2560,30 +2574,26 @@ static int rbd_obj_handle_write_guard(struct rbd_obj_request *obj_req)
25602574
if (!obj_req->num_img_extents) {
25612575
/*
25622576
* The overlap has become 0 (most likely because the
2563-
* image has been flattened). Use rbd_obj_issue_copyup()
2564-
* to re-submit the original write request -- the copyup
2565-
* operation itself will be a no-op, since someone must
2566-
* have populated the child object while we weren't
2567-
* looking. Move to WRITE_FLAT state as we'll be done
2568-
* with the operation once the null copyup completes.
2577+
* image has been flattened). Re-submit the original write
2578+
* request -- pass MODS_ONLY since the copyup isn't needed
2579+
* anymore.
25692580
*/
2570-
obj_req->write_state = RBD_OBJ_WRITE_FLAT;
2571-
return rbd_obj_issue_copyup(obj_req, 0);
2581+
obj_req->write_state = RBD_OBJ_WRITE_COPYUP_OPS;
2582+
return rbd_obj_issue_copyup_ops(obj_req, MODS_ONLY);
25722583
}
25732584

25742585
ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
25752586
if (ret)
25762587
return ret;
25772588

2578-
obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
2589+
obj_req->write_state = RBD_OBJ_WRITE_READ_FROM_PARENT;
25792590
return rbd_obj_read_from_parent(obj_req);
25802591
}
25812592

25822593
static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
25832594
{
25842595
int ret;
25852596

2586-
again:
25872597
switch (obj_req->write_state) {
25882598
case RBD_OBJ_WRITE_GUARD:
25892599
rbd_assert(!obj_req->xferred);
@@ -2602,17 +2612,17 @@ static bool rbd_obj_handle_write(struct rbd_obj_request *obj_req)
26022612
}
26032613
/* fall through */
26042614
case RBD_OBJ_WRITE_FLAT:
2615+
case RBD_OBJ_WRITE_COPYUP_OPS:
26052616
if (!obj_req->result)
26062617
/*
26072618
* There is no such thing as a successful short
26082619
* write -- indicate the whole request was satisfied.
26092620
*/
26102621
obj_req->xferred = obj_req->ex.oe_len;
26112622
return true;
2612-
case RBD_OBJ_WRITE_COPYUP:
2613-
obj_req->write_state = RBD_OBJ_WRITE_GUARD;
2623+
case RBD_OBJ_WRITE_READ_FROM_PARENT:
26142624
if (obj_req->result)
2615-
goto again;
2625+
return true;
26162626

26172627
rbd_assert(obj_req->xferred);
26182628
ret = rbd_obj_issue_copyup(obj_req, obj_req->xferred);

0 commit comments

Comments
 (0)