Skip to content

Commit e0639dc

Browse files
olgakorn1 authored and J. Bruce Fields committed
NFSD introduce async copy feature
Upon receiving a request for async copy, create a new kthread. If we get asynchronous request, make sure to copy the needed arguments/state from the stack before starting the copy. Then start the thread and reply back to the client indicating copy is asynchronous. nfsd_copy_file_range() will copy in a loop over the total number of bytes is needed to copy. In case a failure happens in the middle, we ignore the error and return how much we copied so far. Once done creating a workitem for the callback workqueue and send CB_OFFLOAD with the results. The lifetime of the copy stateid is bound to the vfs copy. This way we don't need to keep the nfsd_net structure for the callback. We could keep it around longer so that an OFFLOAD_STATUS that came late would still get results, but clients should be able to deal without that. We handle OFFLOAD_CANCEL by sending a signal to the copy thread and calling kthread_stop. A client should cancel any ongoing copies before calling DESTROY_CLIENT; if not, we return a CLIENT_BUSY error. If the client is destroyed for some other reason (lease expiration, or server shutdown), we must clean up any ongoing copies ourselves. Signed-off-by: Olga Kornievskaia <kolga@netapp.com> [colin.king@canonical.com: fix leak in error case] [bfields@fieldses.org: remove signalling, merge patches] Signed-off-by: J. Bruce Fields <bfields@redhat.com>
1 parent 885e2bf commit e0639dc

File tree

7 files changed

+326
-24
lines changed

7 files changed

+326
-24
lines changed

fs/nfsd/netns.h

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -123,6 +123,14 @@ struct nfsd_net {
123123

124124
wait_queue_head_t ntf_wq;
125125
atomic_t ntf_refcnt;
126+
127+
/*
128+
* clientid and stateid data for construction of net unique COPY
129+
* stateids.
130+
*/
131+
u32 s2s_cp_cl_id;
132+
struct idr s2s_cp_stateids;
133+
spinlock_t s2s_cp_lock;
126134
};
127135

128136
/* Simple check to find out if a given net was properly initialized */

fs/nfsd/nfs4proc.c

Lines changed: 242 additions & 19 deletions
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@
3636
#include <linux/file.h>
3737
#include <linux/falloc.h>
3838
#include <linux/slab.h>
39+
#include <linux/kthread.h>
3940

4041
#include "idmap.h"
4142
#include "cache.h"
@@ -1089,45 +1090,255 @@ nfsd4_clone(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
10891090
return status;
10901091
}
10911092

1093+
void nfs4_put_copy(struct nfsd4_copy *copy)
1094+
{
1095+
if (!refcount_dec_and_test(&copy->refcount))
1096+
return;
1097+
kfree(copy);
1098+
}
1099+
1100+
static bool
1101+
check_and_set_stop_copy(struct nfsd4_copy *copy)
1102+
{
1103+
bool value;
1104+
1105+
spin_lock(&copy->cp_clp->async_lock);
1106+
value = copy->stopped;
1107+
if (!copy->stopped)
1108+
copy->stopped = true;
1109+
spin_unlock(&copy->cp_clp->async_lock);
1110+
return value;
1111+
}
1112+
1113+
static void nfsd4_stop_copy(struct nfsd4_copy *copy)
1114+
{
1115+
/* only 1 thread should stop the copy */
1116+
if (!check_and_set_stop_copy(copy))
1117+
kthread_stop(copy->copy_task);
1118+
nfs4_put_copy(copy);
1119+
}
1120+
1121+
static struct nfsd4_copy *nfsd4_get_copy(struct nfs4_client *clp)
1122+
{
1123+
struct nfsd4_copy *copy = NULL;
1124+
1125+
spin_lock(&clp->async_lock);
1126+
if (!list_empty(&clp->async_copies)) {
1127+
copy = list_first_entry(&clp->async_copies, struct nfsd4_copy,
1128+
copies);
1129+
refcount_inc(&copy->refcount);
1130+
}
1131+
spin_unlock(&clp->async_lock);
1132+
return copy;
1133+
}
1134+
1135+
/*
 * Stop every in-flight async copy belonging to @clp; used when the
 * client is being torn down.
 */
void nfsd4_shutdown_copy(struct nfs4_client *clp)
{
	struct nfsd4_copy *copy = nfsd4_get_copy(clp);

	while (copy) {
		nfsd4_stop_copy(copy);
		copy = nfsd4_get_copy(clp);
	}
}
1142+
1143+
static void nfsd4_cb_offload_release(struct nfsd4_callback *cb)
1144+
{
1145+
struct nfsd4_copy *copy = container_of(cb, struct nfsd4_copy, cp_cb);
1146+
1147+
nfs4_put_copy(copy);
1148+
}
1149+
1150+
/* CB_OFFLOAD completion: nothing to process; 1 means "done, don't retry". */
static int nfsd4_cb_offload_done(struct nfsd4_callback *cb,
				 struct rpc_task *task)
{
	return 1;
}
1155+
1156+
static const struct nfsd4_callback_ops nfsd4_cb_offload_ops = {
1157+
.release = nfsd4_cb_offload_release,
1158+
.done = nfsd4_cb_offload_done
1159+
};
1160+
1161+
/*
 * Finish filling in a successful COPY result: record stability, the
 * sync/async flag, and the server's boot verifier.  wr_bytes_written
 * has already been accumulated by the copy loop.
 */
static void nfsd4_init_copy_res(struct nfsd4_copy *copy, bool sync)
{
	copy->cp_synchronous = sync;
	copy->cp_res.wr_stable_how = NFS_UNSTABLE;
	gen_boot_verifier(&copy->cp_res.wr_verifier, copy->cp_clp->net);
}
1167+
1168+
static ssize_t _nfsd_copy_file_range(struct nfsd4_copy *copy)
1169+
{
1170+
ssize_t bytes_copied = 0;
1171+
size_t bytes_total = copy->cp_count;
1172+
u64 src_pos = copy->cp_src_pos;
1173+
u64 dst_pos = copy->cp_dst_pos;
1174+
1175+
do {
1176+
if (kthread_should_stop())
1177+
break;
1178+
bytes_copied = nfsd_copy_file_range(copy->file_src, src_pos,
1179+
copy->file_dst, dst_pos, bytes_total);
1180+
if (bytes_copied <= 0)
1181+
break;
1182+
bytes_total -= bytes_copied;
1183+
copy->cp_res.wr_bytes_written += bytes_copied;
1184+
src_pos += bytes_copied;
1185+
dst_pos += bytes_copied;
1186+
} while (bytes_total > 0 && !copy->cp_synchronous);
1187+
return bytes_copied;
1188+
}
1189+
1190+
/*
 * Run the copy and build the reply.  Drops both file references.
 * An error is only reported if no bytes were written at all; for an
 * async copy a partial result is returned and the client can retry
 * to discover the error.
 */
static __be32 nfsd4_do_copy(struct nfsd4_copy *copy, bool sync)
{
	ssize_t bytes = _nfsd_copy_file_range(copy);
	__be32 status;

	if (bytes < 0 && !copy->cp_res.wr_bytes_written) {
		status = nfserrno(bytes);
	} else {
		nfsd4_init_copy_res(copy, sync);
		status = nfs_ok;
	}

	fput(copy->file_src);
	fput(copy->file_dst);
	return status;
}
1210+
1211+
static void dup_copy_fields(struct nfsd4_copy *src, struct nfsd4_copy *dst)
1212+
{
1213+
dst->cp_src_pos = src->cp_src_pos;
1214+
dst->cp_dst_pos = src->cp_dst_pos;
1215+
dst->cp_count = src->cp_count;
1216+
dst->cp_synchronous = src->cp_synchronous;
1217+
memcpy(&dst->cp_res, &src->cp_res, sizeof(src->cp_res));
1218+
memcpy(&dst->fh, &src->fh, sizeof(src->fh));
1219+
dst->cp_clp = src->cp_clp;
1220+
dst->file_dst = get_file(src->file_dst);
1221+
dst->file_src = get_file(src->file_src);
1222+
memcpy(&dst->cp_stateid, &src->cp_stateid, sizeof(src->cp_stateid));
1223+
}
1224+
1225+
static void cleanup_async_copy(struct nfsd4_copy *copy)
1226+
{
1227+
nfs4_free_cp_state(copy);
1228+
fput(copy->file_dst);
1229+
fput(copy->file_src);
1230+
spin_lock(&copy->cp_clp->async_lock);
1231+
list_del(&copy->copies);
1232+
spin_unlock(&copy->cp_clp->async_lock);
1233+
nfs4_put_copy(copy);
1234+
}
1235+
1236+
static int nfsd4_do_async_copy(void *data)
1237+
{
1238+
struct nfsd4_copy *copy = (struct nfsd4_copy *)data;
1239+
struct nfsd4_copy *cb_copy;
1240+
1241+
copy->nfserr = nfsd4_do_copy(copy, 0);
1242+
cb_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
1243+
if (!cb_copy)
1244+
goto out;
1245+
memcpy(&cb_copy->cp_res, &copy->cp_res, sizeof(copy->cp_res));
1246+
cb_copy->cp_clp = copy->cp_clp;
1247+
cb_copy->nfserr = copy->nfserr;
1248+
memcpy(&cb_copy->fh, &copy->fh, sizeof(copy->fh));
1249+
nfsd4_init_cb(&cb_copy->cp_cb, cb_copy->cp_clp,
1250+
&nfsd4_cb_offload_ops, NFSPROC4_CLNT_CB_OFFLOAD);
1251+
nfsd4_run_cb(&cb_copy->cp_cb);
1252+
out:
1253+
cleanup_async_copy(copy);
1254+
return 0;
1255+
}
1256+
10921257
static __be32
10931258
nfsd4_copy(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
10941259
union nfsd4_op_u *u)
10951260
{
10961261
struct nfsd4_copy *copy = &u->copy;
1097-
struct file *src, *dst;
10981262
__be32 status;
1099-
ssize_t bytes;
1263+
struct nfsd4_copy *async_copy = NULL;
11001264

1101-
status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid, &src,
1102-
&copy->cp_dst_stateid, &dst);
1265+
status = nfsd4_verify_copy(rqstp, cstate, &copy->cp_src_stateid,
1266+
&copy->file_src, &copy->cp_dst_stateid,
1267+
&copy->file_dst);
11031268
if (status)
11041269
goto out;
11051270

1106-
bytes = nfsd_copy_file_range(src, copy->cp_src_pos,
1107-
dst, copy->cp_dst_pos, copy->cp_count);
1271+
copy->cp_clp = cstate->clp;
1272+
memcpy(&copy->fh, &cstate->current_fh.fh_handle,
1273+
sizeof(struct knfsd_fh));
1274+
if (!copy->cp_synchronous) {
1275+
struct nfsd_net *nn = net_generic(SVC_NET(rqstp), nfsd_net_id);
11081276

1109-
if (bytes < 0)
1110-
status = nfserrno(bytes);
1111-
else {
1112-
copy->cp_res.wr_bytes_written = bytes;
1113-
copy->cp_res.wr_stable_how = NFS_UNSTABLE;
1114-
copy->cp_synchronous = 1;
1115-
gen_boot_verifier(&copy->cp_res.wr_verifier, SVC_NET(rqstp));
1277+
status = nfserrno(-ENOMEM);
1278+
async_copy = kzalloc(sizeof(struct nfsd4_copy), GFP_KERNEL);
1279+
if (!async_copy)
1280+
goto out;
1281+
if (!nfs4_init_cp_state(nn, copy)) {
1282+
kfree(async_copy);
1283+
goto out;
1284+
}
1285+
refcount_set(&async_copy->refcount, 1);
1286+
memcpy(&copy->cp_res.cb_stateid, &copy->cp_stateid,
1287+
sizeof(copy->cp_stateid));
1288+
dup_copy_fields(copy, async_copy);
1289+
async_copy->copy_task = kthread_create(nfsd4_do_async_copy,
1290+
async_copy, "%s", "copy thread");
1291+
if (IS_ERR(async_copy->copy_task))
1292+
goto out_err;
1293+
spin_lock(&async_copy->cp_clp->async_lock);
1294+
list_add(&async_copy->copies,
1295+
&async_copy->cp_clp->async_copies);
1296+
spin_unlock(&async_copy->cp_clp->async_lock);
1297+
wake_up_process(async_copy->copy_task);
11161298
status = nfs_ok;
1117-
}
1118-
1119-
fput(src);
1120-
fput(dst);
1299+
} else
1300+
status = nfsd4_do_copy(copy, 1);
11211301
out:
11221302
return status;
1303+
out_err:
1304+
cleanup_async_copy(async_copy);
1305+
goto out;
1306+
}
1307+
1308+
struct nfsd4_copy *
1309+
find_async_copy(struct nfs4_client *clp, stateid_t *stateid)
1310+
{
1311+
struct nfsd4_copy *copy;
1312+
1313+
spin_lock(&clp->async_lock);
1314+
list_for_each_entry(copy, &clp->async_copies, copies) {
1315+
if (memcmp(&copy->cp_stateid, stateid, NFS4_STATEID_SIZE))
1316+
continue;
1317+
refcount_inc(&copy->refcount);
1318+
spin_unlock(&clp->async_lock);
1319+
return copy;
1320+
}
1321+
spin_unlock(&clp->async_lock);
1322+
return NULL;
11231323
}
11241324

11251325
static __be32
11261326
nfsd4_offload_cancel(struct svc_rqst *rqstp,
11271327
struct nfsd4_compound_state *cstate,
11281328
union nfsd4_op_u *u)
11291329
{
1130-
return 0;
1330+
struct nfsd4_offload_status *os = &u->offload_status;
1331+
__be32 status = 0;
1332+
struct nfsd4_copy *copy;
1333+
struct nfs4_client *clp = cstate->clp;
1334+
1335+
copy = find_async_copy(clp, &os->stateid);
1336+
if (copy)
1337+
nfsd4_stop_copy(copy);
1338+
else
1339+
status = nfserr_bad_stateid;
1340+
1341+
return status;
11311342
}
11321343

11331344
static __be32
@@ -1157,7 +1368,19 @@ nfsd4_offload_status(struct svc_rqst *rqstp,
11571368
struct nfsd4_compound_state *cstate,
11581369
union nfsd4_op_u *u)
11591370
{
1160-
return nfserr_notsupp;
1371+
struct nfsd4_offload_status *os = &u->offload_status;
1372+
__be32 status = 0;
1373+
struct nfsd4_copy *copy;
1374+
struct nfs4_client *clp = cstate->clp;
1375+
1376+
copy = find_async_copy(clp, &os->stateid);
1377+
if (copy) {
1378+
os->count = copy->cp_res.wr_bytes_written;
1379+
nfs4_put_copy(copy);
1380+
} else
1381+
status = nfserr_bad_stateid;
1382+
1383+
return status;
11611384
}
11621385

11631386
static __be32

fs/nfsd/nfs4state.c

Lines changed: 37 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -713,6 +713,36 @@ struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl, struct kmem_cache *sla
713713
return NULL;
714714
}
715715

716+
/*
717+
* Create a unique stateid_t to represent each COPY.
718+
*/
719+
int nfs4_init_cp_state(struct nfsd_net *nn, struct nfsd4_copy *copy)
720+
{
721+
int new_id;
722+
723+
idr_preload(GFP_KERNEL);
724+
spin_lock(&nn->s2s_cp_lock);
725+
new_id = idr_alloc_cyclic(&nn->s2s_cp_stateids, copy, 0, 0, GFP_NOWAIT);
726+
spin_unlock(&nn->s2s_cp_lock);
727+
idr_preload_end();
728+
if (new_id < 0)
729+
return 0;
730+
copy->cp_stateid.si_opaque.so_id = new_id;
731+
copy->cp_stateid.si_opaque.so_clid.cl_boot = nn->boot_time;
732+
copy->cp_stateid.si_opaque.so_clid.cl_id = nn->s2s_cp_cl_id;
733+
return 1;
734+
}
735+
736+
void nfs4_free_cp_state(struct nfsd4_copy *copy)
737+
{
738+
struct nfsd_net *nn;
739+
740+
nn = net_generic(copy->cp_clp->net, nfsd_net_id);
741+
spin_lock(&nn->s2s_cp_lock);
742+
idr_remove(&nn->s2s_cp_stateids, copy->cp_stateid.si_opaque.so_id);
743+
spin_unlock(&nn->s2s_cp_lock);
744+
}
745+
716746
static struct nfs4_ol_stateid * nfs4_alloc_open_stateid(struct nfs4_client *clp)
717747
{
718748
struct nfs4_stid *stid;
@@ -1827,6 +1857,8 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
18271857
#ifdef CONFIG_NFSD_PNFS
18281858
INIT_LIST_HEAD(&clp->cl_lo_states);
18291859
#endif
1860+
INIT_LIST_HEAD(&clp->async_copies);
1861+
spin_lock_init(&clp->async_lock);
18301862
spin_lock_init(&clp->cl_lock);
18311863
rpc_init_wait_queue(&clp->cl_cb_waitq, "Backchannel slot table");
18321864
return clp;
@@ -1942,6 +1974,7 @@ __destroy_client(struct nfs4_client *clp)
19421974
}
19431975
}
19441976
nfsd4_return_all_client_layouts(clp);
1977+
nfsd4_shutdown_copy(clp);
19451978
nfsd4_shutdown_callback(clp);
19461979
if (clp->cl_cb_conn.cb_xprt)
19471980
svc_xprt_put(clp->cl_cb_conn.cb_xprt);
@@ -2475,7 +2508,8 @@ static bool client_has_state(struct nfs4_client *clp)
24752508
|| !list_empty(&clp->cl_lo_states)
24762509
#endif
24772510
|| !list_empty(&clp->cl_delegations)
2478-
|| !list_empty(&clp->cl_sessions);
2511+
|| !list_empty(&clp->cl_sessions)
2512+
|| !list_empty(&clp->async_copies);
24792513
}
24802514

24812515
__be32
@@ -7161,6 +7195,8 @@ static int nfs4_state_create_net(struct net *net)
71617195
INIT_LIST_HEAD(&nn->close_lru);
71627196
INIT_LIST_HEAD(&nn->del_recall_lru);
71637197
spin_lock_init(&nn->client_lock);
7198+
spin_lock_init(&nn->s2s_cp_lock);
7199+
idr_init(&nn->s2s_cp_stateids);
71647200

71657201
spin_lock_init(&nn->blocked_locks_lock);
71667202
INIT_LIST_HEAD(&nn->blocked_locks_lru);

0 commit comments

Comments
 (0)