@@ -222,7 +222,7 @@ static void nfs_end_page_writeback(struct page *page)
 		clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
 }
 
-static struct nfs_page *nfs_find_and_lock_request(struct page *page)
+static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblock)
 {
 	struct inode *inode = page->mapping->host;
 	struct nfs_page *req;
@@ -241,7 +241,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
 	 * request as dirty (in which case we don't care).
 	 */
 	spin_unlock(&inode->i_lock);
-	ret = nfs_wait_on_request(req);
+	if (!nonblock)
+		ret = nfs_wait_on_request(req);
+	else
+		ret = -EAGAIN;
 	nfs_release_request(req);
 	if (ret != 0)
 		return ERR_PTR(ret);
@@ -256,12 +259,12 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page)
  * May return an error if the user signalled nfs_wait_on_request().
  */
 static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
-				struct page *page)
+				struct page *page, bool nonblock)
 {
 	struct nfs_page *req;
 	int ret = 0;
 
-	req = nfs_find_and_lock_request(page);
+	req = nfs_find_and_lock_request(page, nonblock);
 	if (!req)
 		goto out;
 	ret = PTR_ERR(req);
@@ -283,12 +286,20 @@ static int nfs_page_async_flush(struct nfs_pageio_descriptor *pgio,
 static int nfs_do_writepage(struct page *page, struct writeback_control *wbc, struct nfs_pageio_descriptor *pgio)
 {
 	struct inode *inode = page->mapping->host;
+	int ret;
 
 	nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGE);
 	nfs_add_stats(inode, NFSIOS_WRITEPAGES, 1);
 
 	nfs_pageio_cond_complete(pgio, page->index);
-	return nfs_page_async_flush(pgio, page);
+	ret = nfs_page_async_flush(pgio, page,
+			wbc->sync_mode == WB_SYNC_NONE ||
+			wbc->nonblocking != 0);
+	if (ret == -EAGAIN) {
+		redirty_page_for_writepage(wbc, page);
+		ret = 0;
+	}
+	return ret;
 }
 
 /*
@@ -1379,7 +1390,7 @@ static const struct rpc_call_ops nfs_commit_ops = {
 	.rpc_release = nfs_commit_release,
 };
 
-static int nfs_commit_inode(struct inode *inode, int how)
+int nfs_commit_inode(struct inode *inode, int how)
 {
 	LIST_HEAD(head);
 	int may_wait = how & FLUSH_SYNC;
@@ -1443,7 +1454,7 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
 	return ret;
 }
 #else
-static int nfs_commit_inode(struct inode *inode, int how)
+int nfs_commit_inode(struct inode *inode, int how)
 {
 	return 0;
 }
@@ -1546,7 +1557,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
 
 	nfs_fscache_release_page(page, GFP_KERNEL);
 
-	req = nfs_find_and_lock_request(page);
+	req = nfs_find_and_lock_request(page, false);
 	ret = PTR_ERR(req);
 	if (IS_ERR(req))
 		goto out;
0 commit comments