@@ -134,19 +134,14 @@ EXPORT_SYMBOL_GPL(nfs_async_iocounter_wait);
 /*
  * nfs_page_group_lock - lock the head of the page group
  * @req - request in group that is to be locked
- * @nonblock - if true don't block waiting for lock
  *
- * this lock must be held if modifying the page group list
+ * this lock must be held when traversing or modifying the page
+ * group list
  *
- * return 0 on success, < 0 on error: -EDELAY if nonblocking or the
- * result from wait_on_bit_lock
- *
- * NOTE: calling with nonblock=false should always have set the
- * lock bit (see fs/buffer.c and other uses of wait_on_bit_lock
- * with TASK_UNINTERRUPTIBLE), so there is no need to check the result.
+ * return 0 on success, < 0 on error
  */
 int
-nfs_page_group_lock(struct nfs_page *req, bool nonblock)
+nfs_page_group_lock(struct nfs_page *req)
 {
 	struct nfs_page *head = req->wb_head;
 
@@ -155,14 +150,10 @@ nfs_page_group_lock(struct nfs_page *req, bool nonblock)
 	if (!test_and_set_bit(PG_HEADLOCK, &head->wb_flags))
 		return 0;
 
-	if (!nonblock) {
-		set_bit(PG_CONTENDED1, &head->wb_flags);
-		smp_mb__after_atomic();
-		return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
+	set_bit(PG_CONTENDED1, &head->wb_flags);
+	smp_mb__after_atomic();
+	return wait_on_bit_lock(&head->wb_flags, PG_HEADLOCK,
 				TASK_UNINTERRUPTIBLE);
-	}
-
-	return -EAGAIN;
 }
 
 /*
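
The slow path that now runs unconditionally is the standard bit-lock idiom: a test_and_set_bit() fast path, a contended marker plus smp_mb__after_atomic() so the unlock side knows to wake waiters, and an uninterruptible sleep in wait_on_bit_lock(). A minimal sketch of that idiom follows; demo_flags, DEMO_LOCK, DEMO_CONTENDED and demo_bit_lock() are illustrative placeholders rather than struct nfs_page fields, and the header choices assume a roughly v4.13-era tree.

/* Sketch only: demo_flags/DEMO_LOCK/DEMO_CONTENDED are placeholders. */
#include <linux/bitops.h>
#include <linux/sched.h>
#include <linux/wait_bit.h>

#define DEMO_LOCK	0	/* plays the role of a bit like PG_HEADLOCK */
#define DEMO_CONTENDED	1	/* plays the role of a bit like PG_CONTENDED1 */

static int demo_bit_lock(unsigned long *demo_flags)
{
	/* fast path: grab the lock bit if it is free */
	if (!test_and_set_bit(DEMO_LOCK, demo_flags))
		return 0;

	/* mark the lock contended so the unlock side knows to wake waiters */
	set_bit(DEMO_CONTENDED, demo_flags);
	smp_mb__after_atomic();

	/*
	 * Sleep until the bit is acquired; with TASK_UNINTERRUPTIBLE this
	 * returns 0, which is why callers need not check the result.
	 */
	return wait_on_bit_lock(demo_flags, DEMO_LOCK, TASK_UNINTERRUPTIBLE);
}
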
@@ -225,7 +216,7 @@ bool nfs_page_group_sync_on_bit(struct nfs_page *req, unsigned int bit)
 {
 	bool ret;
 
-	nfs_page_group_lock(req, false);
+	nfs_page_group_lock(req);
 	ret = nfs_page_group_sync_on_bit_locked(req, bit);
 	nfs_page_group_unlock(req);
 
@@ -1016,7 +1007,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 	unsigned int bytes_left = 0;
 	unsigned int offset, pgbase;
 
-	nfs_page_group_lock(req, false);
+	nfs_page_group_lock(req);
 
 	subreq = req;
 	bytes_left = subreq->wb_bytes;
@@ -1038,7 +1029,7 @@ static int __nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 			if (mirror->pg_recoalesce)
 				return 0;
 			/* retry add_request for this subreq */
-			nfs_page_group_lock(req, false);
+			nfs_page_group_lock(req);
 			continue;
 		}
 
@@ -1135,7 +1126,7 @@ int nfs_pageio_add_request(struct nfs_pageio_descriptor *desc,
 
 	for (midx = 0; midx < desc->pg_mirror_count; midx++) {
 		if (midx) {
-			nfs_page_group_lock(req, false);
+			nfs_page_group_lock(req);
 
 			/* find the last request */
 			for (lastreq = req->wb_head;
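
With the nonblocking mode gone, none of the call sites updated above has an -EAGAIN retry path to handle: the call either takes PG_HEADLOCK immediately or sleeps until it can. Below is a hedged sketch of the resulting caller pattern; example_walk_group() is illustrative and not part of the patch, while nfs_page_group_lock(), nfs_page_group_unlock() and the wb_head/wb_this_page group list come from the NFS code itself.

/* Illustrative caller; example_walk_group() is not a real kernel function. */
#include <linux/nfs_page.h>

static void example_walk_group(struct nfs_page *req)
{
	struct nfs_page *tmp;

	nfs_page_group_lock(req);	/* blocks until PG_HEADLOCK is held */

	/* the head lock must be held while walking the circular group list */
	tmp = req->wb_head;
	do {
		/* inspect or update tmp here */
		tmp = tmp->wb_this_page;
	} while (tmp != req->wb_head);

	nfs_page_group_unlock(req);
}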