@@ -47,40 +47,6 @@
 
 static const struct vm_operations_struct xfs_file_vm_ops;
 
-/*
- * Locking primitives for read and write IO paths to ensure we consistently use
- * and order the inode->i_mutex, ip->i_lock and ip->i_iolock.
- */
-static inline void
-xfs_rw_ilock(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	if (type & XFS_IOLOCK_EXCL)
-		inode_lock(VFS_I(ip));
-	xfs_ilock(ip, type);
-}
-
-static inline void
-xfs_rw_iunlock(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	xfs_iunlock(ip, type);
-	if (type & XFS_IOLOCK_EXCL)
-		inode_unlock(VFS_I(ip));
-}
-
-static inline void
-xfs_rw_ilock_demote(
-	struct xfs_inode	*ip,
-	int			type)
-{
-	xfs_ilock_demote(ip, type);
-	if (type & XFS_IOLOCK_EXCL)
-		inode_unlock(VFS_I(ip));
-}
-
 /*
  * Clear the specified ranges to zero through either the pagecache or DAX.
  * Holes and unwritten extents will be left as-is as they already are zeroed.
@@ -273,7 +239,7 @@ xfs_file_dio_aio_read(
 
 	file_accessed(iocb->ki_filp);
 
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	if (mapping->nrpages) {
 		ret = filemap_write_and_wait_range(mapping, iocb->ki_pos, end);
 		if (ret)
@@ -299,7 +265,7 @@ xfs_file_dio_aio_read(
 	}
 
 out_unlock:
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 	return ret;
 }
 
@@ -317,9 +283,9 @@ xfs_file_dax_read(
 	if (!count)
 		return 0; /* skip atime */
 
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	ret = dax_iomap_rw(iocb, to, &xfs_iomap_ops);
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	file_accessed(iocb->ki_filp);
 	return ret;
@@ -335,9 +301,9 @@ xfs_file_buffered_aio_read(
 
 	trace_xfs_file_buffered_read(ip, iov_iter_count(to), iocb->ki_pos);
 
-	xfs_rw_ilock(ip, XFS_IOLOCK_SHARED);
+	xfs_ilock(ip, XFS_IOLOCK_SHARED);
 	ret = generic_file_read_iter(iocb, to);
-	xfs_rw_iunlock(ip, XFS_IOLOCK_SHARED);
+	xfs_iunlock(ip, XFS_IOLOCK_SHARED);
 
 	return ret;
 }
@@ -418,15 +384,18 @@ xfs_file_aio_write_checks(
 	if (error <= 0)
 		return error;
 
-	error = xfs_break_layouts(inode, iolock, true);
+	error = xfs_break_layouts(inode, iolock);
 	if (error)
 		return error;
 
-	/* For changing security info in file_remove_privs() we need i_mutex */
+	/*
+	 * For changing security info in file_remove_privs() we need i_rwsem
+	 * exclusively.
+	 */
 	if (*iolock == XFS_IOLOCK_SHARED && !IS_NOSEC(inode)) {
-		xfs_rw_iunlock(ip, *iolock);
+		xfs_iunlock(ip, *iolock);
 		*iolock = XFS_IOLOCK_EXCL;
-		xfs_rw_ilock(ip, *iolock);
+		xfs_ilock(ip, *iolock);
 		goto restart;
 	}
 	/*
@@ -451,9 +420,9 @@ xfs_file_aio_write_checks(
 		spin_unlock(&ip->i_flags_lock);
 		if (!drained_dio) {
 			if (*iolock == XFS_IOLOCK_SHARED) {
-				xfs_rw_iunlock(ip, *iolock);
+				xfs_iunlock(ip, *iolock);
 				*iolock = XFS_IOLOCK_EXCL;
-				xfs_rw_ilock(ip, *iolock);
+				xfs_ilock(ip, *iolock);
 				iov_iter_reexpand(from, count);
 			}
 			/*
@@ -559,7 +528,7 @@ xfs_file_dio_aio_write(
 		iolock = XFS_IOLOCK_SHARED;
 	}
 
-	xfs_rw_ilock(ip, iolock);
+	xfs_ilock(ip, iolock);
 
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
@@ -591,7 +560,7 @@ xfs_file_dio_aio_write(
 	if (unaligned_io)
 		inode_dio_wait(inode);
 	else if (iolock == XFS_IOLOCK_EXCL) {
-		xfs_rw_ilock_demote(ip, XFS_IOLOCK_EXCL);
+		xfs_ilock_demote(ip, XFS_IOLOCK_EXCL);
 		iolock = XFS_IOLOCK_SHARED;
 	}
 
@@ -621,7 +590,7 @@ xfs_file_dio_aio_write(
 		iov_iter_advance(from, ret);
 	}
 out:
-	xfs_rw_iunlock(ip, iolock);
+	xfs_iunlock(ip, iolock);
 
 	/*
 	 * No fallback to buffered IO on errors for XFS, direct IO will either
@@ -643,7 +612,7 @@ xfs_file_dax_write(
 	size_t			count;
 	loff_t			pos;
 
-	xfs_rw_ilock(ip, iolock);
+	xfs_ilock(ip, iolock);
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
 		goto out;
@@ -652,15 +621,13 @@ xfs_file_dax_write(
 	count = iov_iter_count(from);
 
 	trace_xfs_file_dax_write(ip, count, pos);
-
 	ret = dax_iomap_rw(iocb, from, &xfs_iomap_ops);
 	if (ret > 0 && iocb->ki_pos > i_size_read(inode)) {
 		i_size_write(inode, iocb->ki_pos);
 		error = xfs_setfilesize(ip, pos, ret);
 	}
-
 out:
-	xfs_rw_iunlock(ip, iolock);
+	xfs_iunlock(ip, iolock);
 	return error ? error : ret;
 }
 
@@ -677,7 +644,7 @@ xfs_file_buffered_aio_write(
 	int			enospc = 0;
 	int			iolock = XFS_IOLOCK_EXCL;
 
-	xfs_rw_ilock(ip, iolock);
+	xfs_ilock(ip, iolock);
 
 	ret = xfs_file_aio_write_checks(iocb, from, &iolock);
 	if (ret)
@@ -721,7 +688,7 @@ xfs_file_buffered_aio_write(
 
 	current->backing_dev_info = NULL;
 out:
-	xfs_rw_iunlock(ip, iolock);
+	xfs_iunlock(ip, iolock);
 	return ret;
 }
 
@@ -797,7 +764,7 @@ xfs_file_fallocate(
 		return -EOPNOTSUPP;
 
 	xfs_ilock(ip, iolock);
-	error = xfs_break_layouts(inode, &iolock, false);
+	error = xfs_break_layouts(inode, &iolock);
 	if (error)
 		goto out_unlock;
 
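Note (not part of the diff): the xfs_rw_ilock()/xfs_rw_iunlock()/xfs_rw_ilock_demote() wrappers removed above existed only to take the VFS inode's i_rwsem (inode_lock()/inode_unlock()) alongside the XFS IO lock. This patch assumes a companion change outside this file in which xfs_ilock(), xfs_iunlock() and xfs_ilock_demote() handle the XFS_IOLOCK_* flags by operating on i_rwsem directly, which is what lets every caller drop the wrappers and lets xfs_break_layouts() lose its extra locking argument. The sketch below is illustrative only (hypothetical sketch_* names, lockdep subclass handling and the other lock classes elided), not the verbatim kernel implementation:

/* Illustrative sketch -- assumes XFS_IOLOCK_* now maps onto the VFS i_rwsem. */
static void
sketch_xfs_ilock(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	if (lock_flags & XFS_IOLOCK_EXCL)
		down_write(&VFS_I(ip)->i_rwsem);	/* exclusive IO lock */
	else if (lock_flags & XFS_IOLOCK_SHARED)
		down_read(&VFS_I(ip)->i_rwsem);		/* shared IO lock */
	/* XFS_MMAPLOCK_* and XFS_ILOCK_* handling unchanged, elided here */
}

static void
sketch_xfs_iunlock(
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	if (lock_flags & XFS_IOLOCK_EXCL)
		up_write(&VFS_I(ip)->i_rwsem);
	else if (lock_flags & XFS_IOLOCK_SHARED)
		up_read(&VFS_I(ip)->i_rwsem);
}

With a mapping like that in place, xfs_ilock(ip, XFS_IOLOCK_EXCL) in the hunks above provides the same i_rwsem protection the removed wrappers used to add by hand.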