@@ -79,6 +79,19 @@ xfs_find_daxdev_for_inode(
 	return mp->m_ddev_targp->bt_daxdev;
 }
 
+static void
+xfs_finish_page_writeback(
+	struct inode		*inode,
+	struct bio_vec		*bvec,
+	int			error)
+{
+	if (error) {
+		SetPageError(bvec->bv_page);
+		mapping_set_error(inode->i_mapping, -EIO);
+	}
+	end_page_writeback(bvec->bv_page);
+}
+
 /*
  * We're now finished for good with this page. Update the page state via the
  * associated buffer_heads, paying attention to the start and end offsets that
@@ -91,7 +104,7 @@ xfs_find_daxdev_for_inode(
  * and buffers potentially freed after every call to end_buffer_async_write.
  */
 static void
-xfs_finish_page_writeback(
+xfs_finish_buffer_writeback(
 	struct inode		*inode,
 	struct bio_vec		*bvec,
 	int			error)
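The renamed xfs_finish_buffer_writeback() keeps the old per-buffer completion logic, while the new xfs_finish_page_writeback() handles pages that never got buffer heads: on error it marks both the page and the mapping before clearing the writeback state. A minimal userspace sketch of that flow, with toy_page and toy_mapping as illustrative stand-ins for struct page and struct address_space (not kernel API):

#include <stdbool.h>

/* Toy stand-ins for the state that SetPageError(),
 * mapping_set_error() and end_page_writeback() manipulate. */
struct toy_page {
	bool	error;
	bool	under_writeback;
};

struct toy_mapping {
	int	wb_err;		/* first writeback error seen, 0 if none */
};

/* Same shape as the new helper: latch the error first, then end
 * writeback exactly once, whether or not an error occurred. */
static void toy_finish_page_writeback(struct toy_mapping *mapping,
				      struct toy_page *page, int error)
{
	if (error) {
		page->error = true;
		if (!mapping->wb_err)
			mapping->wb_err = error;
	}
	page->under_writeback = false;
}

Latching the error before ending writeback matters: end_page_writeback() can wake waiters, and they expect to find any error already recorded on the mapping by the time they run.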
@@ -166,9 +179,12 @@ xfs_destroy_ioend(
 		next = bio->bi_private;
 
 		/* walk each page on bio, ending page IO on them */
-		bio_for_each_segment_all(bvec, bio, i)
-			xfs_finish_page_writeback(inode, bvec, error);
-
+		bio_for_each_segment_all(bvec, bio, i) {
+			if (page_has_buffers(bvec->bv_page))
+				xfs_finish_buffer_writeback(inode, bvec, error);
+			else
+				xfs_finish_page_writeback(inode, bvec, error);
+		}
 		bio_put(bio);
 	}
 }
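Because pages with and without buffer heads can share an ioend while the conversion is in flight, completion now dispatches per page on page_has_buffers(). A minimal self-contained sketch of that dispatch, with the has_buffers flag and the toy_* helpers as illustrative stand-ins rather than kernel API:

#include <stdbool.h>
#include <stddef.h>

struct toy_page {
	bool	has_buffers;	/* models page_has_buffers() */
	bool	error;
	bool	under_writeback;
};

static void toy_finish_buffer_writeback(struct toy_page *page, int error)
{
	/* the real helper walks the page's buffer_head list here */
	if (error)
		page->error = true;
	page->under_writeback = false;
}

static void toy_finish_page_writeback(struct toy_page *page, int error)
{
	if (error)
		page->error = true;
	page->under_writeback = false;
}

/* Mirrors the new bio_for_each_segment_all() loop: pick the teardown
 * based on whether the page still carries buffer heads. */
static void toy_end_pages(struct toy_page *pages, size_t npages, int error)
{
	for (size_t i = 0; i < npages; i++) {
		if (pages[i].has_buffers)
			toy_finish_buffer_writeback(&pages[i], error);
		else
			toy_finish_page_writeback(&pages[i], error);
	}
}

Once the bufferhead path is gone the branch collapses back to a single call, so it reads as transitional scaffolding for the conversion.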
@@ -792,42 +808,51 @@ xfs_writepage_map(
 {
 	LIST_HEAD(submit_list);
 	struct xfs_ioend	*ioend, *next;
-	struct buffer_head	*bh;
+	struct buffer_head	*bh = NULL;
 	ssize_t			len = i_blocksize(inode);
 	uint64_t		file_offset;	/* file offset of page */
 	unsigned		poffset;	/* offset into page */
 	int			error = 0;
 	int			count = 0;
 
+	if (page_has_buffers(page))
+		bh = page_buffers(page);
+
 	/*
 	 * Walk the blocks on the page, and if we run off the end of the current
 	 * map or find the current map invalid, grab a new one. We only use
 	 * bufferheads here to check per-block state - they no longer control
 	 * the iteration through the page. This allows us to replace the
 	 * bufferhead with some other state tracking mechanism in future.
 	 */
-	file_offset = page_offset(page);
-	bh = page_buffers(page);
-	for (poffset = 0;
+	for (poffset = 0, file_offset = page_offset(page);
 	     poffset < PAGE_SIZE;
-	     poffset += len, file_offset += len, bh = bh->b_this_page) {
+	     poffset += len, file_offset += len) {
 		/* past the range we are writing, so nothing more to write. */
 		if (file_offset >= end_offset)
 			break;
 
-		if (!buffer_uptodate(bh)) {
+		if (bh && !buffer_uptodate(bh)) {
 			if (PageUptodate(page))
 				ASSERT(buffer_mapped(bh));
+			bh = bh->b_this_page;
 			continue;
 		}
 
 		error = xfs_map_blocks(wpc, inode, file_offset);
 		if (error)
			break;
-		if (wpc->io_type == XFS_IO_HOLE)
+
+		if (wpc->io_type == XFS_IO_HOLE) {
+			if (bh)
+				bh = bh->b_this_page;
 			continue;
+		}
 
-		xfs_map_at_offset(inode, bh, &wpc->imap, file_offset);
+		if (bh) {
+			xfs_map_at_offset(inode, bh, &wpc->imap, file_offset);
+			bh = bh->b_this_page;
+		}
 		xfs_add_to_ioend(inode, file_offset, page, wpc, wbc,
 				&submit_list);
 		count++;
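The rewritten walk is driven purely by poffset/file_offset; the buffer head, when present, is just per-block state and has to be stepped by hand on every path through the loop body, including both continue paths. A minimal sketch of the same walk shape, with struct toy_block as a hypothetical stand-in for struct buffer_head:

#include <stddef.h>
#include <stdint.h>

#define TOY_PAGE_SIZE	4096u

/* Hypothetical per-block state node modelling struct buffer_head;
 * b_this_page links the blocks of one page into a ring. */
struct toy_block {
	struct toy_block	*b_this_page;
	int			uptodate;
};

/*
 * Models the reworked xfs_writepage_map() walk: the loop indices carry
 * the iteration, and blk (like bh) is optional per-block state that is
 * advanced manually whenever it exists.
 */
static unsigned toy_writepage_map(struct toy_block *blocks /* may be NULL */,
				  uint64_t page_off, uint64_t end_off,
				  unsigned block_size)
{
	struct toy_block *blk = blocks;
	unsigned count = 0;

	for (unsigned poff = 0; poff < TOY_PAGE_SIZE;
	     poff += block_size, page_off += block_size) {
		if (page_off >= end_off)
			break;

		/* skip blocks whose state says there is nothing to write */
		if (blk && !blk->uptodate) {
			blk = blk->b_this_page;
			continue;
		}

		/* ... map the block and add it to the ioend here ... */
		if (blk)
			blk = blk->b_this_page;
		count++;
	}
	return count;
}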
@@ -925,8 +950,6 @@ xfs_do_writepage(
 
 	trace_xfs_writepage(inode, page, 0, 0);
 
-	ASSERT(page_has_buffers(page));
-
 	/*
 	 * Refuse to write the page out if we are called from reclaim context.
 	 *