@@ -92,26 +92,28 @@ xfs_qm_adjust_dqlimits(
 {
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	struct xfs_disk_dquot	*d = &dq->q_core;
+	struct xfs_def_quota	*defq;
 	int			prealloc = 0;
 
 	ASSERT(d->d_id);
+	defq = xfs_get_defquota(dq, q);
 
-	if (q->qi_bsoftlimit && !d->d_blk_softlimit) {
-		d->d_blk_softlimit = cpu_to_be64(q->qi_bsoftlimit);
+	if (defq->bsoftlimit && !d->d_blk_softlimit) {
+		d->d_blk_softlimit = cpu_to_be64(defq->bsoftlimit);
 		prealloc = 1;
 	}
-	if (q->qi_bhardlimit && !d->d_blk_hardlimit) {
-		d->d_blk_hardlimit = cpu_to_be64(q->qi_bhardlimit);
+	if (defq->bhardlimit && !d->d_blk_hardlimit) {
+		d->d_blk_hardlimit = cpu_to_be64(defq->bhardlimit);
 		prealloc = 1;
 	}
-	if (q->qi_isoftlimit && !d->d_ino_softlimit)
-		d->d_ino_softlimit = cpu_to_be64(q->qi_isoftlimit);
-	if (q->qi_ihardlimit && !d->d_ino_hardlimit)
-		d->d_ino_hardlimit = cpu_to_be64(q->qi_ihardlimit);
-	if (q->qi_rtbsoftlimit && !d->d_rtb_softlimit)
-		d->d_rtb_softlimit = cpu_to_be64(q->qi_rtbsoftlimit);
-	if (q->qi_rtbhardlimit && !d->d_rtb_hardlimit)
-		d->d_rtb_hardlimit = cpu_to_be64(q->qi_rtbhardlimit);
+	if (defq->isoftlimit && !d->d_ino_softlimit)
+		d->d_ino_softlimit = cpu_to_be64(defq->isoftlimit);
+	if (defq->ihardlimit && !d->d_ino_hardlimit)
+		d->d_ino_hardlimit = cpu_to_be64(defq->ihardlimit);
+	if (defq->rtbsoftlimit && !d->d_rtb_softlimit)
+		d->d_rtb_softlimit = cpu_to_be64(defq->rtbsoftlimit);
+	if (defq->rtbhardlimit && !d->d_rtb_hardlimit)
+		d->d_rtb_hardlimit = cpu_to_be64(defq->rtbhardlimit);
 
 	if (prealloc)
 		xfs_dquot_set_prealloc_limits(dq);
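Note: this hunk replaces the single set of mount-wide default limits (q->qi_bsoftlimit and friends) with per-quota-type defaults looked up through xfs_get_defquota(). The helper itself is defined outside this hunk; a minimal sketch of what it plausibly looks like, assuming the quotainfo gains qi_usr_default/qi_grp_default/qi_prj_default fields of type struct xfs_def_quota:

static inline struct xfs_def_quota *
xfs_get_defquota(struct xfs_dquot *dqp, struct xfs_quotainfo *qi)
{
	struct xfs_def_quota	*defq;

	/* Pick the default-limit set matching this dquot's type */
	if (XFS_QM_ISUDQ(dqp))
		defq = &qi->qi_usr_default;
	else if (XFS_QM_ISGDQ(dqp))
		defq = &qi->qi_grp_default;
	else {
		ASSERT(XFS_QM_ISPDQ(dqp));
		defq = &qi->qi_prj_default;
	}
	return defq;
}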
@@ -232,7 +234,8 @@ xfs_qm_init_dquot_blk(
 {
 	struct xfs_quotainfo	*q = mp->m_quotainfo;
 	xfs_dqblk_t		*d;
-	int			curid, i;
+	xfs_dqid_t		curid;
+	int			i;
 
 	ASSERT(tp);
 	ASSERT(xfs_buf_islocked(bp));
@@ -243,7 +246,6 @@ xfs_qm_init_dquot_blk(
 	 * ID of the first dquot in the block - id's are zero based.
 	 */
 	curid = id - (id % q->qi_dqperchunk);
-	ASSERT(curid >= 0);
 	memset(d, 0, BBTOB(q->qi_dqchunklen));
 	for (i = 0; i < q->qi_dqperchunk; i++, d++, curid++) {
 		d->dd_diskdq.d_magic = cpu_to_be16(XFS_DQUOT_MAGIC);
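Dropping ASSERT(curid >= 0) follows directly from the type change in the previous hunk: xfs_dqid_t is an unsigned 32-bit type, so the assertion is always true (and draws sign-comparison warnings from some compilers). From xfs_types.h of this era:

typedef __uint32_t	xfs_dqid_t;	/* unsigned; curid >= 0 is a tautology */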
@@ -464,12 +466,13 @@ xfs_qm_dqtobp(
 	struct xfs_bmbt_irec	map;
 	int			nmaps = 1, error;
 	struct xfs_buf		*bp;
-	struct xfs_inode	*quotip = xfs_dq_to_quota_inode(dqp);
+	struct xfs_inode	*quotip;
 	struct xfs_mount	*mp = dqp->q_mount;
 	xfs_dqid_t		id = be32_to_cpu(dqp->q_core.d_id);
 	struct xfs_trans	*tp = (tpp ? *tpp : NULL);
 	uint			lock_mode;
 
+	quotip = xfs_quota_inode(dqp->q_mount, dqp->dq_flags);
 	dqp->q_fileoffset = (xfs_fileoff_t)id / mp->m_quotainfo->qi_dqperchunk;
 
 	lock_mode = xfs_ilock_data_map_shared(quotip);
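xfs_qm_dqtobp() now resolves the quota inode from the mount and the dquot's flags rather than through xfs_dq_to_quota_inode(dqp). For reference, a sketch of xfs_quota_inode() consistent with this usage (the qi_uquotaip/qi_gquotaip/qi_pquotaip field names are assumed from xfs_qm.h):

static inline struct xfs_inode *
xfs_quota_inode(struct xfs_mount *mp, uint dq_flags)
{
	/* Map the dquot type bits to the matching quota inode */
	switch (dq_flags & XFS_DQ_ALLTYPES) {
	case XFS_DQ_USER:
		return mp->m_quotainfo->qi_uquotaip;
	case XFS_DQ_GROUP:
		return mp->m_quotainfo->qi_gquotaip;
	case XFS_DQ_PROJ:
		return mp->m_quotainfo->qi_pquotaip;
	default:
		ASSERT(0);
	}
	return NULL;
}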
@@ -684,6 +687,56 @@ xfs_qm_dqread(
 	return error;
 }
 
+/*
+ * Advance to the next id in the current chunk, or if at the
+ * end of the chunk, skip ahead to first id in next allocated chunk
+ * using the SEEK_DATA interface.
+ */
+int
+xfs_dq_get_next_id(
+	xfs_mount_t		*mp,
+	uint			type,
+	xfs_dqid_t		*id,
+	loff_t			eof)
+{
+	struct xfs_inode	*quotip;
+	xfs_fsblock_t		start;
+	loff_t			offset;
+	uint			lock;
+	xfs_dqid_t		next_id;
+	int			error = 0;
+
+	/* Simple advance */
+	next_id = *id + 1;
+
+	/* If new ID is within the current chunk, advancing it sufficed */
+	if (next_id % mp->m_quotainfo->qi_dqperchunk) {
+		*id = next_id;
+		return 0;
+	}
+
+	/* Nope, next_id is now past the current chunk, so find the next one */
+	start = (xfs_fsblock_t)next_id / mp->m_quotainfo->qi_dqperchunk;
+
+	quotip = xfs_quota_inode(mp, type);
+	lock = xfs_ilock_data_map_shared(quotip);
+
+	offset = __xfs_seek_hole_data(VFS_I(quotip), XFS_FSB_TO_B(mp, start),
+				      eof, SEEK_DATA);
+	if (offset < 0)
+		error = offset;
+
+	xfs_iunlock(quotip, lock);
+
+	/* -ENXIO is essentially "no more data" */
+	if (error)
+		return (error == -ENXIO ? -ENOENT : error);
+
+	/* Convert next data offset back to a quota id */
+	*id = XFS_B_TO_FSB(mp, offset) * mp->m_quotainfo->qi_dqperchunk;
+	return 0;
+}
+
 /*
  * Given the file system, inode OR id, and type (UDQUOT/GDQUOT), return
  * a locked dquot, doing an allocation (if requested) as needed.
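The id/offset arithmetic in xfs_dq_get_next_id() is easiest to see with concrete numbers. Assuming, purely for illustration, qi_dqperchunk == 30 (roughly what a 4k-block filesystem with 136-byte on-disk dquots yields):

/*
 * Worked example, not kernel code:
 *
 *   *id = 12 -> next_id = 13; 13 % 30 != 0, still inside chunk 0,
 *               so just store *id = 13 and return.
 *
 *   *id = 29 -> next_id = 30; 30 % 30 == 0, chunk 0 is exhausted.
 *               start = 30 / 30 = fsblock 1.  SEEK_DATA from byte
 *               XFS_FSB_TO_B(mp, 1) finds the next allocated chunk,
 *               say at fsblock 5; then
 *               *id = XFS_B_TO_FSB(mp, offset) * 30 = 5 * 30 = 150,
 *               the first id in that chunk.
 */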
@@ -704,6 +757,7 @@ xfs_qm_dqget(
 	struct xfs_quotainfo	*qi = mp->m_quotainfo;
 	struct radix_tree_root	*tree = xfs_dquot_tree(qi, type);
 	struct xfs_dquot	*dqp;
+	loff_t			eof = 0;
 	int			error;
 
 	ASSERT(XFS_IS_QUOTA_RUNNING(mp));
@@ -731,6 +785,21 @@ xfs_qm_dqget(
 	}
 #endif
 
+	/* Get the end of the quota file if we need it */
+	if (flags & XFS_QMOPT_DQNEXT) {
+		struct xfs_inode	*quotip;
+		xfs_fileoff_t		last;
+		uint			lock_mode;
+
+		quotip = xfs_quota_inode(mp, type);
+		lock_mode = xfs_ilock_data_map_shared(quotip);
+		error = xfs_bmap_last_offset(quotip, &last, XFS_DATA_FORK);
+		xfs_iunlock(quotip, lock_mode);
+		if (error)
+			return error;
+		eof = XFS_FSB_TO_B(mp, last);
+	}
+
 restart:
 	mutex_lock(&qi->qi_tree_lock);
 	dqp = radix_tree_lookup(tree, id);
@@ -744,6 +813,18 @@ xfs_qm_dqget(
 		goto restart;
 	}
 
+	/* uninit / unused quota found in radix tree, keep looking */
+	if (flags & XFS_QMOPT_DQNEXT) {
+		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+			xfs_dqunlock(dqp);
+			mutex_unlock(&qi->qi_tree_lock);
+			error = xfs_dq_get_next_id(mp, type, &id, eof);
+			if (error)
+				return error;
+			goto restart;
+		}
+	}
+
 	dqp->q_nrefs++;
 	mutex_unlock(&qi->qi_tree_lock);
 
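XFS_IS_DQUOT_UNINITIALIZED() is introduced elsewhere in this series. Consistent with its use here, it treats a dquot as "unused" when every limit and counter in q_core is zero; a sketch of a plausible definition:

#define XFS_IS_DQUOT_UNINITIALIZED(dqp)	( \
	!dqp->q_core.d_blk_hardlimit && \
	!dqp->q_core.d_blk_softlimit && \
	!dqp->q_core.d_rtb_hardlimit && \
	!dqp->q_core.d_rtb_softlimit && \
	!dqp->q_core.d_ino_hardlimit && \
	!dqp->q_core.d_ino_softlimit && \
	!dqp->q_core.d_bcount && \
	!dqp->q_core.d_rtbcount && \
	!dqp->q_core.d_icount)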
@@ -770,6 +851,13 @@ xfs_qm_dqget(
 	if (ip)
 		xfs_ilock(ip, XFS_ILOCK_EXCL);
 
+	/* If we are asked to find next active id, keep looking */
+	if (error == -ENOENT && (flags & XFS_QMOPT_DQNEXT)) {
+		error = xfs_dq_get_next_id(mp, type, &id, eof);
+		if (!error)
+			goto restart;
+	}
+
 	if (error)
 		return error;
 
@@ -820,6 +908,17 @@ xfs_qm_dqget(
 	qi->qi_dquots++;
 	mutex_unlock(&qi->qi_tree_lock);
 
+	/* If we are asked to find next active id, keep looking */
+	if (flags & XFS_QMOPT_DQNEXT) {
+		if (XFS_IS_DQUOT_UNINITIALIZED(dqp)) {
+			xfs_qm_dqput(dqp);
+			error = xfs_dq_get_next_id(mp, type, &id, eof);
+			if (error)
+				return error;
+			goto restart;
+		}
+	}
+
 dqret:
 	ASSERT((ip == NULL) || xfs_isilocked(ip, XFS_ILOCK_EXCL));
 	trace_xfs_dqget_miss(dqp);
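Putting the three XFS_QMOPT_DQNEXT hooks together: a caller in the style of a Q_XGETNEXTQUOTA handler can iterate every initialized dquot by repeatedly calling xfs_qm_dqget() and bumping the id. A minimal sketch with a hypothetical helper name and simplified error handling, assuming the six-argument xfs_qm_dqget() signature of this era:

/* Hypothetical, for illustration only. */
STATIC int
xfs_walk_active_dquots(
	struct xfs_mount	*mp,
	uint			type)	/* XFS_DQ_USER/GROUP/PROJ */
{
	struct xfs_dquot	*dqp;
	xfs_dqid_t		id = 0;
	int			error;

	for (;;) {
		/* DQNEXT: return the first initialized dquot at or after id */
		error = xfs_qm_dqget(mp, NULL, id, type,
				     XFS_QMOPT_DQNEXT, &dqp);
		if (error == -ENOENT)
			return 0;		/* walked off the end */
		if (error)
			return error;

		/* ... examine dqp->q_core limits and counters here ... */

		id = be32_to_cpu(dqp->q_core.d_id) + 1;
		xfs_qm_dqput(dqp);		/* unlock and release */
		if (id == 0)
			return 0;		/* id wrapped: last dquot seen */
	}
}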