@@ -41,7 +41,7 @@ static void amdgpu_flip_callback(struct fence *f, struct fence_cb *cb)
 		container_of(cb, struct amdgpu_flip_work, cb);
 
 	fence_put(f);
-	schedule_work(&work->flip_work);
+	schedule_work(&work->flip_work.work);
 }
 
 static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
@@ -63,16 +63,17 @@ static bool amdgpu_flip_handle_fence(struct amdgpu_flip_work *work,
 
 static void amdgpu_flip_work_func(struct work_struct *__work)
 {
+	struct delayed_work *delayed_work =
+		container_of(__work, struct delayed_work, work);
 	struct amdgpu_flip_work *work =
-		container_of(__work, struct amdgpu_flip_work, flip_work);
+		container_of(delayed_work, struct amdgpu_flip_work, flip_work);
 	struct amdgpu_device *adev = work->adev;
 	struct amdgpu_crtc *amdgpuCrtc = adev->mode_info.crtcs[work->crtc_id];
 
 	struct drm_crtc *crtc = &amdgpuCrtc->base;
 	unsigned long flags;
-	unsigned i, repcnt = 4;
-	int vpos, hpos, stat, min_udelay = 0;
-	struct drm_vblank_crtc *vblank = &crtc->dev->vblank[work->crtc_id];
+	unsigned i;
+	int vpos, hpos;
 
 	if (amdgpu_flip_handle_fence(work, &work->excl))
 		return;
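
The two container_of() steps in this hunk are the standard way to recover the enclosing object from a delayed-work callback: the workqueue core hands the callback the embedded struct work_struct, which itself lives inside a struct delayed_work. A minimal userspace sketch of the same pattern (struct flip_ctx and its members are illustrative stand-ins, not the driver's types):

```c
#include <stddef.h>
#include <stdio.h>

/* Stand-ins for the kernel's work structures, for illustration only. */
struct work_struct { int pending; };
struct delayed_work { struct work_struct work; /* timer omitted */ };

/* Hypothetical flip context embedding a delayed_work. */
struct flip_ctx {
	int crtc_id;
	struct delayed_work flip_work;
};

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

/* The callback only receives the innermost work_struct, so it walks
 * outward two levels: work_struct -> delayed_work -> flip_ctx. */
static void flip_cb(struct work_struct *w)
{
	struct delayed_work *dw = container_of(w, struct delayed_work, work);
	struct flip_ctx *ctx = container_of(dw, struct flip_ctx, flip_work);

	printf("flip for crtc %d\n", ctx->crtc_id);
}

int main(void)
{
	struct flip_ctx ctx = { .crtc_id = 1 };

	flip_cb(&ctx.flip_work.work);	/* prints "flip for crtc 1" */
	return 0;
}
```

Because work is the first member of struct delayed_work, the first container_of() is effectively a type-safe cast, but spelling it out keeps the code correct even if the embedding ever changes.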
@@ -81,55 +82,23 @@ static void amdgpu_flip_work_func(struct work_struct *__work)
 		if (amdgpu_flip_handle_fence(work, &work->shared[i]))
 			return;
 
-	/* We borrow the event spin lock for protecting flip_status */
-	spin_lock_irqsave(&crtc->dev->event_lock, flags);
-
-	/* If this happens to execute within the "virtually extended" vblank
-	 * interval before the start of the real vblank interval then it needs
-	 * to delay programming the mmio flip until the real vblank is entered.
-	 * This prevents completing a flip too early due to the way we fudge
-	 * our vblank counter and vblank timestamps in order to work around the
-	 * problem that the hw fires vblank interrupts before actual start of
-	 * vblank (when line buffer refilling is done for a frame). It
-	 * complements the fudging logic in amdgpu_get_crtc_scanoutpos() for
-	 * timestamping and amdgpu_get_vblank_counter_kms() for vblank counts.
-	 *
-	 * In practice this won't execute very often unless on very fast
-	 * machines because the time window for this to happen is very small.
+	/* Wait until we're out of the vertical blank period before the one
+	 * targeted by the flip
 	 */
-	while (amdgpuCrtc->enabled && --repcnt) {
-		/* GET_DISTANCE_TO_VBLANKSTART returns distance to real vblank
-		 * start in hpos, and to the "fudged earlier" vblank start in
-		 * vpos.
-		 */
-		stat = amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id,
-						  GET_DISTANCE_TO_VBLANKSTART,
-						  &vpos, &hpos, NULL, NULL,
-						  &crtc->hwmode);
-
-		if ((stat & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE)) !=
-		    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_ACCURATE) ||
-		    !(vpos >= 0 && hpos <= 0))
-			break;
-
-		/* Sleep at least until estimated real start of hw vblank */
-		min_udelay = (-hpos + 1) * max(vblank->linedur_ns / 1000, 5);
-		if (min_udelay > vblank->framedur_ns / 2000) {
-			/* Don't wait ridiculously long - something is wrong */
-			repcnt = 0;
-			break;
-		}
-		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-		usleep_range(min_udelay, 2 * min_udelay);
-		spin_lock_irqsave(&crtc->dev->event_lock, flags);
+	if (amdgpuCrtc->enabled &&
+	    (amdgpu_get_crtc_scanoutpos(adev->ddev, work->crtc_id, 0,
+					&vpos, &hpos, NULL, NULL,
+					&crtc->hwmode)
+	     & (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK)) ==
+	    (DRM_SCANOUTPOS_VALID | DRM_SCANOUTPOS_IN_VBLANK) &&
+	    (int)(work->target_vblank -
+		  amdgpu_get_vblank_counter_kms(adev->ddev, amdgpuCrtc->crtc_id)) > 0) {
+		schedule_delayed_work(&work->flip_work, usecs_to_jiffies(1000));
+		return;
 	}
 
-	if (!repcnt)
-		DRM_DEBUG_DRIVER("Delay problem on crtc %d: min_udelay %d, "
-				 "framedur %d, linedur %d, stat %d, vpos %d, "
-				 "hpos %d\n", work->crtc_id, min_udelay,
-				 vblank->framedur_ns / 1000,
-				 vblank->linedur_ns / 1000, stat, vpos, hpos);
+	/* We borrow the event spin lock for protecting flip_status */
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 
 	/* Do the flip (mmio) */
 	adev->mode_info.funcs->page_flip(adev, work->crtc_id, work->base, work->async);
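
Where the old code busy-waited, sleeping up to half a frame with usleep_range() while repeatedly dropping and re-taking the event lock, the rewritten function just re-queues itself with a 1 ms delay until the display has left the vblank period preceding the targeted one. The general shape of this self re-arming idiom, sketched with hypothetical helpers condition_met() and do_work():

```c
/* Self re-arming delayed work: poll a condition without blocking the
 * worker thread. condition_met() and do_work() are hypothetical. */
static void poll_work_func(struct work_struct *__work)
{
	struct delayed_work *dwork =
		container_of(__work, struct delayed_work, work);

	if (!condition_met()) {
		/* Not ready yet: check again in roughly 1 ms. */
		schedule_delayed_work(dwork, usecs_to_jiffies(1000));
		return;
	}

	do_work();	/* condition holds; do the real work */
}
```

Each retry returns the worker thread to the pool instead of sleeping in it, and no extra timer bookkeeping is needed because struct delayed_work already embeds one.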
@@ -169,10 +138,10 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	kfree(work);
 }
 
-int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
-			  struct drm_framebuffer *fb,
-			  struct drm_pending_vblank_event *event,
-			  uint32_t page_flip_flags)
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags, uint32_t target)
 {
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
@@ -191,7 +160,7 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	if (work == NULL)
 		return -ENOMEM;
 
-	INIT_WORK(&work->flip_work, amdgpu_flip_work_func);
+	INIT_DELAYED_WORK(&work->flip_work, amdgpu_flip_work_func);
 	INIT_WORK(&work->unpin_work, amdgpu_unpin_work_func);
 
 	work->event = event;
@@ -237,20 +206,16 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	amdgpu_bo_unreserve(new_rbo);
 
 	work->base = base;
-
-	r = drm_crtc_vblank_get(crtc);
-	if (r) {
-		DRM_ERROR("failed to get vblank before flip\n");
-		goto pflip_cleanup;
-	}
+	work->target_vblank = target - drm_crtc_vblank_count(crtc) +
+		amdgpu_get_vblank_counter_kms(dev, work->crtc_id);
 
 	/* we borrow the event spin lock for protecting flip_wrok */
 	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 	if (amdgpu_crtc->pflip_status != AMDGPU_FLIP_NONE) {
 		DRM_DEBUG_DRIVER("flip queue: crtc already busy\n");
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		r = -EBUSY;
-		goto vblank_cleanup;
+		goto pflip_cleanup;
 	}
 
 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
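
work->target_vblank translates the caller's target from DRM's software vblank count into the hardware counter's domain, since amdgpu_flip_work_func() compares against amdgpu_get_vblank_counter_kms(). That comparison casts the unsigned difference to int so it stays correct across 32-bit counter wraparound. A self-contained illustration of the wraparound-safe check:

```c
#include <stdint.h>
#include <stdio.h>

/* "Is target still in the future?": subtract in unsigned arithmetic,
 * then read the difference as signed, as the flip work function does. */
static int target_in_future(uint32_t target, uint32_t now)
{
	return (int32_t)(target - now) > 0;
}

int main(void)
{
	/* Correct across the 32-bit wrap: 2 is "after" 0xfffffffe. */
	printf("%d\n", target_in_future(2u, 0xfffffffeu));	/* 1 */
	printf("%d\n", target_in_future(0xfffffffeu, 2u));	/* 0 */
	printf("%d\n", target_in_future(10u, 10u));		/* 0 */
	return 0;
}
```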
@@ -262,12 +227,9 @@ int amdgpu_crtc_page_flip(struct drm_crtc *crtc,
 	/* update crtc fb */
 	crtc->primary->fb = fb;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
-	amdgpu_flip_work_func(&work->flip_work);
+	amdgpu_flip_work_func(&work->flip_work.work);
 	return 0;
 
-vblank_cleanup:
-	drm_crtc_vblank_put(crtc);
-
 pflip_cleanup:
 	if (unlikely(amdgpu_bo_reserve(new_rbo, false) != 0)) {
 		DRM_ERROR("failed to reserve new rbo in error path\n");