@@ -138,11 +138,52 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
 	kfree(work);
 }
 
-int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
-				 struct drm_framebuffer *fb,
-				 struct drm_pending_vblank_event *event,
-				 uint32_t page_flip_flags, uint32_t target,
-				 struct drm_modeset_acquire_ctx *ctx)
+
+static void amdgpu_flip_work_cleanup(struct amdgpu_flip_work *work)
+{
+	int i;
+
+	amdgpu_bo_unref(&work->old_abo);
+	dma_fence_put(work->excl);
+	for (i = 0; i < work->shared_count; ++i)
+		dma_fence_put(work->shared[i]);
+	kfree(work->shared);
+	kfree(work);
+}
+
+static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
+					  struct amdgpu_bo *new_abo)
+{
+	amdgpu_bo_unreserve(new_abo);
+	amdgpu_flip_work_cleanup(work);
+}
+
+static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
+				      struct amdgpu_bo *new_abo)
+{
+	if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
+		DRM_ERROR("failed to unpin new abo in error path\n");
+	amdgpu_flip_cleanup_unreserve(work, new_abo);
+}
+
+void amdgpu_crtc_cleanup_flip_ctx(struct amdgpu_flip_work *work,
+				  struct amdgpu_bo *new_abo)
+{
+	if (unlikely(amdgpu_bo_reserve(new_abo, true) != 0)) {
+		DRM_ERROR("failed to reserve new abo in error path\n");
+		amdgpu_flip_work_cleanup(work);
+		return;
+	}
+	amdgpu_flip_cleanup_unpin(work, new_abo);
+}
+
+int amdgpu_crtc_prepare_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct drm_pending_vblank_event *event,
+			     uint32_t page_flip_flags,
+			     uint32_t target,
+			     struct amdgpu_flip_work **work_p,
+			     struct amdgpu_bo **new_abo_p)
 {
 	struct drm_device *dev = crtc->dev;
 	struct amdgpu_device *adev = dev->dev_private;
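
The four helpers introduced above are deliberately nested: each one undoes a single setup step and then delegates to the next-inner helper, so each error label in amdgpu_crtc_prepare_flip can unwind with one call. A minimal sketch of the equivalence, assuming the driver context of amdgpu_display.c (editorial illustration, not part of the patch; return values are ignored here for brevity, whereas the real helpers check them):

/* Editorial sketch: calling the outermost helper,
 * amdgpu_crtc_cleanup_flip_ctx(), performs the same unwind the old
 * goto ladder did, in this order:
 */
static void sketch_full_unwind(struct amdgpu_flip_work *work,
			       struct amdgpu_bo *new_abo)
{
	amdgpu_bo_reserve(new_abo, true); /* 1: re-reserve the BO       */
	amdgpu_bo_unpin(new_abo);         /* 2: unpin it                */
	amdgpu_bo_unreserve(new_abo);     /* 3: drop the reservation    */
	amdgpu_flip_work_cleanup(work);   /* 4: put fences, free work   */
}

Each shallower failure point simply enters the chain further in: the unreserve path skips steps 1-2, the unpin path skips step 1, and the plain cleanup path runs only step 4.
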
@@ -155,7 +196,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 	unsigned long flags;
 	u64 tiling_flags;
 	u64 base;
-	int i, r;
+	int r;
 
 	work = kzalloc(sizeof *work, GFP_KERNEL);
 	if (work == NULL)
@@ -216,41 +257,80 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
 		spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
 		r = -EBUSY;
 		goto pflip_cleanup;
+
 	}
+	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	*work_p = work;
+	*new_abo_p = new_abo;
+
+	return 0;
+
+pflip_cleanup:
+	amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
+	return r;
+
+unpin:
+	amdgpu_flip_cleanup_unpin(work, new_abo);
+	return r;
+
+unreserve:
+	amdgpu_flip_cleanup_unreserve(work, new_abo);
+	return r;
 
+cleanup:
+	amdgpu_flip_work_cleanup(work);
+	return r;
+
+}
+
+void amdgpu_crtc_submit_flip(struct drm_crtc *crtc,
+			     struct drm_framebuffer *fb,
+			     struct amdgpu_flip_work *work,
+			     struct amdgpu_bo *new_abo)
+{
+	unsigned long flags;
+	struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+	spin_lock_irqsave(&crtc->dev->event_lock, flags);
 	amdgpu_crtc->pflip_status = AMDGPU_FLIP_PENDING;
 	amdgpu_crtc->pflip_works = work;
 
-
-	DRM_DEBUG_DRIVER("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
-			 amdgpu_crtc->crtc_id, amdgpu_crtc, work);
 	/* update crtc fb */
 	crtc->primary->fb = fb;
 	spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+
+	DRM_DEBUG_DRIVER(
+		"crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n",
+		amdgpu_crtc->crtc_id, amdgpu_crtc, work);
+
 	amdgpu_flip_work_func(&work->flip_work.work);
-	return 0;
+}
 
-pflip_cleanup:
-	if (unlikely(amdgpu_bo_reserve(new_abo, false) != 0)) {
-		DRM_ERROR("failed to reserve new abo in error path\n");
-		goto cleanup;
-	}
-unpin:
-	if (unlikely(amdgpu_bo_unpin(new_abo) != 0)) {
-		DRM_ERROR("failed to unpin new abo in error path\n");
-	}
-unreserve:
-	amdgpu_bo_unreserve(new_abo);
+int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
+				 struct drm_framebuffer *fb,
+				 struct drm_pending_vblank_event *event,
+				 uint32_t page_flip_flags,
+				 uint32_t target,
+				 struct drm_modeset_acquire_ctx *ctx)
+{
+	struct amdgpu_bo *new_abo;
+	struct amdgpu_flip_work *work;
+	int r;
 
-cleanup:
-	amdgpu_bo_unref(&work->old_abo);
-	dma_fence_put(work->excl);
-	for (i = 0; i < work->shared_count; ++i)
-		dma_fence_put(work->shared[i]);
-	kfree(work->shared);
-	kfree(work);
+	r = amdgpu_crtc_prepare_flip(crtc,
+				     fb,
+				     event,
+				     page_flip_flags,
+				     target,
+				     &work,
+				     &new_abo);
+	if (r)
+		return r;
 
-	return r;
+	amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
+
+	return 0;
 }
 
 int amdgpu_crtc_set_config(struct drm_mode_set *set,
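
With the split in place, amdgpu_crtc_page_flip_target itself reduces to prepare-then-submit. Presumably the point of exporting the pieces is to let other driver-internal code prepare a flip and decide later whether to submit it. A hedged sketch of such a caller (my_try_flip, its commit parameter, and the -ECANCELED choice are illustrative, not from the patch; only the functions the patch introduces are used):

static int my_try_flip(struct drm_crtc *crtc, struct drm_framebuffer *fb,
		       struct drm_pending_vblank_event *event,
		       uint32_t page_flip_flags, uint32_t target,
		       bool commit)
{
	struct amdgpu_flip_work *work;
	struct amdgpu_bo *new_abo;
	int r;

	/* Pin the new buffer and collect its fences; nothing is queued yet. */
	r = amdgpu_crtc_prepare_flip(crtc, fb, event, page_flip_flags,
				     target, &work, &new_abo);
	if (r)
		return r;

	if (!commit) {
		/* Abandon the prepared flip: re-reserve, unpin, unreserve,
		 * and free the work item via the helper chain. */
		amdgpu_crtc_cleanup_flip_ctx(work, new_abo);
		return -ECANCELED;
	}

	/* Mark the flip pending, update the primary fb, kick the worker. */
	amdgpu_crtc_submit_flip(crtc, fb, work, new_abo);
	return 0;
}

Note that amdgpu_crtc_submit_flip returns void: once preparation has succeeded, submission cannot fail, which is what makes the two-phase split safe.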