@@ -138,10 +138,52 @@ static void amdgpu_unpin_work_func(struct work_struct *__work)
138
138
kfree (work );
139
139
}
140
140
141
- int amdgpu_crtc_page_flip_target (struct drm_crtc * crtc ,
142
- struct drm_framebuffer * fb ,
143
- struct drm_pending_vblank_event * event ,
144
- uint32_t page_flip_flags , uint32_t target )
141
+
142
+ static void amdgpu_flip_work_cleanup (struct amdgpu_flip_work * work )
143
+ {
144
+ int i ;
145
+
146
+ amdgpu_bo_unref (& work -> old_abo );
147
+ dma_fence_put (work -> excl );
148
+ for (i = 0 ; i < work -> shared_count ; ++ i )
149
+ dma_fence_put (work -> shared [i ]);
150
+ kfree (work -> shared );
151
+ kfree (work );
152
+ }
153
+
154
/* Drop the reservation on the new BO, then tear down the flip work. */
static void amdgpu_flip_cleanup_unreserve(struct amdgpu_flip_work *work,
					  struct amdgpu_bo *new_abo)
{
	amdgpu_bo_unreserve(new_abo);
	amdgpu_flip_work_cleanup(work);
}
160
+
161
/*
 * Unpin the new BO (logging on failure; there is nothing more we can
 * do about it in an error path), then unreserve it and free the work.
 */
static void amdgpu_flip_cleanup_unpin(struct amdgpu_flip_work *work,
				      struct amdgpu_bo *new_abo)
{
	if (unlikely(amdgpu_bo_unpin(new_abo) != 0))
		DRM_ERROR("failed to unpin new abo in error path\n");
	amdgpu_flip_cleanup_unreserve(work, new_abo);
}
168
+
169
+ void amdgpu_crtc_cleanup_flip_ctx (struct amdgpu_flip_work * work ,
170
+ struct amdgpu_bo * new_abo )
171
+ {
172
+ if (unlikely (amdgpu_bo_reserve (new_abo , false) != 0 )) {
173
+ DRM_ERROR ("failed to reserve new abo in error path\n" );
174
+ amdgpu_flip_work_cleanup (work );
175
+ return ;
176
+ }
177
+ amdgpu_flip_cleanup_unpin (work , new_abo );
178
+ }
179
+
180
+ int amdgpu_crtc_prepare_flip (struct drm_crtc * crtc ,
181
+ struct drm_framebuffer * fb ,
182
+ struct drm_pending_vblank_event * event ,
183
+ uint32_t page_flip_flags ,
184
+ uint32_t target ,
185
+ struct amdgpu_flip_work * * work_p ,
186
+ struct amdgpu_bo * * new_abo_p )
145
187
{
146
188
struct drm_device * dev = crtc -> dev ;
147
189
struct amdgpu_device * adev = dev -> dev_private ;
@@ -154,7 +196,7 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
154
196
unsigned long flags ;
155
197
u64 tiling_flags ;
156
198
u64 base ;
157
- int i , r ;
199
+ int r ;
158
200
159
201
work = kzalloc (sizeof * work , GFP_KERNEL );
160
202
if (work == NULL )
@@ -215,41 +257,79 @@ int amdgpu_crtc_page_flip_target(struct drm_crtc *crtc,
215
257
spin_unlock_irqrestore (& crtc -> dev -> event_lock , flags );
216
258
r = - EBUSY ;
217
259
goto pflip_cleanup ;
260
+
218
261
}
262
+ spin_unlock_irqrestore (& crtc -> dev -> event_lock , flags );
263
+
264
+ * work_p = work ;
265
+ * new_abo_p = new_abo ;
266
+
267
+ return 0 ;
268
+
269
+ pflip_cleanup :
270
+ amdgpu_crtc_cleanup_flip_ctx (work , new_abo );
271
+ return r ;
272
+
273
+ unpin :
274
+ amdgpu_flip_cleanup_unpin (work , new_abo );
275
+ return r ;
276
+
277
+ unreserve :
278
+ amdgpu_flip_cleanup_unreserve (work , new_abo );
279
+ return r ;
219
280
281
+ cleanup :
282
+ amdgpu_flip_work_cleanup (work );
283
+ return r ;
284
+
285
+ }
286
+
287
+ void amdgpu_crtc_submit_flip (struct drm_crtc * crtc ,
288
+ struct drm_framebuffer * fb ,
289
+ struct amdgpu_flip_work * work ,
290
+ struct amdgpu_bo * new_abo )
291
+ {
292
+ unsigned long flags ;
293
+ struct amdgpu_crtc * amdgpu_crtc = to_amdgpu_crtc (crtc );
294
+
295
+ spin_lock_irqsave (& crtc -> dev -> event_lock , flags );
220
296
amdgpu_crtc -> pflip_status = AMDGPU_FLIP_PENDING ;
221
297
amdgpu_crtc -> pflip_works = work ;
222
298
223
-
224
- DRM_DEBUG_DRIVER ("crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n" ,
225
- amdgpu_crtc -> crtc_id , amdgpu_crtc , work );
226
299
/* update crtc fb */
227
300
crtc -> primary -> fb = fb ;
228
301
spin_unlock_irqrestore (& crtc -> dev -> event_lock , flags );
302
+
303
+ DRM_DEBUG_DRIVER (
304
+ "crtc:%d[%p], pflip_stat:AMDGPU_FLIP_PENDING, work: %p,\n" ,
305
+ amdgpu_crtc -> crtc_id , amdgpu_crtc , work );
306
+
229
307
amdgpu_flip_work_func (& work -> flip_work .work );
230
- return 0 ;
308
+ }
231
309
232
- pflip_cleanup :
233
- if (unlikely (amdgpu_bo_reserve (new_abo , false) != 0 )) {
234
- DRM_ERROR ("failed to reserve new abo in error path\n" );
235
- goto cleanup ;
236
- }
237
- unpin :
238
- if (unlikely (amdgpu_bo_unpin (new_abo ) != 0 )) {
239
- DRM_ERROR ("failed to unpin new abo in error path\n" );
240
- }
241
- unreserve :
242
- amdgpu_bo_unreserve (new_abo );
310
+ int amdgpu_crtc_page_flip_target (struct drm_crtc * crtc ,
311
+ struct drm_framebuffer * fb ,
312
+ struct drm_pending_vblank_event * event ,
313
+ uint32_t page_flip_flags ,
314
+ uint32_t target )
315
+ {
316
+ struct amdgpu_bo * new_abo ;
317
+ struct amdgpu_flip_work * work ;
318
+ int r ;
243
319
244
- cleanup :
245
- amdgpu_bo_unref (& work -> old_abo );
246
- dma_fence_put (work -> excl );
247
- for (i = 0 ; i < work -> shared_count ; ++ i )
248
- dma_fence_put (work -> shared [i ]);
249
- kfree (work -> shared );
250
- kfree (work );
320
+ r = amdgpu_crtc_prepare_flip (crtc ,
321
+ fb ,
322
+ event ,
323
+ page_flip_flags ,
324
+ target ,
325
+ & work ,
326
+ & new_abo );
327
+ if (r )
328
+ return r ;
251
329
252
- return r ;
330
+ amdgpu_crtc_submit_flip (crtc , fb , work , new_abo );
331
+
332
+ return 0 ;
253
333
}
254
334
255
335
int amdgpu_crtc_set_config (struct drm_mode_set * set )
0 commit comments