@@ -141,10 +141,10 @@ vc4_save_hang_state(struct drm_device *dev)
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct drm_vc4_get_hang_state *state;
	struct vc4_hang_state *kernel_state;
-	struct vc4_exec_info *exec;
+	struct vc4_exec_info *exec[2];
	struct vc4_bo *bo;
	unsigned long irqflags;
-	unsigned int i, unref_list_count;
+	unsigned int i, j, unref_list_count, prev_idx;

	kernel_state = kcalloc(1, sizeof(*kernel_state), GFP_KERNEL);
	if (!kernel_state)
@@ -153,37 +153,55 @@ vc4_save_hang_state(struct drm_device *dev)
	state = &kernel_state->user_state;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
-	exec = vc4_first_job(vc4);
-	if (!exec) {
+	exec[0] = vc4_first_bin_job(vc4);
+	exec[1] = vc4_first_render_job(vc4);
+	if (!exec[0] && !exec[1]) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

-	unref_list_count = 0;
-	list_for_each_entry(bo, &exec->unref_list, unref_head)
-		unref_list_count++;
+	/* Get the bos from both binner and renderer into hang state. */
+	state->bo_count = 0;
+	for (i = 0; i < 2; i++) {
+		if (!exec[i])
+			continue;
+
+		unref_list_count = 0;
+		list_for_each_entry(bo, &exec[i]->unref_list, unref_head)
+			unref_list_count++;
+		state->bo_count += exec[i]->bo_count + unref_list_count;
+	}
+
+	kernel_state->bo = kcalloc(state->bo_count,
+				   sizeof(*kernel_state->bo), GFP_ATOMIC);

-	state->bo_count = exec->bo_count + unref_list_count;
-	kernel_state->bo = kcalloc(state->bo_count, sizeof(*kernel_state->bo),
-				   GFP_ATOMIC);
	if (!kernel_state->bo) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}

-	for (i = 0; i < exec->bo_count; i++) {
-		drm_gem_object_reference(&exec->bo[i]->base);
-		kernel_state->bo[i] = &exec->bo[i]->base;
-	}
+	prev_idx = 0;
+	for (i = 0; i < 2; i++) {
+		if (!exec[i])
+			continue;

-	list_for_each_entry(bo, &exec->unref_list, unref_head) {
-		drm_gem_object_reference(&bo->base.base);
-		kernel_state->bo[i] = &bo->base.base;
-		i++;
+		for (j = 0; j < exec[i]->bo_count; j++) {
+			drm_gem_object_reference(&exec[i]->bo[j]->base);
+			kernel_state->bo[j + prev_idx] = &exec[i]->bo[j]->base;
+		}
+
+		list_for_each_entry(bo, &exec[i]->unref_list, unref_head) {
+			drm_gem_object_reference(&bo->base.base);
+			kernel_state->bo[j + prev_idx] = &bo->base.base;
+			j++;
+		}
+		prev_idx = j + 1;
	}

-	state->start_bin = exec->ct0ca;
-	state->start_render = exec->ct1ca;
+	if (exec[0])
+		state->start_bin = exec[0]->ct0ca;
+	if (exec[1])
+		state->start_render = exec[1]->ct1ca;

	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

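The hang-state capture above now walks both queues through vc4_first_bin_job() and vc4_first_render_job(). Those helpers are not part of this hunk; they would live next to the new bin_job_list/render_job_list heads (presumably in vc4_drv.h). A minimal sketch of what they are assumed to look like, using the standard list_first_entry() accessor:

/* Sketch only, not taken from this diff: first pending binner job,
 * or NULL if the bin queue is empty.
 */
static inline struct vc4_exec_info *
vc4_first_bin_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->bin_job_list))
		return NULL;
	return list_first_entry(&vc4->bin_job_list, struct vc4_exec_info, head);
}

/* Sketch only: first job that has finished binning and is queued for
 * (or running on) the renderer.
 */
static inline struct vc4_exec_info *
vc4_first_render_job(struct vc4_dev *vc4)
{
	if (list_empty(&vc4->render_job_list))
		return NULL;
	return list_first_entry(&vc4->render_job_list, struct vc4_exec_info, head);
}
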
@@ -267,13 +285,15 @@ vc4_hangcheck_elapsed(unsigned long data)
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t ct0ca, ct1ca;
	unsigned long irqflags;
-	struct vc4_exec_info *exec;
+	struct vc4_exec_info *bin_exec, *render_exec;

	spin_lock_irqsave(&vc4->job_lock, irqflags);
-	exec = vc4_first_job(vc4);
+
+	bin_exec = vc4_first_bin_job(vc4);
+	render_exec = vc4_first_render_job(vc4);

	/* If idle, we can stop watching for hangs. */
-	if (!exec) {
+	if (!bin_exec && !render_exec) {
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return;
	}
@@ -284,9 +304,12 @@ vc4_hangcheck_elapsed(unsigned long data)
	/* If we've made any progress in execution, rearm the timer
	 * and wait.
	 */
-	if (ct0ca != exec->last_ct0ca || ct1ca != exec->last_ct1ca) {
-		exec->last_ct0ca = ct0ca;
-		exec->last_ct1ca = ct1ca;
+	if ((bin_exec && ct0ca != bin_exec->last_ct0ca) ||
+	    (render_exec && ct1ca != render_exec->last_ct1ca)) {
+		if (bin_exec)
+			bin_exec->last_ct0ca = ct0ca;
+		if (render_exec)
+			render_exec->last_ct1ca = ct1ca;
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		vc4_queue_hangcheck(dev);
		return;
@@ -386,11 +409,13 @@ vc4_flush_caches(struct drm_device *dev)
 * The job_lock should be held during this.
 */
void
-vc4_submit_next_job(struct drm_device *dev)
+vc4_submit_next_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
-	struct vc4_exec_info *exec = vc4_first_job(vc4);
+	struct vc4_exec_info *exec;

+again:
+	exec = vc4_first_bin_job(vc4);
	if (!exec)
		return;

@@ -400,11 +425,40 @@ vc4_submit_next_job(struct drm_device *dev)
	V3D_WRITE(V3D_BPOA, 0);
	V3D_WRITE(V3D_BPOS, 0);

-	if (exec->ct0ca != exec->ct0ea)
+	/* Either put the job in the binner if it uses the binner, or
+	 * immediately move it to the to-be-rendered queue.
+	 */
+	if (exec->ct0ca != exec->ct0ea) {
		submit_cl(dev, 0, exec->ct0ca, exec->ct0ea);
+	} else {
+		vc4_move_job_to_render(dev, exec);
+		goto again;
+	}
+}
+
+void
+vc4_submit_next_render_job(struct drm_device *dev)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	struct vc4_exec_info *exec = vc4_first_render_job(vc4);
+
+	if (!exec)
+		return;
+
	submit_cl(dev, 1, exec->ct1ca, exec->ct1ea);
}

+void
+vc4_move_job_to_render(struct drm_device *dev, struct vc4_exec_info *exec)
+{
+	struct vc4_dev *vc4 = to_vc4_dev(dev);
+	bool was_empty = list_empty(&vc4->render_job_list);
+
+	list_move_tail(&exec->head, &vc4->render_job_list);
+	if (was_empty)
+		vc4_submit_next_render_job(dev);
+}
+
static void
vc4_update_bo_seqnos(struct vc4_exec_info *exec, uint64_t seqno)
{
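With the submit path split as above, binning and rendering can be pipelined: vc4_submit_next_bin_job() feeds the binner (or skips straight to vc4_move_job_to_render() for jobs with an empty bin CL), and vc4_move_job_to_render() kicks the render CL as soon as the renderer goes idle. What this hunk does not show is who moves a job whose binning has actually completed; that handoff would come from the V3D interrupt path when the binner signals flush done. A hedged sketch of that caller, with vc4_irq_finish_bin_job() as an assumed name rather than something taken from this diff:

/* Sketch only: assumed to be called from the V3D IRQ handler with
 * vc4->job_lock held, once the binner reports flush done for the
 * current bin job.
 */
static void
vc4_irq_finish_bin_job(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	struct vc4_exec_info *exec = vc4_first_bin_job(vc4);

	if (!exec)
		return;

	/* Hand the finished job over to the render queue (which may
	 * start its render CL immediately if the renderer was idle),
	 * then start binning the next queued job.
	 */
	vc4_move_job_to_render(dev, exec);
	vc4_submit_next_bin_job(dev);
}
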
@@ -443,14 +497,14 @@ vc4_queue_submit(struct drm_device *dev, struct vc4_exec_info *exec)
	exec->seqno = seqno;
	vc4_update_bo_seqnos(exec, seqno);

-	list_add_tail(&exec->head, &vc4->job_list);
+	list_add_tail(&exec->head, &vc4->bin_job_list);

	/* If no job was executing, kick ours off. Otherwise, it'll
-	 * get started when the previous job's frame done interrupt
+	 * get started when the previous job's flush done interrupt
	 * occurs.
	 */
-	if (vc4_first_job(vc4) == exec) {
-		vc4_submit_next_job(dev);
+	if (vc4_first_bin_job(vc4) == exec) {
+		vc4_submit_next_bin_job(dev);
		vc4_queue_hangcheck(dev);
	}

@@ -859,7 +913,8 @@ vc4_gem_init(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

-	INIT_LIST_HEAD(&vc4->job_list);
+	INIT_LIST_HEAD(&vc4->bin_job_list);
+	INIT_LIST_HEAD(&vc4->render_job_list);
	INIT_LIST_HEAD(&vc4->job_done_list);
	INIT_LIST_HEAD(&vc4->seqno_cb_list);
	spin_lock_init(&vc4->job_lock);
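
vc4_gem_init() now initializes one queue per hardware unit instead of the old single job_list. The matching field change in struct vc4_dev is outside this file; a sketch of how it is assumed to look in vc4_drv.h:

/* Sketch only: companion vc4_drv.h change assumed by this patch.  The
 * single vc4->job_list splits into one list per hardware unit.
 */
struct vc4_dev {
	/* ... existing fields ... */

	/* Jobs queued for (or currently running on) the binner; the
	 * head entry is the one programmed into CT0CA.
	 */
	struct list_head bin_job_list;

	/* Jobs that have finished binning and are queued for (or
	 * running on) the renderer; the head entry is programmed into
	 * CT1CA.
	 */
	struct list_head render_job_list;

	/* ... existing fields ... */
};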