@@ -415,16 +415,16 @@ static void vmw_cmdbuf_ctx_process(struct vmw_cmdbuf_man *man,
  *
  * Calls vmw_cmdbuf_ctx_process() on all contexts. If any context has
  * command buffers left that are not submitted to hardware, Make sure
- * IRQ handling is turned on. Otherwise, make sure it's turned off. This
- * function may return -EAGAIN to indicate it should be rerun due to
- * possibly missed IRQs if IRQs has just been turned on.
+ * IRQ handling is turned on. Otherwise, make sure it's turned off.
  */
-static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
+static void vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 {
-	int notempty = 0;
+	int notempty;
 	struct vmw_cmdbuf_context *ctx;
 	int i;

+retry:
+	notempty = 0;
 	for_each_cmdbuf_ctx(man, i, ctx)
 		vmw_cmdbuf_ctx_process(man, ctx, &notempty);

@@ -440,10 +440,8 @@ static int vmw_cmdbuf_man_process(struct vmw_cmdbuf_man *man)
 		man->irq_on = true;

 		/* Rerun in case we just missed an irq. */
-		return -EAGAIN;
+		goto retry;
 	}
-
-	return 0;
 }

 /**
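Taken together, the two hunks above replace the old contract, where vmw_cmdbuf_man_process() returned -EAGAIN after turning IRQs on so that the caller would run it again, with a local retry label. Below is a standalone toy of that control-flow change; every name is invented for illustration and the branch conditions are paraphrased from the visible context, so treat it as a sketch of the pattern rather than the driver's code.

#include <stdbool.h>
#include <stdio.h>

static bool irq_on;      /* stands in for man->irq_on */
static int pending = 3;  /* stands in for unsubmitted command buffers */

/* Stand-in for the per-context pass; reports whether anything is
 * still pending, like the notempty counter in the patch. */
static int process_contexts(void)
{
	if (pending > 0)
		pending--;
	return pending;
}

/* Pre-patch this returned -EAGAIN after enabling IRQs and every
 * caller had to invoke it a second time; now the rerun is local. */
static void man_process(void)
{
	int notempty;

retry:
	notempty = process_contexts();

	if (irq_on && !notempty) {
		irq_on = false;			/* idle: IRQs off */
	} else if (!irq_on && notempty) {
		irq_on = true;			/* busy: IRQs on */
		/* Rerun in case we just missed an irq. */
		goto retry;
	}
}

int main(void)
{
	man_process();	/* callers no longer check a return value */
	printf("irq_on=%d pending=%d\n", irq_on, pending);
	return 0;
}

Note that the retry branch is only reachable while irq_on is false, so the goto fires at most once per IRQ-on transition and cannot loop forever; that is what makes folding the rerun into the function safe. The caller-side payoff shows up in the next two hunks, where vmw_cmdbuf_ctx_add() and the tasklet collapse to a single call.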
@@ -468,8 +466,7 @@ static void vmw_cmdbuf_ctx_add(struct vmw_cmdbuf_man *man,
 	header->cb_context = cb_context;
 	list_add_tail(&header->list, &man->ctx[cb_context].submitted);

-	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-		vmw_cmdbuf_man_process(man);
+	vmw_cmdbuf_man_process(man);
 }

 /**
@@ -488,8 +485,7 @@ static void vmw_cmdbuf_man_tasklet(unsigned long data)
 	struct vmw_cmdbuf_man *man = (struct vmw_cmdbuf_man *) data;

 	spin_lock(&man->lock);
-	if (vmw_cmdbuf_man_process(man) == -EAGAIN)
-		(void) vmw_cmdbuf_man_process(man);
+	vmw_cmdbuf_man_process(man);
 	spin_unlock(&man->lock);
 }

@@ -507,6 +503,7 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	struct vmw_cmdbuf_man *man =
 		container_of(work, struct vmw_cmdbuf_man, work);
 	struct vmw_cmdbuf_header *entry, *next;
+	uint32_t dummy;
 	bool restart = false;

 	spin_lock_bh(&man->lock);
@@ -523,6 +520,8 @@ static void vmw_cmdbuf_work_func(struct work_struct *work)
 	if (restart && vmw_cmdbuf_startstop(man, true))
 		DRM_ERROR("Failed restarting command buffer context 0.\n");

+	/* Send a new fence in case one was removed */
+	vmw_fifo_send_fence(man->dev_priv, &dummy);
 }

 /**
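The work function above handles command-buffer errors, and a buffer it removes may have been the one carrying a pending fence; emitting one fresh fence, whose returned sequence number is deliberately discarded into dummy, gives anything waiting on an earlier number something newer to complete against. A self-contained toy of that reasoning follows; all names are invented, and it assumes (as fence sequence numbers imply) that a waiter is satisfied once any fence at or past its target has signaled.

#include <stdint.h>
#include <stdio.h>

static uint32_t next_seqno = 1;   /* next fence number to hand out */
static uint32_t last_signaled;    /* highest seqno signaled so far */

/* Emit a fence; in this toy it signals immediately. */
static uint32_t send_fence(void)
{
	uint32_t seq = next_seqno++;
	last_signaled = seq;
	return seq;
}

int main(void)
{
	/* A seqno was handed out for a buffer that later errored out,
	 * so nothing will ever signal this exact number... */
	uint32_t lost = next_seqno++;

	/* ...but a replacement fence with a newer seqno still moves
	 * last_signaled past it, so its waiters can finish. */
	(void)send_fence();

	printf("waiter on %u %s\n", lost,
	       last_signaled >= lost ? "completes" : "stalls forever");
	return 0;
}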
@@ -682,7 +681,7 @@ static bool vmw_cmdbuf_try_alloc(struct vmw_cmdbuf_man *man,
 					 DRM_MM_SEARCH_DEFAULT,
 					 DRM_MM_CREATE_DEFAULT);
 	if (ret) {
-		(void) vmw_cmdbuf_man_process(man);
+		vmw_cmdbuf_man_process(man);
 		ret = drm_mm_insert_node_generic(&man->mm, info->node,
 						 info->page_size, 0, 0,
 						 DRM_MM_SEARCH_DEFAULT,
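The hunk above keeps the existing fallback shape in vmw_cmdbuf_try_alloc(): attempt to carve a node out of the pool, and if that fails, process finished command buffers, which is what returns their space to the pool, then attempt the insert once more. The same shape in miniature, self-contained and with all names and numbers invented:

#include <stdbool.h>
#include <stdio.h>

static int pool_free = 2;       /* pages currently available */
static int reclaimable = 6;     /* pages held by finished buffers */

/* Stand-in for vmw_cmdbuf_man_process(): reaping finished buffers
 * is what hands their pages back to the pool. */
static void reclaim_finished(void)
{
	pool_free += reclaimable;
	reclaimable = 0;
}

/* Try, reclaim, try again, mirroring the double
 * drm_mm_insert_node_generic() call in the hunk. */
static bool try_alloc(int pages)
{
	if (pool_free >= pages) {
		pool_free -= pages;
		return true;
	}
	reclaim_finished();
	if (pool_free >= pages) {
		pool_free -= pages;
		return true;
	}
	return false;
}

int main(void)
{
	printf("alloc of 4 pages: %s\n", try_alloc(4) ? "ok" : "failed");
	return 0;
}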
@@ -1168,7 +1167,14 @@ int vmw_cmdbuf_set_pool_size(struct vmw_cmdbuf_man *man,
 	drm_mm_init(&man->mm, 0, size >> PAGE_SHIFT);

 	man->has_pool = true;
-	man->default_size = default_size;
+
+	/*
+	 * For now, set the default size to VMW_CMDBUF_INLINE_SIZE to
+	 * prevent deadlocks from happening when vmw_cmdbuf_space_pool()
+	 * needs to wait for space and we block on further command
+	 * submissions to be able to free up space.
+	 */
+	man->default_size = VMW_CMDBUF_INLINE_SIZE;

 	DRM_INFO("Using command buffers with %s pool.\n",
 		 (man->using_mob) ? "MOB" : "DMA");
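The comment added here encodes a sizing rule: keep the default command-buffer size at the inline threshold so the default submission path never has to sleep in vmw_cmdbuf_space_pool() waiting for space that only the processing of further submissions could free. A toy model of that rule, self-contained, with invented names and numbers, and assuming (as the macro name suggests) that inline-sized headers are served from preallocated storage rather than the pool:

#include <stdbool.h>
#include <stdio.h>

#define INLINE_SIZE 4096          /* plays VMW_CMDBUF_INLINE_SIZE */

static int pool_free;             /* shared pool, currently exhausted */

/* Inline-sized requests can always proceed; larger ones must wait
 * for pool space, which only earlier submissions can free up. */
static bool alloc_would_block(int size)
{
	if (size <= INLINE_SIZE)
		return false;
	return pool_free < size;
}

int main(void)
{
	printf("default-size request blocks: %s\n",
	       alloc_would_block(INLINE_SIZE) ? "yes" : "no");
	printf("4x-size request blocks:      %s\n",
	       alloc_would_block(4 * INLINE_SIZE) ? "yes" : "no");
	return 0;
}

With the pool exhausted, the oversized request would block while the inline-sized one proceeds; if the blocked thread is also the one that must submit and complete further buffers to refill the pool, that wait never ends, which is the deadlock the comment describes.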