@@ -125,6 +125,14 @@ typedef struct _Render {
 	VipsSinkNotify notify;		/* Tell caller about paints here */
 	void *a;
 
+	/* This render has its own threadpool and is not on the shared list.
+	 *
+	 * This private threadpool needs a semaphore to wait on for dirty tiles
+	 * to arrive.
+	 */
+	gboolean private_threadpool;
+	VipsSemaphore dirty_sem;
+
 	/* Lock here before reading or modifying the tile structure.
 	 */
 	GMutex lock;
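(Not part of the patch.) For readers who have not met VipsSemaphore before, the sketch below shows the wait/signal pattern the new dirty_sem field enables. The Worker type and the worker_*() names are invented for illustration; only the vips_semaphore_init/up/down calls are the ones this patch actually uses.

/* Sketch only -- not part of the patch. The vips_semaphore_*() calls are
 * the ones used above; Worker and the worker_*() names are invented.
 */
#include <vips/vips.h>
#include <vips/semaphore.h>	/* assumed to declare VipsSemaphore */

typedef struct {
	VipsSemaphore dirty_sem;
	gboolean shutdown;
} Worker;

static void
worker_init(Worker *worker)
{
	/* Start at 0: down() blocks until someone posts work. */
	vips_semaphore_init(&worker->dirty_sem, 0, "dirty_sem");
	worker->shutdown = FALSE;
}

/* Consumer: sleep until a producer signals new work, or shutdown. */
static void *
worker_main(void *data)
{
	Worker *worker = (Worker *) data;

	for (;;) {
		vips_semaphore_down(&worker->dirty_sem);

		if (worker->shutdown)
			break;

		/* ... take and process one unit of work ... */
	}

	return NULL;
}

/* Producer: one up() per unit of work; up() once more after setting
 * shutdown so the consumer wakes and exits.
 */
static void
worker_post(Worker *worker)
{
	vips_semaphore_up(&worker->dirty_sem);
}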
@@ -246,6 +254,9 @@ render_free(Render *render)
 #endif
 	g_mutex_clear(&render->lock);
 
+	if (render->private_threadpool)
+		vips_semaphore_destroy(&render->dirty_sem);
+
 	vips_slist_map2(render->all, (VipsSListMap2Fn) tile_free, NULL, NULL);
 	VIPS_FREEF(g_slist_free, render->all);
 	render->ntiles = 0;
@@ -384,6 +395,7 @@ render_allocate(VipsThreadState *state, void *a, gboolean *stop)
 {
 	Render *render = (Render *) a;
 	RenderThreadState *rstate = (RenderThreadState *) state;
+
 	Tile *tile;
 
 	g_mutex_lock(&render->lock);
@@ -468,22 +480,28 @@ render_dirty_sort(Render *a, Render *b, void *user_data)
 static void
 render_dirty_put(Render *render)
 {
-	g_mutex_lock(&render_dirty_lock);
-
-	if (render->dirty) {
-		if (!g_slist_find(render_dirty_all, render)) {
-			render_dirty_all = g_slist_prepend(render_dirty_all, render);
-			render_dirty_all = g_slist_sort(render_dirty_all,
-				(GCompareFunc) render_dirty_sort);
+	if (render->private_threadpool)
+		// set our private renderer going
+		vips_semaphore_up(&render->dirty_sem);
+	else {
+		// add to the worklist for the global renderer
+		g_mutex_lock(&render_dirty_lock);
 
-			/* Tell the bg render thread we have one more dirty
-			 * render on there.
-			 */
-			vips_semaphore_up(&n_render_dirty_sem);
+		if (render->dirty) {
+			if (!g_slist_find(render_dirty_all, render)) {
+				render_dirty_all = g_slist_prepend(render_dirty_all, render);
+				render_dirty_all = g_slist_sort(render_dirty_all,
+					(GCompareFunc) render_dirty_sort);
+
+				/* Tell the bg render thread we have one more dirty
+				 * render on there.
+				 */
+				vips_semaphore_up(&n_render_dirty_sem);
+			}
 		}
-	}
 
-	g_mutex_unlock(&render_dirty_lock);
+		g_mutex_unlock(&render_dirty_lock);
+	}
 }
 
 static guint
@@ -519,13 +537,21 @@ render_close_cb(VipsImage *image, Render *render)
 	 */
 	render->shutdown = TRUE;
 
-	render_unref(render);
+	if (render->private_threadpool) {
+		/* Nudge the bg thread (if any) for this pool.
+		 */
+		VIPS_DEBUG_MSG_GREEN("render_close_cb: nudge private worker\n");
+		vips_semaphore_up(&render->dirty_sem);
+	}
+	else {
+		/* If this render is being worked on, we want to jog the bg thread,
+		 * make it drop its ref and think again.
+		 */
+		VIPS_DEBUG_MSG_GREEN("render_close_cb: reschedule\n");
+		render_reschedule = TRUE;
+	}
 
-	/* If this render is being worked on, we want to jog the bg thread,
-	 * make it drop it's ref and think again.
-	 */
-	VIPS_DEBUG_MSG_GREEN("render_close_cb: reschedule\n");
-	render_reschedule = TRUE;
+	render_unref(render);
 }
 
 static Render *
@@ -564,6 +590,11 @@ render_new(VipsImage *in, VipsImage *out, VipsImage *mask,
 	render->notify = notify;
 	render->a = a;
 
+	if (render->priority < 0) {
+		render->private_threadpool = TRUE;
+		vips_semaphore_init(&render->dirty_sem, 0, "dirty_sem");
+	}
+
 	g_mutex_init(&render->lock);
 
 	render->all = NULL;
@@ -1025,6 +1056,58 @@ render_thread_main(void *client)
 	return NULL;
 }
 
+static int
+render_allocate_private(VipsThreadState *state, void *a, gboolean *stop)
+{
+	Render *render = (Render *) a;
+	RenderThreadState *rstate = (RenderThreadState *) state;
+
+	/* Wait for a dirty tile to arrive on this render.
+	 */
+	vips_semaphore_down(&render->dirty_sem);
+
+	/* The mask or image is closing, we must exit.
+	 */
+	if (render->shutdown) {
+		*stop = TRUE;
+		rstate->tile = NULL;
+		return 0;
+	}
+
+	/* Get the dirty tile, if any.
+	 */
+	g_mutex_lock(&render->lock);
+	rstate->tile = render_tile_dirty_get(render);
+	VIPS_DEBUG_MSG_AMBER("render_allocate_private: allocated tile %p\n",
+		rstate->tile);
+	g_mutex_unlock(&render->lock);
+
+	return 0;
+}
+
+/* A worker for a private render.
+ */
+static void
+render_work_private(void *data, void *null)
+{
+	VIPS_DEBUG_MSG_AMBER("render_work_private: start\n");
+
+	Render *render = (Render *) data;
+
+	// this will quit on ->shutdown == TRUE
+	if (vips_threadpool_run(render->in,
+			render_thread_state_new,
+			render_allocate_private,
+			render_work,
+			NULL,
+			render))
+		VIPS_DEBUG_MSG_RED("render_work_private: threadpool_run failed\n");
+
+	render_unref(render);
+
+	VIPS_DEBUG_MSG_AMBER("render_work_private: stop\n");
+}
+
 static void *
 vips__sink_screen_once(void *data)
 {
@@ -1052,34 +1135,33 @@ vips__sink_screen_once(void *data)
  * @notify_fn: (scope call) (nullable): pixels are ready notification callback
  * @a: (closure notify_fn) (nullable): client data for callback
  *
- * This operation renders @in in the background, making pixels available on
- * @out as they are calculated. The @notify_fn callback is run every time a new
- * set of pixels are available. Calculated pixels are kept in a cache with
- * tiles sized @tile_width by @tile_height pixels and with at most @max_tiles
- * tiles.
- * If @max_tiles is -1, the cache is of unlimited size (up to the maximum image
- * size).
- * The @mask image is a one-band uchar image and has 255 for pixels which are
- * currently in cache and 0 for uncalculated pixels.
+ * This operation renders @in in the background, making pixels available
+ * on @out as they are calculated. The @notify_fn callback is run every
+ * time a new set of pixels are available. Calculated pixels are kept in
+ * a cache with tiles sized @tile_width by @tile_height pixels and with at
+ * most @max_tiles tiles. If @max_tiles is -1, the cache is of unlimited
+ * size (up to the maximum image size). The @mask image is a one-band
+ * uchar image and has 255 for pixels which are currently in cache and 0
+ * for uncalculated pixels.
+ *
+ * Renders with a positive priority are assumed to be large, high-priority,
+ * foreground images. Although there can be many of these, only one is ever
+ * active, to avoid overcommitting threads.
 *
- * Only a single sink is calculated at any one time, though many may be
- * alive. Use @priority to indicate which renders are more important:
- * zero means normal
- * priority, negative numbers are low priority, positive numbers high
- * priority.
+ * Renders with a negative priority are assumed to be small, thumbnail images
+ * consisting of a single tile. Single tile images are effectively
+ * single-threaded, so all these renders are evaluated together.
 *
 * Calls to vips_region_prepare() on @out return immediately and hold
- * whatever is
- * currently in cache for that #VipsRect (check @mask to see which parts of the
- * #VipsRect are valid). Any pixels in the #VipsRect which are not in
- * cache are added
- * to a queue, and the @notify_fn callback will trigger when those pixels are
- * ready.
+ * whatever is currently in cache for that #VipsRect (check @mask to see
+ * which parts of the #VipsRect are valid). Any pixels in the #VipsRect
+ * which are not in cache are added to a queue, and the @notify_fn
+ * callback will trigger when those pixels are ready.
 *
 * The @notify_fn callback is run from one of the background threads. In the
- * callback
- * you need to somehow send a message to the main thread that the pixels are
- * ready. In a glib-based application, this is easily done with g_idle_add().
+ * callback you need to somehow send a message to the main thread that the
+ * pixels are ready. In a glib-based application, this is easily done with
+ * g_idle_add().
 *
 * If @notify_fn is %NULL then vips_sink_screen() runs synchronously.
 * vips_region_prepare() on @out will always block until the pixels have been
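(Not part of the patch.) A hedged usage sketch for the behaviour documented above: the helper names and the tile/cache sizes are invented, but the vips_sink_screen() argument order and the g_idle_add() hand-off follow the doc comment.

/* Sketch only -- not part of the patch. render_notify(), render_update_idle()
 * and start_render() are invented names; 128/128/400 are example sizes.
 */
#include <vips/vips.h>

/* Runs on the GLib main loop via g_idle_add(): safe to poke the GUI here. */
static gboolean
render_update_idle(void *user_data)
{
	VipsRect *rect = (VipsRect *) user_data;

	g_print("pixels ready: %dx%d at %d x %d\n",
		rect->width, rect->height, rect->left, rect->top);
	g_free(rect);

	return FALSE;	/* one-shot idle handler */
}

/* Runs on a background render thread, so just forward the event. */
static void
render_notify(VipsImage *image, VipsRect *rect, void *client)
{
	/* Copy the rect before handing it to the main loop; the pointer we
	 * are given is only guaranteed valid for the duration of this call.
	 */
	VipsRect *copy = g_new(VipsRect, 1);

	*copy = *rect;
	g_idle_add(render_update_idle, copy);
}

static int
start_render(VipsImage *in, VipsImage *out, VipsImage *mask)
{
	/* A negative priority requests the private-threadpool path added by
	 * this patch; zero or positive uses the shared background render
	 * thread as before.
	 */
	return vips_sink_screen(in, out, mask,
		128, 128, 400, -1, render_notify, NULL);
}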
@@ -1129,6 +1211,11 @@ vips_sink_screen(VipsImage *in, VipsImage *out, VipsImage *mask,
 
 	VIPS_DEBUG_MSG("vips_sink_screen: max = %d, %p\n", max_tiles, render);
 
+	if (render->private_threadpool) {
+		render_ref(render);
+		vips_thread_execute("private threadpool", render_work_private, render);
+	}
+
 	if (vips_image_generate(out,
 			vips_start_one, image_fill, vips_stop_one, in, render))
 		return -1;