@@ -245,18 +245,29 @@ struct special_linkstruct {
245
245
246
246
typedef void *PySpecial_lock_type;

/* Number of scheduling priority levels; 0 is the most urgent and
   PRIOS-1 the least.  The value PRIOS itself is used as a "no waiters"
   sentinel in max_prio (see PySpecial_init / special_pick_candidate). */
#define PRIOS 3

/* After PRIO_OVERFLOW consecutive grants at one priority level, that
   level is skipped once so lower-priority waiters cannot starve. */
#define PRIO_OVERFLOW 10

typedef struct {
    PySpecial_lock_type the_lock;                  /* mutex guarding every field below */
    struct special_linkstruct *wait_queue[PRIOS];  /* FIFO head, one queue per priority */
    struct special_linkstruct *wait_last[PRIOS];   /* FIFO tail for each wait_queue[] */
    struct special_linkstruct *free_queue;         /* recycled link structures */
    struct special_linkstruct *current;            /* waiter currently holding the lock,
                                                      or NULL when the lock is free */
    int overflow[PRIOS];                           /* anti-starvation counters per level */
    int max_prio;                                  /* best (lowest-numbered) priority with
                                                      a queued waiter; PRIOS when none */
} PySpecialSemaphore;
252
257
253
258
void
254
259
PySpecial_init (PySpecialSemaphore * s )
255
260
{
261
+ int i ;
256
262
s -> the_lock = PyThread_mutex_alloc ();
257
- s -> wait_queue = NULL ;
258
- s -> wait_last = NULL ;
263
+ for (i = 0 ; i < PRIOS ; i ++ ) {
264
+ s -> wait_queue [i ] = NULL ;
265
+ s -> wait_last [i ] = NULL ;
266
+ s -> overflow [i ] = 0 ;
267
+ }
259
268
s -> free_queue = NULL ;
269
+ s -> current = NULL ;
270
+ s -> max_prio = PRIOS ;
260
271
}
261
272
262
273
static PySpecialSemaphore * interpreter_lock = NULL ; /* This is the GIL */
@@ -286,10 +297,54 @@ static struct special_linkstruct *allocate_special_linkstruct(void)
286
297
return ls ;
287
298
}
288
299
289
- static void PySpecial_Lock (PySpecialSemaphore * s )
300
+ static struct special_linkstruct * special_pick_candidate (PySpecialSemaphore * s )
301
+ {
302
+ struct special_linkstruct * next = NULL ;
303
+
304
+ if (s -> max_prio == PRIOS )
305
+ return NULL ;
306
+
307
+ int m = PRIOS ;
308
+ int p = s -> max_prio ;
309
+ for (; p < PRIOS ; p ++ ) {
310
+ if (s -> wait_queue [p ]) {
311
+ if (p < m )
312
+ m = p ;
313
+ if (++ s -> overflow [p ] < PRIO_OVERFLOW ) {
314
+ next = s -> wait_queue [p ];
315
+ break ;
316
+ }
317
+ while (s -> overflow [p ] >= PRIO_OVERFLOW )
318
+ s -> overflow [p ] -= PRIO_OVERFLOW ;
319
+ }
320
+ }
321
+ /* might have skipped a candidate because of the overflow counters */
322
+ if (m < PRIOS && !next ) {
323
+ next = s -> wait_queue [m ];
324
+ p = m ;
325
+ }
326
+
327
+ if (next ) {
328
+ s -> current = next ;
329
+ s -> wait_queue [p ] = next -> queue_next ;
330
+ next -> queue_next = NULL ;
331
+ PyThread_cond_signal (next -> wait );
332
+ }
333
+
334
+ /* update exact max_prio */
335
+ for (; m < PRIOS ; m ++ )
336
+ if (s -> wait_queue [m ])
337
+ break ;
338
+ s -> max_prio = m ;
339
+
340
+ return next ;
341
+ }
342
+
343
+ static void PySpecial_Lock (PySpecialSemaphore * s , int prio )
290
344
{
291
345
struct special_linkstruct * ls ;
292
346
347
+ assert (prio >= 0 && prio <= PRIOS );
293
348
PyThread_mutex_lock (s -> the_lock );
294
349
295
350
if (!s -> free_queue )
@@ -298,43 +353,62 @@ static void PySpecial_Lock(PySpecialSemaphore *s)
298
353
ls = s -> free_queue ;
299
354
s -> free_queue = ls -> free_next ;
300
355
301
- if (!s -> wait_queue )
302
- {
303
- ls -> in_use = 1 ;
304
- s -> wait_queue = ls ;
305
- s -> wait_last = ls ;
306
- PyThread_mutex_unlock (s -> the_lock );
307
- return ;
356
+ if (!s -> current ) {
357
+ /* lock is free, find someone to run */
358
+ struct special_linkstruct * next = NULL ;
359
+ if (s -> max_prio > prio && (s -> max_prio == PRIOS || ++ s -> overflow [prio ] < PRIO_OVERFLOW ))
360
+ ;
361
+ else {
362
+ while (s -> overflow [prio ] >= PRIO_OVERFLOW )
363
+ s -> overflow [prio ] -= PRIO_OVERFLOW ;
364
+ /* find and schedule another candidate */
365
+ next = special_pick_candidate (s );
366
+ }
367
+
368
+ if (!next ) {
369
+ /* noone else there - we can run */
370
+ assert (!ls -> in_use );
371
+ ls -> in_use = 1 ;
372
+ s -> current = ls ;
373
+ PyThread_mutex_unlock (s -> the_lock );
374
+ return ;
375
+ }
308
376
}
309
377
310
- assert (s -> wait_queue != ls );
311
- assert (s -> wait_last != ls );
312
- assert (s -> wait_last -> queue_next == NULL );
313
- assert (!ls -> in_use );
314
- s -> wait_last -> queue_next = ls ;
315
- s -> wait_last = ls ;
378
+ /* we need to wait */
379
+ assert (s -> wait_queue [prio ] != ls );
380
+ if (s -> wait_queue [prio ]) {
381
+ assert (s -> wait_last [prio ] != ls );
382
+ assert (s -> wait_last [prio ]-> queue_next == NULL );
383
+ s -> wait_last [prio ]-> queue_next = ls ;
384
+ s -> wait_last [prio ] = ls ;
385
+ } else {
386
+ s -> wait_queue [prio ] = ls ;
387
+ s -> wait_last [prio ] = ls ;
388
+ }
316
389
ls -> in_use = 1 ;
390
+ if (prio < s -> max_prio )
391
+ s -> max_prio = prio ;
317
392
318
- while (s -> wait_queue != ls )
393
+ while (s -> current != ls )
319
394
PyThread_cond_wait (ls -> wait , s -> the_lock );
320
395
321
396
PyThread_mutex_unlock (s -> the_lock );
322
397
}
323
398
324
399
static void PySpecial_Unlock (PySpecialSemaphore * s )
325
400
{
326
- struct special_linkstruct * ls ;
401
+ struct special_linkstruct * ls , * next ;
327
402
328
403
PyThread_mutex_lock (s -> the_lock );
329
- ls = s -> wait_queue ;
404
+ ls = s -> current ;
330
405
assert (ls -> in_use );
331
406
332
- s -> wait_queue = ls -> queue_next ;
333
- if (s -> wait_queue )
334
- {
335
- ls -> queue_next = NULL ;
336
- PyThread_cond_signal (s -> wait_queue -> wait );
337
- }
407
+ next = special_pick_candidate (s );
408
+
409
+ if (!next )
410
+ s -> current = NULL ;
411
+
338
412
ls -> in_use = 0 ;
339
413
340
414
ls -> free_next = s -> free_queue ;
@@ -349,14 +423,14 @@ PyEval_InitThreads(void)
349
423
if (interpreter_lock )
350
424
return ;
351
425
interpreter_lock = allocate_special ();
352
- PySpecial_Lock (interpreter_lock );
426
+ PySpecial_Lock (interpreter_lock , 0 );
353
427
main_thread = PyThread_get_thread_ident ();
354
428
}
355
429
356
430
/*
 * Acquire the GIL without a thread state.  No tstate is available
 * here, so a fixed middle priority (1) is used, between the main
 * thread's 0 (see PyEval_InitThreads) and the lowest level;
 * PyEval_AcquireThread() passes tstate->thread_prio instead.
 * NOTE(review): callers that do own a tstate should prefer
 * PyEval_AcquireThread() so their real priority takes effect.
 */
void
PyEval_AcquireLock(void)
{
    PySpecial_Lock(interpreter_lock, 1);
}
361
435
362
436
void
@@ -372,7 +446,7 @@ PyEval_AcquireThread(PyThreadState *tstate)
372
446
Py_FatalError ("PyEval_AcquireThread: NULL new thread state" );
373
447
/* Check someone has called PyEval_InitThreads() to create the lock */
374
448
assert (interpreter_lock );
375
- PySpecial_Lock (interpreter_lock );
449
+ PySpecial_Lock (interpreter_lock , tstate -> thread_prio );
376
450
if (PyThreadState_Swap (tstate ) != NULL )
377
451
Py_FatalError (
378
452
"PyEval_AcquireThread: non-NULL old thread state" );
@@ -407,7 +481,7 @@ PyEval_ReInitThreads(void)
407
481
create a new lock and waste a little bit of memory */
408
482
interpreter_lock = allocate_special ();
409
483
pending_lock = PyThread_allocate_lock ();
410
- PySpecial_Lock (interpreter_lock );
484
+ PySpecial_Lock (interpreter_lock , 0 );
411
485
main_thread = PyThread_get_thread_ident ();
412
486
413
487
/* Update the threading module with the new state.
@@ -454,7 +528,7 @@ PyEval_RestoreThread(PyThreadState *tstate)
454
528
#ifdef WITH_THREAD
455
529
if (interpreter_lock ) {
456
530
int err = errno ;
457
- PySpecial_Lock (interpreter_lock );
531
+ PySpecial_Lock (interpreter_lock , tstate -> thread_prio );
458
532
errno = err ;
459
533
}
460
534
#endif
@@ -760,6 +834,8 @@ static int _Py_TracingPossible = 0;
760
834
per thread, now just a pair o' globals */
761
835
/* Number of bytecode "ticks" between periodic checks in the eval loop. */
int _Py_CheckInterval = 100;
volatile int _Py_Ticker = 0; /* so that we hit a "tick" first thing */
/* Rate-limiting counters for the priority-aware GIL hand-off in
   PyEval_EvalFrameEx: both count ticks modulo PRIO_OVERFLOW. */
int _ticker_overflow = 0; /* for switching to higher-prio thread */
int _ticker_overflow_lower = 0; /* for switching to lower-prio thread */
763
839
764
840
PyObject *
765
841
PyEval_EvalCode (PyCodeObject * co , PyObject * globals , PyObject * locals )
@@ -1217,23 +1293,29 @@ PyEval_EvalFrameEx(PyFrameObject *f, int throwflag)
1217
1293
_Py_Ticker = 0 ;
1218
1294
}
1219
1295
#ifdef WITH_THREAD
1220
- if (interpreter_lock && interpreter_lock -> wait_queue ) {
1221
- /* Give another thread a chance */
1296
+ if (interpreter_lock &&
1297
+ (interpreter_lock -> max_prio < tstate -> thread_prio && ++ _ticker_overflow < PRIO_OVERFLOW ||
1298
+ interpreter_lock -> max_prio != PRIOS && ++ _ticker_overflow_lower >= PRIO_OVERFLOW )) {
1299
+ /* Give another thread a chance */
1222
1300
1223
1301
if (PyThreadState_Swap (NULL ) != tstate )
1224
1302
Py_FatalError ("ceval: tstate mix-up" );
1225
1303
PySpecial_Unlock (interpreter_lock );
1226
1304
1227
1305
/* Other threads may run now */
1228
1306
1229
- PySpecial_Lock (interpreter_lock );
1307
+ PySpecial_Lock (interpreter_lock , tstate -> thread_prio );
1230
1308
1231
1309
if (PyThreadState_Swap (tstate ) != NULL )
1232
1310
Py_FatalError ("ceval: orphan tstate" );
1233
1311
1234
1312
}
1235
1313
1236
- if (interpreter_lock ) {
1314
+ if (interpreter_lock ) {
1315
+ while (_ticker_overflow >= PRIO_OVERFLOW )
1316
+ _ticker_overflow -= PRIO_OVERFLOW ;
1317
+ while (_ticker_overflow_lower >= PRIO_OVERFLOW )
1318
+ _ticker_overflow_lower -= PRIO_OVERFLOW ;
1237
1319
/* Check for thread interrupts */
1238
1320
1239
1321
if (tstate -> async_exc != NULL ) {
0 commit comments