 // sources such as interrupts and UNIX signal handlers).
 void MICROPY_WRAP_MP_SCHED_EXCEPTION(mp_sched_exception)(mp_obj_t exc) {
     MP_STATE_MAIN_THREAD(mp_pending_exception) = exc;
-    #if MICROPY_ENABLE_SCHEDULER
+
+    #if MICROPY_ENABLE_SCHEDULER && !MICROPY_PY_THREAD
+    // Optimisation for the case where we have scheduler but no threading.
+    // Allows the VM to do a single check to exclude both pending exception
+    // and queued tasks.
     if (MP_STATE_VM(sched_state) == MP_SCHED_IDLE) {
         MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
     }
@@ -62,33 +66,19 @@ static inline bool mp_sched_empty(void) {
     return mp_sched_num_pending() == 0;
 }
 
-// A variant of this is inlined in the VM at the pending exception check
-void mp_handle_pending(bool raise_exc) {
-    if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
-        mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
-        // Re-check state is still pending now that we're in the atomic section.
-        if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
-            mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
-            if (obj != MP_OBJ_NULL) {
-                MP_STATE_THREAD(mp_pending_exception) = MP_OBJ_NULL;
-                if (!mp_sched_num_pending()) {
-                    MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
-                }
-                if (raise_exc) {
-                    MICROPY_END_ATOMIC_SECTION(atomic_state);
-                    nlr_raise(obj);
-                }
-            }
-            mp_handle_pending_tail(atomic_state);
-        } else {
-            MICROPY_END_ATOMIC_SECTION(atomic_state);
-        }
+// This function should only be called by mp_handle_pending, or by the VM's
+// inlined version, after checking that the scheduler state is pending.
+void mp_sched_run_pending(void) {
+    mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
+    if (MP_STATE_VM(sched_state) != MP_SCHED_PENDING) {
+        // Something else (e.g. hard IRQ) locked the scheduler while we
+        // acquired the lock.
+        MICROPY_END_ATOMIC_SECTION(atomic_state);
+        return;
     }
-}
 
-// This function should only be called by mp_handle_pending,
-// or by the VM's inlined version of that function.
-void mp_handle_pending_tail(mp_uint_t atomic_state) {
+    // Equivalent to mp_sched_lock(), but we're already in the atomic
+    // section and know that we're pending.
     MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;
 
     #if MICROPY_SCHEDULER_STATIC_NODES
@@ -118,14 +108,21 @@ void mp_handle_pending_tail(mp_uint_t atomic_state) {
         MICROPY_END_ATOMIC_SECTION(atomic_state);
     }
 
+    // Restore MP_STATE_VM(sched_state) to idle (or pending if there are still
+    // tasks in the queue).
     mp_sched_unlock();
 }
 
+// Locking the scheduler prevents tasks from executing (does not prevent new
+// tasks from being added). We lock the scheduler while executing scheduled
+// tasks and also in hard interrupts or GC finalisers.
 void mp_sched_lock(void) {
     mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
     if (MP_STATE_VM(sched_state) < 0) {
+        // Already locked, increment lock (recursive lock).
         --MP_STATE_VM(sched_state);
     } else {
+        // Pending or idle.
         MP_STATE_VM(sched_state) = MP_SCHED_LOCKED;
     }
     MICROPY_END_ATOMIC_SECTION(atomic_state);
@@ -135,12 +132,17 @@ void mp_sched_unlock(void) {
     mp_uint_t atomic_state = MICROPY_BEGIN_ATOMIC_SECTION();
     assert(MP_STATE_VM(sched_state) < 0);
     if (++MP_STATE_VM(sched_state) == 0) {
-        // vm became unlocked
-        if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL
+        // Scheduler became unlocked. Check if there are still tasks in the
+        // queue and set sched_state accordingly.
+        if (
+            #if !MICROPY_PY_THREAD
+            // See optimisation in mp_sched_exception.
+            MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL ||
+            #endif
             #if MICROPY_SCHEDULER_STATIC_NODES
-            || MP_STATE_VM(sched_head) != NULL
+            MP_STATE_VM(sched_head) != NULL ||
             #endif
-            || mp_sched_num_pending()) {
+            mp_sched_num_pending()) {
             MP_STATE_VM(sched_state) = MP_SCHED_PENDING;
         } else {
             MP_STATE_VM(sched_state) = MP_SCHED_IDLE;
@@ -196,9 +198,9 @@ bool mp_sched_schedule_node(mp_sched_node_t *node, mp_sched_callback_t callback)
 }
 #endif
 
-#else // MICROPY_ENABLE_SCHEDULER
+#endif // MICROPY_ENABLE_SCHEDULER
 
-// A variant of this is inlined in the VM at the pending exception check
+// This is also inlined in the VM at the pending exception check.
 void mp_handle_pending(bool raise_exc) {
     if (MP_STATE_THREAD(mp_pending_exception) != MP_OBJ_NULL) {
         mp_obj_t obj = MP_STATE_THREAD(mp_pending_exception);
@@ -207,6 +209,9 @@ void mp_handle_pending(bool raise_exc) {
             nlr_raise(obj);
         }
     }
+    #if MICROPY_ENABLE_SCHEDULER
+    if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
+        mp_sched_run_pending();
+    }
+    #endif
 }
-
-#endif // MICROPY_ENABLE_SCHEDULER
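
The point of the `!MICROPY_PY_THREAD` case is that `mp_sched_exception()` also flips `sched_state` to `MP_SCHED_PENDING`, so on single-threaded builds the interpreter's periodic check can collapse to one comparison. The following is an illustrative sketch of such a check, not the actual py/vm.c source; the function name `example_pending_check` is hypothetical.

#include "py/mpstate.h"
#include "py/runtime.h"

// Sketch, assuming a build with MICROPY_ENABLE_SCHEDULER and without
// MICROPY_PY_THREAD: one comparison covers both a pending exception and
// queued tasks, because mp_sched_exception() sets sched_state to
// MP_SCHED_PENDING as well.
static void example_pending_check(void) {
    if (MP_STATE_VM(sched_state) == MP_SCHED_PENDING) {
        // Raises the pending exception (if any) and runs queued tasks.
        mp_handle_pending(true);
    }
}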
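For context on how entries reach the queue that `mp_sched_run_pending()` drains: a port or driver typically calls `mp_sched_schedule()` from interrupt context to defer a Python callback. A minimal sketch under that assumption follows; the handler and variable names are hypothetical and not part of this change.

#include "py/runtime.h"

// Hypothetical driver state: the Python callback registered by user code.
static mp_obj_t example_irq_callback = MP_OBJ_NULL;

// Hypothetical hard-IRQ handler: defer the callback to the scheduler rather
// than calling into Python from interrupt context.
void example_irq_handler(void) {
    if (example_irq_callback != MP_OBJ_NULL) {
        // mp_sched_schedule() queues the callback for the VM to run later;
        // it returns false if the queue is full.
        if (!mp_sched_schedule(example_irq_callback, mp_const_none)) {
            // Queue full: the port decides whether to drop or flag the event.
        }
    }
}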