@@ -25,7 +25,6 @@
 
 struct acpi_gpio_event {
 	struct list_head node;
-	struct list_head initial_sync_list;
	acpi_handle handle;
 	unsigned int pin;
 	unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
 	struct mutex conn_lock;
 	struct gpio_chip *chip;
 	struct list_head events;
+	struct list_head deferred_req_irqs_list_entry;
 };
 
-static LIST_HEAD(acpi_gpio_initial_sync_list);
-static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+/*
+ * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * late_initcall_sync handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains gpiochips
+ * for which acpi_gpiochip_request_interrupts() has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
 
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
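The new globals above implement a simple defer-then-flush scheme: early callers are parked on a mutex-protected list, a one-time flush drains the list and sets a done flag, and callers arriving after the flush proceed immediately. A minimal userspace sketch of the same pattern, assuming pthreads; every name in it is illustrative rather than kernel API:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct chip {
	const char *name;
	struct chip *next;              /* link on the deferred list */
};

static pthread_mutex_t deferred_lock = PTHREAD_MUTEX_INITIALIZER;
static struct chip *deferred_list;      /* chips whose request was deferred */
static bool flush_done;                 /* set once the one-time flush has run */

static void do_request(struct chip *c)
{
	/* stands in for the acpi_walk_resources(handle, "_AEI", ...) walk */
	printf("requesting interrupts for %s\n", c->name);
}

/* Entry point for drivers: defer if the one-time flush has not run yet. */
static void request_interrupts(struct chip *c)
{
	bool defer;

	pthread_mutex_lock(&deferred_lock);
	defer = !flush_done;
	if (defer) {
		c->next = deferred_list;
		deferred_list = c;
	}
	pthread_mutex_unlock(&deferred_lock);

	if (!defer)
		do_request(c);          /* flush already ran: immediate path */
}

/* One-time flush: the analogue of the late_initcall_sync handler below. */
static void flush_deferred_requests(void)
{
	struct chip *c;

	pthread_mutex_lock(&deferred_lock);
	while ((c = deferred_list) != NULL) {
		deferred_list = c->next;
		do_request(c);
	}
	flush_done = true;              /* later callers take the immediate path */
	pthread_mutex_unlock(&deferred_lock);
}

int main(void)
{
	struct chip early = { .name = "early-builtin-chip" };
	struct chip late  = { .name = "late-loaded-chip" };

	request_interrupts(&early);     /* before the flush: deferred */
	flush_deferred_requests();      /* drains the list: requests "early-builtin-chip" */
	request_interrupts(&late);      /* after the flush: immediate */
	return 0;
}

Note that the defer decision and the list insertion happen under the same lock, and the flush sets the done flag before dropping that lock; otherwise a caller could observe the flag still clear and enqueue itself after the list had already been drained.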
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
 	return gpiochip_get_desc(chip, pin);
 }
 
-static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
-{
-	mutex_lock(&acpi_gpio_initial_sync_list_lock);
-	list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
-	mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
-static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
-{
-	mutex_lock(&acpi_gpio_initial_sync_list_lock);
-	if (!list_empty(&event->initial_sync_list))
-		list_del_init(&event->initial_sync_list);
-	mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
 	struct acpi_gpio_event *event = data;
@@ -186,7 +179,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 
 	gpiod_direction_input(desc);
 
-	value = gpiod_get_value(desc);
+	value = gpiod_get_value_cansleep(desc);
 
 	ret = gpiochip_lock_as_irq(chip, pin);
 	if (ret) {
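The switch to gpiod_get_value_cansleep() is consistent with the call site: acpi_gpiochip_request_interrupt() runs in process context (it is invoked from the acpi_walk_resources() walk), so the sleeping variant is permitted, and it makes the initial-value read work for GPIO chips whose accessors may sleep.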
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 	event->irq = irq;
 	event->pin = pin;
 	event->desc = desc;
-	INIT_LIST_HEAD(&event->initial_sync_list);
 
 	ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
 				   "ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
 	 * may refer to OperationRegions from other (builtin) drivers which
 	 * may be probed after us.
 	 */
-	if (handler == acpi_gpio_irq_handler &&
-	    (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-	     ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
-		acpi_gpio_add_to_initial_sync_list(event);
+	if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+	    ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+		handler(event->irq, event);
 
 	return AE_OK;
 
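With the initial-sync list gone, the hunk above handles an edge that may have fired before the IRQ was requested by invoking the handler once by hand whenever the pin already sits at the level matching its trigger. A compilable toy version of just that check, with made-up names and flag values (not the kernel's):

#include <stdio.h>

#define TRIGGER_RISING  0x1     /* stand-in for IRQF_TRIGGER_RISING */
#define TRIGGER_FALLING 0x2     /* stand-in for IRQF_TRIGGER_FALLING */

static void demo_handler(int irq, void *data)
{
	printf("handler ran for irq %d (%s)\n", irq, (const char *)data);
}

/*
 * Fire the handler once by hand when the line already sits at the level
 * whose edge we were asked to catch: that edge may have occurred before
 * the IRQ was requested and would otherwise be lost.
 */
static void sync_initial_state(unsigned int flags, int value,
			       void (*handler)(int, void *),
			       int irq, void *data)
{
	if (((flags & TRIGGER_RISING) && value == 1) ||
	    ((flags & TRIGGER_FALLING) && value == 0))
		handler(irq, data);
}

int main(void)
{
	sync_initial_state(TRIGGER_RISING, 1, demo_handler, 42, "already high");
	sync_initial_state(TRIGGER_RISING, 0, demo_handler, 42, "still low"); /* no call */
	return 0;
}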
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
 	struct acpi_gpio_chip *acpi_gpio;
 	acpi_handle handle;
 	acpi_status status;
+	bool defer;
 
 	if (!chip->parent || !chip->to_irq)
 		return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
 	if (ACPI_FAILURE(status))
 		return;
 
+	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+	defer = !acpi_gpio_deferred_req_irqs_done;
+	if (defer)
+		list_add(&acpi_gpio->deferred_req_irqs_list_entry,
+			 &acpi_gpio_deferred_req_irqs_list);
+	mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+	if (defer)
+		return;
+
 	acpi_walk_resources(handle, "_AEI",
 			    acpi_gpiochip_request_interrupt, acpi_gpio);
 }
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
 	if (ACPI_FAILURE(status))
 		return;
 
+	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+	if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
+		list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+	mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
 	list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
 		struct gpio_desc *desc;
 
-		acpi_gpio_del_from_initial_sync_list(event);
-
 		if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
 			disable_irq_wake(event->irq);
 
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
 
 	acpi_gpio->chip = chip;
 	INIT_LIST_HEAD(&acpi_gpio->events);
+	INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
 
 	status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
 	if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
 	return con_id == NULL;
 }
 
-/* Sync the initial state of handlers after all builtin drivers have probed */
-static int acpi_gpio_initial_sync(void)
+/* Run deferred acpi_gpiochip_request_interrupts() */
+static int acpi_gpio_handle_deferred_request_interrupts(void)
 {
-	struct acpi_gpio_event *event, *ep;
+	struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+	mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+	list_for_each_entry_safe(acpi_gpio, tmp,
+				 &acpi_gpio_deferred_req_irqs_list,
+				 deferred_req_irqs_list_entry) {
+		acpi_handle handle;
 
-	mutex_lock(&acpi_gpio_initial_sync_list_lock);
-	list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
-				 initial_sync_list) {
-		acpi_evaluate_object(event->handle, NULL, NULL, NULL);
-		list_del_init(&event->initial_sync_list);
+		handle = ACPI_HANDLE(acpi_gpio->chip->parent);
+		acpi_walk_resources(handle, "_AEI",
+				    acpi_gpiochip_request_interrupt, acpi_gpio);
+
+		list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
 	}
-	mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+	acpi_gpio_deferred_req_irqs_done = true;
+	mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
 
 	return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_initial_sync);
+late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
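Two design points worth noting. First, the old code deferred only the initial sync, re-evaluating each event method via acpi_evaluate_object() from the late initcall, while the IRQ handlers themselves were registered immediately; a real interrupt could therefore still run a handler before other builtin drivers had registered their OpRegions. Deferring the whole acpi_gpiochip_request_interrupts() walk closes that window, as the new comment block explains. Second, late_initcall_sync() is used rather than late_initcall() so that, per the in-diff comment, the deferred registration runs after the first deferred-probe pass.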