19
19
20
20
#include "gpiolib.h"
21
21
22
+ /**
23
+ * struct acpi_gpio_event - ACPI GPIO event handler data
24
+ *
25
+ * @node: list-entry of the events list of the struct acpi_gpio_chip
26
+ * @handle: handle of ACPI method to execute when the IRQ triggers
27
+ * @handler: irq_handler to pass to request_irq when requesting the IRQ
28
+ * @pin: GPIO pin number on the gpio_chip
29
+ * @irq: Linux IRQ number for the event, for request_ / free_irq
30
+ * @irqflags: flags to pass to request_irq when requesting the IRQ
31
+ * @irq_is_wake: If the ACPI flags indicate the IRQ is a wakeup source
32
+ * @is_requested: True if request_irq has been done
33
+ * @desc: gpio_desc for the GPIO pin for this event
34
+ */
22
35
struct acpi_gpio_event {
23
36
struct list_head node ;
24
37
acpi_handle handle ;
38
+ irq_handler_t handler ;
25
39
unsigned int pin ;
26
40
unsigned int irq ;
41
+ unsigned long irqflags ;
42
+ bool irq_is_wake ;
43
+ bool irq_requested ;
27
44
struct gpio_desc * desc ;
28
45
};
29
46
@@ -49,10 +66,10 @@ struct acpi_gpio_chip {
49
66
50
67
/*
 * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
 * (so builtin drivers) we register the ACPI GpioInt IRQ handlers from a
 * late_initcall_sync handler, so that other builtin drivers can register their
 * OpRegions before the event handlers can run. This list contains gpiochips
 * for which the acpi_gpiochip_request_irqs() call has been deferred.
 */
static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
@@ -133,8 +150,42 @@ bool acpi_gpio_get_irq_resource(struct acpi_resource *ares,
133
150
}
134
151
EXPORT_SYMBOL_GPL (acpi_gpio_get_irq_resource );
135
152
136
- static acpi_status acpi_gpiochip_request_interrupt (struct acpi_resource * ares ,
137
- void * context )
153
+ static void acpi_gpiochip_request_irq (struct acpi_gpio_chip * acpi_gpio ,
154
+ struct acpi_gpio_event * event )
155
+ {
156
+ int ret , value ;
157
+
158
+ ret = request_threaded_irq (event -> irq , NULL , event -> handler ,
159
+ event -> irqflags , "ACPI:Event" , event );
160
+ if (ret ) {
161
+ dev_err (acpi_gpio -> chip -> parent ,
162
+ "Failed to setup interrupt handler for %d\n" ,
163
+ event -> irq );
164
+ return ;
165
+ }
166
+
167
+ if (event -> irq_is_wake )
168
+ enable_irq_wake (event -> irq );
169
+
170
+ event -> irq_requested = true;
171
+
172
+ /* Make sure we trigger the initial state of edge-triggered IRQs */
173
+ value = gpiod_get_raw_value_cansleep (event -> desc );
174
+ if (((event -> irqflags & IRQF_TRIGGER_RISING ) && value == 1 ) ||
175
+ ((event -> irqflags & IRQF_TRIGGER_FALLING ) && value == 0 ))
176
+ event -> handler (event -> irq , event );
177
+ }
178
+
179
+ static void acpi_gpiochip_request_irqs (struct acpi_gpio_chip * acpi_gpio )
180
+ {
181
+ struct acpi_gpio_event * event ;
182
+
183
+ list_for_each_entry (event , & acpi_gpio -> events , node )
184
+ acpi_gpiochip_request_irq (acpi_gpio , event );
185
+ }
186
+
187
+ static acpi_status acpi_gpiochip_alloc_event (struct acpi_resource * ares ,
188
+ void * context )
138
189
{
139
190
struct acpi_gpio_chip * acpi_gpio = context ;
140
191
struct gpio_chip * chip = acpi_gpio -> chip ;
@@ -143,8 +194,7 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
143
194
struct acpi_gpio_event * event ;
144
195
irq_handler_t handler = NULL ;
145
196
struct gpio_desc * desc ;
146
- unsigned long irqflags ;
147
- int ret , pin , irq , value ;
197
+ int ret , pin , irq ;
148
198
149
199
if (!acpi_gpio_get_irq_resource (ares , & agpio ))
150
200
return AE_OK ;
@@ -175,8 +225,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
175
225
176
226
gpiod_direction_input (desc );
177
227
178
- value = gpiod_get_value_cansleep (desc );
179
-
180
228
ret = gpiochip_lock_as_irq (chip , pin );
181
229
if (ret ) {
182
230
dev_err (chip -> parent , "Failed to lock GPIO as interrupt\n" );
@@ -189,64 +237,42 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
189
237
goto fail_unlock_irq ;
190
238
}
191
239
192
- irqflags = IRQF_ONESHOT ;
240
+ event = kzalloc (sizeof (* event ), GFP_KERNEL );
241
+ if (!event )
242
+ goto fail_unlock_irq ;
243
+
244
+ event -> irqflags = IRQF_ONESHOT ;
193
245
if (agpio -> triggering == ACPI_LEVEL_SENSITIVE ) {
194
246
if (agpio -> polarity == ACPI_ACTIVE_HIGH )
195
- irqflags |= IRQF_TRIGGER_HIGH ;
247
+ event -> irqflags |= IRQF_TRIGGER_HIGH ;
196
248
else
197
- irqflags |= IRQF_TRIGGER_LOW ;
249
+ event -> irqflags |= IRQF_TRIGGER_LOW ;
198
250
} else {
199
251
switch (agpio -> polarity ) {
200
252
case ACPI_ACTIVE_HIGH :
201
- irqflags |= IRQF_TRIGGER_RISING ;
253
+ event -> irqflags |= IRQF_TRIGGER_RISING ;
202
254
break ;
203
255
case ACPI_ACTIVE_LOW :
204
- irqflags |= IRQF_TRIGGER_FALLING ;
256
+ event -> irqflags |= IRQF_TRIGGER_FALLING ;
205
257
break ;
206
258
default :
207
- irqflags |= IRQF_TRIGGER_RISING |
208
- IRQF_TRIGGER_FALLING ;
259
+ event -> irqflags |= IRQF_TRIGGER_RISING |
260
+ IRQF_TRIGGER_FALLING ;
209
261
break ;
210
262
}
211
263
}
212
264
213
- event = kzalloc (sizeof (* event ), GFP_KERNEL );
214
- if (!event )
215
- goto fail_unlock_irq ;
216
-
217
265
event -> handle = evt_handle ;
266
+ event -> handler = handler ;
218
267
event -> irq = irq ;
268
+ event -> irq_is_wake = agpio -> wake_capable == ACPI_WAKE_CAPABLE ;
219
269
event -> pin = pin ;
220
270
event -> desc = desc ;
221
271
222
- ret = request_threaded_irq (event -> irq , NULL , handler , irqflags ,
223
- "ACPI:Event" , event );
224
- if (ret ) {
225
- dev_err (chip -> parent ,
226
- "Failed to setup interrupt handler for %d\n" ,
227
- event -> irq );
228
- goto fail_free_event ;
229
- }
230
-
231
- if (agpio -> wake_capable == ACPI_WAKE_CAPABLE )
232
- enable_irq_wake (irq );
233
-
234
272
list_add_tail (& event -> node , & acpi_gpio -> events );
235
273
236
- /*
237
- * Make sure we trigger the initial state of the IRQ when using RISING
238
- * or FALLING. Note we run the handlers on late_init, the AML code
239
- * may refer to OperationRegions from other (builtin) drivers which
240
- * may be probed after us.
241
- */
242
- if (((irqflags & IRQF_TRIGGER_RISING ) && value == 1 ) ||
243
- ((irqflags & IRQF_TRIGGER_FALLING ) && value == 0 ))
244
- handler (event -> irq , event );
245
-
246
274
return AE_OK ;
247
275
248
- fail_free_event :
249
- kfree (event );
250
276
fail_unlock_irq :
251
277
gpiochip_unlock_as_irq (chip , pin );
252
278
fail_free_desc :
@@ -283,6 +309,9 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
283
309
if (ACPI_FAILURE (status ))
284
310
return ;
285
311
312
+ acpi_walk_resources (handle , "_AEI" ,
313
+ acpi_gpiochip_alloc_event , acpi_gpio );
314
+
286
315
mutex_lock (& acpi_gpio_deferred_req_irqs_lock );
287
316
defer = !acpi_gpio_deferred_req_irqs_done ;
288
317
if (defer )
@@ -293,8 +322,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
293
322
if (defer )
294
323
return ;
295
324
296
- acpi_walk_resources (handle , "_AEI" ,
297
- acpi_gpiochip_request_interrupt , acpi_gpio );
325
+ acpi_gpiochip_request_irqs (acpi_gpio );
298
326
}
299
327
EXPORT_SYMBOL_GPL (acpi_gpiochip_request_interrupts );
300
328
@@ -331,10 +359,13 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
331
359
list_for_each_entry_safe_reverse (event , ep , & acpi_gpio -> events , node ) {
332
360
struct gpio_desc * desc ;
333
361
334
- if (irqd_is_wakeup_set (irq_get_irq_data (event -> irq )))
335
- disable_irq_wake (event -> irq );
362
+ if (event -> irq_requested ) {
363
+ if (event -> irq_is_wake )
364
+ disable_irq_wake (event -> irq );
365
+
366
+ free_irq (event -> irq , event );
367
+ }
336
368
337
- free_irq (event -> irq , event );
338
369
desc = event -> desc ;
339
370
if (WARN_ON (IS_ERR (desc )))
340
371
continue ;
@@ -1200,28 +1231,21 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
1200
1231
return con_id == NULL ;
1201
1232
}
1202
1233
1203
- /* Run deferred acpi_gpiochip_request_interrupts () */
1204
- static int acpi_gpio_handle_deferred_request_interrupts (void )
1234
+ /* Run deferred acpi_gpiochip_request_irqs () */
1235
+ static int acpi_gpio_handle_deferred_request_irqs (void )
1205
1236
{
1206
1237
struct acpi_gpio_chip * acpi_gpio , * tmp ;
1207
1238
1208
1239
mutex_lock (& acpi_gpio_deferred_req_irqs_lock );
1209
1240
list_for_each_entry_safe (acpi_gpio , tmp ,
1210
1241
& acpi_gpio_deferred_req_irqs_list ,
1211
- deferred_req_irqs_list_entry ) {
1212
- acpi_handle handle ;
1213
-
1214
- handle = ACPI_HANDLE (acpi_gpio -> chip -> parent );
1215
- acpi_walk_resources (handle , "_AEI" ,
1216
- acpi_gpiochip_request_interrupt , acpi_gpio );
1217
-
1218
- list_del_init (& acpi_gpio -> deferred_req_irqs_list_entry );
1219
- }
1242
+ deferred_req_irqs_list_entry )
1243
+ acpi_gpiochip_request_irqs (acpi_gpio );
1220
1244
1221
1245
acpi_gpio_deferred_req_irqs_done = true;
1222
1246
mutex_unlock (& acpi_gpio_deferred_req_irqs_lock );
1223
1247
1224
1248
return 0 ;
1225
1249
}
1226
1250
/* We must use _sync so that this runs after the first deferred_probe run */
1227
- late_initcall_sync (acpi_gpio_handle_deferred_request_interrupts );
1251
+ late_initcall_sync (acpi_gpio_handle_deferred_request_irqs );
0 commit comments