
Commit 78d3a92

jwrdegoede authored and linusw committed
gpiolib-acpi: Register GpioInt ACPI event handlers from a late_initcall
GpioInt ACPI event handlers may see their IRQ triggered immediately after requesting the IRQ (especially level-triggered ones). This means they may run before any other (builtin) drivers have had a chance to register their OpRegion handlers, leading to errors like this:

[    1.133274] ACPI Error: No handler for Region [PMOP] ((____ptrval____)) [UserDefinedRegion] (20180531/evregion-132)
[    1.133286] ACPI Error: Region UserDefinedRegion (ID=141) has no handler (20180531/exfldio-265)
[    1.133297] ACPI Error: Method parse/execution failed \_SB.GPO2._L01, AE_NOT_EXIST (20180531/psparse-516)

We already defer the manual initial trigger of edge-triggered interrupts by running it from a late_initcall handler; this commit instead defers the entire acpi_gpiochip_request_interrupts() call until then, fixing the problem of some OpRegions not being registered yet.

Note that this removes the need to keep a list of edge-triggered handlers that still need to run: since the entire acpi_gpiochip_request_interrupts() call is now delayed, acpi_gpiochip_request_interrupt() can call these handlers directly.

Acked-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Signed-off-by: Hans de Goede <hdegoede@redhat.com>
Signed-off-by: Linus Walleij <linus.walleij@linaro.org>
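In outline, the fix works as follows. The sketch below is a minimal illustration of the deferral pattern in kernel-style C; the names my_chip, request_interrupts(), do_request_interrupts() and flush_deferred_requests() are illustrative stand-ins, not the actual gpiolib-acpi symbols, which appear in the diff below.

#include <linux/init.h>
#include <linux/list.h>
#include <linux/mutex.h>

/* Illustrative stand-in for struct acpi_gpio_chip */
struct my_chip {
        struct list_head deferred_entry;
};

static DEFINE_MUTEX(deferred_lock);
static LIST_HEAD(deferred_list);        /* chips whose IRQ request was deferred */
static bool deferred_done;              /* true once the late_initcall has run */

/* Stand-in for the _AEI walk that actually requests the GpioInt IRQs */
static void do_request_interrupts(struct my_chip *chip)
{
        /* acpi_walk_resources(handle, "_AEI", ...) in the real driver */
}

void request_interrupts(struct my_chip *chip)
{
        bool defer;

        mutex_lock(&deferred_lock);
        defer = !deferred_done;         /* called before late_init? queue it */
        if (defer)
                list_add(&chip->deferred_entry, &deferred_list);
        mutex_unlock(&deferred_lock);

        if (defer)
                return;                 /* OpRegions may be missing; run later */

        do_request_interrupts(chip);    /* late enough: request IRQs right away */
}

/* Flush deferred requests once builtin drivers have registered their OpRegions */
static int flush_deferred_requests(void)
{
        struct my_chip *chip, *tmp;

        mutex_lock(&deferred_lock);
        list_for_each_entry_safe(chip, tmp, &deferred_list, deferred_entry) {
                do_request_interrupts(chip);
                list_del_init(&chip->deferred_entry);
        }
        deferred_done = true;           /* later callers no longer defer */
        mutex_unlock(&deferred_lock);

        return 0;
}
/* _sync so this runs after the first deferred_probe pass, as in the commit */
late_initcall_sync(flush_deferred_requests);

The done flag turns the late_initcall into a one-time barrier: gpiochips registered before it get their _AEI walk from the flush loop, while anything registered afterwards requests its interrupts immediately.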
1 parent 993b9bc commit 78d3a92

File tree

1 file changed: +49 −35 lines changed


drivers/gpio/gpiolib-acpi.c

Lines changed: 49 additions & 35 deletions
@@ -25,7 +25,6 @@
 
 struct acpi_gpio_event {
         struct list_head node;
-        struct list_head initial_sync_list;
         acpi_handle handle;
         unsigned int pin;
         unsigned int irq;
@@ -49,10 +48,19 @@ struct acpi_gpio_chip {
         struct mutex conn_lock;
         struct gpio_chip *chip;
         struct list_head events;
+        struct list_head deferred_req_irqs_list_entry;
 };
 
-static LIST_HEAD(acpi_gpio_initial_sync_list);
-static DEFINE_MUTEX(acpi_gpio_initial_sync_list_lock);
+/*
+ * For gpiochips which call acpi_gpiochip_request_interrupts() before late_init
+ * (so builtin drivers) we register the ACPI GpioInt event handlers from a
+ * late_initcall_sync handler, so that other builtin drivers can register their
+ * OpRegions before the event handlers can run. This list contains gpiochips
+ * for which the acpi_gpiochip_request_interrupts() has been deferred.
+ */
+static DEFINE_MUTEX(acpi_gpio_deferred_req_irqs_lock);
+static LIST_HEAD(acpi_gpio_deferred_req_irqs_list);
+static bool acpi_gpio_deferred_req_irqs_done;
 
 static int acpi_gpiochip_find(struct gpio_chip *gc, void *data)
 {
@@ -89,21 +97,6 @@ static struct gpio_desc *acpi_get_gpiod(char *path, int pin)
         return gpiochip_get_desc(chip, pin);
 }
 
-static void acpi_gpio_add_to_initial_sync_list(struct acpi_gpio_event *event)
-{
-        mutex_lock(&acpi_gpio_initial_sync_list_lock);
-        list_add(&event->initial_sync_list, &acpi_gpio_initial_sync_list);
-        mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
-static void acpi_gpio_del_from_initial_sync_list(struct acpi_gpio_event *event)
-{
-        mutex_lock(&acpi_gpio_initial_sync_list_lock);
-        if (!list_empty(&event->initial_sync_list))
-                list_del_init(&event->initial_sync_list);
-        mutex_unlock(&acpi_gpio_initial_sync_list_lock);
-}
-
 static irqreturn_t acpi_gpio_irq_handler(int irq, void *data)
 {
         struct acpi_gpio_event *event = data;
@@ -229,7 +222,6 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
         event->irq = irq;
         event->pin = pin;
         event->desc = desc;
-        INIT_LIST_HEAD(&event->initial_sync_list);
 
         ret = request_threaded_irq(event->irq, NULL, handler, irqflags,
                                    "ACPI:Event", event);
@@ -251,10 +243,9 @@ static acpi_status acpi_gpiochip_request_interrupt(struct acpi_resource *ares,
          * may refer to OperationRegions from other (builtin) drivers which
          * may be probed after us.
          */
-        if (handler == acpi_gpio_irq_handler &&
-            (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
-             ((irqflags & IRQF_TRIGGER_FALLING) && value == 0)))
-                acpi_gpio_add_to_initial_sync_list(event);
+        if (((irqflags & IRQF_TRIGGER_RISING) && value == 1) ||
+            ((irqflags & IRQF_TRIGGER_FALLING) && value == 0))
+                handler(event->irq, event);
 
         return AE_OK;
 
@@ -283,6 +274,7 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
         struct acpi_gpio_chip *acpi_gpio;
         acpi_handle handle;
         acpi_status status;
+        bool defer;
 
         if (!chip->parent || !chip->to_irq)
                 return;
@@ -295,6 +287,16 @@ void acpi_gpiochip_request_interrupts(struct gpio_chip *chip)
         if (ACPI_FAILURE(status))
                 return;
 
+        mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+        defer = !acpi_gpio_deferred_req_irqs_done;
+        if (defer)
+                list_add(&acpi_gpio->deferred_req_irqs_list_entry,
+                         &acpi_gpio_deferred_req_irqs_list);
+        mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
+        if (defer)
+                return;
+
         acpi_walk_resources(handle, "_AEI",
                             acpi_gpiochip_request_interrupt, acpi_gpio);
 }
@@ -325,11 +327,14 @@ void acpi_gpiochip_free_interrupts(struct gpio_chip *chip)
         if (ACPI_FAILURE(status))
                 return;
 
+        mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+        if (!list_empty(&acpi_gpio->deferred_req_irqs_list_entry))
+                list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
+        mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
+
         list_for_each_entry_safe_reverse(event, ep, &acpi_gpio->events, node) {
                 struct gpio_desc *desc;
 
-                acpi_gpio_del_from_initial_sync_list(event);
-
                 if (irqd_is_wakeup_set(irq_get_irq_data(event->irq)))
                         disable_irq_wake(event->irq);
 
@@ -1052,6 +1057,7 @@ void acpi_gpiochip_add(struct gpio_chip *chip)
 
         acpi_gpio->chip = chip;
         INIT_LIST_HEAD(&acpi_gpio->events);
+        INIT_LIST_HEAD(&acpi_gpio->deferred_req_irqs_list_entry);
 
         status = acpi_attach_data(handle, acpi_gpio_chip_dh, acpi_gpio);
         if (ACPI_FAILURE(status)) {
@@ -1198,20 +1204,28 @@ bool acpi_can_fallback_to_crs(struct acpi_device *adev, const char *con_id)
         return con_id == NULL;
 }
 
-/* Sync the initial state of handlers after all builtin drivers have probed */
-static int acpi_gpio_initial_sync(void)
+/* Run deferred acpi_gpiochip_request_interrupts() */
+static int acpi_gpio_handle_deferred_request_interrupts(void)
 {
-        struct acpi_gpio_event *event, *ep;
+        struct acpi_gpio_chip *acpi_gpio, *tmp;
+
+        mutex_lock(&acpi_gpio_deferred_req_irqs_lock);
+        list_for_each_entry_safe(acpi_gpio, tmp,
+                                 &acpi_gpio_deferred_req_irqs_list,
+                                 deferred_req_irqs_list_entry) {
+                acpi_handle handle;
 
-        mutex_lock(&acpi_gpio_initial_sync_list_lock);
-        list_for_each_entry_safe(event, ep, &acpi_gpio_initial_sync_list,
-                                 initial_sync_list) {
-                acpi_evaluate_object(event->handle, NULL, NULL, NULL);
-                list_del_init(&event->initial_sync_list);
+                handle = ACPI_HANDLE(acpi_gpio->chip->parent);
+                acpi_walk_resources(handle, "_AEI",
+                                    acpi_gpiochip_request_interrupt, acpi_gpio);
+
+                list_del_init(&acpi_gpio->deferred_req_irqs_list_entry);
         }
-        mutex_unlock(&acpi_gpio_initial_sync_list_lock);
+
+        acpi_gpio_deferred_req_irqs_done = true;
+        mutex_unlock(&acpi_gpio_deferred_req_irqs_lock);
 
         return 0;
 }
 /* We must use _sync so that this runs after the first deferred_probe run */
-late_initcall_sync(acpi_gpio_initial_sync);
+late_initcall_sync(acpi_gpio_handle_deferred_request_interrupts);
