Skip to content

Commit 472d900

Browse files
mstrodl authored and gregkh committed
gpio: mpsse: ensure worker is torn down
[ Upstream commit 179ef11 ]

When an IRQ worker is running, unplugging the device would cause a crash. The sealevel hardware this driver was written for was not hotpluggable, so I never realized it.

This change uses a spinlock to protect a list of workers, which it tears down on disconnect.

Signed-off-by: Mary Strodl <mstrodl@csh.rit.edu>
Reviewed-by: Linus Walleij <linus.walleij@linaro.org>
Link: https://lore.kernel.org/r/20251014133530.3592716-3-mstrodl@csh.rit.edu
Signed-off-by: Bartosz Golaszewski <bartosz.golaszewski@linaro.org>
Stable-dep-of: 1e876e5 ("gpio: mpsse: fix reference leak in gpio_mpsse_probe() error paths")
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
1 parent 4374a1c commit 472d900

1 file changed

Lines changed: 99 additions & 7 deletions

File tree

drivers/gpio/gpio-mpsse.c

Lines changed: 99 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -10,15 +10,18 @@
1010
#include <linux/cleanup.h>
1111
#include <linux/gpio/driver.h>
1212
#include <linux/mutex.h>
13+
#include <linux/spinlock.h>
1314
#include <linux/usb.h>
1415

1516
struct mpsse_priv {
1617
struct gpio_chip gpio;
1718
struct usb_device *udev; /* USB device encompassing all MPSSEs */
1819
struct usb_interface *intf; /* USB interface for this MPSSE */
1920
u8 intf_id; /* USB interface number for this MPSSE */
20-
struct work_struct irq_work; /* polling work thread */
21+
struct list_head workers; /* polling work threads */
2122
struct mutex irq_mutex; /* lock over irq_data */
23+
struct mutex irq_race; /* race for polling worker teardown */
24+
raw_spinlock_t irq_spin; /* protects worker list */
2225
atomic_t irq_type[16]; /* pin -> edge detection type */
2326
atomic_t irq_enabled;
2427
int id;
@@ -34,6 +37,14 @@ struct mpsse_priv {
3437
struct mutex io_mutex; /* sync I/O with disconnect */
3538
};
3639

40+
/*
 * One polling worker instance. Workers are allocated in
 * gpio_mpsse_irq_enable(), tracked on priv->workers (protected by
 * priv->irq_spin) and freed by gpio_mpsse_stop_all_except().
 */
struct mpsse_worker {
	struct mpsse_priv *priv;	/* owning MPSSE interface */
	struct work_struct work;	/* polling work item */
	atomic_t cancelled;		/* set to 1 to ask the poll loop to exit */
	struct list_head list;		/* linked list (priv->workers) */
	struct list_head destroy;	/* teardown linked list */
};
47+
3748
struct bulk_desc {
3849
bool tx; /* direction of bulk transfer */
3950
u8 *data; /* input (tx) or output (rx) */
@@ -284,18 +295,62 @@ static int gpio_mpsse_get_direction(struct gpio_chip *chip,
284295
return ret;
285296
}
286297

287-
static void gpio_mpsse_poll(struct work_struct *work)
298+
/*
 * Stops all workers except `my_worker`.
 * Safe to call only when `irq_race` is held.
 *
 * Teardown is two-phase: cancel_work_sync() blocks, so it must not run
 * under the raw spinlock. Phase 1 (under irq_spin) unlinks each victim
 * from priv->workers, flags it cancelled and collects it on a local
 * list; phase 2 (lock dropped) waits for each worker and frees it.
 */
static void gpio_mpsse_stop_all_except(struct mpsse_priv *priv,
				       struct mpsse_worker *my_worker)
{
	struct mpsse_worker *worker, *worker_tmp;
	struct list_head destructors = LIST_HEAD_INIT(destructors);

	scoped_guard(raw_spinlock_irqsave, &priv->irq_spin) {
		list_for_each_entry_safe(worker, worker_tmp,
					 &priv->workers, list) {
			/* Don't stop ourselves */
			if (worker == my_worker)
				continue;

			list_del(&worker->list);

			/* Give worker a chance to terminate itself */
			atomic_set(&worker->cancelled, 1);
			/* Keep track of stuff to cancel */
			INIT_LIST_HEAD(&worker->destroy);
			list_add(&worker->destroy, &destructors);
		}
	}

	/* Lock dropped: now safe to block while reaping each worker */
	list_for_each_entry_safe(worker, worker_tmp,
				 &destructors, destroy) {
		list_del(&worker->destroy);
		cancel_work_sync(&worker->work);
		kfree(worker);
	}
}
332+
333+
static void gpio_mpsse_poll(struct work_struct *my_work)
288334
{
289335
unsigned long pin_mask, pin_states, flags;
290336
int irq_enabled, offset, err, value, fire_irq,
291337
irq, old_value[16], irq_type[16];
292-
struct mpsse_priv *priv = container_of(work, struct mpsse_priv,
293-
irq_work);
338+
struct mpsse_worker *my_worker = container_of(my_work, struct mpsse_worker, work);
339+
struct mpsse_priv *priv = my_worker->priv;
294340

295341
for (offset = 0; offset < priv->gpio.ngpio; ++offset)
296342
old_value[offset] = -1;
297343

298-
while ((irq_enabled = atomic_read(&priv->irq_enabled))) {
344+
/*
345+
* We only want one worker. Workers race to acquire irq_race and tear
346+
* down all other workers. This is a cond guard so that we don't deadlock
347+
* trying to cancel a worker.
348+
*/
349+
scoped_cond_guard(mutex_try, return, &priv->irq_race)
350+
gpio_mpsse_stop_all_except(priv, my_worker);
351+
352+
while ((irq_enabled = atomic_read(&priv->irq_enabled)) &&
353+
!atomic_read(&my_worker->cancelled)) {
299354
usleep_range(MPSSE_POLL_INTERVAL, MPSSE_POLL_INTERVAL + 1000);
300355
/* Cleanup will trigger at the end of the loop */
301356
guard(mutex)(&priv->irq_mutex);
@@ -370,21 +425,45 @@ static int gpio_mpsse_set_irq_type(struct irq_data *irqd, unsigned int type)
370425

371426
static void gpio_mpsse_irq_disable(struct irq_data *irqd)
372427
{
428+
struct mpsse_worker *worker;
373429
struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);
374430

375431
atomic_and(~BIT(irqd->hwirq), &priv->irq_enabled);
376432
gpiochip_disable_irq(&priv->gpio, irqd->hwirq);
433+
434+
/*
435+
* Can't actually do teardown in IRQ context (it blocks).
436+
* As a result, these workers will stick around until irq is reenabled
437+
* or device gets disconnected
438+
*/
439+
scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
440+
list_for_each_entry(worker, &priv->workers, list)
441+
atomic_set(&worker->cancelled, 1);
377442
}
378443

379444
/*
 * Unmask one GPIO IRQ line; the first enabled line spawns the polling
 * worker that detects edges.
 */
static void gpio_mpsse_irq_enable(struct irq_data *irqd)
{
	struct mpsse_worker *worker;
	struct mpsse_priv *priv = irq_data_get_irq_chip_data(irqd);

	gpiochip_enable_irq(&priv->gpio, irqd->hwirq);
	/* If no-one else was using the IRQ, enable it */
	if (!atomic_fetch_or(BIT(irqd->hwirq), &priv->irq_enabled)) {
		/*
		 * Can't be devm because it uses a non-raw spinlock (illegal in
		 * this context, where a raw spinlock is held by our caller)
		 */
		worker = kzalloc(sizeof(*worker), GFP_NOWAIT);
		if (!worker)
			return;	/* best-effort: no polling until re-enable */

		worker->priv = priv;
		INIT_LIST_HEAD(&worker->list);
		INIT_WORK(&worker->work, gpio_mpsse_poll);
		/*
		 * NOTE(review): the worker is scheduled before it is added to
		 * priv->workers; presumably a concurrent disconnect between
		 * these two steps would not see it in the list — TODO confirm
		 * the irq_race/self-teardown in gpio_mpsse_poll covers this.
		 */
		schedule_work(&worker->work);

		scoped_guard(raw_spinlock_irqsave, &priv->irq_spin)
			list_add(&worker->list, &priv->workers);
	}
}
390469

@@ -436,6 +515,12 @@ static int gpio_mpsse_probe(struct usb_interface *interface,
436515
if (err)
437516
return err;
438517

518+
err = devm_mutex_init(dev, &priv->irq_race);
519+
if (err)
520+
return err;
521+
522+
raw_spin_lock_init(&priv->irq_spin);
523+
439524
priv->gpio.label = devm_kasprintf(dev, GFP_KERNEL,
440525
"gpio-mpsse.%d.%d",
441526
priv->id, priv->intf_id);
@@ -506,6 +591,13 @@ static void gpio_mpsse_disconnect(struct usb_interface *intf)
506591
{
507592
struct mpsse_priv *priv = usb_get_intfdata(intf);
508593

594+
/*
595+
* Lock prevents double-free of worker from here and the teardown
596+
* step at the beginning of gpio_mpsse_poll
597+
*/
598+
scoped_guard(mutex, &priv->irq_race)
599+
gpio_mpsse_stop_all_except(priv, NULL);
600+
509601
priv->intf = NULL;
510602
usb_set_intfdata(intf, NULL);
511603
usb_put_dev(priv->udev);

0 commit comments

Comments (0)