Skip to content

Commit bccdd80

Browse files
johnstultz-work
authored and
Ingo Molnar
committed
locking/ww_mutex/test: Fix potential workqueue corruption
In some cases running with the test-ww_mutex code, I was seeing odd behavior where sometimes it seemed flush_workqueue was returning before all the work threads were finished. Often this would cause strange crashes as the mutexes would be freed while they were being used. Looking at the code, there is a lifetime problem as the controlling thread that spawns the work allocates the "struct stress" structures that are passed to the workqueue threads. Then when the workqueue threads are finished, they free the stress struct that was passed to them. Unfortunately the workqueue work_struct node is in the stress struct. Which means the work_struct is freed before the work thread returns and while flush_workqueue is waiting. It seems like a better idea to have the controlling thread both allocate and free the stress structures, so that we can be sure we don't corrupt the workqueue by freeing the structure prematurely. So this patch reworks the test to do so, and with this change I no longer see the early flush_workqueue returns. Signed-off-by: John Stultz <[email protected]> Signed-off-by: Ingo Molnar <[email protected]> Link: https://lore.kernel.org/r/[email protected]
1 parent 4812c54 commit bccdd80

File tree

1 file changed

+12
-8
lines changed

1 file changed

+12
-8
lines changed

kernel/locking/test-ww_mutex.c

Lines changed: 12 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -479,7 +479,6 @@ static void stress_inorder_work(struct work_struct *work)
479479
} while (!time_after(jiffies, stress->timeout));
480480

481481
kfree(order);
482-
kfree(stress);
483482
}
484483

485484
struct reorder_lock {
@@ -544,7 +543,6 @@ static void stress_reorder_work(struct work_struct *work)
544543
list_for_each_entry_safe(ll, ln, &locks, link)
545544
kfree(ll);
546545
kfree(order);
547-
kfree(stress);
548546
}
549547

550548
static void stress_one_work(struct work_struct *work)
@@ -565,8 +563,6 @@ static void stress_one_work(struct work_struct *work)
565563
break;
566564
}
567565
} while (!time_after(jiffies, stress->timeout));
568-
569-
kfree(stress);
570566
}
571567

572568
#define STRESS_INORDER BIT(0)
@@ -577,15 +573,24 @@ static void stress_one_work(struct work_struct *work)
577573
static int stress(int nlocks, int nthreads, unsigned int flags)
578574
{
579575
struct ww_mutex *locks;
580-
int n;
576+
struct stress *stress_array;
577+
int n, count;
581578

582579
locks = kmalloc_array(nlocks, sizeof(*locks), GFP_KERNEL);
583580
if (!locks)
584581
return -ENOMEM;
585582

583+
stress_array = kmalloc_array(nthreads, sizeof(*stress_array),
584+
GFP_KERNEL);
585+
if (!stress_array) {
586+
kfree(locks);
587+
return -ENOMEM;
588+
}
589+
586590
for (n = 0; n < nlocks; n++)
587591
ww_mutex_init(&locks[n], &ww_class);
588592

593+
count = 0;
589594
for (n = 0; nthreads; n++) {
590595
struct stress *stress;
591596
void (*fn)(struct work_struct *work);
@@ -609,9 +614,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
609614
if (!fn)
610615
continue;
611616

612-
stress = kmalloc(sizeof(*stress), GFP_KERNEL);
613-
if (!stress)
614-
break;
617+
stress = &stress_array[count++];
615618

616619
INIT_WORK(&stress->work, fn);
617620
stress->locks = locks;
@@ -626,6 +629,7 @@ static int stress(int nlocks, int nthreads, unsigned int flags)
626629

627630
for (n = 0; n < nlocks; n++)
628631
ww_mutex_destroy(&locks[n]);
632+
kfree(stress_array);
629633
kfree(locks);
630634

631635
return 0;

0 commit comments

Comments (0)