@@ -377,7 +377,7 @@ kill_pending_current_main_and_watchdogs(PyThreadState *ts)
 }
 
 static void
-run_other_threads(PyObject **sleep, int count)
+run_other_threads(PyObject **sleep, Py_ssize_t count)
 {
     if (count == 0) {
         /* shortcut */
@@ -545,10 +545,10 @@ void slp_kill_tasks_with_stacks(PyThreadState *target_ts)
      * states. That will hopefully happen when their threads exit.
      */
     {
-        PyCStackObject *csfirst, *cs;
+        PyCStackObject *cs;
         PyTaskletObject *t;
         PyObject *sleepfunc = NULL;
-        int count;
+        Py_ssize_t count;
 
         /* other threads, first pass: kill (pending) current, main and watchdog tasklets */
         if (target_ts == NULL) {
@@ -580,50 +580,120 @@ void slp_kill_tasks_with_stacks(PyThreadState *target_ts)
 
         /* other threads, second pass: kill tasklets with nesting-level > 0 and
          * clear tstate if target_ts != NULL && target_ts != cts. */
-        csfirst = slp_cstack_chain;
-        if (csfirst == NULL) {
+        if (slp_cstack_chain == NULL) {
             Py_XDECREF(sleepfunc);
             goto current_main;
         }
 
         count = 0;
         in_loop = 0;
-        for (cs = csfirst; !(in_loop && cs == csfirst); cs = cs->next) {
+        /* build a tuple of all tasklets to be killed:
+         * 1. count the tasklets
+         * 2. alloc a tuple and record them
+         * 3. kill them
+         * Steps 1 and 2 must not run Python code (release the GIL), because another thread could
+         * modify slp_cstack_chain.
+         */
+        for (cs = slp_cstack_chain; cs != slp_cstack_chain || in_loop == 0; cs = cs->next) {
+            /* Count tasklets to be killed.
+             * This loop body must not release the GIL
+             */
+            assert(cs);
+            assert(cs->next);
+            assert(cs->next->prev == cs);
             in_loop = 1;
             t = cs->task;
             if (t == NULL)
                 continue;
-            Py_INCREF(t); /* cs->task is a borrowed ref */
             if (t->cstate != cs) {
-                Py_DECREF(t);
                 continue; /* not the current cstate of the tasklet */
             }
             if (cs->tstate == NULL || cs->tstate == cts) {
-                Py_DECREF(t);
                 continue; /* already handled */
             }
             if (target_ts != NULL && cs->tstate != target_ts) {
-                Py_DECREF(t);
                 continue; /* we are not interested in this thread */
             }
-            if (((cs->tstate && cs->tstate->st.current == t) ? cs->tstate->st.nesting_level : cs->nesting_level) > 0) {
+            if (((cs->tstate && cs->tstate->st.current == t) ?
+                cs->tstate->st.nesting_level : cs->nesting_level) > 0) {
                 /* Kill only tasklets with nesting level > 0 */
                 count++;
-                PyTasklet_Kill(t);
-                PyErr_Clear();
-            }
-            Py_DECREF(t);
-            if (target_ts != NULL) {
-                cs->tstate = NULL;
             }
         }
-        if (target_ts == NULL) {
-            /* We must not release the GIL while we might hold the HEAD-lock.
-             * Otherwise another thread (usually the thread of the killed tasklet)
-             * could try to get the HEAD lock. The result would be a wonderful dead lock.
-             * If target_ts is NULL, we know for sure, that we don't hold the HEAD-lock.
-             */
-            run_other_threads(&sleepfunc, count);
+        assert(cs == slp_cstack_chain);
+        if (count > 0) {
+            PyObject *tasklets = PyTuple_New(count);
+            if (NULL == tasklets) {
+                PyErr_Print();
+                return;
+            }
+            assert(cs == slp_cstack_chain);
+            for (in_loop = 0, count = 0; cs != slp_cstack_chain || in_loop == 0; cs = cs->next) {
+                /* Record tasklets to be killed.
+                 * This loop body must not release the GIL.
+                 */
+                assert(cs);
+                assert(cs->next);
+                assert(cs->next->prev == cs);
+                in_loop = 1;
+                t = cs->task;
+                if (t == NULL)
+                    continue;
+                if (t->cstate != cs) {
+                    continue; /* not the current cstate of the tasklet */
+                }
+                if (cs->tstate == NULL || cs->tstate == cts) {
+                    continue; /* already handled */
+                }
+                if (target_ts != NULL && cs->tstate != target_ts) {
+                    continue; /* we are not interested in this thread */
+                }
+                if (((cs->tstate && cs->tstate->st.current == t) ?
+                    cs->tstate->st.nesting_level : cs->nesting_level) > 0) {
+                    /* Kill only tasklets with nesting level > 0 */
+                    Py_INCREF(t);
+                    assert(count < PyTuple_GET_SIZE(tasklets));
+                    PyTuple_SET_ITEM(tasklets, count, (PyObject *)t); /* steals a reference to t */
+                    count++;
+                }
+            }
+            assert(count == PyTuple_GET_SIZE(tasklets));
+            for (count = 0; count < PyTuple_GET_SIZE(tasklets); count++) {
+                /* Kill the tasklets.
+                 */
+                t = (PyTaskletObject *)PyTuple_GET_ITEM(tasklets, count);
+                cs = t->cstate;
+                assert(cs);
+                if (cs->tstate == NULL || cs->tstate == cts) {
+                    continue; /* already handled */
+                }
+                if (target_ts != NULL && cs->tstate != target_ts) {
+                    continue; /* we are not interested in this thread */
+                }
+                Py_INCREF(cs);
+                if (((cs->tstate && cs->tstate->st.current == t) ?
+                    cs->tstate->st.nesting_level : cs->nesting_level) > 0) {
+                    /* Kill only tasklets with nesting level > 0
+                     * We must check again, because killing one tasklet
+                     * can change the state of other tasklets too.
+                     */
+                    PyTasklet_Kill(t);
+                    PyErr_Clear();
+                }
+                if (target_ts != NULL) {
+                    cs->tstate = NULL;
+                }
+                Py_DECREF(cs);
+            }
+            Py_DECREF(tasklets);
+            if (target_ts == NULL) {
+                /* We must not release the GIL while we might hold the HEAD-lock.
+                 * Otherwise another thread (usually the thread of the killed tasklet)
+                 * could try to get the HEAD lock. The result would be a wonderful dead lock.
+                 * If target_ts is NULL, we know for sure, that we don't hold the HEAD-lock.
+                 */
+                run_other_threads(&sleepfunc, count);
+            }
         }
         Py_XDECREF(sleepfunc);
     }
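
The rewritten second pass splits the old single kill loop into three phases: count the candidate tasklets, record them in a tuple, and only then call PyTasklet_Kill(). Because the first two phases never run Python code, no other thread can modify slp_cstack_chain while it is being walked, and the third phase works on a snapshot that stays valid even when killing one tasklet changes the chain. The sketch below is not part of the commit; it illustrates the same snapshot-then-act pattern with hypothetical names (node_t, chain_head, act_on):

#include <Python.h>

typedef struct node_s {
    struct node_s *next, *prev;     /* circular, doubly linked */
    PyObject *payload;              /* borrowed reference, may be NULL */
} node_t;

/* Walk the ring exactly once while holding the GIL, snapshot the interesting
 * payloads into a tuple, then run the (possibly GIL-releasing) action on the
 * snapshot instead of on the live ring. */
static int
snapshot_and_act(node_t *chain_head, int (*act_on)(PyObject *))
{
    node_t *n;
    PyObject *items;
    Py_ssize_t count = 0;
    int in_loop = 0;

    if (chain_head == NULL)
        return 0;

    /* pass 1: count; this loop must not release the GIL */
    for (n = chain_head; n != chain_head || in_loop == 0; n = n->next) {
        in_loop = 1;
        if (n->payload != NULL)
            count++;
    }
    if (count == 0)
        return 0;

    items = PyTuple_New(count);
    if (items == NULL)
        return -1;

    /* pass 2: record; still no Python code, so the ring cannot change */
    for (in_loop = 0, count = 0, n = chain_head;
         n != chain_head || in_loop == 0; n = n->next) {
        in_loop = 1;
        if (n->payload != NULL) {
            Py_INCREF(n->payload);
            PyTuple_SET_ITEM(items, count++, n->payload); /* steals the reference */
        }
    }

    /* pass 3: act on the stable snapshot; the ring may now change freely */
    for (count = 0; count < PyTuple_GET_SIZE(items); count++) {
        if (act_on(PyTuple_GET_ITEM(items, count)) < 0)
            PyErr_Clear();
    }
    Py_DECREF(items);
    return 0;
}
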
@@ -636,17 +706,16 @@ void slp_kill_tasks_with_stacks(PyThreadState *target_ts)
      * should be left.
      */
     if (target_ts == NULL || target_ts == cts) {
-        /* a loop to kill tasklets on the local thread */
-        PyCStackObject *csfirst = slp_cstack_chain, *cs;
+        PyCStackObject *cs;
 
-        if (csfirst == NULL)
+        if (slp_cstack_chain == NULL)
             return;
         in_loop = 0;
-        for (cs = csfirst; ; cs = cs->next) {
-            if (in_loop && cs == csfirst) {
-                /* nothing found */
-                break;
-            }
+        for (cs = slp_cstack_chain; cs != slp_cstack_chain || in_loop == 0; cs = cs->next) {
+            /* This loop body must not release the GIL. */
+            assert(cs);
+            assert(cs->next);
+            assert(cs->next->prev == cs);
             in_loop = 1;
             /* has tstate already been cleared or is it a foreign thread? */
             if (target_ts == NULL || cs->tstate == cts) {
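
All of the rewritten loops traverse the circular, doubly linked cstack chain with the same idiom: the condition cs != slp_cstack_chain || in_loop == 0 stays true until the walk returns to the head after at least one pass through the body, so every element is visited exactly once and the old "nothing found" break is no longer needed. A minimal self-contained sketch of that idiom, using an invented ring_t type and values:

#include <assert.h>
#include <stdio.h>

typedef struct ring_s {
    struct ring_s *next, *prev;
    int value;
} ring_t;

static void
visit_ring_once(ring_t *head)
{
    ring_t *r;
    int in_loop = 0;

    if (head == NULL)
        return;
    /* The loop condition only becomes false once we are back at "head"
     * after the body has run at least once, so each node is visited
     * exactly once. The asserts mirror the chain-consistency checks above. */
    for (r = head; r != head || in_loop == 0; r = r->next) {
        assert(r && r->next && r->next->prev == r);
        in_loop = 1;
        printf("%d\n", r->value);
    }
}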