@@ -1616,9 +1616,10 @@ static int migrate_hugetlbs(struct list_head *from, new_page_t get_new_page,
 static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
		free_page_t put_new_page, unsigned long private,
		enum migrate_mode mode, int reason, struct list_head *ret_folios,
-		struct migrate_pages_stats *stats)
+		struct list_head *split_folios, struct migrate_pages_stats *stats,
+		int nr_pass)
 {
-	int retry;
+	int retry = 1;
	int large_retry = 1;
	int thp_retry = 1;
	int nr_failed = 0;
@@ -1628,21 +1629,15 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
	bool is_large = false;
	bool is_thp = false;
	struct folio *folio, *folio2, *dst = NULL, *dst2;
-	int rc, rc_saved, nr_pages;
-	LIST_HEAD(split_folios);
+	int rc, rc_saved = 0, nr_pages;
	LIST_HEAD(unmap_folios);
	LIST_HEAD(dst_folios);
	bool nosplit = (reason == MR_NUMA_MISPLACED);
-	bool no_split_folio_counting = false;
 
	VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
			!list_empty(from) && !list_is_singular(from));
-retry:
-	rc_saved = 0;
-	retry = 1;
-	for (pass = 0;
-	     pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
-	     pass++) {
+
+	for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
		retry = 0;
		large_retry = 0;
		thp_retry = 0;
@@ -1673,7 +1668,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
		if (!thp_migration_supported() && is_thp) {
			nr_large_failed++;
			stats->nr_thp_failed++;
-			if (!try_split_folio(folio, &split_folios)) {
+			if (!try_split_folio(folio, split_folios)) {
				stats->nr_thp_split++;
				continue;
			}
@@ -1705,7 +1700,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
				stats->nr_thp_failed += is_thp;
				/* Large folio NUMA faulting doesn't split to retry. */
				if (!nosplit) {
-					int ret = try_split_folio(folio, &split_folios);
+					int ret = try_split_folio(folio, split_folios);
 
					if (!ret) {
						stats->nr_thp_split += is_thp;
@@ -1722,18 +1717,11 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
						break;
					}
				}
-			} else if (!no_split_folio_counting) {
+			} else {
				nr_failed++;
			}
 
			stats->nr_failed_pages += nr_pages + nr_retry_pages;
-			/*
-			 * There might be some split folios of fail-to-migrate large
-			 * folios left in split_folios list. Move them to ret_folios
-			 * list so that they could be put back to the right list by
-			 * the caller otherwise the folio refcnt will be leaked.
-			 */
-			list_splice_init(&split_folios, ret_folios);
			/* nr_failed isn't updated for not used */
			nr_large_failed += large_retry;
			stats->nr_thp_failed += thp_retry;
@@ -1746,7 +1734,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
			if (is_large) {
				large_retry++;
				thp_retry += is_thp;
-			} else if (!no_split_folio_counting) {
+			} else {
				retry++;
			}
			nr_retry_pages += nr_pages;
@@ -1769,7 +1757,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
			if (is_large) {
				nr_large_failed++;
				stats->nr_thp_failed += is_thp;
-			} else if (!no_split_folio_counting) {
+			} else {
				nr_failed++;
			}
 
@@ -1787,9 +1775,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
	try_to_unmap_flush();
 
	retry = 1;
-	for (pass = 0;
-	     pass < NR_MAX_MIGRATE_PAGES_RETRY && (retry || large_retry);
-	     pass++) {
+	for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
		retry = 0;
		large_retry = 0;
		thp_retry = 0;
@@ -1818,7 +1804,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
			if (is_large) {
				large_retry++;
				thp_retry += is_thp;
-			} else if (!no_split_folio_counting) {
+			} else {
				retry++;
			}
			nr_retry_pages += nr_pages;
@@ -1831,7 +1817,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
			if (is_large) {
				nr_large_failed++;
				stats->nr_thp_failed += is_thp;
-			} else if (!no_split_folio_counting) {
+			} else {
				nr_failed++;
			}
 
@@ -1868,27 +1854,6 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
		dst2 = list_next_entry(dst, lru);
	}
 
-	/*
-	 * Try to migrate split folios of fail-to-migrate large folios, no
-	 * nr_failed counting in this round, since all split folios of a
-	 * large folio is counted as 1 failure in the first round.
-	 */
-	if (rc >= 0 && !list_empty(&split_folios)) {
-		/*
-		 * Move non-migrated folios (after NR_MAX_MIGRATE_PAGES_RETRY
-		 * retries) to ret_folios to avoid migrating them again.
-		 */
-		list_splice_init(from, ret_folios);
-		list_splice_init(&split_folios, from);
-		/*
-		 * Force async mode to avoid to wait lock or bit when we have
-		 * locked more than one folios.
-		 */
-		mode = MIGRATE_ASYNC;
-		no_split_folio_counting = true;
-		goto retry;
-	}
-
	return rc;
 }
 
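Taken together, the hunks above leave migrate_pages_batch() with the condensed shape below. This skeleton is assembled from this diff alone as a reading aid, not quoted from the tree; elided bodies are marked with /* ... */:

	static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
			free_page_t put_new_page, unsigned long private,
			enum migrate_mode mode, int reason, struct list_head *ret_folios,
			struct list_head *split_folios, struct migrate_pages_stats *stats,
			int nr_pass)
	{
		int retry = 1;
		int rc, rc_saved = 0;
		/* ... */

		/* Unmap phase: at most nr_pass sweeps; a failing large folio
		 * is split onto the caller-owned split_folios list. */
		for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
			/* ... */
		}

		/* Move phase: bounded by the same caller-chosen pass count. */
		for (pass = 0; pass < nr_pass && (retry || large_retry); pass++) {
			/* ... */
		}

		/* No in-function retry of split folios any more; the old
		 * "goto retry" loop now lives in migrate_pages(). */
		return rc;
	}

Passing split_folios == NULL, as the one-shot caller below does, is presumably safe because everything on that list is already a base page, so the try_split_folio() paths are never reached on the second pass.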
@@ -1927,6 +1892,7 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
	struct folio *folio, *folio2;
	LIST_HEAD(folios);
	LIST_HEAD(ret_folios);
+	LIST_HEAD(split_folios);
	struct migrate_pages_stats stats;
 
	trace_mm_migrate_pages_start(mode, reason);
@@ -1960,12 +1926,24 @@ int migrate_pages(struct list_head *from, new_page_t get_new_page,
	else
		list_splice_init(from, &folios);
	rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
-				 mode, reason, &ret_folios, &stats);
+				 mode, reason, &ret_folios, &split_folios, &stats,
+				 NR_MAX_MIGRATE_PAGES_RETRY);
	list_splice_tail_init(&folios, &ret_folios);
	if (rc < 0) {
		rc_gather = rc;
+		list_splice_tail(&split_folios, &ret_folios);
		goto out;
	}
+	if (!list_empty(&split_folios)) {
+		/*
+		 * Failure isn't counted since all split folios of a large folio
+		 * are counted as one failure already.  And we only try to migrate
+		 * with minimal effort: force MIGRATE_ASYNC mode and retry once.
+		 */
+		migrate_pages_batch(&split_folios, get_new_page, put_new_page, private,
+				    MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1);
+		list_splice_tail_init(&split_folios, &ret_folios);
+	}
	rc_gather += rc;
	if (!list_empty(from))
		goto again;
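On the caller side, the end state in migrate_pages() (again reconstructed from the hunks above rather than quoted from the tree) is a plain two-phase sequence:

	/* Phase 1: full-effort batch migration; split pieces of
	 * fail-to-migrate large folios accumulate on split_folios. */
	rc = migrate_pages_batch(&folios, get_new_page, put_new_page, private,
				 mode, reason, &ret_folios, &split_folios, &stats,
				 NR_MAX_MIGRATE_PAGES_RETRY);
	list_splice_tail_init(&folios, &ret_folios);
	if (rc < 0) {
		rc_gather = rc;
		list_splice_tail(&split_folios, &ret_folios);
		goto out;
	}

	/* Phase 2: one best-effort MIGRATE_ASYNC pass over the split
	 * pieces; their failure was already counted once in phase 1. */
	if (!list_empty(&split_folios)) {
		migrate_pages_batch(&split_folios, get_new_page, put_new_page, private,
				    MIGRATE_ASYNC, reason, &ret_folios, NULL, &stats, 1);
		list_splice_tail_init(&split_folios, &ret_folios);
	}

The final list_splice_tail_init() serves the same purpose as the splice the patch removes from migrate_pages_batch(): any split folio that still failed ends up on ret_folios so the caller can put it back on the right list, avoiding the refcount leak the old in-batch comment warned about.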