@@ -1899,8 +1899,23 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 		/* Unexpected PMD-mapped THP? */
 		VM_BUG_ON_FOLIO(!pvmw.pte, folio);
 
-		subpage = folio_page(folio,
-				pte_pfn(*pvmw.pte) - folio_pfn(folio));
+		if (folio_is_zone_device(folio)) {
+			/*
+			 * Our PTE is a non-present device exclusive entry and
+			 * calculating the subpage as for the common case would
+			 * result in an invalid pointer.
+			 *
+			 * Since only PAGE_SIZE pages can currently be
+			 * migrated, just set it to page. This will need to be
+			 * changed when hugepage migrations to device private
+			 * memory are supported.
+			 */
+			VM_BUG_ON_FOLIO(folio_nr_pages(folio) > 1, folio);
+			subpage = &folio->page;
+		} else {
+			subpage = folio_page(folio,
+					pte_pfn(*pvmw.pte) - folio_pfn(folio));
+		}
 		address = pvmw.address;
 		anon_exclusive = folio_test_anon(folio) &&
 				 PageAnonExclusive(subpage);
@@ -1993,15 +2008,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
 			/*
 			 * No need to invalidate here it will synchronize on
 			 * against the special swap migration pte.
-			 *
-			 * The assignment to subpage above was computed from a
-			 * swap PTE which results in an invalid pointer.
-			 * Since only PAGE_SIZE pages can currently be
-			 * migrated, just set it to page. This will need to be
-			 * changed when hugepage migrations to device private
-			 * memory are supported.
 			 */
-			subpage = &folio->page;
 		} else if (PageHWPoison(subpage)) {
 			pteval = swp_entry_to_pte(make_hwpoison_entry(subpage));
 			if (folio_test_hugetlb(folio)) {
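Note on the arithmetic this patch guards: folio_page(folio, n) returns the
n-th struct page of the folio, with n computed as
pte_pfn(*pvmw.pte) - folio_pfn(folio). That offset is only meaningful when
the PTE is present and actually encodes a PFN; a device exclusive entry is a
non-present swap-style entry, so interpreting its bits as a PFN produces an
out-of-range index and hence the invalid subpage pointer the new comment
describes. The standalone C sketch below models just that offset arithmetic
with hypothetical types and values; it illustrates the idea and is not
kernel code.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Hypothetical userspace model of a folio: a run of pages starting at a pfn. */
struct folio_model {
	unsigned long start_pfn;   /* pfn of the folio's first page */
	unsigned long nr_pages;    /* folio size in PAGE_SIZE units */
};

/* Models a PTE: either a present mapping of a pfn, or a non-present
 * swap-style entry (e.g. device exclusive) whose bits are NOT a pfn. */
struct pte_model {
	bool present;
	unsigned long bits;        /* pfn if present, swap entry otherwise */
};

/* Mirrors subpage = folio_page(folio, pte_pfn(*pte) - folio_pfn(folio)). */
static unsigned long subpage_index(const struct folio_model *f,
				   const struct pte_model *pte)
{
	/* Only a present PTE carries a pfn; anything else must be
	 * special-cased first, which is exactly what the patch adds. */
	assert(pte->present);
	return pte->bits - f->start_pfn;
}

int main(void)
{
	struct folio_model thp = { .start_pfn = 0x1000, .nr_pages = 512 };
	struct pte_model mapped = { .present = true, .bits = 0x1005 };

	/* Present PTE: offset 5 falls inside the 512-page folio. */
	printf("subpage index = %lu\n", subpage_index(&thp, &mapped));

	/* A device exclusive entry would have .present == false; feeding it
	 * through subpage_index() would turn swap-entry bits into a bogus
	 * index - the invalid pointer the patch avoids. */
	return 0;
}

Because the first hunk now computes subpage correctly up front for zone
device folios, the later reassignment in the device-private path (second
hunk) becomes dead code, which is why it is deleted rather than fixed.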