@@ -1027,11 +1027,53 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 	return rc;
 }
 
-static int __unmap_and_move(struct folio *src, struct folio *dst,
+/*
+ * To record some information during migration, we use some unused
+ * fields (mapping and private) of struct folio of the newly allocated
+ * destination folio.  This is safe because nobody is using them
+ * except us.
+ */
+static void __migrate_folio_record(struct folio *dst,
+				   unsigned long page_was_mapped,
+				   struct anon_vma *anon_vma)
+{
+	dst->mapping = (void *)anon_vma;
+	dst->private = (void *)page_was_mapped;
+}
+
+static void __migrate_folio_extract(struct folio *dst,
+				    int *page_was_mappedp,
+				    struct anon_vma **anon_vmap)
+{
+	*anon_vmap = (void *)dst->mapping;
+	*page_was_mappedp = (unsigned long)dst->private;
+	dst->mapping = NULL;
+	dst->private = NULL;
+}
+
+/* Cleanup src folio upon migration success */
+static void migrate_folio_done(struct folio *src,
+			       enum migrate_reason reason)
+{
+	/*
+	 * Compaction can migrate also non-LRU pages which are
+	 * not accounted to NR_ISOLATED_*. They can be recognized
+	 * as __PageMovable
+	 */
+	if (likely(!__folio_test_movable(src)))
+		mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
+				    folio_is_file_lru(src), -folio_nr_pages(src));
+
+	if (reason != MR_MEMORY_FAILURE)
+		/* We release the page in page_handle_poison. */
+		folio_put(src);
+}
+
+static int __migrate_folio_unmap(struct folio *src, struct folio *dst,
 				int force, enum migrate_mode mode)
 {
 	int rc = -EAGAIN;
-	bool page_was_mapped = false;
+	int page_was_mapped = 0;
 	struct anon_vma *anon_vma = NULL;
 	bool is_lru = !__PageMovable(&src->page);
 
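A minimal usage sketch of the record/extract pair added above (illustration only, not part of this diff): the unmap phase stashes its state in the destination folio's unused fields, and the later move phase reads it back and clears them. The values below are made up for the example; folio_get_anon_vma() is the helper the unmap path already uses.

	int was_mapped;
	struct anon_vma *anon_vma = folio_get_anon_vma(src);	/* may be NULL for file/KSM folios */
	struct anon_vma *restored;

	__migrate_folio_record(dst, 1, anon_vma);		/* unmap phase: stash state in dst */
	/* ... later, in the move phase ... */
	__migrate_folio_extract(dst, &was_mapped, &restored);
	/* now was_mapped == 1, restored == anon_vma, and dst->mapping/private are NULL again */
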
@@ -1107,8 +1149,8 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
 		goto out_unlock;
 
 	if (unlikely(!is_lru)) {
-		rc = move_to_new_folio(dst, src, mode);
-		goto out_unlock_both;
+		__migrate_folio_record(dst, page_was_mapped, anon_vma);
+		return MIGRATEPAGE_UNMAP;
 	}
 
 	/*
@@ -1133,11 +1175,42 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
 		VM_BUG_ON_FOLIO(folio_test_anon(src) &&
 			       !folio_test_ksm(src) && !anon_vma, src);
 		try_to_migrate(src, 0);
-		page_was_mapped = true;
+		page_was_mapped = 1;
 	}
 
-	if (!folio_mapped(src))
-		rc = move_to_new_folio(dst, src, mode);
+	if (!folio_mapped(src)) {
+		__migrate_folio_record(dst, page_was_mapped, anon_vma);
+		return MIGRATEPAGE_UNMAP;
+	}
+
+	if (page_was_mapped)
+		remove_migration_ptes(src, src, false);
+
+out_unlock_both:
+	folio_unlock(dst);
+out_unlock:
+	/* Drop an anon_vma reference if we took one */
+	if (anon_vma)
+		put_anon_vma(anon_vma);
+	folio_unlock(src);
+out:
+
+	return rc;
+}
+
+static int __migrate_folio_move(struct folio *src, struct folio *dst,
+				enum migrate_mode mode)
+{
+	int rc;
+	int page_was_mapped = 0;
+	struct anon_vma *anon_vma = NULL;
+	bool is_lru = !__PageMovable(&src->page);
+
+	__migrate_folio_extract(dst, &page_was_mapped, &anon_vma);
+
+	rc = move_to_new_folio(dst, src, mode);
+	if (unlikely(!is_lru))
+		goto out_unlock_both;
 
 	/*
 	 * When successful, push dst to LRU immediately: so that if it
@@ -1160,12 +1233,10 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
 
 out_unlock_both:
 	folio_unlock(dst);
-out_unlock:
 	/* Drop an anon_vma reference if we took one */
 	if (anon_vma)
 		put_anon_vma(anon_vma);
 	folio_unlock(src);
-out:
 	/*
 	 * If migration is successful, decrease refcount of dst,
 	 * which will not free the page because new page owner increased
@@ -1177,19 +1248,15 @@ static int __unmap_and_move(struct folio *src, struct folio *dst,
 	return rc;
 }
 
-/*
- * Obtain the lock on folio, remove all ptes and migrate the folio
- * to the newly allocated folio in dst.
- */
-static int unmap_and_move(new_page_t get_new_page,
-			  free_page_t put_new_page,
-			  unsigned long private, struct folio *src,
-			  int force, enum migrate_mode mode,
-			  enum migrate_reason reason,
-			  struct list_head *ret)
+/* Obtain the lock on page, remove all ptes. */
+static int migrate_folio_unmap(new_page_t get_new_page, free_page_t put_new_page,
+			       unsigned long private, struct folio *src,
+			       struct folio **dstp, int force,
+			       enum migrate_mode mode, enum migrate_reason reason,
+			       struct list_head *ret)
 {
 	struct folio *dst;
-	int rc = MIGRATEPAGE_SUCCESS;
+	int rc = MIGRATEPAGE_UNMAP;
 	struct page *newpage = NULL;
 
 	if (!thp_migration_supported() && folio_test_transhuge(src))
@@ -1200,20 +1267,49 @@ static int unmap_and_move(new_page_t get_new_page,
 		folio_clear_active(src);
 		folio_clear_unevictable(src);
 		/* free_pages_prepare() will clear PG_isolated. */
-		goto out;
+		list_del(&src->lru);
+		migrate_folio_done(src, reason);
+		return MIGRATEPAGE_SUCCESS;
 	}
 
 	newpage = get_new_page(&src->page, private);
 	if (!newpage)
 		return -ENOMEM;
 	dst = page_folio(newpage);
+	*dstp = dst;
 
 	dst->private = NULL;
-	rc = __unmap_and_move(src, dst, force, mode);
+	rc = __migrate_folio_unmap(src, dst, force, mode);
+	if (rc == MIGRATEPAGE_UNMAP)
+		return rc;
+
+	/*
+	 * A folio that has not been unmapped will be restored to
+	 * right list unless we want to retry.
+	 */
+	if (rc != -EAGAIN)
+		list_move_tail(&src->lru, ret);
+
+	if (put_new_page)
+		put_new_page(&dst->page, private);
+	else
+		folio_put(dst);
+
+	return rc;
+}
+
+/* Migrate the folio to the newly allocated folio in dst. */
+static int migrate_folio_move(free_page_t put_new_page, unsigned long private,
+			      struct folio *src, struct folio *dst,
+			      enum migrate_mode mode, enum migrate_reason reason,
+			      struct list_head *ret)
+{
+	int rc;
+
+	rc = __migrate_folio_move(src, dst, mode);
 	if (rc == MIGRATEPAGE_SUCCESS)
 		set_page_owner_migrate_reason(&dst->page, reason);
 
-out:
 	if (rc != -EAGAIN) {
 		/*
 		 * A folio that has been migrated has all references
@@ -1229,20 +1325,7 @@ static int unmap_and_move(new_page_t get_new_page,
 		 * we want to retry.
 		 */
 		if (rc == MIGRATEPAGE_SUCCESS) {
-			/*
-			 * Compaction can migrate also non-LRU folios which are
-			 * not accounted to NR_ISOLATED_*. They can be recognized
-			 * as __folio_test_movable
-			 */
-			if (likely(!__folio_test_movable(src)))
-				mod_node_page_state(folio_pgdat(src), NR_ISOLATED_ANON +
-						folio_is_file_lru(src), -folio_nr_pages(src));
-
-			if (reason != MR_MEMORY_FAILURE)
-				/*
-				 * We release the folio in page_handle_poison.
-				 */
-				folio_put(src);
+			migrate_folio_done(src, reason);
 		} else {
 			if (rc != -EAGAIN)
 				list_add_tail(&src->lru, ret);
@@ -1534,7 +1617,7 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 	int pass = 0;
 	bool is_large = false;
 	bool is_thp = false;
-	struct folio *folio, *folio2;
+	struct folio *folio, *folio2, *dst = NULL;
 	int rc, nr_pages;
 	LIST_HEAD(split_folios);
 	bool nosplit = (reason == MR_NUMA_MISPLACED);
@@ -1561,9 +1644,13 @@ static int migrate_pages_batch(struct list_head *from, new_page_t get_new_page,
 
 			cond_resched();
 
-			rc = unmap_and_move(get_new_page, put_new_page,
-					    private, folio, pass > 2, mode,
-					    reason, ret_folios);
+			rc = migrate_folio_unmap(get_new_page, put_new_page, private,
+						 folio, &dst, pass > 2, mode,
+						 reason, ret_folios);
+			if (rc == MIGRATEPAGE_UNMAP)
+				rc = migrate_folio_move(put_new_page, private,
+							folio, dst, mode,
+							reason, ret_folios);
 			/*
 			 * The rules are:
 			 *	Success: folio will be freed
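
A minimal sketch of the contract behind the two-step call sequence in the hunk above (illustration only, not additional patch content; retry and ret_folios bookkeeping are elided, and the variables are assumed to be set up as in the surrounding migrate_pages_batch() loop):

	rc = migrate_folio_unmap(get_new_page, put_new_page, private,
				 folio, &dst, pass > 2, mode, reason, ret_folios);
	if (rc == MIGRATEPAGE_UNMAP)
		/* src is unmapped and both folios are still locked; the
		 * unmap-phase state is stashed in dst, so finish the move. */
		rc = migrate_folio_move(put_new_page, private, folio, dst,
					mode, reason, ret_folios);
	/*
	 * Any other return value means migrate_folio_unmap() already cleaned
	 * up: the folio was freed under us (MIGRATEPAGE_SUCCESS), no
	 * destination could be allocated (-ENOMEM), it was left for a later
	 * retry (-EAGAIN), or it was moved to ret_folios on failure.
	 */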