
Commit 108bcc9

codyps authored and torvalds committed
mm: add & use zone_end_pfn() and zone_spans_pfn()
Add two helpers (zone_end_pfn() and zone_spans_pfn()) to reduce code duplication.

This also switches to using them in compaction (where an additional variable needed to be renamed), page_alloc, vmstat, memory_hotplug, and kmemleak.

Note that in compaction.c I avoid calling zone_end_pfn() repeatedly because I expect that, at some point, the synchronization issues with start_pfn & spanned_pages will need fixing, either by actually using the seqlock or by clever memory barrier usage.

Signed-off-by: Cody P Schafer <[email protected]>
Cc: David Hansen <[email protected]>
Cc: Catalin Marinas <[email protected]>
Cc: Johannes Weiner <[email protected]>
Cc: Mel Gorman <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent 9127ab4 commit 108bcc9
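
For readers skimming the diff, a quick illustration of what the two new helpers buy at call sites. This is a minimal userspace sketch, not kernel code: the stripped-down struct zone and the main() driver exist only for this example, while the helper bodies match the include/linux/mmzone.h hunk below.

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustration-only stand-in for the kernel's struct zone. */
    struct zone {
            unsigned long zone_start_pfn;
            unsigned long spanned_pages;
    };

    /* The two helpers this commit introduces (bodies match the diff). */
    static inline unsigned long zone_end_pfn(const struct zone *zone)
    {
            return zone->zone_start_pfn + zone->spanned_pages;
    }

    static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
    {
            return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
    }

    int main(void)
    {
            struct zone z = { .zone_start_pfn = 4096, .spanned_pages = 1024 };
            unsigned long pfn = 5000;

            /* Before: bounds math open-coded (and duplicated) at each call site. */
            bool in_zone_old = z.zone_start_pfn <= pfn &&
                               pfn < z.zone_start_pfn + z.spanned_pages;

            /* After: one obvious, self-documenting helper call. */
            bool in_zone_new = zone_spans_pfn(&z, pfn);

            printf("old=%d new=%d end_pfn=%lu\n",
                   in_zone_old, in_zone_new, zone_end_pfn(&z));
            return 0;
    }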

File tree

6 files changed: +32 -27 lines changed


include/linux/mmzone.h

Lines changed: 10 additions & 0 deletions
@@ -527,6 +527,16 @@ static inline int zone_is_oom_locked(const struct zone *zone)
 	return test_bit(ZONE_OOM_LOCKED, &zone->flags);
 }
 
+static inline unsigned long zone_end_pfn(const struct zone *zone)
+{
+	return zone->zone_start_pfn + zone->spanned_pages;
+}
+
+static inline bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
+{
+	return zone->zone_start_pfn <= pfn && pfn < zone_end_pfn(zone);
+}
+
 /*
  * The "priority" of VM scanning is how much of the queues we will scan in one
  * go. A value of 12 for DEF_PRIORITY implies that we will scan 1/4096th of the
mm/compaction.c

Lines changed: 5 additions & 5 deletions
@@ -86,7 +86,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 static void __reset_isolation_suitable(struct zone *zone)
 {
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long pfn;
 
 	zone->compact_cached_migrate_pfn = start_pfn;

@@ -647,7 +647,7 @@ static void isolate_freepages(struct zone *zone,
 				struct compact_control *cc)
 {
 	struct page *page;
-	unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
+	unsigned long high_pfn, low_pfn, pfn, z_end_pfn, end_pfn;
 	int nr_freepages = cc->nr_freepages;
 	struct list_head *freelist = &cc->freepages;

@@ -666,7 +666,7 @@ static void isolate_freepages(struct zone *zone,
 	 */
 	high_pfn = min(low_pfn, pfn);
 
-	zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	z_end_pfn = zone_end_pfn(zone);
 
 	/*
 	 * Isolate free pages until enough are available to migrate the

@@ -709,7 +709,7 @@ static void isolate_freepages(struct zone *zone,
 		 * only scans within a pageblock
 		 */
 		end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
-		end_pfn = min(end_pfn, zone_end_pfn);
+		end_pfn = min(end_pfn, z_end_pfn);
 		isolated = isolate_freepages_block(cc, pfn, end_pfn,
 						   freelist, false);
 		nr_freepages += isolated;

@@ -923,7 +923,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 {
 	int ret;
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 
 	ret = compaction_suitable(zone, cc->order);
 	switch (ret) {
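
The commit message's point about not calling zone_end_pfn() repeatedly is visible in isolate_freepages() above: the zone end is read once into z_end_pfn and every block scan clamps against that snapshot. Below is a minimal userspace sketch of the pattern; pageblock_nr_pages, ALIGN(), min() and struct zone are simplified stand-ins for this example, not the kernel definitions.

    #include <stdio.h>

    /* Simplified stand-ins for kernel macros/types (illustration only). */
    #define pageblock_nr_pages 512UL
    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))
    #define min(a, b)   ((a) < (b) ? (a) : (b))

    struct zone {
            unsigned long zone_start_pfn;
            unsigned long spanned_pages;
    };

    static unsigned long zone_end_pfn(const struct zone *zone)
    {
            return zone->zone_start_pfn + zone->spanned_pages;
    }

    int main(void)
    {
            struct zone z = { .zone_start_pfn = 4096, .spanned_pages = 2000 };

            /*
             * One snapshot of the zone end, taken before the loop. If
             * zone_start_pfn/spanned_pages could change concurrently (the
             * synchronization gap the commit message mentions), re-reading
             * the boundary on every iteration might see different values;
             * a single read keeps the whole scan self-consistent.
             */
            unsigned long z_end_pfn = zone_end_pfn(&z);
            unsigned long pfn, end_pfn;

            for (pfn = z.zone_start_pfn; pfn < z_end_pfn; pfn += pageblock_nr_pages) {
                    end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
                    end_pfn = min(end_pfn, z_end_pfn); /* clamp the final block */
                    printf("scan block [%lu, %lu)\n", pfn, end_pfn);
            }
            return 0;
    }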

mm/kmemleak.c

Lines changed: 2 additions & 3 deletions
@@ -1300,9 +1300,8 @@ static void kmemleak_scan(void)
 	 */
 	lock_memory_hotplug();
 	for_each_online_node(i) {
-		pg_data_t *pgdat = NODE_DATA(i);
-		unsigned long start_pfn = pgdat->node_start_pfn;
-		unsigned long end_pfn = start_pfn + pgdat->node_spanned_pages;
+		unsigned long start_pfn = node_start_pfn(i);
+		unsigned long end_pfn = node_end_pfn(i);
 		unsigned long pfn;
 
 		for (pfn = start_pfn; pfn < end_pfn; pfn++) {

mm/memory_hotplug.c

Lines changed: 5 additions & 5 deletions
@@ -299,7 +299,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
 	pgdat_resize_lock(z1->zone_pgdat, &flags);
 
 	/* can't move pfns which are higher than @z2 */
-	if (end_pfn > z2->zone_start_pfn + z2->spanned_pages)
+	if (end_pfn > zone_end_pfn(z2))
 		goto out_fail;
 	/* the move out part mast at the left most of @z2 */
 	if (start_pfn > z2->zone_start_pfn)

@@ -315,7 +315,7 @@ static int __meminit move_pfn_range_left(struct zone *z1, struct zone *z2,
 		z1_start_pfn = start_pfn;
 
 	resize_zone(z1, z1_start_pfn, end_pfn);
-	resize_zone(z2, end_pfn, z2->zone_start_pfn + z2->spanned_pages);
+	resize_zone(z2, end_pfn, zone_end_pfn(z2));
 
 	pgdat_resize_unlock(z1->zone_pgdat, &flags);

@@ -347,15 +347,15 @@ static int __meminit move_pfn_range_right(struct zone *z1, struct zone *z2,
 	if (z1->zone_start_pfn > start_pfn)
 		goto out_fail;
 	/* the move out part mast at the right most of @z1 */
-	if (z1->zone_start_pfn + z1->spanned_pages > end_pfn)
+	if (zone_end_pfn(z1) > end_pfn)
 		goto out_fail;
 	/* must included/overlap */
-	if (start_pfn >= z1->zone_start_pfn + z1->spanned_pages)
+	if (start_pfn >= zone_end_pfn(z1))
 		goto out_fail;
 
 	/* use end_pfn for z2's end_pfn if z2 is empty */
 	if (z2->spanned_pages)
-		z2_end_pfn = z2->zone_start_pfn + z2->spanned_pages;
+		z2_end_pfn = zone_end_pfn(z2);
 	else
 		z2_end_pfn = end_pfn;

mm/page_alloc.c

Lines changed: 9 additions & 13 deletions
@@ -250,9 +250,7 @@ static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 
 	do {
 		seq = zone_span_seqbegin(zone);
-		if (pfn >= zone->zone_start_pfn + zone->spanned_pages)
-			ret = 1;
-		else if (pfn < zone->zone_start_pfn)
+		if (!zone_spans_pfn(zone, pfn))
 			ret = 1;
 	} while (zone_span_seqretry(zone, seq));
 

@@ -990,9 +988,9 @@ int move_freepages_block(struct zone *zone, struct page *page,
 	end_pfn = start_pfn + pageblock_nr_pages - 1;
 
 	/* Do not cross zone boundaries */
-	if (start_pfn < zone->zone_start_pfn)
+	if (!zone_spans_pfn(zone, start_pfn))
 		start_page = page;
-	if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
+	if (!zone_spans_pfn(zone, end_pfn))
 		return 0;
 
 	return move_freepages(zone, start_page, end_page, migratetype);

@@ -1286,7 +1284,7 @@ void mark_free_pages(struct zone *zone)
 
 	spin_lock_irqsave(&zone->lock, flags);
 
-	max_zone_pfn = zone->zone_start_pfn + zone->spanned_pages;
+	max_zone_pfn = zone_end_pfn(zone);
 	for (pfn = zone->zone_start_pfn; pfn < max_zone_pfn; pfn++)
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);

@@ -3798,7 +3796,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 	 * the block.
 	 */
 	start_pfn = zone->zone_start_pfn;
-	end_pfn = start_pfn + zone->spanned_pages;
+	end_pfn = zone_end_pfn(zone);
 	start_pfn = roundup(start_pfn, pageblock_nr_pages);
 	reserve = roundup(min_wmark_pages(zone), pageblock_nr_pages) >>
 							pageblock_order;

@@ -3912,7 +3910,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		 * pfn out of zone.
 		 */
 		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < z->zone_start_pfn + z->spanned_pages)
+		    && (pfn < zone_end_pfn(z))
 		    && !(pfn & (pageblock_nr_pages - 1)))
 			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 

@@ -4713,7 +4711,7 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 	 * for the buddy allocator to function correctly.
 	 */
 	start = pgdat->node_start_pfn & ~(MAX_ORDER_NR_PAGES - 1);
-	end = pgdat->node_start_pfn + pgdat->node_spanned_pages;
+	end = pgdat_end_pfn(pgdat);
 	end = ALIGN(end, MAX_ORDER_NR_PAGES);
 	size = (end - start) * sizeof(struct page);
 	map = alloc_remap(pgdat->node_id, size);

@@ -5928,8 +5926,7 @@ void set_pageblock_flags_group(struct page *page, unsigned long flags,
 	pfn = page_to_pfn(page);
 	bitmap = get_pageblock_bitmap(zone, pfn);
 	bitidx = pfn_to_bitidx(zone, pfn);
-	VM_BUG_ON(pfn < zone->zone_start_pfn);
-	VM_BUG_ON(pfn >= zone->zone_start_pfn + zone->spanned_pages);
+	VM_BUG_ON(!zone_spans_pfn(zone, pfn));
 
 	for (; start_bitidx <= end_bitidx; start_bitidx++, value <<= 1)
 		if (flags & value)

@@ -6027,8 +6024,7 @@ bool is_pageblock_removable_nolock(struct page *page)
 
 	zone = page_zone(page);
 	pfn = page_to_pfn(page);
-	if (zone->zone_start_pfn > pfn ||
-			zone->zone_start_pfn + zone->spanned_pages <= pfn)
+	if (!zone_spans_pfn(zone, pfn))
 		return false;
 
 	return !has_unmovable_pages(zone, page, 0, true);
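
The first page_alloc.c hunk above shows zone_spans_pfn() being used inside a zone_span_seqbegin()/zone_span_seqretry() loop: a memory hot-plug resize can change the zone span mid-check, so the read is retried until it observes a stable snapshot. The sketch below is an illustration-only model of that retry pattern; the seqcount field and the accessor bodies are simplified stand-ins, not the kernel's span_seqlock implementation (the real read side additionally spins until the count is even).

    #include <stdbool.h>
    #include <stdio.h>

    struct zone {
            unsigned long zone_start_pfn;
            unsigned long spanned_pages;
            unsigned seqcount; /* even = stable, odd = resize in progress */
    };

    /* Simplified models of the kernel's span_seqlock accessors. */
    static unsigned zone_span_seqbegin(const struct zone *zone)
    {
            return zone->seqcount;
    }

    static bool zone_span_seqretry(const struct zone *zone, unsigned seq)
    {
            /* Retry if a resize was running (odd) or completed (count moved). */
            return (seq & 1) || zone->seqcount != seq;
    }

    static bool zone_spans_pfn(const struct zone *zone, unsigned long pfn)
    {
            return zone->zone_start_pfn <= pfn &&
                   pfn < zone->zone_start_pfn + zone->spanned_pages;
    }

    int main(void)
    {
            struct zone z = { .zone_start_pfn = 4096,
                              .spanned_pages = 1024,
                              .seqcount = 2 };
            unsigned long pfn = 9999;
            unsigned seq;
            int ret = 0;

            /* Same shape as page_outside_zone_boundaries() after this commit. */
            do {
                    seq = zone_span_seqbegin(&z);
                    if (!zone_spans_pfn(&z, pfn))
                            ret = 1; /* pfn lies outside the zone span */
            } while (zone_span_seqretry(&z, seq));

            printf("outside=%d\n", ret);
            return 0;
    }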

mm/vmstat.c

Lines changed: 1 addition & 1 deletion
@@ -891,7 +891,7 @@ static void pagetypeinfo_showblockcount_print(struct seq_file *m,
 	int mtype;
 	unsigned long pfn;
 	unsigned long start_pfn = zone->zone_start_pfn;
-	unsigned long end_pfn = start_pfn + zone->spanned_pages;
+	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long count[MIGRATE_TYPES] = { 0, };
 
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
