@@ -202,6 +202,9 @@ static unsigned long __meminitdata nr_all_pages;
202
202
/* Pages reserved for DMA, excluded from the kernelcore/movablecore split. */
static unsigned long __meminitdata dma_reserve;

#ifdef CONFIG_HAVE_MEMBLOCK_NODE_MAP
/* Movable memory ranges, will also be used by memblock subsystem. */
struct movablemem_map movablemem_map;

/* Per-zone pfn bounds computed at boot from the architecture's memory map. */
static unsigned long __meminitdata arch_zone_lowest_possible_pfn[MAX_NR_ZONES];
static unsigned long __meminitdata arch_zone_highest_possible_pfn[MAX_NR_ZONES];
/* Amount of non-movable memory requested via the "kernelcore=" option. */
static unsigned long __initdata required_kernelcore;
@@ -5078,6 +5081,134 @@ static int __init cmdline_parse_movablecore(char *p)
5078
5081
early_param ("kernelcore" , cmdline_parse_kernelcore );
5079
5082
early_param ("movablecore" , cmdline_parse_movablecore );
5080
5083
5084
+ /**
5085
+ * insert_movablemem_map - Insert a memory range in to movablemem_map.map.
5086
+ * @start_pfn: start pfn of the range
5087
+ * @end_pfn: end pfn of the range
5088
+ *
5089
+ * This function will also merge the overlapped ranges, and sort the array
5090
+ * by start_pfn in monotonic increasing order.
5091
+ */
5092
+ static void __init insert_movablemem_map (unsigned long start_pfn ,
5093
+ unsigned long end_pfn )
5094
+ {
5095
+ int pos , overlap ;
5096
+
5097
+ /*
5098
+ * pos will be at the 1st overlapped range, or the position
5099
+ * where the element should be inserted.
5100
+ */
5101
+ for (pos = 0 ; pos < movablemem_map .nr_map ; pos ++ )
5102
+ if (start_pfn <= movablemem_map .map [pos ].end_pfn )
5103
+ break ;
5104
+
5105
+ /* If there is no overlapped range, just insert the element. */
5106
+ if (pos == movablemem_map .nr_map ||
5107
+ end_pfn < movablemem_map .map [pos ].start_pfn ) {
5108
+ /*
5109
+ * If pos is not the end of array, we need to move all
5110
+ * the rest elements backward.
5111
+ */
5112
+ if (pos < movablemem_map .nr_map )
5113
+ memmove (& movablemem_map .map [pos + 1 ],
5114
+ & movablemem_map .map [pos ],
5115
+ sizeof (struct movablemem_entry ) *
5116
+ (movablemem_map .nr_map - pos ));
5117
+ movablemem_map .map [pos ].start_pfn = start_pfn ;
5118
+ movablemem_map .map [pos ].end_pfn = end_pfn ;
5119
+ movablemem_map .nr_map ++ ;
5120
+ return ;
5121
+ }
5122
+
5123
+ /* overlap will be at the last overlapped range */
5124
+ for (overlap = pos + 1 ; overlap < movablemem_map .nr_map ; overlap ++ )
5125
+ if (end_pfn < movablemem_map .map [overlap ].start_pfn )
5126
+ break ;
5127
+
5128
+ /*
5129
+ * If there are more ranges overlapped, we need to merge them,
5130
+ * and move the rest elements forward.
5131
+ */
5132
+ overlap -- ;
5133
+ movablemem_map .map [pos ].start_pfn = min (start_pfn ,
5134
+ movablemem_map .map [pos ].start_pfn );
5135
+ movablemem_map .map [pos ].end_pfn = max (end_pfn ,
5136
+ movablemem_map .map [overlap ].end_pfn );
5137
+
5138
+ if (pos != overlap && overlap + 1 != movablemem_map .nr_map )
5139
+ memmove (& movablemem_map .map [pos + 1 ],
5140
+ & movablemem_map .map [overlap + 1 ],
5141
+ sizeof (struct movablemem_entry ) *
5142
+ (movablemem_map .nr_map - overlap - 1 ));
5143
+
5144
+ movablemem_map .nr_map -= overlap - pos ;
5145
+ }
5146
+
5147
+ /**
5148
+ * movablemem_map_add_region - Add a memory range into movablemem_map.
5149
+ * @start: physical start address of range
5150
+ * @end: physical end address of range
5151
+ *
5152
+ * This function transform the physical address into pfn, and then add the
5153
+ * range into movablemem_map by calling insert_movablemem_map().
5154
+ */
5155
+ static void __init movablemem_map_add_region (u64 start , u64 size )
5156
+ {
5157
+ unsigned long start_pfn , end_pfn ;
5158
+
5159
+ /* In case size == 0 or start + size overflows */
5160
+ if (start + size <= start )
5161
+ return ;
5162
+
5163
+ if (movablemem_map .nr_map >= ARRAY_SIZE (movablemem_map .map )) {
5164
+ pr_err ("movablemem_map: too many entries;"
5165
+ " ignoring [mem %#010llx-%#010llx]\n" ,
5166
+ (unsigned long long ) start ,
5167
+ (unsigned long long ) (start + size - 1 ));
5168
+ return ;
5169
+ }
5170
+
5171
+ start_pfn = PFN_DOWN (start );
5172
+ end_pfn = PFN_UP (start + size );
5173
+ insert_movablemem_map (start_pfn , end_pfn );
5174
+ }
5175
+
5176
+ /*
5177
+ * cmdline_parse_movablemem_map - Parse boot option movablemem_map.
5178
+ * @p: The boot option of the following format:
5179
+ * movablemem_map=nn[KMG]@ss[KMG]
5180
+ *
5181
+ * This option sets the memory range [ss, ss+nn) to be used as movable memory.
5182
+ *
5183
+ * Return: 0 on success or -EINVAL on failure.
5184
+ */
5185
+ static int __init cmdline_parse_movablemem_map (char * p )
5186
+ {
5187
+ char * oldp ;
5188
+ u64 start_at , mem_size ;
5189
+
5190
+ if (!p )
5191
+ goto err ;
5192
+
5193
+ oldp = p ;
5194
+ mem_size = memparse (p , & p );
5195
+ if (p == oldp )
5196
+ goto err ;
5197
+
5198
+ if (* p == '@' ) {
5199
+ oldp = ++ p ;
5200
+ start_at = memparse (p , & p );
5201
+ if (p == oldp || * p != '\0' )
5202
+ goto err ;
5203
+
5204
+ movablemem_map_add_region (start_at , mem_size );
5205
+ return 0 ;
5206
+ }
5207
+ err :
5208
+ return - EINVAL ;
5209
+ }
5210
+ early_param ("movablemem_map" , cmdline_parse_movablemem_map );
5211
+
5081
5212
#endif /* CONFIG_HAVE_MEMBLOCK_NODE_MAP */
5082
5213
5083
5214
/**
0 commit comments