@@ -38,6 +38,7 @@ xe_svm_range_alloc(struct drm_gpusvm *gpusvm)
 	if (!range)
 		return ERR_PTR(-ENOMEM);
 
+	INIT_LIST_HEAD(&range->garbage_collector_link);
 	xe_vm_get(gpusvm_to_vm(gpusvm));
 
 	return &range->base;
@@ -54,6 +55,24 @@ static struct xe_svm_range *to_xe_range(struct drm_gpusvm_range *r)
 	return container_of(r, struct xe_svm_range, base);
 }
 
+static void
+xe_svm_garbage_collector_add_range(struct xe_vm *vm, struct xe_svm_range *range,
+				   const struct mmu_notifier_range *mmu_range)
+{
+	struct xe_device *xe = vm->xe;
+
+	drm_gpusvm_range_set_unmapped(&range->base, mmu_range);
+
+	spin_lock(&vm->svm.garbage_collector.lock);
+	if (list_empty(&range->garbage_collector_link))
+		list_add_tail(&range->garbage_collector_link,
+			      &vm->svm.garbage_collector.range_list);
+	spin_unlock(&vm->svm.garbage_collector.lock);
+
+	queue_work(xe_device_get_root_tile(xe)->primary_gt->usm.pf_wq,
+		   &vm->svm.garbage_collector.work);
+}
+
 static u8
 xe_svm_range_notifier_event_begin(struct xe_vm *vm, struct drm_gpusvm_range *r,
 				  const struct mmu_notifier_range *mmu_range,
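The per-range link and the per-VM collector state used above are declared outside this file, so they do not appear in these hunks. Purely as a hedged sketch reconstructed from the usage here (not copied from the header side of the patch), the supporting fields presumably look roughly like this:

/*
 * Hedged sketch, reconstructed from usage in this file: a per-range list
 * link plus per-VM garbage-collector state. Field names follow the code
 * above; exact placement and kerneldoc in the real headers may differ.
 */
struct xe_svm_range {
	/** @base: base drm_gpusvm_range */
	struct drm_gpusvm_range base;
	/** @garbage_collector_link: link into vm->svm.garbage_collector.range_list */
	struct list_head garbage_collector_link;
	/* ... */
};

/* Inside struct xe_vm, alongside the existing svm.gpusvm member: */
struct {
	struct drm_gpusvm gpusvm;
	struct {
		/** @lock: protects @range_list */
		spinlock_t lock;
		/** @range_list: ranges queued for deferred unbind/removal */
		struct list_head range_list;
		/** @work: worker that drains @range_list */
		struct work_struct work;
	} garbage_collector;
} svm;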
@@ -98,7 +117,9 @@ xe_svm_range_notifier_event_end(struct xe_vm *vm, struct drm_gpusvm_range *r,
 	xe_svm_assert_in_notifier(vm);
 
 	drm_gpusvm_range_unmap_pages(&vm->svm.gpusvm, r, &ctx);
-	/* TODO: Add range to garbage collector if VM is not closed */
+	if (!xe_vm_is_closed(vm) && mmu_range->event == MMU_NOTIFY_UNMAP)
+		xe_svm_garbage_collector_add_range(vm, to_xe_range(r),
+						   mmu_range);
 }
 
 static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
@@ -198,6 +219,63 @@ static void xe_svm_invalidate(struct drm_gpusvm *gpusvm,
 		xe_svm_range_notifier_event_end(vm, r, mmu_range);
 }
 
+static int __xe_svm_garbage_collector(struct xe_vm *vm,
+				      struct xe_svm_range *range)
+{
+	/* TODO: Do unbind */
+
+	drm_gpusvm_range_remove(&vm->svm.gpusvm, &range->base);
+
+	return 0;
+}
+
+static int xe_svm_garbage_collector(struct xe_vm *vm)
+{
+	struct xe_svm_range *range;
+	int err;
+
+	lockdep_assert_held_write(&vm->lock);
+
+	if (xe_vm_is_closed_or_banned(vm))
+		return -ENOENT;
+
+	spin_lock(&vm->svm.garbage_collector.lock);
+	for (;;) {
+		range = list_first_entry_or_null(&vm->svm.garbage_collector.range_list,
+						 typeof(*range),
+						 garbage_collector_link);
+		if (!range)
+			break;
+
+		list_del(&range->garbage_collector_link);
+		spin_unlock(&vm->svm.garbage_collector.lock);
+
+		err = __xe_svm_garbage_collector(vm, range);
+		if (err) {
+			drm_warn(&vm->xe->drm,
+				 "Garbage collection failed: %pe\n",
+				 ERR_PTR(err));
+			xe_vm_kill(vm, true);
+			return err;
+		}
+
+		spin_lock(&vm->svm.garbage_collector.lock);
+	}
+	spin_unlock(&vm->svm.garbage_collector.lock);
+
+	return 0;
+}
+
+static void xe_svm_garbage_collector_work_func(struct work_struct *w)
+{
+	struct xe_vm *vm = container_of(w, struct xe_vm,
+					svm.garbage_collector.work);
+
+	down_write(&vm->lock);
+	xe_svm_garbage_collector(vm);
+	up_write(&vm->lock);
+}
+
 static const struct drm_gpusvm_ops gpusvm_ops = {
 	.range_alloc = xe_svm_range_alloc,
 	.range_free = xe_svm_range_free,
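A side note on the drain loop above: the spinlock only guards the list, so it is dropped around __xe_svm_garbage_collector(), which will eventually perform an unbind and therefore may sleep. Below is a minimal, self-contained userspace analog of that pop-one-under-the-lock pattern (hypothetical example code, using a pthread mutex in place of the spinlock; none of these names come from the driver):

/*
 * Userspace analog of the drain loop: pop one node under the lock,
 * drop the lock to do the (potentially slow/sleeping) work, re-take it.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct node {
	struct node *next;
	int id;
};

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *head;	/* list protected by @lock */

static void process(struct node *n)
{
	printf("collecting node %d\n", n->id);	/* stands in for unbind/remove */
	free(n);
}

static void drain(void)
{
	pthread_mutex_lock(&lock);
	for (;;) {
		struct node *n = head;

		if (!n)
			break;

		head = n->next;			/* unlink while holding the lock */
		pthread_mutex_unlock(&lock);

		process(n);			/* lock dropped: work may block */

		pthread_mutex_lock(&lock);
	}
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct node *n = malloc(sizeof(*n));

		n->id = i;
		n->next = head;
		head = n;
	}
	drain();
	return 0;
}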
@@ -222,6 +300,11 @@ int xe_svm_init(struct xe_vm *vm)
 {
 	int err;
 
+	spin_lock_init(&vm->svm.garbage_collector.lock);
+	INIT_LIST_HEAD(&vm->svm.garbage_collector.range_list);
+	INIT_WORK(&vm->svm.garbage_collector.work,
+		  xe_svm_garbage_collector_work_func);
+
 	err = drm_gpusvm_init(&vm->svm.gpusvm, "Xe SVM", &vm->xe->drm,
 			      current->mm, NULL, 0, vm->size,
 			      SZ_512M, &gpusvm_ops, fault_chunk_sizes,
@@ -243,6 +326,7 @@ int xe_svm_init(struct xe_vm *vm)
 void xe_svm_close(struct xe_vm *vm)
 {
 	xe_assert(vm->xe, xe_vm_is_closed(vm));
+	flush_work(&vm->svm.garbage_collector.work);
 }
 
 /**
/**
@@ -292,7 +376,10 @@ int xe_svm_handle_pagefault(struct xe_vm *vm, struct xe_vma *vma,
292
376
xe_assert (vm -> xe , xe_vma_is_cpu_addr_mirror (vma ));
293
377
294
378
retry :
295
- /* TODO: Run garbage collector */
379
+ /* Always process UNMAPs first so view SVM ranges is current */
380
+ err = xe_svm_garbage_collector (vm );
381
+ if (err )
382
+ return err ;
296
383
297
384
r = drm_gpusvm_range_find_or_insert (& vm -> svm .gpusvm , fault_addr ,
298
385
xe_vma_start (vma ), xe_vma_end (vma ),