@@ -1837,6 +1837,7 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 idx;
+	bool sched_work = false;
 
 	if (!adev->gfx.enable_cleaner_shader)
 		return;
@@ -1852,15 +1853,19 @@ void amdgpu_gfx_enforce_isolation_ring_begin_use(struct amdgpu_ring *ring)
 	mutex_lock(&adev->enforce_isolation_mutex);
 	if (adev->enforce_isolation[idx]) {
 		if (adev->kfd.init_complete)
-			amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
+			sched_work = true;
 	}
 	mutex_unlock(&adev->enforce_isolation_mutex);
+
+	if (sched_work)
+		amdgpu_gfx_kfd_sch_ctrl(adev, idx, false);
 }
 
 void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
 {
 	struct amdgpu_device *adev = ring->adev;
 	u32 idx;
+	bool sched_work = false;
 
 	if (!adev->gfx.enable_cleaner_shader)
 		return;
@@ -1876,7 +1881,10 @@ void amdgpu_gfx_enforce_isolation_ring_end_use(struct amdgpu_ring *ring)
 	mutex_lock(&adev->enforce_isolation_mutex);
 	if (adev->enforce_isolation[idx]) {
 		if (adev->kfd.init_complete)
-			amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
+			sched_work = true;
 	}
 	mutex_unlock(&adev->enforce_isolation_mutex);
+
+	if (sched_work)
+		amdgpu_gfx_kfd_sch_ctrl(adev, idx, true);
 }
0 commit comments