@@ -1256,7 +1256,33 @@ static const struct vm_operations_struct vcsm_vm_ops = {
 	.fault = vcsm_vma_fault,
 };
 
-static int clean_invalid_mem_2d(const void __user *addr,
+/* Converts VCSM_CACHE_OP_* to an operating function. */
+static void (*cache_op_to_func(const unsigned cache_op))
+		(const void *, const void *)
+{
+	switch (cache_op) {
+	case VCSM_CACHE_OP_NOP:
+		return NULL;
+
+	case VCSM_CACHE_OP_INV:
+		return dmac_inv_range;
+
+	case VCSM_CACHE_OP_CLEAN:
+		return dmac_clean_range;
+
+	case VCSM_CACHE_OP_FLUSH:
+		return dmac_flush_range;
+
+	default:
+		pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
+		return NULL;
+	}
+}
+
+/*
+ * Clean/invalid/flush cache of which buffer is already pinned (i.e. accessed).
+ */
+static int clean_invalid_contiguous_mem_2d(const void __user *addr,
 	const size_t block_count, const size_t block_size, const size_t stride,
 	const unsigned cache_op)
 {
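The declarator of the new cache_op_to_func helper reads as a function taking an unsigned cache op and returning a pointer to a function that takes two const void * range bounds and returns void, matching the dmac_*_range routines. A minimal sketch of an equivalent, easier-to-read spelling; the typedef name below is illustrative and not part of the patch:

	/* Hypothetical typedef, for readability only (not in the patch). */
	typedef void (*vcsm_range_op_t)(const void *start, const void *end);

	/* Equivalent declaration of the helper added above: maps a
	 * VCSM_CACHE_OP_* value to a dmac_*_range routine, or NULL. */
	static vcsm_range_op_t cache_op_to_func(const unsigned cache_op);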
@@ -1268,37 +1294,83 @@ static int clean_invalid_mem_2d(const void __user *addr,
 		return -EINVAL;
 	}
 
-	switch (cache_op) {
-	case VCSM_CACHE_OP_NOP:
-		return 0;
-	case VCSM_CACHE_OP_INV:
-		op_fn = dmac_inv_range;
-		break;
-	case VCSM_CACHE_OP_CLEAN:
-		op_fn = dmac_clean_range;
-		break;
-	case VCSM_CACHE_OP_FLUSH:
-		op_fn = dmac_flush_range;
-		break;
-	default:
-		pr_err("[%s]: Invalid cache_op: 0x%08x\n", __func__, cache_op);
+	op_fn = cache_op_to_func(cache_op);
+	if (op_fn == NULL)
 		return -EINVAL;
-	}
 
 	for (i = 0; i < block_count; i++, addr += stride)
 		op_fn(addr, addr + block_size);
 
 	return 0;
 }
 
-static int clean_invalid_mem(const void __user *addr, const size_t size,
+/* Clean/invalid/flush cache of which buffer may be non-pinned. */
+/* The caller must lock current->mm->mmap_sem for read. */
+static int clean_invalid_mem_walk(unsigned long addr, const size_t size,
 	const unsigned cache_op)
 {
-	return clean_invalid_mem_2d(addr, 1, size, 0, cache_op);
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+	unsigned long pgd_next, pud_next, pmd_next;
+	const unsigned long end = ALIGN(addr + size, PAGE_SIZE);
+	void (*op_fn)(const void *, const void *);
+
+	addr &= PAGE_MASK;
+
+	if (addr >= end)
+		return 0;
+
+	op_fn = cache_op_to_func(cache_op);
+	if (op_fn == NULL)
+		return -EINVAL;
+
+	/* Walk PGD */
+	pgd = pgd_offset(current->mm, addr);
+	do {
+		pgd_next = pgd_addr_end(addr, end);
+
+		if (pgd_none(*pgd) || pgd_bad(*pgd))
+			continue;
+
+		/* Walk PUD */
+		pud = pud_offset(pgd, addr);
+		do {
+			pud_next = pud_addr_end(addr, pgd_next);
+			if (pud_none(*pud) || pud_bad(*pud))
+				continue;
+
+			/* Walk PMD */
+			pmd = pmd_offset(pud, addr);
+			do {
+				pmd_next = pmd_addr_end(addr, pud_next);
+				if (pmd_none(*pmd) || pmd_bad(*pmd))
+					continue;
+
+				/* Walk PTE */
+				pte = pte_offset_map(pmd, addr);
+				do {
+					if (pte_none(*pte) || !pte_present(*pte))
+						continue;
+
+					op_fn((const void __user *) addr,
+					      (const void __user *) (addr + PAGE_SIZE));
+				} while (pte++, addr += PAGE_SIZE, addr != pmd_next);
+				pte_unmap(pte);
+
+			} while (pmd++, addr = pmd_next, addr != pud_next);
+
+		} while (pud++, addr = pud_next, addr != pgd_next);
+
+	} while (pgd++, addr = pgd_next, addr != end);
+
+	return 0;
 }
 
-static int clean_invalid_resource(const void __user *addr, const size_t size,
-		const unsigned cache_op, const int usr_hdl,
+/* Clean/invalid/flush cache of buffer in resource */
+static int clean_invalid_resource_walk(const void __user *addr,
+	const size_t size, const unsigned cache_op, const int usr_hdl,
 	struct sm_resource_t *resource)
 {
 	int err;
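For reference, a minimal user-space sketch (not part of the patch; the function name and values are illustrative) of the address arithmetic that clean_invalid_contiguous_mem_2d performs: it visits block_count ranges of block_size bytes, each offset by stride from the previous one, and hands each [start, end) pair to the selected dmac_*_range routine.

	#include <inttypes.h>
	#include <stddef.h>
	#include <stdio.h>

	/* Print the [start, end) ranges the 2D loop would pass to op_fn. */
	static void show_2d_ranges(uintptr_t addr, size_t block_count,
				   size_t block_size, size_t stride)
	{
		size_t i;

		for (i = 0; i < block_count; i++, addr += stride)
			printf("op_fn(0x%" PRIxPTR ", 0x%" PRIxPTR ")\n",
			       addr, (uintptr_t)(addr + block_size));
	}

	int main(void)
	{
		/* e.g. a 4-row tile of 256-byte rows in a buffer with a 1024-byte pitch */
		show_2d_ranges(0x10000, 4, 256, 1024);
		return 0;
	}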
@@ -1355,7 +1427,10 @@ static int clean_invalid_resource(const void __user *addr, const size_t size,
 		return -EFAULT;
 	}
 
-	err = clean_invalid_mem(addr, size, cache_op);
+	down_read(&current->mm->mmap_sem);
+	err = clean_invalid_mem_walk((unsigned long) addr, size, cache_op);
+	up_read(&current->mm->mmap_sem);
+
 	if (err)
 		resource->res_stats[stat_failure]++;
 
@@ -2004,7 +2079,7 @@ static int vc_sm_ioctl_unlock(struct sm_priv_data_t *private,
 			const unsigned long start = map->vma->vm_start;
 			const unsigned long end = map->vma->vm_end;
 
-			ret = clean_invalid_mem((void __user *) start, end - start,
+			ret = clean_invalid_mem_walk(start, end - start,
 					VCSM_CACHE_OP_FLUSH);
 			if (ret)
 				goto error;
@@ -2886,7 +2961,7 @@ static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 				goto out;
 			}
 
-			ret = clean_invalid_resource((void __user *) ioparam.addr,
+			ret = clean_invalid_resource_walk((void __user *) ioparam.addr,
 					ioparam.size, VCSM_CACHE_OP_FLUSH, ioparam.handle,
 					resource);
 			vmcs_sm_release_resource(resource, 0);
@@ -2917,7 +2992,7 @@ static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 				goto out;
 			}
 
-			ret = clean_invalid_resource((void __user *) ioparam.addr,
+			ret = clean_invalid_resource_walk((void __user *) ioparam.addr,
 					ioparam.size, VCSM_CACHE_OP_INV, ioparam.handle, resource);
 			vmcs_sm_release_resource(resource, 0);
 			if (ret)
@@ -2951,16 +3026,19 @@ static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 					goto out;
 				}
 
-				ret = clean_invalid_resource((void __user *) ioparam.s[i].addr,
-						ioparam.s[i].size, ioparam.s[i].cmd,
-						ioparam.s[i].handle, resource);
+				ret = clean_invalid_resource_walk(
+						(void __user *) ioparam.s[i].addr, ioparam.s[i].size,
+						ioparam.s[i].cmd, ioparam.s[i].handle, resource);
 				vmcs_sm_release_resource(resource, 0);
 				if (ret)
 					goto out;
 			}
 		}
 		break;
-	/* Flush/Invalidate the cache for a given mapping. */
+	/*
+	 * Flush/Invalidate the cache for a given mapping.
+	 * Blocks must be pinned (i.e. accessed) before this call.
+	 */
 	case VMCS_SM_CMD_CLEAN_INVALID2:
 	{
 		int i;
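The comment added above VMCS_SM_CMD_CLEAN_INVALID2 requires that the blocks already be pinned (i.e. accessed) before the ioctl is issued: clean_invalid_contiguous_mem_2d operates directly on the user addresses, unlike clean_invalid_mem_walk, which skips non-present pages. A minimal user-space sketch of that precondition, assuming a hypothetical pointer to a driver-provided mapping (the ioctl call itself is omitted):

	#include <stddef.h>
	#include <stdint.h>

	/* Touch one byte per page in every block so the pages are faulted in
	 * ("pinned" in the sense used by the comment above) before issuing a
	 * VMCS_SM_CMD_CLEAN_INVALID2 request that covers them. */
	void touch_blocks(volatile const uint8_t *start, size_t block_count,
			  size_t block_size, size_t stride, size_t page_size)
	{
		size_t i, off;

		for (i = 0; i < block_count; i++, start += stride)
			for (off = 0; off < block_size; off += page_size)
				(void)start[off];
	}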
@@ -2993,12 +3071,13 @@ static long vc_sm_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
 		for (i = 0; i < ioparam.op_count; i++) {
 			const struct vmcs_sm_ioctl_clean_invalid_block * const op = block + i;
 
-			ret = clean_invalid_mem_2d((void __user *) op->start_address,
-					op->block_count, op->block_size,
-					op->inter_block_stride, op->invalidate_mode);
 			if (op->invalidate_mode == VCSM_CACHE_OP_NOP)
 				continue;
 
+			ret = clean_invalid_contiguous_mem_2d(
+					(void __user *) op->start_address, op->block_count,
+					op->block_size, op->inter_block_stride,
+					op->invalidate_mode);
 			if (ret)
 				break;
 		}