@@ -279,13 +279,13 @@ do { \
 } while (0)
 
 #ifdef CONFIG_X86_32
-#define __get_user_asm_u64(x, ptr, retval, errret)			\
+#define __get_user_asm_u64(x, ptr, retval)				\
 ({									\
 	__typeof__(ptr) __ptr = (ptr);					\
-	asm volatile("\n"						\
+	asm volatile("\n"					\
 		     "1:	movl %2,%%eax\n"			\
 		     "2:	movl %3,%%edx\n"			\
-		     "3:\n"						\
+		     "3:\n"					\
 		     ".section .fixup,\"ax\"\n"				\
 		     "4:	mov %4,%0\n"				\
 		     "	xorl %%eax,%%eax\n"				\
@@ -296,37 +296,37 @@ do { \
 		     _ASM_EXTABLE_UA(2b, 4b)				\
 		     : "=r" (retval), "=&A"(x)				\
 		     : "m" (__m(__ptr)), "m" __m(((u32 __user *)(__ptr)) + 1), \
-		       "i" (errret), "0" (retval));			\
+		       "i" (-EFAULT), "0" (retval));			\
 })
 
 #else
-#define __get_user_asm_u64(x, ptr, retval, errret) \
-	 __get_user_asm(x, ptr, retval, "q", "", "=r", errret)
+#define __get_user_asm_u64(x, ptr, retval) \
+	 __get_user_asm(x, ptr, retval, "q", "", "=r")
 #endif
 
-#define __get_user_size(x, ptr, size, retval, errret)			\
+#define __get_user_size(x, ptr, size, retval)				\
 do {									\
 	retval = 0;							\
 	__chk_user_ptr(ptr);						\
 	switch (size) {							\
 	case 1:								\
-		__get_user_asm(x, ptr, retval, "b", "b", "=q", errret);	\
+		__get_user_asm(x, ptr, retval, "b", "b", "=q");		\
 		break;							\
 	case 2:								\
-		__get_user_asm(x, ptr, retval, "w", "w", "=r", errret);	\
+		__get_user_asm(x, ptr, retval, "w", "w", "=r");		\
 		break;							\
 	case 4:								\
-		__get_user_asm(x, ptr, retval, "l", "k", "=r", errret);	\
+		__get_user_asm(x, ptr, retval, "l", "k", "=r");		\
 		break;							\
 	case 8:								\
-		__get_user_asm_u64(x, ptr, retval, errret);		\
+		__get_user_asm_u64(x, ptr, retval);			\
 		break;							\
 	default:							\
 		(x) = __get_user_bad();					\
 	}								\
 } while (0)
 
-#define __get_user_asm(x, addr, err, itype, rtype, ltype, errret)	\
+#define __get_user_asm(x, addr, err, itype, rtype, ltype)		\
 	asm volatile("\n"						\
 		     "1:	mov"itype" %2,%"rtype"1\n"		\
 		     "2:\n"						\
@@ -337,7 +337,7 @@ do { \
 		     ".previous\n"					\
 		     _ASM_EXTABLE_UA(1b, 3b)				\
 		     : "=r" (err), ltype(x)				\
-		     : "m" (__m(addr)), "i" (errret), "0" (err))
+		     : "m" (__m(addr)), "i" (-EFAULT), "0" (err))
 
 #define __put_user_nocheck(x, ptr, size)				\
 ({									\
@@ -361,7 +361,7 @@ __pu_label: \
 	__typeof__(ptr) __gu_ptr = (ptr);				\
 	__typeof__(size) __gu_size = (size);				\
 	__uaccess_begin_nospec();					\
-	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err, -EFAULT); \
+	__get_user_size(__gu_val, __gu_ptr, __gu_size, __gu_err);	\
 	__uaccess_end();						\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 	__builtin_expect(__gu_err, 0);					\
@@ -485,7 +485,7 @@ static __must_check __always_inline bool user_access_begin(const void __user *pt
 do {									\
 	int __gu_err;							\
 	__inttype(*(ptr)) __gu_val;					\
-	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err, -EFAULT); \
+	__get_user_size(__gu_val, (ptr), sizeof(*(ptr)), __gu_err);	\
 	(x) = (__force __typeof__(*(ptr)))__gu_val;			\
 	if (unlikely(__gu_err)) goto err_label;				\
 } while (0)
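
What this diff does for callers, in brief: every call site passed the same -EFAULT as errret, so the parameter carried no information and the error code is now hardcoded inside the fetch macros' fixup path. Below is a minimal user-space sketch of the resulting calling convention, under stated assumptions: sketch_fetch and sketch_get_user are hypothetical stand-ins for __get_user_size and __get_user_nocheck, and a real fault is handled via the exception table in assembly, not a NULL check. It uses GNU C statement expressions and __typeof__, as the kernel macros above do.

#include <errno.h>
#include <stdio.h>

/*
 * Fetch helper after the change: no error-code parameter; a "fault"
 * (modelled here as a NULL pointer) always reports -EFAULT.
 */
#define sketch_fetch(x, ptr, err)				\
do {								\
	if ((ptr) == NULL) {					\
		(err) = -EFAULT;				\
	} else {						\
		(x) = *(ptr);					\
		(err) = 0;					\
	}							\
} while (0)

/* Caller side: no trailing -EFAULT argument left to thread through. */
#define sketch_get_user(x, ptr)				\
({								\
	int __gu_err;						\
	__typeof__(*(ptr)) __gu_val = 0;			\
	sketch_fetch(__gu_val, (ptr), __gu_err);		\
	(x) = __gu_val;						\
	__gu_err;	/* 0 on success, -EFAULT on fault */	\
})

int main(void)
{
	int src = 42, dst = 0;
	int *bad = NULL;
	int err;

	err = sketch_get_user(dst, &src);
	printf("ok:    err=%d dst=%d\n", err, dst);	/* err=0   dst=42 */

	err = sketch_get_user(dst, bad);
	printf("fault: err=%d dst=%d\n", err, dst);	/* err=-14 dst=0 */

	return 0;
}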