|
15 | 15 | #define TASK_SIZE_MAX TASK_SIZE_USER64 |
16 | 16 | #endif |
17 | 17 |
|
| 18 | +/* Threshold above which VMX copy path is used */ |
| 19 | +#define VMX_COPY_THRESHOLD 3328 |
| 20 | + |
18 | 21 | #include <asm-generic/access_ok.h> |
19 | 22 |
|
20 | 23 | /* |
@@ -326,40 +329,62 @@ do { \ |
326 | 329 | extern unsigned long __copy_tofrom_user(void __user *to, |
327 | 330 | const void __user *from, unsigned long size); |
328 | 331 |
|
329 | | -#ifdef __powerpc64__ |
330 | | -static inline unsigned long |
331 | | -raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) |
| 332 | +unsigned long __copy_tofrom_user_base(void __user *to, |
| 333 | + const void __user *from, unsigned long size); |
| 334 | + |
| 335 | +unsigned long __copy_tofrom_user_power7_vmx(void __user *to, |
| 336 | + const void __user *from, unsigned long size); |
| 337 | + |
/*
 * will_use_vmx() - should this copy take the VMX-accelerated path?
 * True only when ALTIVEC support is built in, the CPU advertises
 * CPU_FTR_VMX_COPY, and the copy is larger than VMX_COPYTHRESHOLD
 * bytes (presumably smaller copies don't amortize the cost of
 * entering/leaving VMX state — confirm against the VMX copy routine).
 */
| 338 | +static __always_inline bool will_use_vmx(unsigned long n) |
| 339 | +{ |
| 340 | + return IS_ENABLED(CONFIG_ALTIVEC) && cpu_has_feature(CPU_FTR_VMX_COPY) && |
| 341 | + n > VMX_COPY_THRESHOLD; |
| 342 | +} |
| 343 | + |
/*
 * raw_copy_tofrom_user() - common worker behind raw_copy_{from,to,in}_user.
 * @dir is the KUAP direction (KUAP_READ / KUAP_WRITE / KUAP_READ_WRITE)
 * passed straight to allow_user_access()/prevent_user_access().
 * Return value follows __copy_tofrom_user(): presumably the number of
 * bytes NOT copied, so nonzero means the copy was incomplete — confirm.
 */
| 344 | +static __always_inline unsigned long |
| 345 | +raw_copy_tofrom_user(void __user *to, const void __user *from, |
| 346 | + unsigned long n, unsigned long dir) |
332 | 347 | { |
333 | 348 | unsigned long ret; |
334 | 349 |
|
335 | | - barrier_nospec(); |
336 | | - allow_user_access(to, KUAP_READ_WRITE); |
/* Large copy on a VMX-capable CPU: try the POWER7 vector copy first. */
| 350 | + if (will_use_vmx(n) && enter_vmx_usercopy()) { |
| 351 | + allow_user_access(to, dir); |
| 352 | + ret = __copy_tofrom_user_power7_vmx(to, from, n); |
| 353 | + prevent_user_access(dir); |
| 354 | + exit_vmx_usercopy(); |
| 355 | + |
/*
 * NOTE(review): on a nonzero (incomplete) VMX result the partial
 * progress is discarded and the *entire* copy is redone with the
 * base routine using the original to/from/n — confirm the VMX
 * routine's partial-copy semantics make a full restart safe here.
 */
| 356 | + if (unlikely(ret)) { |
| 357 | + allow_user_access(to, dir); |
| 358 | + ret = __copy_tofrom_user_base(to, from, n); |
| 359 | + prevent_user_access(dir); |
| 360 | + } |
| 361 | + return ret; |
| 362 | + } |
| 363 | + |
/* Scalar path: copy too small for VMX, or VMX unavailable/unentered. */
| 364 | + allow_user_access(to, dir); |
337 | 365 | ret = __copy_tofrom_user(to, from, n); |
338 | | - prevent_user_access(KUAP_READ_WRITE); |
| 366 | + prevent_user_access(dir); |
339 | 367 | return ret; |
340 | 368 | } |
341 | | -#endif /* __powerpc64__ */ |
342 | 369 | |
|
343 | | -static inline unsigned long raw_copy_from_user(void *to, |
344 | | - const void __user *from, unsigned long n) |
/*
 * raw_copy_in_user() - user-to-user copy, PPC64 only.
 * Both pointers are user addresses, so both read and write access to
 * userspace are opened (KUAP_READ_WRITE). barrier_nospec() is issued
 * first, as the old open-coded version did before its user access.
 */
| 370 | +#ifdef CONFIG_PPC64 |
| 371 | +static inline unsigned long |
| 372 | +raw_copy_in_user(void __user *to, const void __user *from, unsigned long n) |
345 | 373 | { |
346 | | - unsigned long ret; |
| 374 | + barrier_nospec(); |
| 375 | + return raw_copy_tofrom_user(to, from, n, KUAP_READ_WRITE); |
| 376 | +} |
| 377 | +#endif /* CONFIG_PPC64 */ |
347 | 378 |
|
348 | | - allow_user_access(NULL, KUAP_READ); |
349 | | - ret = __copy_tofrom_user((__force void __user *)to, from, n); |
350 | | - prevent_user_access(KUAP_READ); |
351 | | - return ret; |
/*
 * raw_copy_from_user() - copy user -> kernel; only user reads opened.
 * NOTE(review): the old code passed NULL to allow_user_access() here,
 * but the shared helper now passes the kernel 'to' pointer — confirm
 * the address argument is ignored when only KUAP_READ is opened.
 */
| 379 | +static inline unsigned long raw_copy_from_user(void *to, const void __user *from, unsigned long n) |
| 380 | +{ |
| 381 | + return raw_copy_tofrom_user((__force void __user *)to, from, n, KUAP_READ); |
352 | 382 | } |
353 | 383 |
|
/*
 * raw_copy_to_user() - copy kernel -> user; only user writes opened
 * (KUAP_WRITE), matching the old open-coded version it replaces.
 */
354 | 384 | static inline unsigned long |
355 | 385 | raw_copy_to_user(void __user *to, const void *from, unsigned long n) |
356 | 386 | { |
357 | | - unsigned long ret; |
358 | | - |
359 | | - allow_user_access(to, KUAP_WRITE); |
360 | | - ret = __copy_tofrom_user(to, (__force const void __user *)from, n); |
361 | | - prevent_user_access(KUAP_WRITE); |
362 | | - return ret; |
| 387 | + return raw_copy_tofrom_user(to, (__force const void __user *)from, n, KUAP_WRITE); |
363 | 388 | } |
364 | 389 |
|
365 | 390 | unsigned long __arch_clear_user(void __user *addr, unsigned long size); |
|
0 commit comments