|
12 | 12 |
|
13 | 13 | #define FUTEX_MAX_LOOPS 128 /* What's the largest number you can think of? */ |
14 | 14 |
|
/*
 * LLSC_FUTEX_ATOMIC_OP - generate an LL/SC futex read-modify-write helper
 * named __llsc_futex_atomic_<op>().
 *
 * Each generated function applies @insn (computing [newval] from [oldval]
 * and [oparg]) to the user word at @uaddr inside an LDXR/STLXR loop, with
 * user access temporarily enabled around the sequence.  The exclusive
 * store is retried up to FUTEX_MAX_LOOPS times before giving up with
 * -EAGAIN.  A fault on either user access is fixed up via the exception
 * table entries, which branch to label 3 with the error code in [ret].
 * On success (ret == 0) the pre-modification value is stored to *@oval.
 */
#define LLSC_FUTEX_ATOMIC_OP(op, insn)					\
static __always_inline int						\
__llsc_futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval)	\
{									\
	unsigned int loops = FUTEX_MAX_LOOPS;				\
	int ret, oldval, newval;					\
									\
	uaccess_enable_privileged();					\
	asm volatile("// __llsc_futex_atomic_" #op "\n"			\
"	prfm	pstl1strm, %[uaddr]\n"					\
"1:	ldxr	%w[oldval], %[uaddr]\n"					\
	insn "\n"							\
"2:	stlxr	%w[ret], %w[newval], %[uaddr]\n"			\
"	cbz	%w[ret], 3f\n"						\
"	sub	%w[loops], %w[loops], %w[ret]\n"			\
"	cbnz	%w[loops], 1b\n"					\
"	mov	%w[ret], %w[err]\n"					\
"3:\n"									\
"	dmb	ish\n"							\
	_ASM_EXTABLE_UACCESS_ERR(1b, 3b, %w[ret])			\
	_ASM_EXTABLE_UACCESS_ERR(2b, 3b, %w[ret])			\
	: [ret] "=&r" (ret), [oldval] "=&r" (oldval),			\
	  [uaddr] "+Q" (*uaddr), [newval] "=&r" (newval),		\
	  [loops] "+r" (loops)						\
	: [oparg] "r" (oparg), [err] "Ir" (-EAGAIN)			\
	: "memory");							\
	uaccess_disable_privileged();					\
									\
	if (!ret)							\
		*oval = oldval;						\
									\
	return ret;							\
}

/* Generate __llsc_futex_atomic_{add,or,and,eor,set}(). */
LLSC_FUTEX_ATOMIC_OP(add, "add %w[newval], %w[oldval], %w[oparg]")
LLSC_FUTEX_ATOMIC_OP(or, "orr %w[newval], %w[oldval], %w[oparg]")
LLSC_FUTEX_ATOMIC_OP(and, "and %w[newval], %w[oldval], %w[oparg]")
LLSC_FUTEX_ATOMIC_OP(eor, "eor %w[newval], %w[oldval], %w[oparg]")
LLSC_FUTEX_ATOMIC_OP(set, "mov %w[newval], %w[oparg]")
| 54 | + |
/*
 * __llsc_futex_cmpxchg - LL/SC compare-and-exchange on a futex word.
 *
 * If the user word at @uaddr equals @oldval, replace it with @newval,
 * retrying the exclusive store up to FUTEX_MAX_LOOPS times before giving
 * up with -EAGAIN.  A value mismatch branches straight to label 4,
 * skipping both the store and the DMB, and still counts as success.
 * User-access faults are fixed up via the exception table to label 4
 * with the error code in [ret].  On success (ret == 0) the value
 * actually read from @uaddr is stored to *@oval, so the caller can
 * detect a mismatch by comparing it with @oldval.
 */
static __always_inline int
__llsc_futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
	int ret = 0;
	unsigned int loops = FUTEX_MAX_LOOPS;
	u32 val, tmp;

	uaccess_enable_privileged();
	asm volatile("//__llsc_futex_cmpxchg\n"
"	prfm	pstl1strm, %[uaddr]\n"
"1:	ldxr	%w[curval], %[uaddr]\n"
"	eor	%w[tmp], %w[curval], %w[oldval]\n"
"	cbnz	%w[tmp], 4f\n"
"2:	stlxr	%w[tmp], %w[newval], %[uaddr]\n"
"	cbz	%w[tmp], 3f\n"
"	sub	%w[loops], %w[loops], %w[tmp]\n"
"	cbnz	%w[loops], 1b\n"
"	mov	%w[ret], %w[err]\n"
"3:\n"
"	dmb	ish\n"
"4:\n"
	_ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w[ret])
	_ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w[ret])
	: [ret] "+r" (ret), [curval] "=&r" (val),
	  [uaddr] "+Q" (*uaddr), [tmp] "=&r" (tmp),
	  [loops] "+r" (loops)
	: [oldval] "r" (oldval), [newval] "r" (newval),
	  [err] "Ir" (-EAGAIN)
	: "memory");
	uaccess_disable_privileged();

	if (!ret)
		*oval = val;

	return ret;
}
| 91 | + |
/*
 * FUTEX_ATOMIC_OP - generate the __futex_atomic_<op>() entry points used
 * by arch_futex_atomic_op_inuser().  Each one currently just forwards to
 * the corresponding LL/SC implementation above.
 */
#define FUTEX_ATOMIC_OP(op)						\
static __always_inline int						\
__futex_atomic_##op(int oparg, u32 __user *uaddr, int *oval)		\
{									\
	return __llsc_futex_atomic_##op(oparg, uaddr, oval);		\
}

FUTEX_ATOMIC_OP(add)
FUTEX_ATOMIC_OP(or)
FUTEX_ATOMIC_OP(and)
FUTEX_ATOMIC_OP(eor)
FUTEX_ATOMIC_OP(set)
| 104 | + |
/*
 * __futex_cmpxchg - compare-and-exchange the futex word at @uaddr from
 * @oldval to @newval; currently forwards to the LL/SC implementation.
 */
static __always_inline int
__futex_cmpxchg(u32 __user *uaddr, u32 oldval, u32 newval, u32 *oval)
{
	return __llsc_futex_cmpxchg(uaddr, oldval, newval, oval);
}
39 | 110 |
|
40 | 111 | static inline int |
41 | 112 | arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *_uaddr) |
42 | 113 | { |
43 | | - int oldval = 0, ret, tmp; |
44 | | - u32 __user *uaddr = __uaccess_mask_ptr(_uaddr); |
| 114 | + int ret; |
| 115 | + u32 __user *uaddr; |
45 | 116 |
|
46 | 117 | if (!access_ok(_uaddr, sizeof(u32))) |
47 | 118 | return -EFAULT; |
48 | 119 |
|
| 120 | + uaddr = __uaccess_mask_ptr(_uaddr); |
| 121 | + |
49 | 122 | switch (op) { |
50 | 123 | case FUTEX_OP_SET: |
51 | | - __futex_atomic_op("mov %w3, %w5", |
52 | | - ret, oldval, uaddr, tmp, oparg); |
| 124 | + ret = __futex_atomic_set(oparg, uaddr, oval); |
53 | 125 | break; |
54 | 126 | case FUTEX_OP_ADD: |
55 | | - __futex_atomic_op("add %w3, %w1, %w5", |
56 | | - ret, oldval, uaddr, tmp, oparg); |
| 127 | + ret = __futex_atomic_add(oparg, uaddr, oval); |
57 | 128 | break; |
58 | 129 | case FUTEX_OP_OR: |
59 | | - __futex_atomic_op("orr %w3, %w1, %w5", |
60 | | - ret, oldval, uaddr, tmp, oparg); |
| 130 | + ret = __futex_atomic_or(oparg, uaddr, oval); |
61 | 131 | break; |
62 | 132 | case FUTEX_OP_ANDN: |
63 | | - __futex_atomic_op("and %w3, %w1, %w5", |
64 | | - ret, oldval, uaddr, tmp, ~oparg); |
| 133 | + ret = __futex_atomic_and(~oparg, uaddr, oval); |
65 | 134 | break; |
66 | 135 | case FUTEX_OP_XOR: |
67 | | - __futex_atomic_op("eor %w3, %w1, %w5", |
68 | | - ret, oldval, uaddr, tmp, oparg); |
| 136 | + ret = __futex_atomic_eor(oparg, uaddr, oval); |
69 | 137 | break; |
70 | 138 | default: |
71 | 139 | ret = -ENOSYS; |
72 | 140 | } |
73 | 141 |
|
74 | | - if (!ret) |
75 | | - *oval = oldval; |
76 | | - |
77 | 142 | return ret; |
78 | 143 | } |
79 | 144 |
|
80 | 145 | static inline int |
81 | 146 | futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *_uaddr, |
82 | 147 | u32 oldval, u32 newval) |
83 | 148 | { |
84 | | - int ret = 0; |
85 | | - unsigned int loops = FUTEX_MAX_LOOPS; |
86 | | - u32 val, tmp; |
87 | 149 | u32 __user *uaddr; |
88 | 150 |
|
89 | 151 | if (!access_ok(_uaddr, sizeof(u32))) |
90 | 152 | return -EFAULT; |
91 | 153 |
|
92 | 154 | uaddr = __uaccess_mask_ptr(_uaddr); |
93 | | - uaccess_enable_privileged(); |
94 | | - asm volatile("// futex_atomic_cmpxchg_inatomic\n" |
95 | | -" prfm pstl1strm, %2\n" |
96 | | -"1: ldxr %w1, %2\n" |
97 | | -" sub %w3, %w1, %w5\n" |
98 | | -" cbnz %w3, 4f\n" |
99 | | -"2: stlxr %w3, %w6, %2\n" |
100 | | -" cbz %w3, 3f\n" |
101 | | -" sub %w4, %w4, %w3\n" |
102 | | -" cbnz %w4, 1b\n" |
103 | | -" mov %w0, %w7\n" |
104 | | -"3:\n" |
105 | | -" dmb ish\n" |
106 | | -"4:\n" |
107 | | - _ASM_EXTABLE_UACCESS_ERR(1b, 4b, %w0) |
108 | | - _ASM_EXTABLE_UACCESS_ERR(2b, 4b, %w0) |
109 | | - : "+r" (ret), "=&r" (val), "+Q" (*uaddr), "=&r" (tmp), "+r" (loops) |
110 | | - : "r" (oldval), "r" (newval), "Ir" (-EAGAIN) |
111 | | - : "memory"); |
112 | | - uaccess_disable_privileged(); |
113 | 155 |
|
114 | | - if (!ret) |
115 | | - *uval = val; |
116 | | - |
117 | | - return ret; |
| 156 | + return __futex_cmpxchg(uaddr, oldval, newval, uval); |
118 | 157 | } |
119 | 158 |
|
120 | 159 | #endif /* __ASM_FUTEX_H */ |
0 commit comments