Skip to content

Commit a1d34a4

Browse files
Author: Peter Zijlstra (committed)
KVM: x86: Remove fastops
No more FASTOPs, remove the remains.

Signed-off-by: Peter Zijlstra (Intel) <[email protected]>
Acked-by: Sean Christopherson <[email protected]>
Link: https://lkml.kernel.org/r/[email protected]
1 parent 77892e1 commit a1d34a4

1 file changed

Lines changed: 1 addition & 171 deletions

File tree

arch/x86/kvm/emulate.c

Lines changed: 1 addition & 171 deletions
Original file line numberDiff line numberDiff line change
@@ -167,7 +167,6 @@
167167
#define Unaligned ((u64)2 << 41) /* Explicitly unaligned (e.g. MOVDQU) */
168168
#define Avx ((u64)3 << 41) /* Advanced Vector Extensions */
169169
#define Aligned16 ((u64)4 << 41) /* Aligned to 16 byte boundary (e.g. FXSAVE) */
170-
#define Fastop ((u64)1 << 44) /* Use opcode::u.fastop */
171170
#define NoWrite ((u64)1 << 45) /* No writeback */
172171
#define SrcWrite ((u64)1 << 46) /* Write back src operand */
173172
#define NoMod ((u64)1 << 47) /* Mod field is ignored */
@@ -203,7 +202,6 @@ struct opcode {
203202
const struct escape *esc;
204203
const struct instr_dual *idual;
205204
const struct mode_dual *mdual;
206-
void (*fastop)(struct fastop *fake);
207205
} u;
208206
int (*check_perm)(struct x86_emulate_ctxt *ctxt);
209207
};
@@ -383,152 +381,6 @@ static int em_##op(struct x86_emulate_ctxt *ctxt) \
383381
ON64(case 8: __EM_ASM_3(op##q, rax, rdx, cl); break;) \
384382
EM_ASM_END
385383

386-
387-
/*
388-
* fastop functions have a special calling convention:
389-
*
390-
* dst: rax (in/out)
391-
* src: rdx (in/out)
392-
* src2: rcx (in)
393-
* flags: rflags (in/out)
394-
* ex: rsi (in:fastop pointer, out:zero if exception)
395-
*
396-
* Moreover, they are all exactly FASTOP_SIZE bytes long, so functions for
397-
* different operand sizes can be reached by calculation, rather than a jump
398-
* table (which would be bigger than the code).
399-
*
400-
* The 16 byte alignment, considering 5 bytes for the RET thunk, 3 for ENDBR
401-
* and 1 for the straight line speculation INT3, leaves 7 bytes for the
402-
* body of the function. Currently none is larger than 4.
403-
*/
404-
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop);
405-
406-
#define FASTOP_SIZE 16
407-
408-
#define __FOP_FUNC(name) \
409-
".align " __stringify(FASTOP_SIZE) " \n\t" \
410-
".type " name ", @function \n\t" \
411-
name ":\n\t" \
412-
ASM_ENDBR \
413-
IBT_NOSEAL(name)
414-
415-
#define FOP_FUNC(name) \
416-
__FOP_FUNC(#name)
417-
418-
#define __FOP_RET(name) \
419-
"11: " ASM_RET \
420-
".size " name ", .-" name "\n\t"
421-
422-
#define FOP_RET(name) \
423-
__FOP_RET(#name)
424-
425-
#define __FOP_START(op, align) \
426-
extern void em_##op(struct fastop *fake); \
427-
asm(".pushsection .text, \"ax\" \n\t" \
428-
".global em_" #op " \n\t" \
429-
".align " __stringify(align) " \n\t" \
430-
"em_" #op ":\n\t"
431-
432-
#define FOP_START(op) __FOP_START(op, FASTOP_SIZE)
433-
434-
#define FOP_END \
435-
".popsection")
436-
437-
#define __FOPNOP(name) \
438-
__FOP_FUNC(name) \
439-
__FOP_RET(name)
440-
441-
#define FOPNOP() \
442-
__FOPNOP(__stringify(__UNIQUE_ID(nop)))
443-
444-
#define FOP1E(op, dst) \
445-
__FOP_FUNC(#op "_" #dst) \
446-
"10: " #op " %" #dst " \n\t" \
447-
__FOP_RET(#op "_" #dst)
448-
449-
#define FOP1EEX(op, dst) \
450-
FOP1E(op, dst) _ASM_EXTABLE_TYPE_REG(10b, 11b, EX_TYPE_ZERO_REG, %%esi)
451-
452-
#define FASTOP1(op) \
453-
FOP_START(op) \
454-
FOP1E(op##b, al) \
455-
FOP1E(op##w, ax) \
456-
FOP1E(op##l, eax) \
457-
ON64(FOP1E(op##q, rax)) \
458-
FOP_END
459-
460-
/* 1-operand, using src2 (for MUL/DIV r/m) */
461-
#define FASTOP1SRC2(op, name) \
462-
FOP_START(name) \
463-
FOP1E(op, cl) \
464-
FOP1E(op, cx) \
465-
FOP1E(op, ecx) \
466-
ON64(FOP1E(op, rcx)) \
467-
FOP_END
468-
469-
/* 1-operand, using src2 (for MUL/DIV r/m), with exceptions */
470-
#define FASTOP1SRC2EX(op, name) \
471-
FOP_START(name) \
472-
FOP1EEX(op, cl) \
473-
FOP1EEX(op, cx) \
474-
FOP1EEX(op, ecx) \
475-
ON64(FOP1EEX(op, rcx)) \
476-
FOP_END
477-
478-
#define FOP2E(op, dst, src) \
479-
__FOP_FUNC(#op "_" #dst "_" #src) \
480-
#op " %" #src ", %" #dst " \n\t" \
481-
__FOP_RET(#op "_" #dst "_" #src)
482-
483-
#define FASTOP2(op) \
484-
FOP_START(op) \
485-
FOP2E(op##b, al, dl) \
486-
FOP2E(op##w, ax, dx) \
487-
FOP2E(op##l, eax, edx) \
488-
ON64(FOP2E(op##q, rax, rdx)) \
489-
FOP_END
490-
491-
/* 2 operand, word only */
492-
#define FASTOP2W(op) \
493-
FOP_START(op) \
494-
FOPNOP() \
495-
FOP2E(op##w, ax, dx) \
496-
FOP2E(op##l, eax, edx) \
497-
ON64(FOP2E(op##q, rax, rdx)) \
498-
FOP_END
499-
500-
/* 2 operand, src is CL */
501-
#define FASTOP2CL(op) \
502-
FOP_START(op) \
503-
FOP2E(op##b, al, cl) \
504-
FOP2E(op##w, ax, cl) \
505-
FOP2E(op##l, eax, cl) \
506-
ON64(FOP2E(op##q, rax, cl)) \
507-
FOP_END
508-
509-
/* 2 operand, src and dest are reversed */
510-
#define FASTOP2R(op, name) \
511-
FOP_START(name) \
512-
FOP2E(op##b, dl, al) \
513-
FOP2E(op##w, dx, ax) \
514-
FOP2E(op##l, edx, eax) \
515-
ON64(FOP2E(op##q, rdx, rax)) \
516-
FOP_END
517-
518-
#define FOP3E(op, dst, src, src2) \
519-
__FOP_FUNC(#op "_" #dst "_" #src "_" #src2) \
520-
#op " %" #src2 ", %" #src ", %" #dst " \n\t"\
521-
__FOP_RET(#op "_" #dst "_" #src "_" #src2)
522-
523-
/* 3-operand, word-only, src2=cl */
524-
#define FASTOP3WCL(op) \
525-
FOP_START(op) \
526-
FOPNOP() \
527-
FOP3E(op##w, ax, dx, cl) \
528-
FOP3E(op##l, eax, edx, cl) \
529-
ON64(FOP3E(op##q, rax, rdx, cl)) \
530-
FOP_END
531-
532384
static int em_salc(struct x86_emulate_ctxt *ctxt)
533385
{
534386
/*
@@ -4052,7 +3904,6 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
40523904
#define MD(_f, _m) { .flags = ((_f) | ModeDual), .u.mdual = (_m) }
40533905
#define E(_f, _e) { .flags = ((_f) | Escape | ModRM), .u.esc = (_e) }
40543906
#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
4055-
#define F(_f, _e) { .flags = (_f) | Fastop, .u.fastop = (_e) }
40563907
#define II(_f, _e, _i) \
40573908
{ .flags = (_f)|Intercept, .u.execute = (_e), .intercept = x86_intercept_##_i }
40583909
#define IIP(_f, _e, _i, _p) \
@@ -5158,24 +5009,6 @@ static void fetch_possible_mmx_operand(struct operand *op)
51585009
kvm_read_mmx_reg(op->addr.mm, &op->mm_val);
51595010
}
51605011

5161-
static int fastop(struct x86_emulate_ctxt *ctxt, fastop_t fop)
5162-
{
5163-
ulong flags = (ctxt->eflags & EFLAGS_MASK) | X86_EFLAGS_IF;
5164-
5165-
if (!(ctxt->d & ByteOp))
5166-
fop += __ffs(ctxt->dst.bytes) * FASTOP_SIZE;
5167-
5168-
asm("push %[flags]; popf; " CALL_NOSPEC " ; pushf; pop %[flags]\n"
5169-
: "+a"(ctxt->dst.val), "+d"(ctxt->src.val), [flags]"+D"(flags),
5170-
[thunk_target]"+S"(fop), ASM_CALL_CONSTRAINT
5171-
: "c"(ctxt->src2.val));
5172-
5173-
ctxt->eflags = (ctxt->eflags & ~EFLAGS_MASK) | (flags & EFLAGS_MASK);
5174-
if (!fop) /* exception is returned in fop variable */
5175-
return emulate_de(ctxt);
5176-
return X86EMUL_CONTINUE;
5177-
}
5178-
51795012
void init_decode_cache(struct x86_emulate_ctxt *ctxt)
51805013
{
51815014
/* Clear fields that are set conditionally but read without a guard. */
@@ -5340,10 +5173,7 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
53405173
ctxt->eflags &= ~X86_EFLAGS_RF;
53415174

53425175
if (ctxt->execute) {
5343-
if (ctxt->d & Fastop)
5344-
rc = fastop(ctxt, ctxt->fop);
5345-
else
5346-
rc = ctxt->execute(ctxt);
5176+
rc = ctxt->execute(ctxt);
53475177
if (rc != X86EMUL_CONTINUE)
53485178
goto done;
53495179
goto writeback;

0 commit comments

Comments (0)