Commit 234f3ce485d54017f15cf5e0699cff4100121601

Authored by Nadav Amit
Committed by Paolo Bonzini
1 parent 05c83ec9b7

KVM: x86: Emulator fixes for eip canonical checks on near branches

Before changing rip (during jmp, call, ret, etc.) the target should be asserted
to be a canonical one, as real CPUs do.  During sysret, both target rsp and rip
should be canonical. If any of these values is noncanonical, a #GP exception
should occur.  The exceptions to this rule are the syscall and sysenter
instructions, in which the assigned rip is checked during the assignment to the
relevant MSRs.

This patch fixes the emulator to behave as real CPUs do for near branches.
Far branches are handled by the next patch.

This fixes CVE-2014-3647.

Cc: stable@vger.kernel.org
Signed-off-by: Nadav Amit <namit@cs.technion.ac.il>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>

Showing 1 changed file with 54 additions and 24 deletions Side-by-side Diff

arch/x86/kvm/emulate.c
... ... @@ -564,7 +564,8 @@
564 564 return emulate_exception(ctxt, NM_VECTOR, 0, false);
565 565 }
566 566  
567   -static inline void assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
  567 +static inline int assign_eip_far(struct x86_emulate_ctxt *ctxt, ulong dst,
  568 + int cs_l)
568 569 {
569 570 switch (ctxt->op_bytes) {
570 571 case 2:
571 572  
572 573  
573 574  
574 575  
... ... @@ -574,18 +575,27 @@
574 575 ctxt->_eip = (u32)dst;
575 576 break;
576 577 case 8:
  578 + if ((cs_l && is_noncanonical_address(dst)) ||
  579 + (!cs_l && (dst & ~(u32)-1)))
  580 + return emulate_gp(ctxt, 0);
577 581 ctxt->_eip = dst;
578 582 break;
579 583 default:
580 584 WARN(1, "unsupported eip assignment size\n");
581 585 }
  586 + return X86EMUL_CONTINUE;
582 587 }
583 588  
584   -static inline void jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
  589 +static inline int assign_eip_near(struct x86_emulate_ctxt *ctxt, ulong dst)
585 590 {
586   - assign_eip_near(ctxt, ctxt->_eip + rel);
  591 + return assign_eip_far(ctxt, dst, ctxt->mode == X86EMUL_MODE_PROT64);
587 592 }
588 593  
  594 +static inline int jmp_rel(struct x86_emulate_ctxt *ctxt, int rel)
  595 +{
  596 + return assign_eip_near(ctxt, ctxt->_eip + rel);
  597 +}
  598 +
589 599 static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
590 600 {
591 601 u16 selector;
592 602  
... ... @@ -1998,13 +2008,15 @@
1998 2008 case 2: /* call near abs */ {
1999 2009 long int old_eip;
2000 2010 old_eip = ctxt->_eip;
2001   - ctxt->_eip = ctxt->src.val;
  2011 + rc = assign_eip_near(ctxt, ctxt->src.val);
  2012 + if (rc != X86EMUL_CONTINUE)
  2013 + break;
2002 2014 ctxt->src.val = old_eip;
2003 2015 rc = em_push(ctxt);
2004 2016 break;
2005 2017 }
2006 2018 case 4: /* jmp abs */
2007   - ctxt->_eip = ctxt->src.val;
  2019 + rc = assign_eip_near(ctxt, ctxt->src.val);
2008 2020 break;
2009 2021 case 5: /* jmp far */
2010 2022 rc = em_jmp_far(ctxt);
... ... @@ -2039,10 +2051,14 @@
2039 2051  
2040 2052 static int em_ret(struct x86_emulate_ctxt *ctxt)
2041 2053 {
2042   - ctxt->dst.type = OP_REG;
2043   - ctxt->dst.addr.reg = &ctxt->_eip;
2044   - ctxt->dst.bytes = ctxt->op_bytes;
2045   - return em_pop(ctxt);
  2054 + int rc;
  2055 + unsigned long eip;
  2056 +
  2057 + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
  2058 + if (rc != X86EMUL_CONTINUE)
  2059 + return rc;
  2060 +
  2061 + return assign_eip_near(ctxt, eip);
2046 2062 }
2047 2063  
2048 2064 static int em_ret_far(struct x86_emulate_ctxt *ctxt)
... ... @@ -2323,7 +2339,7 @@
2323 2339 {
2324 2340 const struct x86_emulate_ops *ops = ctxt->ops;
2325 2341 struct desc_struct cs, ss;
2326   - u64 msr_data;
  2342 + u64 msr_data, rcx, rdx;
2327 2343 int usermode;
2328 2344 u16 cs_sel = 0, ss_sel = 0;
2329 2345  
... ... @@ -2339,6 +2355,9 @@
2339 2355 else
2340 2356 usermode = X86EMUL_MODE_PROT32;
2341 2357  
  2358 + rcx = reg_read(ctxt, VCPU_REGS_RCX);
  2359 + rdx = reg_read(ctxt, VCPU_REGS_RDX);
  2360 +
2342 2361 cs.dpl = 3;
2343 2362 ss.dpl = 3;
2344 2363 ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
... ... @@ -2356,6 +2375,9 @@
2356 2375 ss_sel = cs_sel + 8;
2357 2376 cs.d = 0;
2358 2377 cs.l = 1;
  2378 + if (is_noncanonical_address(rcx) ||
  2379 + is_noncanonical_address(rdx))
  2380 + return emulate_gp(ctxt, 0);
2359 2381 break;
2360 2382 }
2361 2383 cs_sel |= SELECTOR_RPL_MASK;
... ... @@ -2364,8 +2386,8 @@
2364 2386 ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
2365 2387 ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
2366 2388  
2367   - ctxt->_eip = reg_read(ctxt, VCPU_REGS_RDX);
2368   - *reg_write(ctxt, VCPU_REGS_RSP) = reg_read(ctxt, VCPU_REGS_RCX);
  2389 + ctxt->_eip = rdx;
  2390 + *reg_write(ctxt, VCPU_REGS_RSP) = rcx;
2369 2391  
2370 2392 return X86EMUL_CONTINUE;
2371 2393 }
2372 2394  
... ... @@ -2905,10 +2927,13 @@
2905 2927  
2906 2928 static int em_call(struct x86_emulate_ctxt *ctxt)
2907 2929 {
  2930 + int rc;
2908 2931 long rel = ctxt->src.val;
2909 2932  
2910 2933 ctxt->src.val = (unsigned long)ctxt->_eip;
2911   - jmp_rel(ctxt, rel);
  2934 + rc = jmp_rel(ctxt, rel);
  2935 + if (rc != X86EMUL_CONTINUE)
  2936 + return rc;
2912 2937 return em_push(ctxt);
2913 2938 }
2914 2939  
2915 2940  
2916 2941  
... ... @@ -2940,13 +2965,14 @@
2940 2965 static int em_ret_near_imm(struct x86_emulate_ctxt *ctxt)
2941 2966 {
2942 2967 int rc;
  2968 + unsigned long eip;
2943 2969  
2944   - ctxt->dst.type = OP_REG;
2945   - ctxt->dst.addr.reg = &ctxt->_eip;
2946   - ctxt->dst.bytes = ctxt->op_bytes;
2947   - rc = emulate_pop(ctxt, &ctxt->dst.val, ctxt->op_bytes);
  2970 + rc = emulate_pop(ctxt, &eip, ctxt->op_bytes);
2948 2971 if (rc != X86EMUL_CONTINUE)
2949 2972 return rc;
  2973 + rc = assign_eip_near(ctxt, eip);
  2974 + if (rc != X86EMUL_CONTINUE)
  2975 + return rc;
2950 2976 rsp_increment(ctxt, ctxt->src.val);
2951 2977 return X86EMUL_CONTINUE;
2952 2978 }
2953 2979  
2954 2980  
2955 2981  
2956 2982  
2957 2983  
... ... @@ -3271,20 +3297,24 @@
3271 3297  
3272 3298 static int em_loop(struct x86_emulate_ctxt *ctxt)
3273 3299 {
  3300 + int rc = X86EMUL_CONTINUE;
  3301 +
3274 3302 register_address_increment(ctxt, reg_rmw(ctxt, VCPU_REGS_RCX), -1);
3275 3303 if ((address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) != 0) &&
3276 3304 (ctxt->b == 0xe2 || test_cc(ctxt->b ^ 0x5, ctxt->eflags)))
3277   - jmp_rel(ctxt, ctxt->src.val);
  3305 + rc = jmp_rel(ctxt, ctxt->src.val);
3278 3306  
3279   - return X86EMUL_CONTINUE;
  3307 + return rc;
3280 3308 }
3281 3309  
3282 3310 static int em_jcxz(struct x86_emulate_ctxt *ctxt)
3283 3311 {
  3312 + int rc = X86EMUL_CONTINUE;
  3313 +
3284 3314 if (address_mask(ctxt, reg_read(ctxt, VCPU_REGS_RCX)) == 0)
3285   - jmp_rel(ctxt, ctxt->src.val);
  3315 + rc = jmp_rel(ctxt, ctxt->src.val);
3286 3316  
3287   - return X86EMUL_CONTINUE;
  3317 + return rc;
3288 3318 }
3289 3319  
3290 3320 static int em_in(struct x86_emulate_ctxt *ctxt)
... ... @@ -4743,7 +4773,7 @@
4743 4773 break;
4744 4774 case 0x70 ... 0x7f: /* jcc (short) */
4745 4775 if (test_cc(ctxt->b, ctxt->eflags))
4746   - jmp_rel(ctxt, ctxt->src.val);
  4776 + rc = jmp_rel(ctxt, ctxt->src.val);
4747 4777 break;
4748 4778 case 0x8d: /* lea r16/r32, m */
4749 4779 ctxt->dst.val = ctxt->src.addr.mem.ea;
... ... @@ -4773,7 +4803,7 @@
4773 4803 break;
4774 4804 case 0xe9: /* jmp rel */
4775 4805 case 0xeb: /* jmp rel short */
4776   - jmp_rel(ctxt, ctxt->src.val);
  4806 + rc = jmp_rel(ctxt, ctxt->src.val);
4777 4807 ctxt->dst.type = OP_NONE; /* Disable writeback. */
4778 4808 break;
4779 4809 case 0xf4: /* hlt */
... ... @@ -4898,7 +4928,7 @@
4898 4928 break;
4899 4929 case 0x80 ... 0x8f: /* jnz rel, etc*/
4900 4930 if (test_cc(ctxt->b, ctxt->eflags))
4901   - jmp_rel(ctxt, ctxt->src.val);
  4931 + rc = jmp_rel(ctxt, ctxt->src.val);
4902 4932 break;
4903 4933 case 0x90 ... 0x9f: /* setcc r/m8 */
4904 4934 ctxt->dst.val = test_cc(ctxt->b, ctxt->eflags);