--- a/debian/patches/series
+++ b/debian/patches/series
@@ -156,3 +156,4 @@ ucs/0080-CVE-2013-2212.4-VMX-fix-cr0.cd-
 xsa172-4.3.patch
 xsa97-hap-4.1-prereq2.patch
 xsa97-hap-4.1-prereq3.patch
+xsa173-4.1.patch
--- /dev/null
+++ b/debian/patches/xsa173-4.1.patch
@@ -0,0 +1,201 @@
+commit 95dd1b6e87b61222fc856724a5d828c9bdc30c80
+Author: Tim Deegan <tim@xen.org>
+Date:   Wed Mar 16 17:07:18 2016 +0000
+
+    x86: limit GFNs to 32 bits for shadowed superpages.
+
+    Superpage shadows store the shadowed GFN in the backpointer field,
+    which for non-BIGMEM builds is 32 bits wide.  Shadowing a superpage
+    mapping of a guest-physical address above 2^44 would lead to the GFN
+    being truncated there, and a crash when we come to remove the shadow
+    from the hash table.
+
+    Track the valid width of a GFN for each guest, including reporting it
+    through CPUID, and enforce it in the shadow pagetables.  Set the
+    maximum width to 32 for guests where this truncation could occur.
+
+    This is XSA-173.
+
+    Signed-off-by: Tim Deegan <tim@xen.org>
+    Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+Reported-by: Ling Liu <liuling-it@360.cn>
+--- a/xen/arch/x86/cpu/common.c
++++ b/xen/arch/x86/cpu/common.c
+@@ -44,6 +44,7 @@ integer_param("cpuid_mask_ext_edx", opt_
+ struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+ 
+ unsigned int paddr_bits __read_mostly = 36;
++unsigned int hap_paddr_bits __read_mostly = 36;
+ 
+ /*
+  * Default host IA32_CR_PAT value to cover all memory types.
+@@ -279,7 +280,7 @@ static void __init early_cpu_detect(void
+ 
+ void __cpuinit generic_identify(struct cpuinfo_x86 * c)
+ {
+-	u32 tfms, xlvl;
++	u32 tfms, xlvl, eax;
+ 
+ 	if (have_cpuid_p()) {
+ 		/* Get vendor name */
+@@ -321,8 +322,11 @@ void __cpuinit generic_identify(struct c
+ 		}
+ 		if ( xlvl >= 0x80000004 )
+ 			get_model_name(c); /* Default name */
+-		if ( xlvl >= 0x80000008 )
+-			paddr_bits = cpuid_eax(0x80000008) & 0xff;
++		if ( xlvl >= 0x80000008 ) {
++			eax = cpuid_eax(0x80000008);
++			paddr_bits = eax & 0xff;
++			hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits;
++		}
+ 	}
+ 
+ 	/* Intel-defined flags: level 0x00000007 */
+--- a/xen/arch/x86/mm/guest_walk.c
++++ b/xen/arch/x86/mm/guest_walk.c
+@@ -327,5 +327,11 @@ guest_walk_tables(struct vcpu *v, struct
+ #endif
+     if ( l1p ) unmap_domain_page(l1p);
+ 
++    /* If this guest has a restricted physical address space then the
++     * target GFN must fit within it. */
++    if ( !(rc & _PAGE_PRESENT)
++         && gfn_x(guest_l1e_get_gfn(gw->l1e)) >> d->arch.paging.gfn_bits )
++        rc |= _PAGE_INVALID_BITS;
++
+     return rc;
+ }
+--- a/xen/arch/x86/mm/hap/hap.c
++++ b/xen/arch/x86/mm/hap/hap.c
+@@ -536,6 +536,7 @@ static void hap_destroy_monitor_table(st
+ void hap_domain_init(struct domain *d)
+ {
+     INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
++    d->arch.paging.gfn_bits = hap_paddr_bits - PAGE_SHIFT;
+ }
+ 
+ /* return 0 for success, -errno for failure */
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -48,6 +48,16 @@ void shadow_domain_init(struct domain *d
+     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
+     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
+ 
++    d->arch.paging.gfn_bits = paddr_bits - PAGE_SHIFT;
++#ifndef CONFIG_BIGMEM
++    /*
++     * Shadowed superpages store GFNs in 32-bit page_info fields.
++     * Note that we cannot use guest_supports_superpages() here.
++     */
++    if ( is_hvm_domain(d) || opt_allow_superpage )
++        d->arch.paging.gfn_bits = 32;
++#endif
++
+     /* Use shadow pagetables for log-dirty support */
+     paging_log_dirty_init(d, shadow_enable_log_dirty,
+                           shadow_disable_log_dirty, shadow_clean_dirty_bitmap);
+--- a/xen/arch/x86/mm/shadow/multi.c
++++ b/xen/arch/x86/mm/shadow/multi.c
+@@ -525,7 +525,8 @@ _sh_propagate(struct vcpu *v,
+     ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
+ 
+     /* Check there's something for the shadows to map to */
+-    if ( !p2m_is_valid(p2mt) && !p2m_is_grant(p2mt) )
++    if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt))
++         || gfn_x(target_gfn) >> d->arch.paging.gfn_bits )
+     {
+         *sp = shadow_l1e_empty();
+         goto done;
+--- a/xen/include/asm-x86/domain.h
++++ b/xen/include/asm-x86/domain.h
+@@ -193,6 +193,9 @@ struct paging_domain {
+     /* log dirty support */
+     struct log_dirty_domain log_dirty;
+ 
++    /* Number of valid bits in a gfn. */
++    unsigned int gfn_bits;
++
+     /* preemption handling */
+     struct {
+         const struct domain *dom;
+--- a/xen/include/asm-x86/guest_pt.h
++++ b/xen/include/asm-x86/guest_pt.h
+@@ -204,15 +204,17 @@ guest_supports_nx(struct vcpu *v)
+ }
+ 
+ 
+-/* Some bits are invalid in any pagetable entry. */
+-#if GUEST_PAGING_LEVELS == 2
+-#define _PAGE_INVALID_BITS (0)
+-#elif GUEST_PAGING_LEVELS == 3
++/*
++ * Some bits are invalid in any pagetable entry.
++ * Normal flags values get represented in 24-bit values (see
++ * get_pte_flags() and put_pte_flags()), so set bit 24 in
++ * addition to be able to flag out of range frame numbers.
++ */
++#if GUEST_PAGING_LEVELS == 3
+ #define _PAGE_INVALID_BITS \
+-    get_pte_flags(((1ull<<63) - 1) & ~((1ull<
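
For reference, a minimal standalone sketch of the truncation this patch guards against and of the gfn_bits range check it introduces. This is illustrative only, not part of the patch or of Xen: the PAGE_SHIFT/GFN_BITS constants and the whole demo program are assumptions chosen to match the commit message (gfn_bits = 32, so guest-physical addresses at or above 2^(32+12) = 2^44 are out of range).

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12   /* 4 KiB pages, as on x86 */
#define GFN_BITS   32   /* d->arch.paging.gfn_bits for affected guests */

int main(void)
{
    /* A guest-physical address above 2^44 = 2^(GFN_BITS + PAGE_SHIFT). */
    uint64_t gpa = 1ULL << 45;
    uint64_t gfn = gpa >> PAGE_SHIFT;       /* 2^33: needs 34 bits */

    /* The bug: storing such a GFN in a 32-bit backpointer field silently
     * drops the high bits, so the shadow can no longer be found (and
     * removed) in the hash table later. */
    uint32_t backpointer = (uint32_t)gfn;   /* truncates 2^33 to 0 */
    printf("gfn %#llx stored as %#x\n",
           (unsigned long long)gfn, backpointer);

    /* The fix: any GFN with bits set at or above gfn_bits is rejected
     * up front (shadow_l1e_empty() / _PAGE_INVALID_BITS), so no shadow
     * with a truncated backpointer is ever created. */
    if (gfn >> GFN_BITS)
        printf("GFN out of range: refuse to map it\n");
    return 0;
}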