sm-commit - [SM-Commit] GIT changes to master grimoire by Vlad Glagolev (ef51862533d5b2b8aa63e2f27d35aaca20630bac)

  • From: Vlad Glagolev <scm AT sourcemage.org>
  • To: sm-commit AT lists.ibiblio.org
  • Subject: [SM-Commit] GIT changes to master grimoire by Vlad Glagolev (ef51862533d5b2b8aa63e2f27d35aaca20630bac)
  • Date: Thu, 6 Dec 2012 15:40:20 -0600

GIT changes to master grimoire by Vlad Glagolev <stealth AT sourcemage.org>:

utils/xen/HISTORY | 6 +
utils/xen/PRE_BUILD | 6 +
utils/xen/xsa-26.patch | 107 +++++++++++++++++++++++++++++++
utils/xen/xsa-27.patch | 168 +++++++++++++++++++++++++++++++++++++++++++++++++
utils/xen/xsa-28.patch | 36 ++++++++++
utils/xen/xsa-29.patch | 49 ++++++++++++++
utils/xen/xsa-30.patch | 57 ++++++++++++++++
utils/xen/xsa-31.patch | 50 ++++++++++++++
8 files changed, 479 insertions(+)

New commits:
commit ef51862533d5b2b8aa63e2f27d35aaca20630bac
Author: Vlad Glagolev <stealth AT sourcemage.org>
Commit: Vlad Glagolev <stealth AT sourcemage.org>

xen: security update

diff --git a/utils/xen/HISTORY b/utils/xen/HISTORY
index 72ba730..e11e93d 100644
--- a/utils/xen/HISTORY
+++ b/utils/xen/HISTORY
@@ -1,3 +1,9 @@
+2012-12-07 Vlad Glagolev <stealth AT sourcemage.org>
+ * DETAILS: SECURITY_PATCH++
+ * PRE_BUILD: apply patches
+ * xsa-{26-31}.patch: added, to fix CVE-2012-5510, CVE-2012-5511,
+ CVE-2012-5512, CVE-2012-5513, CVE-2012-5514, CVE-2012-5515
+
2012-11-17 Vlad Glagolev <stealth AT sourcemage.org>
* DETAILS: SECURITY_PATCH++
* PRE_BUILD: apply patches
diff --git a/utils/xen/PRE_BUILD b/utils/xen/PRE_BUILD
index ec73184..25ecad0 100755
--- a/utils/xen/PRE_BUILD
+++ b/utils/xen/PRE_BUILD
@@ -8,6 +8,12 @@ patch -p1 < "$SPELL_DIRECTORY/xsa-22.patch" &&
patch -p1 < "$SPELL_DIRECTORY/xsa-23.patch" &&
patch -p1 < "$SPELL_DIRECTORY/xsa-24.patch" &&
patch -p1 < "$SPELL_DIRECTORY/xsa-25.patch" &&
+patch -p1 < "$SPELL_DIRECTORY/xsa-26.patch" &&
+patch -p1 < "$SPELL_DIRECTORY/xsa-27.patch" &&
+patch -p1 < "$SPELL_DIRECTORY/xsa-28.patch" &&
+patch -p1 < "$SPELL_DIRECTORY/xsa-29.patch" &&
+patch -p1 < "$SPELL_DIRECTORY/xsa-30.patch" &&
+patch -p1 < "$SPELL_DIRECTORY/xsa-31.patch" &&

if [[ $XEN_UPINIT != y ]]; then
sedit "s:all install-initd:all:" tools/hotplug/Linux/Makefile
diff --git a/utils/xen/xsa-26.patch b/utils/xen/xsa-26.patch
new file mode 100644
index 0000000..e8b8e7d
--- /dev/null
+++ b/utils/xen/xsa-26.patch
@@ -0,0 +1,107 @@
+gnttab: fix releasing of memory upon switches between versions
+
+gnttab_unpopulate_status_frames() incompletely freed the pages
+previously used as status frame in that they did not get removed from
+the domain's xenpage_list, thus causing subsequent list corruption
+when those pages did get allocated again for the same or another purpose.
+
+Similarly, grant_table_create() and gnttab_grow_table() both improperly
+clean up in the event of an error - pages already shared with the guest
+can't be freed by just passing them to free_xenheap_page(). Fix this by
+sharing the pages only after all allocations succeeded.
+
+This is CVE-2012-5510 / XSA-26.
+
+Signed-off-by: Jan Beulich <jbeulich AT suse.com>
+Acked-by: Ian Campbell <ian.campbell AT citrix.com>
+
+diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
+index 6c0aa6f..a180aef 100644
+--- a/xen/common/grant_table.c
++++ b/xen/common/grant_table.c
+@@ -1126,12 +1126,13 @@ fault:
+ }
+
+ static int
+-gnttab_populate_status_frames(struct domain *d, struct grant_table *gt)
++gnttab_populate_status_frames(struct domain *d, struct grant_table *gt,
++ unsigned int req_nr_frames)
+ {
+ unsigned i;
+ unsigned req_status_frames;
+
+- req_status_frames = grant_to_status_frames(gt->nr_grant_frames);
++ req_status_frames = grant_to_status_frames(req_nr_frames);
+ for ( i = nr_status_frames(gt); i < req_status_frames; i++ )
+ {
+ if ( (gt->status[i] = alloc_xenheap_page()) == NULL )
+@@ -1162,7 +1163,12 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
+
+ for ( i = 0; i < nr_status_frames(gt); i++ )
+ {
+- page_set_owner(virt_to_page(gt->status[i]), dom_xen);
++ struct page_info *pg = virt_to_page(gt->status[i]);
++
++ BUG_ON(page_get_owner(pg) != d);
++ if ( test_and_clear_bit(_PGC_allocated, &pg->count_info) )
++ put_page(pg);
++ BUG_ON(pg->count_info & ~PGC_xen_heap);
+ free_xenheap_page(gt->status[i]);
+ gt->status[i] = NULL;
+ }
+@@ -1200,19 +1206,18 @@ gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
+ clear_page(gt->shared_raw[i]);
+ }
+
+- /* Share the new shared frames with the recipient domain */
+- for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
+- gnttab_create_shared_page(d, gt, i);
+-
+- gt->nr_grant_frames = req_nr_frames;
+-
+ /* Status pages - version 2 */
+ if (gt->gt_version > 1)
+ {
+- if ( gnttab_populate_status_frames(d, gt) )
++ if ( gnttab_populate_status_frames(d, gt, req_nr_frames) )
+ goto shared_alloc_failed;
+ }
+
++ /* Share the new shared frames with the recipient domain */
++ for ( i = nr_grant_frames(gt); i < req_nr_frames; i++ )
++ gnttab_create_shared_page(d, gt, i);
++ gt->nr_grant_frames = req_nr_frames;
++
+ return 1;
+
+ shared_alloc_failed:
+@@ -2134,7 +2139,7 @@ gnttab_set_version(XEN_GUEST_HANDLE(gnttab_set_version_t uop))
+
+ if ( op.version == 2 && gt->gt_version < 2 )
+ {
+- res = gnttab_populate_status_frames(d, gt);
++ res = gnttab_populate_status_frames(d, gt, nr_grant_frames(gt));
+ if ( res < 0)
+ goto out_unlock;
+ }
+@@ -2449,9 +2454,6 @@ grant_table_create(
+ clear_page(t->shared_raw[i]);
+ }
+
+- for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+- gnttab_create_shared_page(d, t, i);
+-
+ /* Status pages for grant table - for version 2 */
+ t->status = xmalloc_array(grant_status_t *,
+ grant_to_status_frames(max_nr_grant_frames));
+@@ -2459,6 +2461,10 @@ grant_table_create(
+ goto no_mem_4;
+ memset(t->status, 0,
+ grant_to_status_frames(max_nr_grant_frames) * sizeof(t->status[0]));
++
++ for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
++ gnttab_create_shared_page(d, t, i);
++
+ t->nr_status_frames = 0;
+
+ /* Okay, install the structure. */
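
The reordering in the hunks above, where frames are shared with the guest only after every allocation that can fail has succeeded, is a general hardening pattern: once memory is visible to an untrusted party it can no longer be reclaimed with a plain free on an error path. A minimal standalone C sketch of that pattern (not Xen code; the names and sizes are made up for illustration):

#include <stdlib.h>
#include <stdio.h>

#define NFRAMES 4

struct table {
    void *frame[NFRAMES];
    int shared;              /* set only once everything that can fail is done */
};

static int table_grow(struct table *t)
{
    int i;

    /* 1. perform every fallible allocation first */
    for (i = 0; i < NFRAMES; i++) {
        t->frame[i] = calloc(1, 4096);
        if (t->frame[i] == NULL) {
            while (i-- > 0) {    /* nothing was published yet, so cleanup is trivial */
                free(t->frame[i]);
                t->frame[i] = NULL;
            }
            return -1;
        }
    }

    /* 2. only now "publish" the frames to the other side */
    t->shared = 1;
    return 0;
}

int main(void)
{
    struct table t = { { NULL }, 0 };
    printf("grow: %d, shared: %d\n", table_grow(&t), t.shared);
    return 0;
}
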
diff --git a/utils/xen/xsa-27.patch b/utils/xen/xsa-27.patch
new file mode 100644
index 0000000..f0764cb
--- /dev/null
+++ b/utils/xen/xsa-27.patch
@@ -0,0 +1,168 @@
+hvm: Limit the size of large HVM op batches
+
+Doing large p2m updates for HVMOP_track_dirty_vram without preemption
+ties up the physical processor. Integrating preemption into the p2m
+updates is hard so simply limit to 1GB which is sufficient for a 15000
+* 15000 * 32bpp framebuffer.
+
+For HVMOP_modified_memory and HVMOP_set_mem_type preemptible add the
+necessary machinery to handle preemption.
+
+This is CVE-2012-5511 / XSA-27.
+
+Signed-off-by: Tim Deegan <tim AT xen.org>
+Signed-off-by: Ian Campbell <ian.campbell AT citrix.com>
+Acked-by: Ian Jackson <ian.jackson AT eu.citrix.com>
+
+x86/paging: Don't allocate user-controlled amounts of stack memory.
+
+This is XSA-27 / CVE-2012-5511.
+
+Signed-off-by: Tim Deegan <tim AT xen.org>
+Acked-by: Jan Beulich <jbeulich AT suse.com>
+v2: Provide definition of GB to fix x86-32 compile.
+
+Signed-off-by: Jan Beulich <JBeulich AT suse.com>
+Acked-by: Ian Jackson <ian.jackson AT eu.citrix.com>
+
+
+diff -r 5639047d6c9f xen/arch/x86/hvm/hvm.c
+--- a/xen/arch/x86/hvm/hvm.c Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/arch/x86/hvm/hvm.c Mon Nov 19 16:00:33 2012 +0000
+@@ -3471,6 +3471,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
+ if ( !is_hvm_domain(d) )
+ goto param_fail2;
+
++ if ( a.nr > GB(1) >> PAGE_SHIFT )
++ goto param_fail2;
++
+ rc = xsm_hvm_param(d, op);
+ if ( rc )
+ goto param_fail2;
+@@ -3498,7 +3501,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
+ struct xen_hvm_modified_memory a;
+ struct domain *d;
+ struct p2m_domain *p2m;
+- unsigned long pfn;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+@@ -3526,8 +3528,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
+ goto param_fail3;
+
+ p2m = p2m_get_hostp2m(d);
+- for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
++ while ( a.nr > 0 )
+ {
++ unsigned long pfn = a.first_pfn;
+ p2m_type_t t;
+ mfn_t mfn = gfn_to_mfn(p2m, pfn, &t);
+ if ( p2m_is_paging(t) )
+@@ -3548,6 +3551,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
+ /* don't take a long time and don't die either */
+ sh_remove_shadows(d->vcpu[0], mfn, 1, 0);
+ }
++
++ a.first_pfn++;
++ a.nr--;
++
++ /* Check for continuation if it's not the last interation */
++ if ( a.nr > 0 && hypercall_preempt_check() )
++ {
++ if ( copy_to_guest(arg, &a, 1) )
++ rc = -EFAULT;
++ else
++ rc = -EAGAIN;
++ break;
++ }
+ }
+
+ param_fail3:
+@@ -3595,7 +3611,6 @@ long do_hvm_op(unsigned long op, XEN_GUE
+ struct xen_hvm_set_mem_type a;
+ struct domain *d;
+ struct p2m_domain *p2m;
+- unsigned long pfn;
+
+ /* Interface types to internal p2m types */
+ p2m_type_t memtype[] = {
+@@ -3625,8 +3640,9 @@ long do_hvm_op(unsigned long op, XEN_GUE
+ goto param_fail4;
+
+ p2m = p2m_get_hostp2m(d);
+- for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
++ while ( a.nr > 0 )
+ {
++ unsigned long pfn = a.first_pfn;
+ p2m_type_t t;
+ p2m_type_t nt;
+ mfn_t mfn;
+@@ -3662,6 +3678,19 @@ long do_hvm_op(unsigned long op, XEN_GUE
+ goto param_fail4;
+ }
+ }
++
++ a.first_pfn++;
++ a.nr--;
++
++ /* Check for continuation if it's not the last interation */
++ if ( a.nr > 0 && hypercall_preempt_check() )
++ {
++ if ( copy_to_guest(arg, &a, 1) )
++ rc = -EFAULT;
++ else
++ rc = -EAGAIN;
++ goto param_fail4;
++ }
+ }
+
+ rc = 0;
+diff -r 5639047d6c9f xen/arch/x86/mm/paging.c
+--- a/xen/arch/x86/mm/paging.c Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/arch/x86/mm/paging.c Mon Nov 19 16:00:33 2012 +0000
+@@ -529,13 +529,18 @@ int paging_log_dirty_range(struct domain
+
+ if ( !d->arch.paging.log_dirty.fault_count &&
+ !d->arch.paging.log_dirty.dirty_count ) {
+- int size = (nr + BITS_PER_LONG - 1) / BITS_PER_LONG;
+- unsigned long zeroes[size];
+- memset(zeroes, 0x00, size * BYTES_PER_LONG);
++ static uint8_t zeroes[PAGE_SIZE];
++ int off, size;
++
++ size = ((nr + BITS_PER_LONG - 1) / BITS_PER_LONG) * sizeof (long);
+ rv = 0;
+- if ( copy_to_guest_offset(dirty_bitmap, 0, (uint8_t *) zeroes,
+- size * BYTES_PER_LONG) != 0 )
+- rv = -EFAULT;
++ for ( off = 0; !rv && off < size; off += sizeof zeroes )
++ {
++ int todo = min(size - off, (int) PAGE_SIZE);
++ if ( copy_to_guest_offset(dirty_bitmap, off, zeroes, todo) )
++ rv = -EFAULT;
++ off += todo;
++ }
+ goto out;
+ }
+ d->arch.paging.log_dirty.fault_count = 0;
+diff -r 5639047d6c9f xen/include/asm-x86/config.h
+--- a/xen/include/asm-x86/config.h Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/include/asm-x86/config.h Mon Nov 19 16:00:33 2012 +0000
+@@ -108,6 +108,9 @@ extern unsigned int trampoline_xen_phys_
+ extern unsigned char trampoline_cpu_started;
+ extern char wakeup_start[];
+ extern unsigned int video_mode, video_flags;
++
++#define GB(_gb) (_gb ## UL << 30)
++
+ #endif
+
+ #define asmlinkage
+@@ -123,7 +126,6 @@ extern unsigned int video_mode, video_fl
+ #define PML4_ADDR(_slot) \
+ ((((_slot ## UL) >> 8) * 0xffff000000000000UL) | \
+ (_slot ## UL << PML4_ENTRY_BITS))
+-#define GB(_gb) (_gb ## UL << 30)
+ #else
+ #define PML4_ENTRY_BYTES (1 << PML4_ENTRY_BITS)
+ #define PML4_ADDR(_slot) \
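
The core of the XSA-27 change above is turning an unbounded per-pfn loop into a preemptible one: handle a few entries, and when preemption is pending write the remaining (first_pfn, nr) state back to the caller so the hypercall can be reissued. A minimal standalone C sketch of that continuation pattern (not Xen code; the preemption check and return codes are simulated):

#include <stdio.h>

struct op { unsigned long first; unsigned long nr; };

static int preempt_pending(unsigned long done)
{
    return done && done % 3 == 0;    /* pretend preemption fires every 3 items */
}

static int process_batch(struct op *a)
{
    unsigned long done = 0;

    while (a->nr > 0) {
        printf("handling pfn %lu\n", a->first);
        a->first++;
        a->nr--;
        done++;
        if (a->nr > 0 && preempt_pending(done))
            return -1;               /* stand-in for -EAGAIN: caller retries */
    }
    return 0;
}

int main(void)
{
    struct op a = { 100, 8 };

    while (process_batch(&a) != 0)
        printf("continuation: %lu entries left\n", a.nr);
    return 0;
}
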
diff --git a/utils/xen/xsa-28.patch b/utils/xen/xsa-28.patch
new file mode 100644
index 0000000..fe4638e
--- /dev/null
+++ b/utils/xen/xsa-28.patch
@@ -0,0 +1,36 @@
+x86/HVM: range check xen_hvm_set_mem_access.hvmmem_access before use
+
+Otherwise an out of bounds array access can happen if changing the
+default access is being requested, which - if it doesn't crash Xen -
+would subsequently allow reading arbitrary memory through
+HVMOP_get_mem_access (again, unless that operation crashes Xen).
+
+This is XSA-28 / CVE-2012-5512.
+
+Signed-off-by: Jan Beulich <jbeulich AT suse.com>
+Acked-by: Tim Deegan <tim AT xen.org>
+Acked-by: Ian Campbell <ian.campbell AT citrix.com>
+
+diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
+index 66cf805..08b6418 100644
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -3699,7 +3699,7 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
+ return rc;
+
+ rc = -EINVAL;
+- if ( !is_hvm_domain(d) )
++ if ( !is_hvm_domain(d) || a.hvmmem_access >= ARRAY_SIZE(memaccess) )
+ goto param_fail5;
+
+ p2m = p2m_get_hostp2m(d);
+@@ -3719,9 +3719,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
+ ((a.first_pfn + a.nr - 1) > domain_get_maximum_gpfn(d)) )
+ goto param_fail5;
+
+- if ( a.hvmmem_access >= ARRAY_SIZE(memaccess) )
+- goto param_fail5;
+-
+ for ( pfn = a.first_pfn; pfn < a.first_pfn + a.nr; pfn++ )
+ {
+ p2m_type_t t;
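
The XSA-28 change above is an ordering fix: the guest-supplied index must be range-checked before it is ever used to index the memaccess lookup table, not a few lines later. A minimal standalone C sketch of the check (not Xen code; the table contents and error value are placeholders):

#include <stdio.h>

enum { ACCESS_N = 4 };
static const int memaccess[ACCESS_N] = { 0, 1, 2, 3 };

static int lookup(unsigned int idx)
{
    if (idx >= ACCESS_N)     /* validate before the array access, never after */
        return -22;          /* -EINVAL */
    return memaccess[idx];
}

int main(void)
{
    printf("%d %d\n", lookup(2), lookup(1000));
    return 0;
}
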
diff --git a/utils/xen/xsa-29.patch b/utils/xen/xsa-29.patch
new file mode 100644
index 0000000..f8f6e38
--- /dev/null
+++ b/utils/xen/xsa-29.patch
@@ -0,0 +1,49 @@
+xen: add missing guest address range checks to XENMEM_exchange handlers
+
+Ever since its existence (3.0.3 iirc) the handler for this has been
+using non address range checking guest memory accessors (i.e.
+the ones prefixed with two underscores) without first range
+checking the accessed space (via guest_handle_okay()), allowing
+a guest to access and overwrite hypervisor memory.
+
+This is XSA-29 / CVE-2012-5513.
+
+Signed-off-by: Jan Beulich <jbeulich AT suse.com>
+Acked-by: Ian Campbell <ian.campbell AT citrix.com>
+Acked-by: Ian Jackson <ian.jackson AT eu.citrix.com>
+
+diff --git a/xen/common/compat/memory.c b/xen/common/compat/memory.c
+index 2402984..1d877fc 100644
+--- a/xen/common/compat/memory.c
++++ b/xen/common/compat/memory.c
+@@ -114,6 +114,12 @@ int compat_memory_op(unsigned int cmd, XEN_GUEST_HANDLE(void) compat)
+ (cmp.xchg.out.nr_extents << cmp.xchg.out.extent_order)) )
+ return -EINVAL;
+
++ if ( !compat_handle_okay(cmp.xchg.in.extent_start,
++ cmp.xchg.in.nr_extents) ||
++ !compat_handle_okay(cmp.xchg.out.extent_start,
++ cmp.xchg.out.nr_extents) )
++ return -EFAULT;
++
+ start_extent = cmp.xchg.nr_exchanged;
+ end_extent = (COMPAT_ARG_XLAT_SIZE - sizeof(*nat.xchg)) /
+ (((1U << ABS(order_delta)) + 1) *
+diff --git a/xen/common/memory.c b/xen/common/memory.c
+index 4e7c234..59379d3 100644
+--- a/xen/common/memory.c
++++ b/xen/common/memory.c
+@@ -289,6 +289,13 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
+ goto fail_early;
+ }
+
++ if ( !guest_handle_okay(exch.in.extent_start, exch.in.nr_extents) ||
++ !guest_handle_okay(exch.out.extent_start, exch.out.nr_extents) )
++ {
++ rc = -EFAULT;
++ goto fail_early;
++ }
++
+ /* Only privileged guests can allocate multi-page contiguous extents. */
+ if ( !multipage_allocation_permitted(current->domain,
+ exch.in.extent_order) ||
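
The XSA-29 fix above boils down to validating a caller-supplied (start, count) range against the region the hypervisor may legally touch before any of the unchecked (double-underscore) accessors run. A minimal standalone C sketch of such a range check, including the overflow guard (not Xen code; the address arithmetic is deliberately simplified):

#include <stdio.h>
#include <stdint.h>

/* Is [start, start + nr*sz) fully inside [base, base + limit)? */
static int handle_okay(uintptr_t base, size_t limit,
                       uintptr_t start, size_t nr, size_t sz)
{
    if (nr > limit / sz)                       /* avoid nr * sz overflow */
        return 0;
    if (start < base || start - base > limit - nr * sz)
        return 0;
    return 1;
}

int main(void)
{
    printf("%d %d\n",
           handle_okay(0x1000, 0x1000, 0x1800, 16, 8),   /* inside: ok */
           handle_okay(0x1000, 0x1000, 0x1ff0, 16, 8));  /* runs past the end: rejected */
    return 0;
}
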
diff --git a/utils/xen/xsa-30.patch b/utils/xen/xsa-30.patch
new file mode 100644
index 0000000..817879a
--- /dev/null
+++ b/utils/xen/xsa-30.patch
@@ -0,0 +1,57 @@
+xen: fix error handling of guest_physmap_mark_populate_on_demand()
+
+The only user of the "out" label bypasses a necessary unlock, thus
+enabling the caller to lock up Xen.
+
+Also, the function was never meant to be called by a guest for itself,
+so rather than inspecting the code paths in depth for potential other
+problems this might cause, and adjusting e.g. the non-guest printk()
+in the above error path, just disallow the guest access to it.
+
+Finally, the printk() (considering its potential of spamming the log,
+the more that it's not using XENLOG_GUEST), is being converted to
+P2M_DEBUG(), as debugging is what it apparently was added for in the
+first place.
+
+This is XSA-30 / CVE-2012-5514.
+
+Signed-off-by: Jan Beulich <jbeulich AT suse.com>
+Acked-by: Ian Campbell <ian.campbell AT citrix.com>
+Acked-by: George Dunlap <george.dunlap AT eu.citrix.com>
+Acked-by: Ian Jackson <ian.jackson AT eu.citrix.com>
+
+diff -r 5639047d6c9f xen/arch/x86/mm/p2m.c
+--- a/xen/arch/x86/mm/p2m.c Mon Nov 19 09:43:48 2012 +0100
++++ b/xen/arch/x86/mm/p2m.c Thu Nov 22 17:07:37 2012 +0000
+@@ -2412,6 +2412,9 @@ guest_physmap_mark_populate_on_demand(st
+ mfn_t omfn;
+ int rc = 0;
+
++ if ( !IS_PRIV_FOR(current->domain, d) )
++ return -EPERM;
++
+ if ( !paging_mode_translate(d) )
+ return -EINVAL;
+
+@@ -2430,8 +2433,7 @@ guest_physmap_mark_populate_on_demand(st
+ omfn = gfn_to_mfn_query(p2m, gfn + i, &ot);
+ if ( p2m_is_ram(ot) )
+ {
+- printk("%s: gfn_to_mfn returned type %d!\n",
+- __func__, ot);
++ P2M_DEBUG("gfn_to_mfn returned type %d!\n", ot);
+ rc = -EBUSY;
+ goto out;
+ }
+@@ -2453,10 +2455,10 @@ guest_physmap_mark_populate_on_demand(st
+ BUG_ON(p2m->pod.entry_count < 0);
+ }
+
++out:
+ audit_p2m(p2m, 1);
+ p2m_unlock(p2m);
+
+-out:
+ return rc;
+ }
+
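
The XSA-30 fix above moves the out: label so that the early error return still passes through p2m_unlock(): every exit path taken after a lock is acquired has to release it. A minimal standalone C sketch of that control flow (not Xen code; the lock is simulated with a flag):

#include <stdio.h>

static int locked;           /* stand-in for p2m_lock()/p2m_unlock() */

static int mark_range(int bad_input)
{
    int rc = 0;

    locked = 1;              /* "take the lock" */
    if (bad_input) {
        rc = -16;            /* -EBUSY, mirroring the patch's error case */
        goto out;            /* the error path must still reach the unlock */
    }
    /* ... normal work done while holding the lock ... */
out:
    locked = 0;              /* the label sits before the unlock, as in the fix */
    return rc;
}

int main(void)
{
    int rc = mark_range(1);
    printf("rc=%d, still locked? %d\n", rc, locked);
    rc = mark_range(0);
    printf("rc=%d, still locked? %d\n", rc, locked);
    return 0;
}
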
diff --git a/utils/xen/xsa-31.patch b/utils/xen/xsa-31.patch
new file mode 100644
index 0000000..1f3d929
--- /dev/null
+++ b/utils/xen/xsa-31.patch
@@ -0,0 +1,50 @@
+memop: limit guest specified extent order
+
+Allowing unbounded order values here causes almost unbounded loops
+and/or partially incomplete requests, particularly in PoD code.
+
+The added range checks in populate_physmap(), decrease_reservation(),
+and the "in" one in memory_exchange() architecturally all could use
+PADDR_BITS - PAGE_SHIFT, and are being artificially constrained to
+MAX_ORDER.
+
+This is XSA-31 / CVE-2012-5515.
+
+Signed-off-by: Jan Beulich <jbeulich AT suse.com>
+Acked-by: Tim Deegan <tim AT xen.org>
+Acked-by: Ian Jackson <ian.jackson AT eu.citrix.com>
+
+diff --git a/xen/common/memory.c b/xen/common/memory.c
+index 4e7c234..9b9fb18 100644
+--- a/xen/common/memory.c
++++ b/xen/common/memory.c
+@@ -117,7 +117,8 @@ static void populate_physmap(struct memop_args *a)
+
+ if ( a->memflags & MEMF_populate_on_demand )
+ {
+- if ( guest_physmap_mark_populate_on_demand(d, gpfn,
++ if ( a->extent_order > MAX_ORDER ||
++ guest_physmap_mark_populate_on_demand(d, gpfn,
+ a->extent_order) < 0
)
+ goto out;
+ }
+@@ -216,7 +217,8 @@ static void decrease_reservation(struct memop_args *a)
+ xen_pfn_t gmfn;
+
+ if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
+- a->nr_extents-1) )
++ a->nr_extents-1) ||
++ a->extent_order > MAX_ORDER )
+ return;
+
+ for ( i = a->nr_done; i < a->nr_extents; i++ )
+@@ -278,6 +280,9 @@ static long memory_exchange(XEN_GUEST_HANDLE(xen_memory_exchange_t) arg)
+ if ( (exch.nr_exchanged > exch.in.nr_extents) ||
+ /* Input and output domain identifiers match? */
+ (exch.in.domid != exch.out.domid) ||
++ /* Extent orders are sensible? */
++ (exch.in.extent_order > MAX_ORDER) ||
++ (exch.out.extent_order > MAX_ORDER) ||
+ /* Sizes of input and output lists do not overflow a long? */
+ ((~0UL >> exch.in.extent_order) < exch.in.nr_extents) ||
+ ((~0UL >> exch.out.extent_order) < exch.out.nr_extents) ||
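
The XSA-31 checks above all follow one rule: a guest-controlled extent order feeds shifts and loop bounds, so it must be capped before use. A minimal standalone C sketch of the rejection check (not Xen code; the MAX_ORDER value here is a hypothetical cap analogous to Xen's):

#include <stdio.h>

#define MAX_ORDER 20   /* hypothetical cap on the power-of-two exponent */

static long extent_pages(unsigned int order)
{
    if (order > MAX_ORDER)
        return -22;                 /* -EINVAL: refuse rather than shift/loop on it */
    return 1L << order;
}

int main(void)
{
    printf("%ld %ld\n", extent_pages(9), extent_pages(4096));
    return 0;
}
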


